# hutnparser.py
  1. """
  2. Author: Bruno Barroca
  3. Date: October 2014
  4. Description: A top down parser
  5. Modifications by Daniel Riegelhaupt:
  6. *removed test input
  7. *changed pos to startpos because in my humble opinion it makes more sense to have a tupple (startpos, endpos) than (pos, endpos)
  8. *aded parameters to init: tab_size, line_position, hide_implicit
  9. - see init comments for more info on all otions
  10. - line_postion will change startpos and end Pos to instance of class Postion. (changed december 2014)
  11. *Added anonymous terminals: tokens do not have to be defined as tokens but can be typed directly in rules
  12. *changed interleave function to be deep, and start on START
  13. *changed position returned in tree to be relative to line numbers instead of the absolute one
  14. - Did the same for partialresults returned on syntax error change this is error results too
  15. - TODO check efficiency on the previous point checking the whole text for every position might be slow
  16. *Changed usage , instead of Parser(input, grammar).pars() it is now Parser(grammar).parse(input)
  17. - Added a self.reset() method for fields that need to be initializes again when parsing a new input
  18. *Changed findFailure and generateErrorReports:
  19. * i need the the rule/token name as well not only the error text
  20. * hidden elements (like for example comments and newline ) are not included in error reports if hide_implicit is set to true
  21. * same for the interleave rule
  22. """
  23. import re
  24. from copy import deepcopy
  25. from position import Position
  26. tail_cache = {}
  27. line_cache = {}
  28. class Tree(object):
  29. def __init__(self, head, tail, startpos, endpos, inputfile = None):
  30. self.head = head
  31. self.tail = tail
  32. self.startpos = startpos
  33. self.endpos = endpos
  34. self.inputfile = inputfile
        # IMPORTANT: replace_child() sets self.replaced on the new child, pointing to the child it replaced; get_text() follows it
  36. def prune(self):
  37. return (self.head, [i.prune() if isinstance(i, Tree) else i for i in self.tail])
  38. def is_rule(self):
  39. return self.head.islower()
  40. def is_token(self):
  41. return not self.is_rule()
  42. def get_tail(self):
  43. global tail_cache
  44. if self.is_rule():
  45. if self not in tail_cache:
  46. tail_cache[self] = [t for t in self.get_raw_tail()
  47. if not t.head.startswith("implicit_autogenerated_")]
  48. return tail_cache[self]
  49. else:
  50. return self.get_raw_tail()
  51. def get_raw_tail(self):
  52. return self.tail
  53. def get_text(self, with_implicit=False):
  54. parts = []
  55. if with_implicit:
  56. tail = Tree.get_raw_tail
  57. else:
  58. tail = Tree.get_tail
  59. def post_order(tree):
  60. for child in tail(tree):
  61. if hasattr(child, "replaced"):
  62. child = child.replaced
  63. if isinstance(child, Tree):
  64. post_order(child)
  65. else:
  66. parts.append(child)
  67. post_order(self)
  68. return ''.join(parts)
  69. def get_child(self, name):
  70. for child in self.get_tail():
  71. if child.head == name:
  72. return child
  73. return None
  74. def get_children(self, name):
  75. children = []
  76. for child in self.get_tail():
  77. if child.head == name:
  78. children.append(child)
  79. return children
  80. def replace_child(self, old_child, new_child):
  81. new_child.replaced = old_child
  82. i = self.get_raw_tail().index(old_child)
  83. self.get_raw_tail()[i] = new_child
  84. i = self.get_tail().index(old_child)
  85. self.get_tail()[i] = new_child
  86. def get_tail_without(self, names):
  87. if self.is_rule():
  88. return [t for t in self.get_tail() if not t.head in names]
  89. else:
  90. return self.get_raw_tail()
  91. def __str__(self):
  92. return "(%s, %s) [%s]" % (
  93. self.head, str((self.startpos, self.endpos)),
  94. ", ".join([str(i) for i in self.get_raw_tail()]))
  95. def get_reference_line(self):
  96. return "%s:%s:%s-%s" % (self.inputfile, self.startpos["line"], self.startpos["column"], self.endpos["column"])
  97. def fix_tracability(self, inputfile):
  98. if self.inputfile is None:
  99. self.inputfile = inputfile
  100. for f in self.tail:
  101. if isinstance(f, Tree):
  102. f.fix_tracability(self.inputfile)
  103. class Parser(object):
  104. class Constants(object):
  105. Token = 'token'
  106. Production = 'prod'
  107. Success = 'success'
  108. Failure = 'failure'
  109. class LR(object):
  110. def __init__(self, seed, rulename, head, nextlr):
  111. self.seed = seed
  112. self.rule = rulename
  113. self.head = head
  114. self.next = nextlr
  115. def copy(self):
  116. return Parser.LR(self.seed, self.rule, self.head, self.next)
  117. class Head(object):
  118. def __init__(self, rulename, involved, evaluation):
  119. self.rule = rulename
  120. self.involved = involved
  121. self.evaluation = evaluation
  122. def __init__(self, grammar, **options):
  123. """
  124. creates a Parser for the given grammar
  125. :param grammar: An instance of the Grammar class
  126. :param options: the following options are supported:
  127. tab_size: default 1. sets the character size of a tab character
  128. hide_implicit: default False. when true implicit tokens are hidden from the returned parse tree and error message.
  129. Note that this this option will not override rules or tokens where the hidden variable has already been set manually in the Grammar class
  130. line_position: default False. when true we use line, column Position object instead of absolute position integer in the parse tree for startpos and endpos
  131. """
        # changed by Daniel: members that need to be initialized each time parse() is called have been put in reset(),
        # which is called whenever the parse() method is called
        self.rules = deepcopy(grammar.rules)
        self.tokens = deepcopy(grammar.tokens)
        self.implicitList = []  # added by Daniel, set in setHideImplicit so that we can review the implicit list for error messages
        self.implictRuleName = ""
        # options added by Daniel
        self.tabsize = int(options.pop('tab_size', 1))  # the character width of a tab
        self.hideImplicit = bool(options.pop('hide_implicit', False))
        # whether to hide implicit tokens and rules from the returned parse tree
        # important note: this option will not override rules or tokens where the hidden variable has already been set manually
        self.linePosition = bool(options.pop('line_position', False))
        # if true, the positions in the returned parse tree consist of a line and a column instead of an index into the input string
        # preprocess must happen after the options (after hideImplicit has been set)
        self.preprocess()
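    # Usage sketch (illustrative, not part of this module): `grammar` below stands for a
    # Grammar instance with .rules and .tokens dictionaries following this parser's conventions.
    #
    #   parser = Parser(grammar, tab_size=4, hide_implicit=True, line_position=True)
    #   result = parser.parse(some_input_text)
    #   if result['status'] == Parser.Constants.Success:
    #       tree = result['tree']    # a Tree with head 'start'
    #   else:
    #       print result['text']     # e.g. "Syntax error at line ... and column ..."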
  147. def reset(self):
  148. self.input = ""
  149. self.memotable = {}
  150. self.failure = {}
  151. self.lrstack = None
  152. self.heads = {}
  153. self.countcard = {}
  154. def preprocess(self):
  155. #for elem in self.rules.keys(): #Changed by Daniel: we only check start because it's global
  156. elem = 'start'
  157. if elem in self.rules.keys():
  158. if ('interleave' in self.rules[elem]):
  159. ilist = self.rules[elem]['interleave']
  160. self.setHideImplicit(ilist, self.hideImplicit)
  161. self.interleave(self.rules[elem], ilist)
    def setHideImplicit(self, ilist, hide=False):
        if ilist:
            # ilist = ['?', '@rulename']
            rulename = ilist[1][1:]
            self.implictRuleName = rulename  # used later to hide the implicit rule from error reports
            if rulename in self.rules:
                self.rules[rulename]['hidden'] = hide
                body = self.rules[rulename]['body']
                # body = [*, [| ,,,,]]
                elems = body[1][1:]
                self.implicitList = elems
                for elem in elems:
                    l = None
                    error = ''
                    if elem[0] == '@':
                        l = self.rules
                        error = ' rule not found in grammar rules.'
                    elif elem[0] == '$':
                        l = self.tokens
                        error = ' token not found in grammar tokens.'
                    # else: in this case it is an anonymous token
                    if l:
                        name = elem[1:]
                        if name in l:
                            if 'hidden' not in l[name]:
                                # this method will not override anything the user has explicitly specified in the structure:
                                # if there is already a hidden value it is kept, even if it is not the same one.
                                # an example use case is whitespace vs comments:
                                # both can appear anywhere in the text and so are implicit in the grammar,
                                # but we don't want the spaces in the tree while we do want the comments
                                l[name]['hidden'] = hide
                            else:
                                raise Exception(name + error)
                    # else: anonymous tokens can't be ignored for the moment, unless we create an ignore list for them or something like that
            else:
                raise Exception(rulename + ' rule not found in grammar rules.')
  198. def interleave(self, elem, ilist):
        # quick and simple interleaving method; it will probably produce some double interleaving,
        # but this is as simple as I could make it without taking each and every case into account
  201. def quickInterLeave(lst, inter):
  202. newL = []
  203. newL.append(lst[0])
  204. isSeq = self.isSequence(lst[0])
  205. for item in lst[1:]:
  206. if (isinstance(item, list)):#a sublist
  207. newL.append(quickInterLeave(item,inter))
  208. else:
  209. if(item[0] == '@'): #rule
  210. rulename = item [1:]
  211. if rulename in self.rules:
  212. rule = self.rules[rulename]
  213. if not rule.has_key('visited') or rule['visited'] == False:
  214. self.interleave(rule, inter)
  215. else:
  216. raise Exception(rulename + ' rule not found in grammar rules.')
  217. """
  218. Else:
  219. pass
  220. in this case it is a token or anon token we dont need to do anything special,
  221. just add it to the list interleaved
  222. """
  223. if isSeq: # no need to complicate the data structure if the list is a sequence
  224. if not newL[-1] == inter:
  225. newL.append(inter)
  226. newL.append(item)
  227. newL.append(inter)
  228. else:
  229. newL.append(['.', inter,item ,inter])
  230. """
  231. This way in case the list is not a sequence this doesnt change the meaning of the list:
  232. example: t1, t2 are tokens, i is an optional whitespace being intereleaved
  233. [., t1, t2] -> [., i ,t1, i, t2]
  234. the meaning stays the same:
  235. t1 and t2 both have ot be found for the rule to apply regardless of the ws
  236. [|, t1, t2] -> [|, i ,t1, i, t2]
  237. the meaning changed: if i is encountered the or is satisfied:
  238. so instead we do -> [|, [., i ,t1, i,], [., i ,t2, i,]]
  239. note that while inter has been added to the data stricture 4 times it will only match
  240. for one option so it is not really duplicate.
  241. another way of writing this can be [., inter [|, t1, t2], inter ] but this is easier said than
  242. done especially for big (complex) data structures
  243. """
  244. return newL
        # the first thing we do is mark the item as visited; this avoids infinite loops due to recursion
  246. elem['visited'] = True
  247. if (not 'body' in elem):
  248. return
  249. ls = elem['body']
  250. newbody = quickInterLeave(ls,ilist)
  251. elem['body'] = newbody
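    # Illustrative example of the transformation above (hypothetical body lists, ilist = ['?', '@ws']):
    # a sequence body stays a sequence with the separator injected, while an alternative body
    # wraps each option in its own small sequence:
    #
    #   ['.', '$A', '$B']  ->  ['.', ['?', '@ws'], '$A', ['?', '@ws'], '$B', ['?', '@ws']]
    #   ['|', '$A', '$B']  ->  ['|', ['.', ['?', '@ws'], '$A', ['?', '@ws']],
    #                                ['.', ['?', '@ws'], '$B', ['?', '@ws']]]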
  252. def parse(self, text):
        self.reset()  # changed by Daniel: the text is received as a parameter instead of once at init, so first we reset the fields
  254. self.input = text
  255. results = self.applyrule('@start', 0)
  256. if len(results) > 1:
  257. # Handle ambiguity
  258. from prettyprint_visitor import PrettyPrintVisitor
  259. for p in results:
  260. print("===================================")
  261. print("VISIT RESULT")
  262. print("===================================")
  263. visitor = PrettyPrintVisitor([])
  264. visitor.visit(p["tree"])
  265. print(visitor.dump())
  266. result = self.generateErrorReport()
  267. elif (results == [] or results[0]['endpos'] < len(self.input)):
  268. result = self.generateErrorReport()
            for elem in result['partialresults']:  # added by Daniel: there was no post-processing on partial results, and I need it
  270. if elem['tree']: #with partial results the tree can be None
  271. elem['tree'] = IgnorePostProcessor(self.rules, self.tokens).visit(elem['tree'])
  272. if self.linePosition:
  273. # elem['tree'].startpos = 0
  274. # elem['tree'].endpos = 0
  275. elem['tree'] = Parser.PositionPostProcessor(self.convertToLineColumn).visit(elem['tree']) #Added by Daniel
  276. elif len(results) == 1:
  277. result = results[0]
  278. result.update({'status': Parser.Constants.Success})
  279. if result['tree'].head != 'start':
  280. result['tree'] = Tree('start', [result['tree']], result['tree'].startpos, result['tree'].endpos)
  281. result['tree'] = IgnorePostProcessor(self.rules, self.tokens).visit(result['tree'])
  282. if self.linePosition: #Added by Daniel
  283. result['tree'] = Parser.PositionPostProcessor(self.convertToLineColumn).visit(result['tree'])
  284. return result
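    # Shape of the dictionary returned by parse() (sketch; field values are examples only):
    #
    #   on success:  {'status': Parser.Constants.Success, 'tree': <Tree 'start'>,
    #                 'startpos': 0, 'endpos': <length consumed>}
    #   on failure:  {'status': Parser.Constants.Failure, 'line': ..., 'column': ...,
    #                 'text': 'Syntax error at line ... and column ...',
    #                 'partialresults': [...], 'grammarelements': [...]}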
  285. def generate_line_cache(self):
  286. global line_cache
  287. if self in line_cache:
  288. return
  289. line_cache[self] = []
  290. lc = line_cache[self]
  291. l = len(self.input)
  292. line = 1
  293. column = 0
  294. pos = 0
  295. while pos < l:
  296. if self.input[pos] == "\n":
  297. line += 1
  298. column = 0
  299. elif self.input[pos] == "\t":
  300. column += self.tabsize
  301. else:
  302. column += 1
  303. lc.append((line, column))
  304. pos += 1
    def convertToLineColumn(self, pos):
        global line_cache
        self.generate_line_cache()
        if pos < len(line_cache[self]):
            return {'line': line_cache[self][pos][0], 'column': line_cache[self][pos][1]}
        else:
            return {'line': line_cache[self][-1][0], 'column': line_cache[self][-1][1] + 1}
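    # Example (illustrative): with self.input = "ab\ncd" and tab_size = 1, the cache built by
    # generate_line_cache() is [(1, 1), (1, 2), (2, 0), (2, 1), (2, 2)], so
    #
    #   self.convertToLineColumn(0)   # -> {'line': 1, 'column': 1}
    #   self.convertToLineColumn(3)   # -> {'line': 2, 'column': 1}
    #   self.convertToLineColumn(9)   # past the end -> {'line': 2, 'column': 3}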
  312. def findlargerresultat(self, pos):
  313. endpos = pos
  314. result = None
  315. for key in self.memotable.keys():
  316. elem = self.memotable[key]
  317. if (elem == []):
  318. continue
  319. if (elem[0]['startpos'] == pos and endpos < elem[0]['endpos']):
  320. endpos = elem[0]['endpos']
  321. result = elem[0]
  322. return result
  323. def generateErrorReport(self):
  324. # consult the memotable and collect contiguities until endpos
  325. endpos = len(self.input) - 1
  326. pos = 0
  327. elems = []
  328. while pos <= endpos:
  329. elem = self.findlargerresultat(pos)
  330. if (not elem or (elem and elem['endpos'] == pos)):
  331. break
  332. pos = elem['endpos']
  333. elems.append(elem)
  334. if (pos <= endpos):
  335. elems.append({'tree': None, 'startpos': pos, 'endpos': endpos})
  336. elem = self.getFirstBiggestSpan(elems)
  337. if elem is None:
  338. return {'status': Parser.Constants.Failure, 'line': 0, 'column': 0, 'text': "Empty input file", 'partialresults': [], 'grammarelements': None}
  339. reasons = self.findFailure(elem['startpos'], elem['endpos'])
  340. if (reasons == []):
  341. pos -= 1
  342. else:
  343. pos = reasons[0]['startpos']
  344. read = self.input[pos:pos + 1]
  345. linecolumn = self.convertToLineColumn(pos)
  346. message = 'Syntax error at line ' + str(linecolumn['line']) + ' and column ' + str(linecolumn['column']) + '. '
  347. keys = []
  348. if (not reasons == []):
  349. first = True
  350. for reason in reasons:
  351. if (first):
  352. message += 'Expected ' + reason['text']
  353. first = False
  354. else:
  355. message += ' or ' + reason['text']
  356. keys.append(reason['key'])
  357. message += '. Instead read: ' + repr(read) + '.'
  358. else:
  359. message += 'Read: \'' + read + '\'.'
  360. return {'status': Parser.Constants.Failure, 'line': linecolumn['line'], 'column': linecolumn['column'],
  361. 'text': message, 'partialresults': elems, 'grammarelements': keys}
    def getFirstBiggestSpan(self, elems):
        biggestspan = 0
        result = None
        for elem in elems:
            span = elem['endpos'] - elem['startpos']
            if (biggestspan < span):
                result = elem
                biggestspan = span
        return result
    def findFailure(self, pos, endpos):
        posreasons = []
        endposreasons = []
        # changed by Daniel:
        # * I need the key as well for autocomplete, so instead of appending elem I return a new dictionary with elem and the key inside
        # * checks both conditions, for posreasons and endposreasons, in one for loop instead of two
        # * does not consider keys that are hidden
        for key in self.failure.keys():
            # keys are given starting either with $ for tokens or @ for rules.
            # however, with the given metagrammar tokens are all caps and rules are all lower case, so there can't be an overlap
            # and we can safely test both
            if self.hideImplicit and\
               (('$' + key in self.implicitList) or ('@' + key in self.implicitList) or (key == self.implictRuleName)):
                continue
            else:
                elem = self.failure[key]
                if (elem['startpos'] == pos and not elem['text'] == ''):
                    posreasons.append({'key': key, 'startpos': elem['startpos'], 'text': elem['text']})
                if (elem['startpos'] == endpos and not elem['text'] == ''):
                    endposreasons.append({'key': key, 'startpos': elem['startpos'], 'text': elem['text']})
        if (len(endposreasons) < len(posreasons)):
            return posreasons
        else:
            return endposreasons
  395. def setupLR(self, rule, elem):
  396. if (elem.head == None):
  397. elem.head = Parser.Head(rule, [], [])
  398. s = self.lrstack
  399. while s and not s.rule == elem.head.rule:
  400. s.head = elem.head
  401. if (not s.rule in elem.head.involved):
  402. elem.head.involved.append(s.rule)
  403. s = s.next
  404. def recall(self, rule, j):
  405. newresults = []
  406. if ((rule, j) in self.memotable):
  407. newresults = self.memotable[(rule, j)]
  408. h = None
  409. if (j in self.heads):
  410. h = self.heads[j]
  411. if (not h):
  412. return newresults
  413. if (newresults == [] and not rule in (h.involved + [h.rule])):
  414. return [] # [{'tree': [], 'startpos': j, 'endpos': j}]
  415. if (rule in h.evaluation):
  416. h.evaluation.remove(rule)
  417. newresults = self.eval(rule, j)
  418. self.memotable.update({(rule, j): newresults})
  419. return newresults
  420. def applyrule(self, rule, j):
  421. overallresults = []
  422. newresults = self.recall(rule, j)
  423. if (not newresults == []):
  424. memoresults = []
  425. for elem in newresults:
  426. if (isinstance(elem['tree'], Parser.LR)):
  427. self.setupLR(rule, elem['tree'])
  428. memoresults += elem['tree'].seed
  429. else:
  430. overallresults.append(elem)
  431. if (not memoresults == []):
  432. self.memotable.update({(rule, j): memoresults})
  433. return memoresults
  434. return overallresults
  435. else:
  436. #lr = Parser.LR([], rule, None, deepcopy(self.lrstack))
  437. lr = Parser.LR([], rule, None, None if not self.lrstack else self.lrstack.copy())
  438. self.lrstack = lr
  439. self.memotable.update({(rule, j): [{'tree': lr, 'startpos': j, 'endpos': j}]})
  440. newresults = self.eval(rule, j)
  441. self.lrstack = self.lrstack.next
  442. memoresults = []
  443. if ((rule, j) in self.memotable):
  444. memoresults = self.memotable[(rule, j)]
  445. for melem in memoresults:
  446. if (isinstance(melem['tree'], Parser.LR) and melem['tree'].head):
  447. melem['tree'].seed = newresults
  448. r = self.lr_answer(rule, j, melem)
  449. if (not r == []):
  450. overallresults += r
  451. if (overallresults != []): # prefer grown results
  452. return overallresults
  453. self.memotable.update({(rule, j): newresults})
  454. return newresults
  455. def lr_answer(self, rule, pos, melem):
  456. h = melem['tree'].head
  457. if (not h.rule == rule):
  458. return melem['tree'].seed
  459. else:
  460. melems = melem['tree'].seed
  461. result = []
  462. for melem_i in melems:
  463. if (not melem_i['tree'] == None):
  464. result.append(melem_i)
  465. if (result == []):
  466. return []
  467. else:
  468. newresult = []
  469. for melem_i in result:
  470. newresult.append(self.growLR(rule, pos, melem_i, h))
  471. return newresult
  472. def growLR(self, rule, pos, melem, head=None):
  473. self.heads.update({pos: head})
  474. while (True):
  475. overallresults = []
  476. head.evaluation = deepcopy(head.involved)
  477. newresults = self.eval(rule, pos)
  478. for elem in newresults:
  479. if (elem['endpos'] > melem['endpos']):
  480. melem = elem
  481. overallresults.append(elem)
  482. if (overallresults == []):
  483. self.heads.update({pos: None})
  484. return melem
  485. self.memotable.update({(rule, pos): overallresults})
  486. def eval(self, rulename, j):
  487. # Returns [{'tree':Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos':j, 'endpos':x}]
  488. # Raises Exception if there is no such token/rule
  489. if (rulename[0] == '@'):
  490. rulename = rulename[1:]
  491. if (not rulename in self.rules):
  492. raise Exception(rulename + ' rule not found in grammar rules.')
  493. rule = self.rules[rulename]
  494. elif (rulename[0] == '$'):
  495. rulename = rulename[1:]
  496. if (not rulename in self.tokens):
  497. raise Exception(rulename + ' token not found in grammar tokens.')
  498. rule = self.tokens[rulename]
  499. else:
            # raise Exception('Plain terminals not allowed inside grammar rules: ' + str(rulename))
            # instead we create an anonymous token rule.
            # we can use any fake type here, as long as it is not equal to the type of a production rule
            # or to that of a token
            rule = {'type': 'anonymous_token'}
  505. if (self.isType(rule, Parser.Constants.Production)):
  506. newresults = []
  507. results = self.eval_body(rulename, rule['body'], j)
  508. for r in results:
  509. if (r['tree']):
  510. head = r['tree'].head
  511. if(head == '*' or head == '+' or head == '?' or head == '|' or head == '.'):
  512. newr = {'tree': Tree(rulename, [r['tree']], r['startpos'], r['endpos']), 'startpos': r['startpos'],
  513. 'endpos': r['endpos']}
  514. r = newr
  515. newresults.append(r)
  516. elif (self.isType(rule, Parser.Constants.Token)):
  517. newresults = self.term(rulename, j)
  518. else: ##Changed by Daniel: if not a production rule or defined token we try an anonymous token:
  519. newresults = self.anonTerm(rulename, j)
  520. return newresults
  521. def eval_body(self, rulename, ls, j):
        # Delegates the task to sub-functions: alt, seq, opt, many, more, card
        # Returns the same list of results as eval
        # Raises Exception if the first element in the body is not in {'|', '.', '?', '*', '+', '#'}
  525. if (self.isAlternative(ls[0])):
  526. return self.alt(rulename, ls[1:], j)
  527. elif (self.isSequence(ls[0])):
  528. return self.seq(rulename, ls[1:], j)
  529. elif (self.isOptional(ls[0])):
  530. return self.opt(rulename, ls[1:], j)
  531. elif (self.isMany(ls[0])):
  532. return self.many(rulename, ls[1:], j)
  533. elif (self.isMore(ls[0])):
  534. return self.more(rulename, ls[1:], j)
  535. elif (self.isCard(ls[0])):
  536. return self.card(rulename, ls[0][1:], ls[1:], j)
  537. raise Exception('Unrecognized grammar expression: ' + str(ls[0]))
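    # Example of the body lists eval_body dispatches on (sketch; rule/token names are hypothetical):
    #
    #   ['.', '$NAME', '$COLON', '@value']   -> seq():  all elements in order
    #   ['|', '@string', '@number']          -> alt():  any one alternative
    #   ['?', '$COMMA']                      -> opt():  zero or one
    #   ['*', '@item']                       -> many(): zero or more
    #   ['+', '@item']                       -> more(): one or more
    #   ['#(-1)indent', '@line']             -> card(): cardinality-counted repetition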
  538. def isSequence(self, operator):
  539. return operator == '.'
  540. def isAlternative(self, operator):
  541. return operator == '|'
  542. def isMany(self, operator):
  543. return operator == '*'
  544. def isCard(self, operator):
  545. return operator.startswith('#')
  546. def isMore(self, operator):
  547. return operator == '+'
  548. def isOptional(self, operator):
  549. return operator == '?'
    def isType(self, rule, oftype):
        if (rule['type'] == oftype):
            return True
        return False
  553. def term(self, rulename, j):
  554. if (j >= len(self.input)):
  555. errortext = ''
  556. if (rulename in self.tokens and 'errortext' in self.tokens[rulename]):
  557. errortext = self.tokens[rulename]['errortext']
  558. self.failure.update({rulename: {'startpos': j, 'text': errortext}})
  559. return []
  560. rule = self.tokens[rulename]
  561. mobj = re.match(rule['reg'], self.input[j:])
        # changed by Daniel: instead of re.match(reg) did re.match(re.compile(reg).pattern).
        # this is to avoid problems with \: before this change, matching the character \ required [\\\\],
        # because writing only two backslashes would have to be r'[\\]', which can't be done directly in the grammar, so it had to be in string form.
        # this way reading [\\] is interpreted correctly instead of giving an error like it used to
  566. if (not mobj):
  567. # this is a failure! nice to register!
  568. self.failure.update({rulename: {'startpos': j, 'text': self.tokens[rulename]['errortext']}})
  569. return []
  570. return [{'tree': Tree(rulename, [mobj.group()], j, j + mobj.end()), 'startpos': j, 'endpos': j + mobj.end()}]
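    # Example (sketch, hypothetical token): given a token definition such as
    #
    #   self.tokens['NAME'] = {'type': 'token', 'reg': '[a-zA-Z_][a-zA-Z_0-9]*',
    #                          'errortext': 'a name'}
    #
    # term('NAME', 0) on the input "foo = 1" would return
    #
    #   [{'tree': Tree('NAME', ['foo'], 0, 3), 'startpos': 0, 'endpos': 3}]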
  571. def anonTerm(self, term, j):
  572. """
  573. #Changed by Daniel: added this whole method.
  574. Anonymous term to allow for direct terminals in rules
  575. (write 'Foo' directly instead of having to deine a FOO token)
  576. """
  577. qt = '\''
  578. name = qt + term + qt
  579. if (j >= len(self.input)):
  580. self.failure.update({ name : {'startpos': j, 'text': name}})
  581. return []
  582. mobj = re.match(term, self.input[j:])
  583. if (not mobj):
  584. # this is a failure! nice to register!
  585. self.failure.update({ name : {'startpos': j, 'text': name }})
  586. return []
  587. return [{'tree': Tree(name , [mobj.group()], j, j + mobj.end()), 'startpos': j, 'endpos': j + mobj.end()}]
  588. def many(self, rulename, ls, j):
  589. rule_i = ls[0]
  590. if (isinstance(rule_i, list)):
  591. results = self.eval_body('*', rule_i, j)
  592. else:
  593. results = self.applyrule(rule_i, j)
  594. if (results == []):
  595. return [{'tree': None, 'startpos': j, 'endpos': j}]
  596. seq = ['.'] + ls + [['*'] + ls]
  597. results = self.eval_body('*', seq, j)
  598. overall_results = []
  599. for r in results:
  600. if (r['tree']):
  601. if (len(r['tree'].tail) > 1):
  602. left = r['tree'].tail[0]
  603. right = r['tree'].tail[1].tail
  604. r['tree'].tail = [left] + right
  605. overall_results.append(r)
  606. return overall_results
  607. def more(self, rulename, ls, j):
  608. rule_i = ls[0]
  609. if (isinstance(rule_i, list)):
  610. results = self.eval_body('+', rule_i, j)
  611. else:
  612. results = self.applyrule(rule_i, j)
  613. if (results == []):
  614. return []
  615. seq = ['.'] + ls + [['*'] + ls]
  616. results = self.eval_body('+', seq, j)
  617. overall_results = []
  618. for r in results:
  619. if (r['tree']):
  620. if (len(r['tree'].tail) > 1):
  621. left = r['tree'].tail[0]
  622. right = r['tree'].tail[1].tail
  623. r['tree'].tail = [left] + right
  624. overall_results.append(r)
  625. return overall_results
  626. def opt(self, rulename, ls, j):
  627. if (j >= len(self.input)):
  628. errortext = ''
  629. if (rulename in self.rules and 'errortext' in self.rules[rulename]):
  630. errortext = self.rules[rulename]['errortext']
  631. else:
  632. for item in ls:
  633. if ((not isinstance(item[1:], list)) and item[1:] in self.rules):
  634. errortext = self.rules[item[1:]]['errortext']
  635. self.failure.update({rulename: {'startpos': j, 'text': errortext}})
  636. return [{'tree': None, 'startpos': j, 'endpos': j}]
  637. results = []
  638. rule_i = ls[0]
  639. if (isinstance(rule_i, list)):
  640. results = self.eval_body('?', rule_i, j)
  641. else:
  642. results = self.applyrule(rule_i, j)
  643. if (not results == []):
  644. return results
  645. # empty case
  646. return [{'tree': None, 'startpos': j, 'endpos': j}]
  647. def card(self, rulename, cardrule, ls, j):
  648. count = 0
  649. delta = 1
        # cardinality examples: a#, a#(-1), i.e. '#indent', '#(-1)indent'
  651. group = re.match('\((?P<delta>[-+]?\d+)\)(?P<rule>\S+)',cardrule)
  652. if(group):
  653. cardrule = group.group('rule')
  654. delta = int(group.group('delta'))
  655. if (not cardrule in self.countcard):
  656. count = delta
  657. self.countcard.update({cardrule: {j: count}})
  658. else:
  659. if not j in self.countcard[cardrule]: # # if we already know the count for j, then ignore..
  660. d = self.countcard[cardrule]
  661. lastcount = 0
  662. for i in range(0, j):
  663. if i in d:
  664. lastcount = d[i]
  665. count = lastcount + delta
  666. d.update({j: count})
  667. else:
  668. count = self.countcard[cardrule][j]
  669. results = []
  670. rule_i = '@' + cardrule
  671. if(count == 0):
  672. results = [{'tree': None, 'startpos': j, 'endpos': j}]
  673. else:
  674. for i in range(0, count):
  675. if (results == []):
  676. if (isinstance(rule_i, list)):
  677. newresults = self.eval_body(rulename, rule_i, j)
  678. else:
  679. newresults = self.applyrule(rule_i, j)
  680. if (newresults == []):
  681. del self.countcard[cardrule][j]
  682. return []
  683. newresults = self.merge(rulename, newresults, {'startpos': j, 'endpos': j})
  684. else:
  685. for elem_p in results:
  686. if (isinstance(rule_i, list)):
  687. newresults = self.eval_body(rulename, rule_i, elem_p['endpos'])
  688. else:
  689. newresults = self.applyrule(rule_i, elem_p['endpos'])
  690. if (newresults == []):
  691. del self.countcard[cardrule][j]
  692. return []
  693. newresults = self.merge(rulename, newresults, elem_p)
  694. results = newresults
  695. for rule_i in ls:
  696. for elem_p in results:
  697. if (isinstance(rule_i, list)):
  698. newresults = self.eval_body(rulename, rule_i, elem_p['endpos'])
  699. else:
  700. newresults = self.applyrule(rule_i, elem_p['endpos'])
  701. if (newresults == []):
  702. del self.countcard[cardrule][j]
  703. return []
  704. newresults = self.merge(rulename, newresults, elem_p)
  705. results = newresults
  706. del self.countcard[cardrule][j]
  707. return results
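    # How the cardinality string is decomposed (sketch; the rule name 'indent' is only an example):
    # cardrule is ls[0] with the leading '#' stripped off by eval_body, so
    #
    #   '#indent'      -> cardrule = 'indent', delta = +1 (default)
    #   '#(-1)indent'  -> cardrule = 'indent', delta = -1
    #   '#(+2)indent'  -> cardrule = 'indent', delta = +2
    #
    # the running count per start position is kept in self.countcard[cardrule].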
  708. def seq(self, rulename, ls, j):
  709. #
  710. results = []
  711. for rule_i in ls:
  712. if (results == []):
  713. if (isinstance(rule_i, list)):
  714. newresults = self.eval_body('.', rule_i, j)
  715. else:
  716. newresults = self.applyrule(rule_i, j)
  717. if (newresults == []):
  718. return []
  719. newresults = self.merge('.', newresults, {'startpos': j, 'endpos': j})
  720. else:
  721. r = []
  722. for elem_p in results:
  723. if (isinstance(rule_i, list)):
  724. newresults = self.eval_body('.', rule_i, elem_p['endpos'])
  725. else:
  726. newresults = self.applyrule(rule_i, elem_p['endpos'])
  727. if (newresults == []):
  728. return []
  729. newresults = self.merge('.', newresults, elem_p)
  730. results = newresults
  731. return results
  732. def merge(self, rulename, newres, elem_p):
  733. # Brief: tail of each new tree needs to be prepended with tail of the previous tree
  734. # rulename: becomes the head of each tree in the returned list
  735. # newres: may have more than one tree in case of alt operator: 'x' ('a' | 'b') 'y'
  736. # tail of each new tree needs to be prepended with tail of previous tree
  737. # Returns same list as eval: [{'tree':Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos':j, 'endpos':x}]
  738. results = []
  739. for elem_n in newres:
  740. tail = []
  741. if ('tree' in elem_p and elem_p['tree']):
  742. tail += elem_p['tree'].tail
  743. if ('tree' in elem_n and elem_n['tree']):
  744. tail.append(elem_n['tree'])
  745. value = {'tree': Tree(rulename, tail, elem_p['startpos'], elem_n['endpos']), 'startpos': elem_p['startpos'],
  746. 'endpos': elem_n['endpos']}
  747. results += [value]
  748. return results
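    # Example of merging (sketch): if the previous element produced
    #   elem_p = {'tree': Tree('.', [t1], 0, 2), 'startpos': 0, 'endpos': 2}
    # and the new results contain
    #   elem_n = {'tree': Tree('NAME', ['ab'], 2, 4), 'startpos': 2, 'endpos': 4}
    # then merge('.', [elem_n], elem_p) returns
    #   [{'tree': Tree('.', [t1, Tree('NAME', ['ab'], 2, 4)], 0, 4), 'startpos': 0, 'endpos': 4}]
    # (t1 stands for some previously matched subtree)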
  749. def alt(self, rulename, ls, j):
  750. # Evaluates all alternatives using eval_body or applyrule
  751. # Returns same list as eval: [{'tree':Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos':j, 'endpos':x}]
  752. overall_results = []
  754. for rule_i in ls:
  755. if (isinstance(rule_i, list)):
  756. newresults = self.eval_body('|', rule_i, j)
  757. else:
  758. newresults = self.applyrule(rule_i, j)
  759. overall_results += newresults
  760. return overall_results
  761. class PositionPostProcessor(object):
  762. """
  763. This post processor changes absolute position (place in the parsed string )to a line, column position
  764. added by Daniel
  765. """
  766. """
  767. efficiency note:
  768. how effective is this. this might be slowing things down quit a bit having to calculate that for everything
  769. 1) an alternative would be use the method only for the leaves, and that traverse the tree bottom up to create
  770. the interval using the left most and right most children of each subtree. but since tat involves extra tree
  771. traversal that might not help that much.
  772. 2) another thing that might improve efficiency is to create change the position calculating method:
  773. create one that doesnt scan the whole text for new line each time we calculate a position,
  774. but creates a table of them the first time.
  775. we can calculate the line by returning the index in the table of the the new line the closest to the given
  776. position and the column is the difference between the position of that newline and the column (maybe + or - 1,
  777. check that)
  778. in case this method doesn't slow things down too much ignore this
  779. """
  780. def __init__(self, method):
  781. self.calcPosMethod = method
  782. def inner_visit(self,tree):
  783. startDic = self.calcPosMethod(tree.startpos)
  784. endDic = self.calcPosMethod(tree.endpos)
  785. tree.startpos = Position(startDic["line"], startDic["column"])
  786. tree.endpos = Position(endDic["line"], endDic["column"])
  787. for item in tree.tail:
  788. if (isinstance(item, Tree)):
  789. self.inner_visit(item)
  790. def visit(self, tree):
  791. if tree:
  792. self.inner_visit(tree)
  793. return tree
  794. class DefaultPrinter(object):
  795. def __init__(self, output='console'):
  796. self.outputStream = ''
  797. self.output = output
  798. def inner_visit(self, tree):
  799. for item in tree.tail:
  800. if (isinstance(item, Tree)):
  801. self.inner_visit(item)
  802. else:
  803. self.outputStream += item
  804. def visit(self, tree):
  805. self.inner_visit(tree)
  806. if (self.output == 'console'):
  807. print self.outputStream
  808. class PrettyPrinter(object):
  809. def __init__(self, output='console'):
  810. self.outputStream = ''
  811. self.output = output
  812. self.tabcount = -1
  813. def tab(self):
  814. tabspace = ''
  815. for i in range(0, self.tabcount):
  816. tabspace += ' '
  817. return tabspace
  818. def inner_visit(self, tree):
  819. self.tabcount += 1
  820. self.outputStream += self.tab()
  821. self.outputStream += 'node ' + tree.head + ':\n'
  822. for item in tree.tail:
  823. if (isinstance(item, Tree)):
  824. self.inner_visit(item)
  825. else:
  826. self.tabcount += 1
  827. self.outputStream += self.tab() + item + ' @' + str(tree.startpos) + ' to ' + str(
  828. tree.endpos) + ' \n'
  829. self.tabcount -= 1
  830. self.tabcount -= 1
  831. def visit(self, tree):
  832. self.inner_visit(tree)
  833. if (self.output == 'console'):
  834. print self.outputStream
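    # Usage sketch (illustrative; assumes PrettyPrinter is reachable under this name from the caller's scope):
    #
    #   result = parser.parse(text)   # parser/text as in the Parser usage sketch above
    #   if result['status'] == Parser.Constants.Success:
    #       PrettyPrinter(output='console').visit(result['tree'])
    #
    # the output has the shape:
    #   node start:
    #    node some_rule:
    #     matched_text @<startpos> to <endpos>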
  835. class IgnorePostProcessor(object):
  836. def __init__(self, rules, tokens):
  837. self.rules = rules
  838. self.tokens = tokens
  839. def inner_visit(self, tree):
  840. results = []
  841. if (isinstance(tree, Tree)):
  842. if (self.isHidden(tree.head)):
  843. for item in tree.tail:
  844. ivlist = []
  845. ivresult = self.inner_visit(item)
  846. for elem in ivresult:
  847. if (isinstance(elem, Tree)):
  848. ivlist += [elem]
  849. results += ivlist
  850. else:
  851. tlist = []
  852. for item in tree.tail:
  853. tlist += self.inner_visit(item)
  854. tree.tail = tlist
  855. results += [tree]
  856. return results
  857. return [tree]
  858. def visit(self, tree):
  859. # start cannot be hidden
  860. tlist = []
  861. for item in tree.tail:
  862. tlist += self.inner_visit(item)
  863. tree.tail = tlist
  864. return tree
  865. def isHidden(self, head):
  866. if (head == '*' or head == '+' or head == '?' or head == '|' or head == '.'):
  867. return True
  868. if (head in self.rules):
  869. return 'hidden' in self.rules[head] and self.rules[head]['hidden']
  870. elif (head in self.tokens): #Changed by Daniel: added elif condition and return false otherwise, need for anon tokens
  871. return 'hidden' in self.tokens[head] and self.tokens[head]['hidden']
  872. else:
  873. return False