"""
Author: Bruno Barroca
Date: October 2014
Description: A top down parser
Modifications by Daniel Riegelhaupt:
* removed test input
* changed pos to startpos, because in my humble opinion it makes more sense to have a tuple (startpos, endpos) than (pos, endpos)
* added parameters to init: tab_size, line_position, hide_implicit
  - see the init comments for more info on all options
  - line_position will change startpos and endpos to instances of the Position class (changed December 2014)
* added anonymous terminals: tokens do not have to be defined as tokens but can be typed directly in rules
* changed the interleave function to be deep, and to start on START
* changed the position returned in the tree to be relative to line numbers instead of the absolute one
  - did the same for the partial results returned on syntax error, so this changes error results too
  - TODO check efficiency on the previous point: checking the whole text for every position might be slow
* changed usage: instead of Parser(input, grammar).parse() it is now Parser(grammar).parse(input)
  - added a self.reset() method for fields that need to be initialized again when parsing a new input
* changed findFailure and generateErrorReport:
  - I need the rule/token name as well, not only the error text
  - hidden elements (like for example comments and newlines) are not included in error reports if hide_implicit is set to true
  - same for the interleave rule
"""
import re
from copy import deepcopy
from position import Position

tail_cache = {}
line_cache = {}
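# Illustrative usage sketch, assuming a Grammar instance as expected by Parser.__init__ below:
#
#   parser = Parser(my_grammar, tab_size=4, hide_implicit=True, line_position=True)
#   result = parser.parse(source_text)
#   if result['status'] == Parser.Constants.Success:
#       tree = result['tree']       # a Tree rooted at 'start'
#   else:
#       print(result['text'])       # human readable syntax error message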
class Tree(object):
    def __init__(self, head, tail, startpos, endpos, inputfile=None):
        self.head = head
        self.tail = tail
        self.startpos = startpos
        self.endpos = endpos
        self.inputfile = inputfile
        # IMPORTANT: self.replaced: replace_child defines self.replaced
    def prune(self):
        return (self.head, [i.prune() if isinstance(i, Tree) else i for i in self.tail])
    def is_rule(self):
        return self.head.islower()
    def is_token(self):
        return not self.is_rule()
    def get_tail(self):
        global tail_cache
        if self.is_rule():
            if self not in tail_cache:
                tail_cache[self] = [t for t in self.get_raw_tail()
                                    if not t.head.startswith("implicit_autogenerated_")]
            return tail_cache[self]
        else:
            return self.get_raw_tail()
    def get_raw_tail(self):
        return self.tail
    def get_text(self, with_implicit=False):
        parts = []
        if with_implicit:
            tail = Tree.get_raw_tail
        else:
            tail = Tree.get_tail
        def post_order(tree):
            for child in tail(tree):
                if hasattr(child, "replaced"):
                    child = child.replaced
                if isinstance(child, Tree):
                    post_order(child)
                else:
                    parts.append(child)
        post_order(self)
        return ''.join(parts)
    def get_child(self, name):
        for child in self.get_tail():
            if child.head == name:
                return child
        return None
    def get_children(self, name):
        children = []
        for child in self.get_tail():
            if child.head == name:
                children.append(child)
        return children
    def replace_child(self, old_child, new_child):
        new_child.replaced = old_child
        i = self.get_raw_tail().index(old_child)
        self.get_raw_tail()[i] = new_child
        i = self.get_tail().index(old_child)
        self.get_tail()[i] = new_child
    def get_tail_without(self, names):
        if self.is_rule():
            return [t for t in self.get_tail() if not t.head in names]
        else:
            return self.get_raw_tail()
    def __str__(self):
        return "(%s, %s) [%s]" % (
            self.head, str((self.startpos, self.endpos)),
            ", ".join([str(i) for i in self.get_raw_tail()]))
    def get_reference_line(self):
        return "%s:%s:%s-%s" % (self.inputfile, self.startpos["line"], self.startpos["column"], self.endpos["column"])
    def fix_tracability(self, inputfile):
        if self.inputfile is None:
            self.inputfile = inputfile
        for f in self.tail:
            if isinstance(f, Tree):
                f.fix_tracability(self.inputfile)
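# Illustrative sketch of navigating a parse Tree (the rule/token names depend on the grammar):
#
#   child = tree.get_child('name')              # first direct child whose head is 'name', or None
#   children = tree.get_children('name')        # all direct children whose head is 'name'
#   text = tree.get_text()                      # concatenated leaf text, implicit ("implicit_autogenerated_") children skipped
#   raw = tree.get_text(with_implicit=True)     # same, but including implicit children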
class Parser(object):
    class Constants(object):
        Token = 'token'
        Production = 'prod'
        Success = 'success'
        Failure = 'failure'
    class LR(object):
        def __init__(self, seed, rulename, head, nextlr):
            self.seed = seed
            self.rule = rulename
            self.head = head
            self.next = nextlr
        def copy(self):
            return Parser.LR(self.seed, self.rule, self.head, self.next)
    class Head(object):
        def __init__(self, rulename, involved, evaluation):
            self.rule = rulename
            self.involved = involved
            self.evaluation = evaluation
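    # LR and Head are the bookkeeping records used by the left-recursion support in
    # recall/applyrule/lr_answer/growLR below (seed-growing memoization in the style of
    # Warth et al.): an LR carries the current seed results and a link to the enclosing
    # LR-stack entry, a Head records which rules are involved in a left-recursive cycle
    # and which of them still have to be re-evaluated while the seed is grown.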
    def __init__(self, grammar, **options):
        """
        Creates a Parser for the given grammar.
        :param grammar: an instance of the Grammar class
        :param options: the following options are supported:
            tab_size: default 1. Sets the character size of a tab character.
            hide_implicit: default False. When true, implicit tokens are hidden from the returned parse tree and error messages.
                Note that this option will not override rules or tokens where the hidden variable has already been set manually in the Grammar class.
            line_position: default False. When true, startpos and endpos in the parse tree are line/column Position objects instead of absolute position integers.
        """
        # changed by Daniel: members that need to be initialized each time parse() is called have been put in reset(),
        # which is called whenever parse() is called
        self.rules = deepcopy(grammar.rules)
        self.tokens = deepcopy(grammar.tokens)
        self.implicitList = []  # added by Daniel, set in setHideImplicit so that we can review the implicit list in case of error messages
        self.implictRuleName = ""
        # options added by Daniel
        self.tabsize = int(options.pop('tab_size', 1))  # the character size of a tab
        self.hideImplicit = bool(options.pop('hide_implicit', False))
        # whether to hide implicit tokens and rules from the returned parse tree
        # important note: this option will not override rules or tokens where the hidden variable has already been set manually
        self.linePosition = bool(options.pop('line_position', False))
        # if true, the positions in the returned parse tree consist of a line and a column instead of the index in the input string
        # preprocess must happen after the options (after hideImplicit has been set)
        self.preprocess()
    def reset(self):
        self.input = ""
        self.memotable = {}
        self.failure = {}
        self.lrstack = None
        self.heads = {}
        self.countcard = {}
    def preprocess(self):
        # for elem in self.rules.keys():  # changed by Daniel: we only check start because it's global
        elem = 'start'
        if elem in self.rules.keys():
            if ('interleave' in self.rules[elem]):
                ilist = self.rules[elem]['interleave']
                self.setHideImplicit(ilist, self.hideImplicit)
                self.interleave(self.rules[elem], ilist)
    def setHideImplicit(self, ilist, bool=False):
        if ilist:
            # ilist = ['?', '@rulename']
            rulename = ilist[1][1:]
            self.implictRuleName = rulename  # used to hide the interleave rule in later error reports
            self.rules[rulename]['hidden'] = bool
            if rulename in self.rules:
                body = self.rules[rulename]['body']
                # body = [*, [| ,,,,]]
                elems = body[1][1:]
                self.implicitList = elems
                for elem in elems:
                    l = None
                    error = ''
                    if elem[0] == '@':
                        l = self.rules
                        error = ' rule not found in grammar rules.'
                    elif elem[0] == '$':
                        l = self.tokens
                        error = ' token not found in grammar rules.'
                    # else: in this case it is an anonymous token
                    if l:
                        name = elem[1:]
                        if name in l:
                            if not l[name].has_key('hidden'):
                                # this method will not override anything the user has explicitly specified in the structure:
                                # if there is already a hidden value it is kept, even if it is not the same one.
                                # an example use case is whitespace vs comments:
                                # both can appear anywhere in the text and so are implicit in the grammar,
                                # but we don't want the spaces in the tree while we do want the comments
                                l[name]['hidden'] = bool
                        else:
                            raise Exception(name + error)
                    # else: anonymous tokens can't be ignored for the moment, unless we create an ignore list for them or something like that
            else:
                raise Exception(rulename + ' rule not found in grammar rules.')
    def interleave(self, elem, ilist):
        # quick and simple interleaving method, will probably contain double interleaving
        # but this is as simple as I could make it without taking into account each and every case
        def quickInterLeave(lst, inter):
            newL = []
            newL.append(lst[0])
            isSeq = self.isSequence(lst[0])
            for item in lst[1:]:
                if (isinstance(item, list)):  # a sublist
                    newL.append(quickInterLeave(item, inter))
                else:
                    if (item[0] == '@'):  # rule
                        rulename = item[1:]
                        if rulename in self.rules:
                            rule = self.rules[rulename]
                            if not rule.has_key('visited') or rule['visited'] == False:
                                self.interleave(rule, inter)
                        else:
                            raise Exception(rulename + ' rule not found in grammar rules.')
                    """
                    else:
                        pass
                    in this case it is a token or an anonymous token; we don't need to do anything special,
                    just add it to the interleaved list
                    """
                    if isSeq:  # no need to complicate the data structure if the list is a sequence
                        if not newL[-1] == inter:
                            newL.append(inter)
                        newL.append(item)
                        newL.append(inter)
                    else:
                        newL.append(['.', inter, item, inter])
                    """
                    This way, in case the list is not a sequence, this does not change the meaning of the list.
                    Example: t1, t2 are tokens, i is an optional whitespace being interleaved:
                    [., t1, t2] -> [., i, t1, i, t2]
                    the meaning stays the same:
                    t1 and t2 both have to be found for the rule to apply, regardless of the whitespace
                    [|, t1, t2] -> [|, i, t1, i, t2]
                    the meaning changed: if i is encountered the or is satisfied,
                    so instead we do -> [|, [., i, t1, i], [., i, t2, i]]
                    note that while inter has been added to the data structure 4 times, it will only match
                    for one option, so it is not really a duplicate.
                    another way of writing this would be [., inter, [|, t1, t2], inter], but this is easier said than
                    done, especially for big (complex) data structures
                    """
            return newL
        # the first thing we do is mark the item as visited; this avoids infinite loops due to recursion
        elem['visited'] = True
        if (not 'body' in elem):
            return
        ls = elem['body']
        newbody = quickInterLeave(ls, ilist)
        elem['body'] = newbody
    def parse(self, text):
        self.reset()  # changed by Daniel: the text is received as a parameter instead of once at init, so first we reset the fields
        self.input = text
        results = self.applyrule('@start', 0)
        if len(results) > 1:
            # Handle ambiguity
            from prettyprint_visitor import PrettyPrintVisitor
            for p in results:
                print("===================================")
                print("VISIT RESULT")
                print("===================================")
                visitor = PrettyPrintVisitor([])
                visitor.visit(p["tree"])
                print(visitor.dump())
            result = self.generateErrorReport()
        elif (results == [] or results[0]['endpos'] < len(self.input)):
            result = self.generateErrorReport()
            for elem in result['partialresults']:  # added by Daniel: there was no post processing on partial results and I need it
                if elem['tree']:  # with partial results the tree can be None
                    elem['tree'] = IgnorePostProcessor(self.rules, self.tokens).visit(elem['tree'])
                    if self.linePosition:
                        # elem['tree'].startpos = 0
                        # elem['tree'].endpos = 0
                        elem['tree'] = Parser.PositionPostProcessor(self.convertToLineColumn).visit(elem['tree'])  # added by Daniel
        elif len(results) == 1:
            result = results[0]
            result.update({'status': Parser.Constants.Success})
            if result['tree'].head != 'start':
                result['tree'] = Tree('start', [result['tree']], result['tree'].startpos, result['tree'].endpos)
            result['tree'] = IgnorePostProcessor(self.rules, self.tokens).visit(result['tree'])
            if self.linePosition:  # added by Daniel
                result['tree'] = Parser.PositionPostProcessor(self.convertToLineColumn).visit(result['tree'])
        return result
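    # Shape of the dictionary returned by parse():
    #   on success: {'status': Parser.Constants.Success, 'tree': <Tree rooted at 'start'>,
    #                'startpos': ..., 'endpos': ...}
    #   on failure: {'status': Parser.Constants.Failure, 'line': ..., 'column': ..., 'text': <message>,
    #                'partialresults': [...], 'grammarelements': [...] or None}  (built by generateErrorReport below)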
    def convertToLineColumn(self, pos):
        line = 1
        column = 0
        l = len(self.input)
        for i in range(0, l):
            if (i > pos):
                break
            if self.input[i] == '\n':
                line += 1
                column = 0
            elif self.input[i] == '\t':
                column += self.tabsize  # changed by Daniel: this used to be 4
            else:
                column += 1
        if pos >= l:  # the end of the text
            """
            added by Daniel: needed for the case of the last word/character.
            Assume a text of one word: 'foo'.
            In absolute positions the tree says the word goes from 1 to 4 (as always, 'to' means not included).
            In this method we only count until the end of the text, so we would return line 1 col 1 to line 1 col 3,
            but we need col 4.
            We could just test pos == l, but I think it is better to say that any position bigger than the text simply means the end of the text.
            """
            column += 1
        return {'line': line, 'column': column}
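    # Worked example: for self.input == 'ab\ncd' and pos == 3 (the 'c'), the loop counts 'a' and 'b'
    # (column 2), resets on '\n' (line 2, column 0), counts 'c' (column 1) and then stops,
    # so the result is {'line': 2, 'column': 1}. A pos at or beyond the end of the text additionally
    # gets column += 1 so that it points just past the last character.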
    def findlargerresultat(self, pos):
        endpos = pos
        result = None
        for key in self.memotable.keys():
            elem = self.memotable[key]
            if (elem == []):
                continue
            if (elem[0]['startpos'] == pos and endpos < elem[0]['endpos']):
                endpos = elem[0]['endpos']
                result = elem[0]
        return result
    def generateErrorReport(self):
        # consult the memotable and collect contiguities until endpos
        endpos = len(self.input) - 1
        pos = 0
        elems = []
        while pos <= endpos:
            elem = self.findlargerresultat(pos)
            if (not elem or (elem and elem['endpos'] == pos)):
                break
            pos = elem['endpos']
            elems.append(elem)
        if (pos <= endpos):
            elems.append({'tree': None, 'startpos': pos, 'endpos': endpos})
        elem = self.getFirstBiggestSpan(elems)
        if elem is None:
            return {'status': Parser.Constants.Failure, 'line': 0, 'column': 0, 'text': "Empty input file", 'partialresults': [], 'grammarelements': None}
        reasons = self.findFailure(elem['startpos'], elem['endpos'])
        if (reasons == []):
            pos -= 1
        else:
            pos = reasons[0]['startpos']
        read = self.input[pos:pos + 1]
        linecolumn = self.convertToLineColumn(pos)
        message = 'Syntax error at line ' + str(linecolumn['line']) + ' and column ' + str(linecolumn['column']) + '. '
        keys = []
        if (not reasons == []):
            first = True
            for reason in reasons:
                if (first):
                    message += 'Expected ' + reason['text']
                    first = False
                else:
                    message += ' or ' + reason['text']
                keys.append(reason['key'])
            message += '. Instead read: ' + repr(read) + '.'
        else:
            message += 'Read: \'' + read + '\'.'
        return {'status': Parser.Constants.Failure, 'line': linecolumn['line'], 'column': linecolumn['column'],
                'text': message, 'partialresults': elems, 'grammarelements': keys}
    def getFirstBiggestSpan(self, elems):
        biggestspan = 0
        result = None
        for elem in elems:
            span = elem['endpos'] - elem['startpos']
            if (biggestspan < span):
                result = elem
                biggestspan = span  # remember the largest span seen so far
        return result
    def findFailure(self, pos, endpos):
        posreasons = []
        endposreasons = []
        # changed by Daniel:
        # * I need the key as well for autocomplete, so instead of appending elem I append a new dictionary with elem and the key inside
        # * checks both conditions, for posreasons and endposreasons, in one for loop instead of two
        # * does not consider keys that are hidden
        for key in self.failure.keys():
            # names in the implicit list start either with $ for tokens or with @ for rules;
            # however, with the given metagrammar tokens are all caps and rules are all lowercase, so there can't be an overlap
            # and we can safely test both prefixes
            if self.hideImplicit and \
                    (('$' + key in self.implicitList) or ('@' + key in self.implicitList) or (key == self.implictRuleName)):
                continue
            else:
                elem = self.failure[key]
                if (elem['startpos'] == pos and not elem['text'] == ''):
                    posreasons.append({'key': key, 'startpos': elem['startpos'], 'text': elem['text']})
                if (elem['startpos'] == endpos and not elem['text'] == ''):
                    endposreasons.append({'key': key, 'startpos': elem['startpos'], 'text': elem['text']})
        if (len(endposreasons) < len(posreasons)):
            return posreasons
        else:
            return endposreasons
    def setupLR(self, rule, elem):
        if (elem.head == None):
            elem.head = Parser.Head(rule, [], [])
        s = self.lrstack
        while s and not s.rule == elem.head.rule:
            s.head = elem.head
            if (not s.rule in elem.head.involved):
                elem.head.involved.append(s.rule)
            s = s.next
    def recall(self, rule, j):
        newresults = []
        if ((rule, j) in self.memotable):
            newresults = self.memotable[(rule, j)]
        h = None
        if (j in self.heads):
            h = self.heads[j]
        if (not h):
            return newresults
        if (newresults == [] and not rule in (h.involved + [h.rule])):
            return []  # [{'tree': [], 'startpos': j, 'endpos': j}]
        if (rule in h.evaluation):
            h.evaluation.remove(rule)
            newresults = self.eval(rule, j)
            self.memotable.update({(rule, j): newresults})
        return newresults
    def applyrule(self, rule, j):
        overallresults = []
        newresults = self.recall(rule, j)
        if (not newresults == []):
            memoresults = []
            for elem in newresults:
                if (isinstance(elem['tree'], Parser.LR)):
                    self.setupLR(rule, elem['tree'])
                    memoresults += elem['tree'].seed
                else:
                    overallresults.append(elem)
            if (not memoresults == []):
                self.memotable.update({(rule, j): memoresults})
                return memoresults
            return overallresults
        else:
            # lr = Parser.LR([], rule, None, deepcopy(self.lrstack))
            lr = Parser.LR([], rule, None, None if not self.lrstack else self.lrstack.copy())
            self.lrstack = lr
            self.memotable.update({(rule, j): [{'tree': lr, 'startpos': j, 'endpos': j}]})
            newresults = self.eval(rule, j)
            self.lrstack = self.lrstack.next
            memoresults = []
            if ((rule, j) in self.memotable):
                memoresults = self.memotable[(rule, j)]
            for melem in memoresults:
                if (isinstance(melem['tree'], Parser.LR) and melem['tree'].head):
                    melem['tree'].seed = newresults
                    r = self.lr_answer(rule, j, melem)
                    if (not r == []):
                        overallresults += r
            if (overallresults != []):  # prefer grown results
                return overallresults
            self.memotable.update({(rule, j): newresults})
            return newresults
    def lr_answer(self, rule, pos, melem):
        h = melem['tree'].head
        if (not h.rule == rule):
            return melem['tree'].seed
        else:
            melems = melem['tree'].seed
            result = []
            for melem_i in melems:
                if (not melem_i['tree'] == None):
                    result.append(melem_i)
            if (result == []):
                return []
            else:
                newresult = []
                for melem_i in result:
                    newresult.append(self.growLR(rule, pos, melem_i, h))
                return newresult
    def growLR(self, rule, pos, melem, head=None):
        self.heads.update({pos: head})
        while (True):
            overallresults = []
            head.evaluation = deepcopy(head.involved)
            newresults = self.eval(rule, pos)
            for elem in newresults:
                if (elem['endpos'] > melem['endpos']):
                    melem = elem
                    overallresults.append(elem)
            if (overallresults == []):
                self.heads.update({pos: None})
                return melem
            self.memotable.update({(rule, pos): overallresults})
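    # recall/applyrule/lr_answer/growLR together handle (possibly left-recursive) rules with
    # memoization: applyrule plants an LR seed in the memotable before evaluating a rule, lr_answer
    # turns a detected left-recursive result into a call to growLR, and growLR keeps re-evaluating
    # the rule at the same position until the match no longer grows (seed-growing in the style of
    # Warth et al.).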
    def eval(self, rulename, j):
        # Returns [{'tree': Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos': j, 'endpos': x}]
        # Raises an Exception if there is no such token/rule
        if (rulename[0] == '@'):
            rulename = rulename[1:]
            if (not rulename in self.rules):
                raise Exception(rulename + ' rule not found in grammar rules.')
            rule = self.rules[rulename]
        elif (rulename[0] == '$'):
            rulename = rulename[1:]
            if (not rulename in self.tokens):
                raise Exception(rulename + ' token not found in grammar tokens.')
            rule = self.tokens[rulename]
        else:
            # raise Exception('Plain terminals not allowed inside grammar rules: ' + str(rulename))
            # we create an anonymous token rule;
            # we can write whatever we want as the fake type, as long as it is not equal to the type of a production rule
            # or to that of a token
            rule = {'type': 'anonymous_token'}
        if (self.isType(rule, Parser.Constants.Production)):
            newresults = []
            results = self.eval_body(rulename, rule['body'], j)
            for r in results:
                if (r['tree']):
                    head = r['tree'].head
                    if (head == '*' or head == '+' or head == '?' or head == '|' or head == '.'):
                        newr = {'tree': Tree(rulename, [r['tree']], r['startpos'], r['endpos']), 'startpos': r['startpos'],
                                'endpos': r['endpos']}
                        r = newr
                newresults.append(r)
        elif (self.isType(rule, Parser.Constants.Token)):
            newresults = self.term(rulename, j)
        else:  # changed by Daniel: if it is neither a production rule nor a defined token we try an anonymous token
            newresults = self.anonTerm(rulename, j)
        return newresults
    def eval_body(self, rulename, ls, j):
        # Delegates the task to the sub-functions: alt, seq, opt, many, more, card
        # Returns the same list of results as eval
        # Raises an Exception if the first element in the body is not in {'|', '.', '?', '*', '+', '#'}
        if (self.isAlternative(ls[0])):
            return self.alt(rulename, ls[1:], j)
        elif (self.isSequence(ls[0])):
            return self.seq(rulename, ls[1:], j)
        elif (self.isOptional(ls[0])):
            return self.opt(rulename, ls[1:], j)
        elif (self.isMany(ls[0])):
            return self.many(rulename, ls[1:], j)
        elif (self.isMore(ls[0])):
            return self.more(rulename, ls[1:], j)
        elif (self.isCard(ls[0])):
            return self.card(rulename, ls[0][1:], ls[1:], j)
        raise Exception('Unrecognized grammar expression: ' + str(ls[0]))
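    # Grammar bodies handled by eval_body are nested lists whose first element is the operator:
    # '|' (alternative), '.' (sequence), '?' (optional), '*' (zero or more), '+' (one or more) or
    # '#...' (cardinality); the remaining elements are '@rule' / '$TOKEN' references, anonymous
    # terminals, or nested lists. Illustrative example (hypothetical rule body):
    #   ['.', '$LPAREN', ['*', '@expression'], '$RPAREN']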
    def isSequence(self, operator):
        return operator == '.'
    def isAlternative(self, operator):
        return operator == '|'
    def isMany(self, operator):
        return operator == '*'
    def isCard(self, operator):
        return operator.startswith('#')
    def isMore(self, operator):
        return operator == '+'
    def isOptional(self, operator):
        return operator == '?'
    def isType(self, rule, oftype):
        if (rule['type'] == oftype):
            return True
    def term(self, rulename, j):
        if (j >= len(self.input)):
            errortext = ''
            if (rulename in self.tokens and 'errortext' in self.tokens[rulename]):
                errortext = self.tokens[rulename]['errortext']
            self.failure.update({rulename: {'startpos': j, 'text': errortext}})
            return []
        rule = self.tokens[rulename]
        mobj = re.match(rule['reg'], self.input[j:])
        # changed by Daniel: instead of re.match(reg) I did re.match(re.compile(reg).pattern);
        # this is to avoid problems with \. Before this change I had to match the character \ by writing [\\\\],
        # because to write only two slashes it would have to be r'[\\]', which can't be done directly in the grammar, so it had to be in string form.
        # This way, reading [\\] will be interpreted correctly instead of giving an error like it used to.
        if (not mobj):
            # this is a failure! nice to register!
            self.failure.update({rulename: {'startpos': j, 'text': self.tokens[rulename]['errortext']}})
            return []
        return [{'tree': Tree(rulename, [mobj.group()], j, j + mobj.end()), 'startpos': j, 'endpos': j + mobj.end()}]
    def anonTerm(self, term, j):
        """
        Changed by Daniel: added this whole method.
        Anonymous term, to allow for direct terminals in rules
        (write 'Foo' directly instead of having to define a FOO token).
        """
        qt = '\''
        name = qt + term + qt
        if (j >= len(self.input)):
            self.failure.update({name: {'startpos': j, 'text': name}})
            return []
        mobj = re.match(term, self.input[j:])
        if (not mobj):
            # this is a failure! nice to register!
            self.failure.update({name: {'startpos': j, 'text': name}})
            return []
        return [{'tree': Tree(name, [mobj.group()], j, j + mobj.end()), 'startpos': j, 'endpos': j + mobj.end()}]
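    # Note on many()/more() below: '*' and '+' are expanded into the equivalent right-recursive
    # sequence X (X*), i.e. seq = ['.'] + ls + [['*'] + ls], and the nested tail of the result is
    # then flattened so that the repeated matches end up as direct siblings in a single tree.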
    def many(self, rulename, ls, j):
        rule_i = ls[0]
        if (isinstance(rule_i, list)):
            results = self.eval_body('*', rule_i, j)
        else:
            results = self.applyrule(rule_i, j)
        if (results == []):
            return [{'tree': None, 'startpos': j, 'endpos': j}]
        seq = ['.'] + ls + [['*'] + ls]
        results = self.eval_body('*', seq, j)
        overall_results = []
        for r in results:
            if (r['tree']):
                if (len(r['tree'].tail) > 1):
                    left = r['tree'].tail[0]
                    right = r['tree'].tail[1].tail
                    r['tree'].tail = [left] + right
                overall_results.append(r)
        return overall_results
    def more(self, rulename, ls, j):
        rule_i = ls[0]
        if (isinstance(rule_i, list)):
            results = self.eval_body('+', rule_i, j)
        else:
            results = self.applyrule(rule_i, j)
        if (results == []):
            return []
        seq = ['.'] + ls + [['*'] + ls]
        results = self.eval_body('+', seq, j)
        overall_results = []
        for r in results:
            if (r['tree']):
                if (len(r['tree'].tail) > 1):
                    left = r['tree'].tail[0]
                    right = r['tree'].tail[1].tail
                    r['tree'].tail = [left] + right
                overall_results.append(r)
        return overall_results
    def opt(self, rulename, ls, j):
        if (j >= len(self.input)):
            errortext = ''
            if (rulename in self.rules and 'errortext' in self.rules[rulename]):
                errortext = self.rules[rulename]['errortext']
            else:
                for item in ls:
                    if ((not isinstance(item[1:], list)) and item[1:] in self.rules):
                        errortext = self.rules[item[1:]]['errortext']
            self.failure.update({rulename: {'startpos': j, 'text': errortext}})
            return [{'tree': None, 'startpos': j, 'endpos': j}]
        results = []
        rule_i = ls[0]
        if (isinstance(rule_i, list)):
            results = self.eval_body('?', rule_i, j)
        else:
            results = self.applyrule(rule_i, j)
        if (not results == []):
            return results
        # empty case
        return [{'tree': None, 'startpos': j, 'endpos': j}]
    def card(self, rulename, cardrule, ls, j):
        count = 0
        delta = 1
        # a# a#(-1) #indent, #(-1)indent
        group = re.match(r'\((?P<delta>[-+]?\d+)\)(?P<rule>\S+)', cardrule)
        if (group):
            cardrule = group.group('rule')
            delta = int(group.group('delta'))
        if (not cardrule in self.countcard):
            count = delta
            self.countcard.update({cardrule: {j: count}})
        else:
            if not j in self.countcard[cardrule]:  # if we already know the count for j, then ignore..
                d = self.countcard[cardrule]
                lastcount = 0
                for i in range(0, j):
                    if i in d:
                        lastcount = d[i]
                count = lastcount + delta
                d.update({j: count})
            else:
                count = self.countcard[cardrule][j]
        results = []
        rule_i = '@' + cardrule
        if (count == 0):
            results = [{'tree': None, 'startpos': j, 'endpos': j}]
        else:
            for i in range(0, count):
                if (results == []):
                    if (isinstance(rule_i, list)):
                        newresults = self.eval_body(rulename, rule_i, j)
                    else:
                        newresults = self.applyrule(rule_i, j)
                    if (newresults == []):
                        del self.countcard[cardrule][j]
                        return []
                    newresults = self.merge(rulename, newresults, {'startpos': j, 'endpos': j})
                else:
                    for elem_p in results:
                        if (isinstance(rule_i, list)):
                            newresults = self.eval_body(rulename, rule_i, elem_p['endpos'])
                        else:
                            newresults = self.applyrule(rule_i, elem_p['endpos'])
                        if (newresults == []):
                            del self.countcard[cardrule][j]
                            return []
                        newresults = self.merge(rulename, newresults, elem_p)
                results = newresults
        for rule_i in ls:
            for elem_p in results:
                if (isinstance(rule_i, list)):
                    newresults = self.eval_body(rulename, rule_i, elem_p['endpos'])
                else:
                    newresults = self.applyrule(rule_i, elem_p['endpos'])
                if (newresults == []):
                    del self.countcard[cardrule][j]
                    return []
                newresults = self.merge(rulename, newresults, elem_p)
            results = newresults
        del self.countcard[cardrule][j]
        return results
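    # The '#' cardinality operator handled above appears to be intended for layout-sensitive
    # constructs such as '#indent' or '#(-1)indent': the optional (delta) prefix adjusts a counter
    # that is tracked per rule and per input position in self.countcard, the referenced rule is
    # required to match exactly that many times, and only then are the remaining elements parsed.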
    def seq(self, rulename, ls, j):
        results = []
        for rule_i in ls:
            if (results == []):
                if (isinstance(rule_i, list)):
                    newresults = self.eval_body('.', rule_i, j)
                else:
                    newresults = self.applyrule(rule_i, j)
                if (newresults == []):
                    return []
                newresults = self.merge('.', newresults, {'startpos': j, 'endpos': j})
            else:
                r = []
                for elem_p in results:
                    if (isinstance(rule_i, list)):
                        newresults = self.eval_body('.', rule_i, elem_p['endpos'])
                    else:
                        newresults = self.applyrule(rule_i, elem_p['endpos'])
                    if (newresults == []):
                        return []
                    newresults = self.merge('.', newresults, elem_p)
            results = newresults
        return results
    def merge(self, rulename, newres, elem_p):
        # Brief: the tail of each new tree needs to be prepended with the tail of the previous tree
        # rulename: becomes the head of each tree in the returned list
        # newres: may contain more than one tree in case of the alt operator: 'x' ('a' | 'b') 'y'
        # Returns the same list format as eval: [{'tree': Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos': j, 'endpos': x}]
        results = []
        for elem_n in newres:
            tail = []
            if ('tree' in elem_p and elem_p['tree']):
                tail += elem_p['tree'].tail
            if ('tree' in elem_n and elem_n['tree']):
                tail.append(elem_n['tree'])
            value = {'tree': Tree(rulename, tail, elem_p['startpos'], elem_n['endpos']), 'startpos': elem_p['startpos'],
                     'endpos': elem_n['endpos']}
            results += [value]
        return results
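    # Worked example for merge(): if elem_p holds Tree('.', [a, b]) spanning 0..5 and the new
    # result elem_n holds a tree t spanning 5..8, the merged entry is
    # {'tree': Tree('.', [a, b, t], 0, 8), 'startpos': 0, 'endpos': 8}.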
    def alt(self, rulename, ls, j):
        # Evaluates all alternatives using eval_body or applyrule
        # Returns the same list format as eval: [{'tree': Tree(head=rulename, tail=[...], startpos=j, endpos=x), 'startpos': j, 'endpos': x}]
        overall_results = []
        results = []  # TODO: remove this variable as it's never used
        for rule_i in ls:
            if (isinstance(rule_i, list)):
                newresults = self.eval_body('|', rule_i, j)
            else:
                newresults = self.applyrule(rule_i, j)
            overall_results += newresults
        return overall_results
    class PositionPostProcessor(object):
        """
        This post processor changes the absolute position (the index in the parsed string) to a line, column position.
        added by Daniel
        """
        """
        Efficiency note:
        this might be slowing things down quite a bit, since the line/column has to be calculated for everything.
        1) An alternative would be to use the method only for the leaves, and then traverse the tree bottom up to create
        the interval using the leftmost and rightmost children of each subtree. But since that involves an extra tree
        traversal it might not help that much.
        2) Another thing that might improve efficiency is to change the position calculating method:
        create one that doesn't scan the whole text for newlines each time we calculate a position,
        but creates a table of them the first time.
        We can then calculate the line by returning the index in the table of the newline closest to the given
        position, and the column is the difference between the position of that newline and the given position (maybe + or - 1,
        check that).
        In case this method doesn't slow things down too much, ignore this.
        """
        def __init__(self, method):
            self.calcPosMethod = method
        def inner_visit(self, tree):
            startDic = self.calcPosMethod(tree.startpos)
            endDic = self.calcPosMethod(tree.endpos)
            tree.startpos = Position(startDic["line"], startDic["column"])
            tree.endpos = Position(endDic["line"], endDic["column"])
            for item in tree.tail:
                if (isinstance(item, Tree)):
                    self.inner_visit(item)
        def visit(self, tree):
            if tree:
                self.inner_visit(tree)
            return tree
    class DefaultPrinter(object):
        def __init__(self, output='console'):
            self.outputStream = ''
            self.output = output
        def inner_visit(self, tree):
            for item in tree.tail:
                if (isinstance(item, Tree)):
                    self.inner_visit(item)
                else:
                    self.outputStream += item
        def visit(self, tree):
            self.inner_visit(tree)
            if (self.output == 'console'):
                print self.outputStream
    class PrettyPrinter(object):
        def __init__(self, output='console'):
            self.outputStream = ''
            self.output = output
            self.tabcount = -1
        def tab(self):
            tabspace = ''
            for i in range(0, self.tabcount):
                tabspace += ' '
            return tabspace
        def inner_visit(self, tree):
            self.tabcount += 1
            self.outputStream += self.tab()
            self.outputStream += 'node ' + tree.head + ':\n'
            for item in tree.tail:
                if (isinstance(item, Tree)):
                    self.inner_visit(item)
                else:
                    self.tabcount += 1
                    self.outputStream += self.tab() + item + ' @' + str(tree.startpos) + ' to ' + str(
                        tree.endpos) + ' \n'
                    self.tabcount -= 1
            self.tabcount -= 1
        def visit(self, tree):
            self.inner_visit(tree)
            if (self.output == 'console'):
                print self.outputStream
class IgnorePostProcessor(object):
    def __init__(self, rules, tokens):
        self.rules = rules
        self.tokens = tokens
    def inner_visit(self, tree):
        results = []
        if (isinstance(tree, Tree)):
            if (self.isHidden(tree.head)):
                for item in tree.tail:
                    ivlist = []
                    ivresult = self.inner_visit(item)
                    for elem in ivresult:
                        if (isinstance(elem, Tree)):
                            ivlist += [elem]
                    results += ivlist
            else:
                tlist = []
                for item in tree.tail:
                    tlist += self.inner_visit(item)
                tree.tail = tlist
                results += [tree]
            return results
        return [tree]
    def visit(self, tree):
        # start cannot be hidden
        tlist = []
        for item in tree.tail:
            tlist += self.inner_visit(item)
        tree.tail = tlist
        return tree
    def isHidden(self, head):
        if (head == '*' or head == '+' or head == '?' or head == '|' or head == '.'):
            return True
        if (head in self.rules):
            return 'hidden' in self.rules[head] and self.rules[head]['hidden']
        elif (head in self.tokens):  # changed by Daniel: added the elif condition and return False otherwise; needed for anonymous tokens
            return 'hidden' in self.tokens[head] and self.tokens[head]['hidden']
        else:
            return False