from ..common import ParseError, UnexpectedToken, is_terminal
from .grammar_analysis import GrammarAnalyzer


class EndToken(str):
    # Sentinel token fed to the parser after the input stream ends.
    type = '$end'


class Item:
    # An Earley item: a rule, a dot position inside its expansion (ptr),
    # the column the item started in (start), and the data matched so far.
    def __init__(self, rule, ptr, start, data):
        self.rule = rule
        self.ptr = ptr
        self.start = start
        self.data = data

    @property
    def expect(self):
        # The next symbol the item expects (the symbol right after the dot).
        return self.rule.expansion[self.ptr]

    @property
    def is_complete(self):
        # True once the dot has reached the end of the rule's expansion.
        return self.ptr == len(self.rule.expansion)

    def advance(self, data):
        # Return a new item with the dot moved one symbol forward.
        return Item(self.rule, self.ptr + 1, self.start, self.data + [data])

    def __eq__(self, other):
        return self.start == other.start and self.ptr == other.ptr and self.rule == other.rule

    def __hash__(self):
        return hash((self.rule, self.ptr, self.start))


class Parser:
    def __init__(self, parser_conf):
        self.analysis = GrammarAnalyzer(parser_conf.rules, parser_conf.start)
        self.start = parser_conf.start

        self.postprocess = {}
        self.predictions = {}
        for rule in self.analysis.rules:
            if rule.origin != '$root':  # XXX kinda ugly
                a = rule.alias
                self.postprocess[rule] = a if callable(a) else getattr(parser_conf.callback, a)
                self.predictions[rule.origin] = [(x.rule, x.index) for x in self.analysis.expand_rule(rule.origin)]

    def parse(self, stream):
        # Define parser functions
        def predict(symbol, i):
            # Prediction: seed column i with items for every rule that can derive symbol.
            assert not is_terminal(symbol), symbol
            return {Item(rule, index, i, []) for rule, index in self.predictions[symbol]}

        def complete(item, table):
            # Completion: a finished rule advances every item in its start column
            # that was waiting on this rule's origin.
            item.data = self.postprocess[item.rule](item.data)
            return {old_item.advance(item.data) for old_item in table[item.start]
                    if not old_item.is_complete and old_item.expect == item.rule.origin}

        def process_column(i, token):
            assert i == len(table) - 1
            cur_set = table[i]
            next_set = set()

            to_process = cur_set
            while to_process:
                new_items = set()
                for item in to_process:
                    if item.is_complete:
                        new_items |= complete(item, table)
                    else:
                        if is_terminal(item.expect):
                            # Scan: a matching terminal moves the advanced item
                            # into the next column.
                            match = item.expect[0](token) if callable(item.expect[0]) else item.expect[0] == token.type
                            if match:
                                next_set.add(item.advance(stream[i]))
                        else:
                            if item.ptr:  # part of an already predicted batch
                                new_items |= predict(item.expect, i)

                to_process = new_items - cur_set  # TODO: is this precaution necessary?
                cur_set |= to_process

            if not next_set and token.type != '$end':
                expect = filter(is_terminal, [x.expect for x in cur_set if not x.is_complete])
                raise UnexpectedToken(token, expect, stream, i)

            table.append(next_set)

        # Main loop starts
        table = [predict(self.start, 0)]

        for i, char in enumerate(stream):
            process_column(i, char)
        process_column(len(stream), EndToken())

        # Parse ended. Now build a parse tree
        solutions = [n.data for n in table[len(stream)]
                     if n.is_complete and n.rule.origin == self.start and n.start == 0]

        if not solutions:
            raise ParseError('Incomplete parse: Could not find a solution to input')

        return solutions
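
For context, the parse loop above is the classic Earley algorithm: each column of `table` holds a set of items (dotted rules), grown by prediction (expanding a nonterminal after the dot), scanning (matching a terminal against the current token), and completion (advancing the items that were waiting on a finished rule). Below is a minimal, self-contained sketch of that same loop on a toy grammar; the grammar format and every name in it (GRAMMAR, START, recognize) are illustrative assumptions, not part of this module's API.

# Illustrative sketch only -- independent of the module above.
GRAMMAR = {
    'sum': [['sum', '+', 'num'], ['num']],
    'num': [['1'], ['2']],
}
START = 'sum'

def is_term(sym):
    # Anything without a rule in GRAMMAR is treated as a terminal.
    return sym not in GRAMMAR

def recognize(tokens):
    # Each item is (head, expansion, dot_position, start_column).
    table = [set() for _ in range(len(tokens) + 1)]
    table[0] = {(START, tuple(exp), 0, 0) for exp in GRAMMAR[START]}

    for i in range(len(tokens) + 1):
        changed = True
        while changed:  # grow column i to a fixed point
            changed = False
            for head, exp, dot, start in list(table[i]):
                if dot == len(exp):
                    # Complete: advance items in the start column waiting on head.
                    for h2, e2, d2, s2 in list(table[start]):
                        if d2 < len(e2) and e2[d2] == head:
                            item = (h2, e2, d2 + 1, s2)
                            if item not in table[i]:
                                table[i].add(item)
                                changed = True
                elif not is_term(exp[dot]):
                    # Predict: seed column i with rules for the expected nonterminal.
                    for exp2 in GRAMMAR[exp[dot]]:
                        item = (exp[dot], tuple(exp2), 0, i)
                        if item not in table[i]:
                            table[i].add(item)
                            changed = True
                elif i < len(tokens) and exp[dot] == tokens[i]:
                    # Scan: a matching terminal moves the item to the next column.
                    table[i + 1].add((head, exp, dot + 1, start))

    # Accept if a start rule spans the whole input.
    return any(head == START and dot == len(exp) and start == 0
               for head, exp, dot, start in table[-1])

print(recognize(['1', '+', '2']))  # True
print(recognize(['1', '+']))       # False

Unlike the module above, this sketch only recognizes (returns True or False); the real Parser additionally builds result data by running each completed rule through its callback in self.postprocess.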