  1. """This module implements an experimental Earley parser with a dynamic lexer
  2. The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
  3. https://www.sciencedirect.com/science/article/pii/S1571066108001497
  4. That is probably the best reference for understanding the algorithm here.
  5. The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
  6. is better documented here:
  7. http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/
  8. Instead of running a lexer beforehand, or using a costy char-by-char method, this parser
  9. uses regular expressions by necessity, achieving high-performance while maintaining all of
  10. Earley's power in parsing any CFG.
  11. """
# Author: Erez Shinan (2017)
# Email : erezshin@gmail.com

from collections import defaultdict, deque

from ..exceptions import ParseError, UnexpectedCharacters
from ..lexer import Token
from .grammar_analysis import GrammarAnalyzer
from ..grammar import NonTerminal, Terminal
from .earley import ApplyCallbacks, Parser as BaseParser
from .earley_common import Item, TransitiveItem
from .earley_forest import ForestToTreeVisitor, ForestToAmbiguousTreeVisitor, ForestSumVisitor, ForestToPyDotVisitor, SymbolNode


class Parser(BaseParser):
    def __init__(self, parser_conf, term_matcher, resolve_ambiguity=True, ignore=(), complete_lex=False):
        BaseParser.__init__(self, parser_conf, term_matcher, resolve_ambiguity)
        self.ignore = [Terminal(t) for t in ignore]
        self.complete_lex = complete_lex

    def _parse(self, stream, columns, to_scan, start_symbol=None):

        def scan(i, to_scan):
            """The core Earley Scanner.

            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""

            node_cache = {}

            # 1) Loop the expectations and ask the lexer to match.
            # Since regexp is forward looking on the input stream, and we only
            # want to process tokens when we hit the point in the stream at which
            # they complete, we push all tokens into a buffer (delayed_matches), to
            # be held possibly for a later parse step when we reach the point in the
            # input stream at which they complete.
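            # For example (assumed walk-through): if an item expects a terminal whose
            # regexp matches three characters starting at stream position i, the
            # resulting Token is buffered under delayed_matches[i+3] (the index just
            # past the match) and is only consumed once the main loop reaches there.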
            for item in set(to_scan):
                m = match(item.expect, stream, i)
                if m:
                    t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                    delayed_matches[m.end()].append( (item, i, t) )

                    if self.complete_lex:
                        s = m.group(0)
                        for j in range(1, len(s)):
                            m = match(item.expect, s[:-j])
                            if m:
                                t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                                delayed_matches[i+m.end()].append( (item, i, t) )

                    # Remove any items that successfully matched in this pass from the to_scan buffer.
                    # This ensures we don't carry over tokens that already matched, if we're ignoring below.
                    to_scan.remove(item)

            # 3) Process any ignores. This is typically used for e.g. whitespace.
            # We carry over any unmatched items from the to_scan buffer to be matched again after
            # the ignore. This should allow us to use ignored symbols in non-terminals to implement
            # e.g. mandatory spacing.
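            # For example (assumed): with an ignored whitespace terminal, a run of
            # spaces matched at position i re-queues every still-pending item at the
            # index just past the whitespace, so scanning resumes there as if the
            # ignored characters were not present.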
            for x in self.ignore:
                m = match(x, stream, i)
                if m:
                    # Carry over any items still in the scan buffer, to past the end of the ignored items.
                    delayed_matches[m.end()].extend([(item, i, None) for item in to_scan])

                    # If we're ignoring up to the end of the file, carry over the start symbol if it already completed.
                    delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol])

            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            transitives.append({})

            ## 4) Process Tokens from delayed_matches.
            # This is the core of the Earley scanner. Create an SPPF node for each Token,
            # and create the symbol node in the SPPF tree. Advance the item that completed,
            # and add the resulting new item to either the Earley set (for processing by the
            # completer/predictor) or the to_scan buffer for the next parse step.
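            # node_cache is keyed by (symbol, start, end), so items completed in this
            # step over the same symbol and span share a single SymbolNode -- the
            # sharing that makes the output a "shared packed" parse forest (SPPF).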
            for item, start, token in delayed_matches[i+1]:
                if token is not None:
                    token.end_line = text_line
                    token.end_column = text_column + 1

                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token)
                else:
                    new_item = item

                if new_item.expect in self.TERMINALS:
                    # add (B ::= Aai+1.B, h, y) to Q'
                    next_to_scan.add(new_item)
                else:
                    # add (B ::= Aa+1.B, h, y) to Ei+1
                    next_set.add(new_item)

            del delayed_matches[i+1]    # No longer needed, so unburden memory

            if not next_set and not delayed_matches and not next_to_scan:
                raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect for item in to_scan}, set(to_scan))

            return next_to_scan

        delayed_matches = defaultdict(list)
        match = self.term_matcher

        # Cache for nodes & tokens created in a particular parse step.
        transitives = [{}]

        text_line = 1
        text_column = 1

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
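        # Because the lexer is dynamic, the loop advances one character at a time:
        # each iteration runs prediction/completion for position i, then scan()
        # consumes whatever buffered matches end at i+1 and builds the next column.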
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            to_scan = scan(i, to_scan)

            if token == '\n':
                text_line += 1
                text_column = 1
            else:
                text_column += 1
            i += 1

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse.
        assert i == len(columns)-1