This repo contains code to mirror other repos. It also contains the code that is getting mirrored.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

205 lines
7.7 KiB

  1. from .utils import STRING_TYPE, logger
  2. ###{standalone
  3. class LarkError(Exception):
  4. pass
  5. class ConfigurationError(LarkError, ValueError):
  6. pass
  7. class GrammarError(LarkError):
  8. pass
  9. class GrammarError_Value(LarkError):
  10. pass
  11. class ParseError(LarkError):
  12. pass
  13. class LexError(LarkError):
  14. pass
  15. class UnexpectedInput(LarkError):
  16. """UnexpectedInput Error.
  17. Used as a base class for the following exceptions:
  18. - ``UnexpectedToken``: The parser received an unexpected token
  19. - ``UnexpectedCharacters``: The lexer encountered an unexpected string
  20. After catching one of these exceptions, you may call the following helper methods to create a nicer error message.
  21. """
  22. pos_in_stream = None
  23. def get_context(self, text, span=40):
  24. """Returns a pretty string pinpointing the error in the text,
  25. with span amount of context characters around it.
  26. Note:
  27. The parser doesn't hold a copy of the text it has to parse,
  28. so you have to provide it again
  29. """
  30. assert self.pos_in_stream is not None, self
  31. pos = self.pos_in_stream
  32. start = max(pos - span, 0)
  33. end = pos + span
  34. if not isinstance(text, bytes):
  35. before = text[start:pos].rsplit('\n', 1)[-1]
  36. after = text[pos:end].split('\n', 1)[0]
  37. return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n'
  38. else:
  39. before = text[start:pos].rsplit(b'\n', 1)[-1]
  40. after = text[pos:end].split(b'\n', 1)[0]
  41. return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace")
  42. def match_examples(self, parse_fn, examples, token_type_match_fallback=False, use_accepts=False):
  43. """Allows you to detect what's wrong in the input text by matching
  44. against example errors.
  45. Given a parser instance and a dictionary mapping some label with
  46. some malformed syntax examples, it'll return the label for the
  47. example that bests matches the current error. The function will
  48. iterate the dictionary until it finds a matching error, and
  49. return the corresponding value.
  50. For an example usage, see `examples/error_reporting_lalr.py`
  51. Parameters:
  52. parse_fn: parse function (usually ``lark_instance.parse``)
  53. examples: dictionary of ``{'example_string': value}``.
  54. use_accepts: Recommended to call this with ``use_accepts=True``.
  55. The default is ``False`` for backwards compatibility.
  56. """
  57. assert self.state is not None, "Not supported for this exception"
  58. if isinstance(examples, dict):
  59. examples = examples.items()
  60. candidate = (None, False)
  61. for i, (label, example) in enumerate(examples):
  62. assert not isinstance(example, STRING_TYPE)
  63. for j, malformed in enumerate(example):
  64. try:
  65. parse_fn(malformed)
  66. except UnexpectedInput as ut:
  67. if ut.state == self.state:
  68. if use_accepts and hasattr(self, 'accepts') and ut.accepts != self.accepts:
  69. logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" %
  70. (self.state, self.accepts, ut.accepts, i, j))
  71. continue
  72. try:
  73. if ut.token == self.token: # Try exact match first
  74. logger.debug("Exact Match at example [%s][%s]" % (i, j))
  75. return label
  76. if token_type_match_fallback:
  77. # Fallback to token types match
  78. if (ut.token.type == self.token.type) and not candidate[-1]:
  79. logger.debug("Token Type Fallback at example [%s][%s]" % (i, j))
  80. candidate = label, True
  81. except AttributeError:
  82. pass
  83. if candidate[0] is None:
  84. logger.debug("Same State match at example [%s][%s]" % (i, j))
  85. candidate = label, False
  86. return candidate[0]
  87. class UnexpectedEOF(ParseError, UnexpectedInput):
  88. def __init__(self, expected, state=None):
  89. self.expected = expected
  90. self.state = state
  91. from .lexer import Token
  92. self.token = Token("<EOF>", "") #, line=-1, column=-1, pos_in_stream=-1)
  93. self.pos_in_stream = -1
  94. self.line = -1
  95. self.column = -1
  96. message = ("Unexpected end-of-input. Expected one of: \n\t* %s\n" % '\n\t* '.join(x.name for x in self.expected))
  97. super(UnexpectedEOF, self).__init__(message)
  98. class UnexpectedCharacters(LexError, UnexpectedInput):
  99. def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None):
  100. # TODO considered_tokens and allowed can be figured out using state
  101. self.line = line
  102. self.column = column
  103. self.pos_in_stream = lex_pos
  104. self.state = state
  105. self.allowed = allowed
  106. self.considered_tokens = considered_tokens
  107. if isinstance(seq, bytes):
  108. _s = seq[lex_pos:lex_pos+1].decode("ascii", "backslashreplace")
  109. else:
  110. _s = seq[lex_pos]
  111. message = "No terminal defined for %r at line %d col %d" % (_s, line, column)
  112. message += '\n\n' + self.get_context(seq)
  113. if allowed:
  114. message += '\nExpecting: %s\n' % allowed
  115. if token_history:
  116. message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in token_history)
  117. super(UnexpectedCharacters, self).__init__(message)
  118. class UnexpectedToken(ParseError, UnexpectedInput):
  119. """When the parser throws UnexpectedToken, it instantiates a puppet
  120. with its internal state. Users can then interactively set the puppet to
  121. the desired puppet state, and resume regular parsing.
  122. see: :ref:`ParserPuppet`.
  123. """
  124. def __init__(self, token, expected, considered_rules=None, state=None, puppet=None, token_history=None):
  125. # TODO considered_rules and expected can be figured out using state
  126. self.line = getattr(token, 'line', '?')
  127. self.column = getattr(token, 'column', '?')
  128. self.pos_in_stream = getattr(token, 'pos_in_stream', None)
  129. self.state = state
  130. self.token = token
  131. self.expected = expected # XXX deprecate? `accepts` is better
  132. self.considered_rules = considered_rules
  133. self.puppet = puppet
  134. self.token_history = token_history
  135. # TODO Only calculate `accepts()` when we need to display it to the user
  136. # This will improve performance when doing automatic error handling
  137. self.accepts = puppet and puppet.accepts()
  138. message = ("Unexpected token %r at line %s, column %s.\n"
  139. "Expected one of: \n\t* %s\n"
  140. % (token, self.line, self.column, '\n\t* '.join(self.accepts or self.expected)))
  141. if self.token_history:
  142. message += "Previous tokens: %r\n" % token_history
  143. super(UnexpectedToken, self).__init__(message)
  144. class VisitError(LarkError):
  145. """VisitError is raised when visitors are interrupted by an exception
  146. It provides the following attributes for inspection:
  147. - obj: the tree node or token it was processing when the exception was raised
  148. - orig_exc: the exception that cause it to fail
  149. """
  150. def __init__(self, rule, obj, orig_exc):
  151. self.obj = obj
  152. self.orig_exc = orig_exc
  153. message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc)
  154. super(VisitError, self).__init__(message)
  155. ###}