@@ -245,7 +245,7 @@ class Parser:
             if not next_set and not next_to_scan:
                 expect = {i.expect.name for i in to_scan}
-                raise UnexpectedToken(token, expect, considered_rules=set(to_scan), state=frozenset(i.expect for i in to_scan))
+                raise UnexpectedToken(token, expect, considered_rules=set(to_scan), state=frozenset(i.s for i in to_scan))

             return next_to_scan
@@ -303,7 +303,7 @@ class Parser:
         solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0]
         if not solutions:
             expected_terminals = [t.expect for t in to_scan]
-            raise UnexpectedEOF(expected_terminals, state=frozenset(i.expect for i in to_scan))
+            raise UnexpectedEOF(expected_terminals, state=frozenset(i.s for i in to_scan))

         if self.debug:
             from .earley_forest import ForestToPyDotVisitor
@@ -114,7 +114,7 @@ class Parser(BaseParser):
             if not next_set and not delayed_matches and not next_to_scan:
                 raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan},
-                                           set(to_scan), state=frozenset(i.expect for i in to_scan))
+                                           set(to_scan), state=frozenset(i.s for i in to_scan))

             return next_to_scan
@@ -2355,7 +2355,7 @@ def _make_parser_test(LEXER, PARSER):
                 return u.match_examples(p.parse, {
                     0: ['abe'],
                     1: ['ab'],
-                    2: ['cbc'],
+                    2: ['cbc', 'dbc'],
                 })
             assert False
@@ -2364,6 +2364,7 @@ def _make_parser_test(LEXER, PARSER):
             assert match_error("bbc") == 2
             assert match_error("cbc") == 2
             self.assertEqual( match_error("dbc"), 2 )
+            self.assertEqual( match_error("ebc"), 2 )

         @unittest.skipIf(not regex or sys.version_info[0] == 2, 'Unicode and Python 2 do not place nicely together.')