#!/usr/bin/python

"""
Parser generator utility. This script can generate a Python parser module
from a grammar description.

Invoke the script on a grammar specification file:

.. code::

    $ ./yacc.py test.x -o test_parser.py

Then use the generated parser by deriving a user class:

.. code::

    import test_parser
    class MyParser(test_parser.Parser):
        pass
    p = MyParser()
    p.parse()

Alternatively you can load the parser on the fly:

.. code::

    import yacc
    parser_mod = yacc.load_as_module('mygrammar.x')
    class MyParser(parser_mod.Parser):
        pass
    p = MyParser()
    p.parse()
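
The grammar file is split by '%%' into a header section and a rules
section. Header lines are copied verbatim into the generated module,
'%tokens' declares the terminal symbols, and each rule alternative may
carry a braced action in which $1, $2, ... refer to the values of the
matched symbols. A small example, with made-up names:

.. code::

    from mylexer import make_tokens
    %tokens NUM '+'
    %%
    expr: expr '+' NUM { return $1 + $3 }
        | NUM { return $1 };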
"""

import argparse
import re
import sys
import datetime
import types
import io
from pyyacc import Grammar, print_grammar


class XaccLexer:
    """ Lexer for the grammar specification format """
    def __init__(self):
        pass

    def feed(self, txt):
        # Create a regular expression for the lexing part:
        tok_spec = [
            ('ID', r'[A-Za-z][A-Za-z\d_]*'),
            ('STRING', r"'[^']*'"),
            ('BRACEDCODE', r"\{[^\}]*\}"),
            ('OTHER', r'[:;\|]'),
            ('SKIP', r'[ ]')
        ]
        tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
        gettok = re.compile(tok_re).match

        lines = txt.split('\n')

        def tokenize_line(line):
            """ Generator that splits up a line into tokens """
            mo = gettok(line)
            pos = 0
            while mo:
                typ = mo.lastgroup
                val = mo.group(typ)
                if typ == 'ID':
                    yield (typ, val)
                elif typ == 'STRING':
                    # Strip the quotes; quoted literals become plain IDs:
                    typ = 'ID'
                    yield (typ, val[1:-1])
                elif typ == 'OTHER':
                    typ = val
                    yield (typ, val)
                elif typ == 'BRACEDCODE':
                    yield (typ, val)
                elif typ == 'SKIP':
                    pass
                else:
                    raise NotImplementedError(str(typ))
                pos = mo.end()
                mo = gettok(line, pos)
            if len(line) != pos:
                raise ParseError('Lex fault at {}'.format(line))

        def tokenize():
            section = 0
            for line in lines:
                line = line.strip()
                if not line:
                    continue  # Skip empty lines
                if line == '%%':
                    section += 1
                    yield ('%%', '%%')
                    continue
                if section == 0:
                    if line.startswith('%tokens'):
                        yield ('%tokens', '%tokens')
                        yield from tokenize_line(line[7:])
                    else:
                        yield ('HEADER', line)
                elif section == 1:
                    yield from tokenize_line(line)
            yield ('eof', 'eof')
        self.tokens = tokenize()
        self.token = next(self.tokens)

    def next_token(self):
        t = self.token
        if t[0] != 'eof':
            self.token = next(self.tokens)
        return t
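
# A sketch of the token stream the lexer produces (token types as defined
# in tok_spec above; note that quoted literals come out as plain IDs):
#
#   lexer = XaccLexer()
#   lexer.feed("%tokens NUM '+'\n%%\nexpr: expr '+' NUM;")
#   lexer.next_token()   # ('%tokens', '%tokens')
#   lexer.next_token()   # ('ID', 'NUM')
#   lexer.next_token()   # ('ID', '+')
#   lexer.next_token()   # ('%%', '%%')
#   ...                  # ('ID', 'expr'), (':', ':'), ('ID', 'expr'),
#                        # ('ID', '+'), ('ID', 'NUM'), (';', ';'), ('eof', 'eof')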


class ParseError(Exception):
    pass


class XaccParser:
    """ Implements a recursive descent parser to parse grammar rules.
        We could have used a generated parser, but that would pose a
        chicken-and-egg problem.
    """
    def __init__(self, lexer):
        self.lexer = lexer

    @property
    def Peak(self):
        """ Sneak peek at the next token in line """
        return self.lexer.token[0]

    def next_token(self):
        """ Take the next token """
        return self.lexer.next_token()

    def consume(self, typ):
        """ Eat next token of type typ or raise an exception """
        if self.Peak == typ:
            return self.next_token()
        else:
            raise ParseError('Expected {}, but got {}'.format(typ, self.Peak))

    def has_consumed(self, typ):
        """ Consume typ if possible and return True if so """
        if self.Peak == typ:
            self.consume(typ)
            return True
        return False

    def parse_grammar(self):
        """ Entry point into the recursive descent parser """
        # Parse the header section:
        headers = []
        terminals = []
        while self.Peak in ['HEADER', '%tokens']:
            if self.Peak == '%tokens':
                self.consume('%tokens')
                while self.Peak == 'ID':
                    terminals.append(self.consume('ID')[1])
            else:
                headers.append(self.consume('HEADER')[1])
        self.consume('%%')
        self.headers = headers
        # Parse the rule section:
        self.grammar = Grammar(terminals)
        while self.Peak != 'eof':
            self.parse_rule()
        return self.grammar

    def parse_symbol(self):
        return self.consume('ID')[1]

    def parse_rhs(self):
        """ Parse the right hand side of a rule definition """
        symbols = []
        while self.Peak not in [';', 'BRACEDCODE', '|']:
            symbols.append(self.parse_symbol())
        if self.Peak == 'BRACEDCODE':
            action = self.consume('BRACEDCODE')[1]
            action = action[1:-1].strip()  # Strip the braces
        else:
            action = None
        return symbols, action

    def parse_rule(self):
        """ Parse a rule definition """
        p = self.parse_symbol()
        self.consume(':')
        symbols, action = self.parse_rhs()
        self.grammar.add_production(p, symbols, action)
        while self.has_consumed('|'):
            symbols, action = self.parse_rhs()
            self.grammar.add_production(p, symbols, action)
        self.consume(';')
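
# parse_rule/parse_rhs accept rule definitions of the following shape
# (rule and symbol names are illustrative); the optional braced action
# belongs to the alternative it follows:
#
#   expr: expr '+' term { return $1 + $3 }
#       | term { return $1 }
#       ;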


class XaccGenerator:
    """ Generator that writes a generated parser to file """
    def __init__(self):
        pass

    def generate(self, grammar, headers, output_file):
        self.output_file = output_file
        self.grammar = grammar
        self.headers = headers
        self.action_table, self.goto_table = grammar.doGenerate()
        self.generate_python_script()

    def print(self, *args):
        """ Print helper function that prints to the output file """
        print(*args, file=self.output_file)

    def generate_python_script(self):
        """ Generate a python script with the parser tables """
        self.print('#!/usr/bin/python')
        stamp = datetime.datetime.now().ctime()
        self.print('""" Automatically generated by xacc on {} """'.format(stamp))
        self.print('from pyyacc import LRParser, Reduce, Shift, Accept, Production, Grammar')
        self.print('from ppci import Token')
        self.print('')
        for h in self.headers:
            self.print(h)
        self.print('')
        self.print('class Parser(LRParser):')
        self.print('    def __init__(self):')
        # Generate rules:
        self.print('        self.start_symbol = "{}"'.format(self.grammar.start_symbol))
        self.print('        self.grammar = Grammar({})'.format(self.grammar.terminals))
        for rule_number, rule in enumerate(self.grammar.productions):
            rule.f_name = 'action_{}_{}'.format(rule.name, rule_number)
            self.print('        self.grammar.add_production("{}", {}, self.{})'.format(rule.name, rule.symbols, rule.f_name))
        # Fill action table:
        self.print('        self.action_table = {}')
        for state in self.action_table:
            action = self.action_table[state]
            self.print('        self.action_table[{}] = {}'.format(state, action))
        self.print('')

        # Fill goto table:
        self.print('        self.goto_table = {}')
        for gt in self.goto_table:
            to = self.goto_table[gt]
            self.print('        self.goto_table[{}] = {}'.format(gt, to))
        self.print('')

        # Generate a function for each action:
        for rule in self.grammar.productions:
            M = len(rule.symbols)
            args = ', '.join('arg{}'.format(n + 1) for n in range(M))
            self.print('    def {}(self, {}):'.format(rule.f_name, args))
            if rule.f is None:
                semantics = 'pass'
            else:
                semantics = str(rule.f)
                if semantics.strip() == '':
                    semantics = 'pass'
            # Substitute the $1..$n placeholders with the argument names:
            for n in range(M):
                semantics = semantics.replace('${}'.format(n + 1), 'arg{}'.format(n + 1))
            self.print('        {}'.format(semantics))
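
# For reference, the emitted module looks roughly like this (a sketch with
# made-up rule names; the real action and goto tables are written out in
# full):
#
#   #!/usr/bin/python
#   """ Automatically generated by xacc on <timestamp> """
#   from pyyacc import LRParser, Reduce, Shift, Accept, Production, Grammar
#   from ppci import Token
#
#   class Parser(LRParser):
#       def __init__(self):
#           self.start_symbol = "expr"
#           self.grammar = Grammar(['NUM', '+'])
#           self.grammar.add_production("expr", ['expr', '+', 'NUM'], self.action_expr_0)
#           self.action_table = {}
#           self.action_table[0] = {...}
#           self.goto_table = {}
#           self.goto_table[0] = {...}
#
#       def action_expr_0(self, arg1, arg2, arg3):
#           return arg1 + arg3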


def make_argument_parser():
    # Parse arguments:
    parser = argparse.ArgumentParser(description='xacc compiler compiler')
    parser.add_argument('source', type=argparse.FileType('r'),
                        help='the parser specification')
    parser.add_argument('-o', '--output', type=argparse.FileType('w'),
                        default=sys.stdout)
    return parser


def load_as_module(filename):
    """ Load a parser spec file, generate LR tables and create a module """
    ob = io.StringIO()
    args = argparse.Namespace(source=open(filename), output=ob)
    main(args)

    parser_mod = types.ModuleType('generated_parser')
    exec(ob.getvalue(), parser_mod.__dict__)
    return parser_mod


def main(args):
    src = args.source.read()
    args.source.close()

    # Construction of generator parts:
    lexer = XaccLexer()
    parser = XaccParser(lexer)
    generator = XaccGenerator()

    # Sequence source through the generator parts:
    lexer.feed(src)
    grammar = parser.parse_grammar()
    # TODO: make this optional: print_grammar(grammar)
    generator.generate(grammar, parser.headers, args.output)


if __name__ == '__main__':
    args = make_argument_parser().parse_args()
    main(args)