"""
Lexical analyzer part. Splits the input character stream into tokens.
"""

import collections
import re

from .errors import CompilerException


# Token is used in the lexical analyzer:
Token = collections.namedtuple('Token', 'typ val row col')
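# For example, lexing the literal 42 at row 3, column 7 would yield
# Token(typ='NUMBER', val=42, row=3, col=7) (illustrative values only).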

# Reserved words; an ID token whose text is one of these is re-tagged with
# the keyword itself as its token type.
keywords = ['and', 'array', 'begin', 'by', 'case', 'const', 'div', 'do',
            'else', 'elsif', 'end', 'false', 'for', 'if', 'import', 'in', 'is',
            'mod', 'module', 'nil', 'not', 'of', 'or', 'pointer', 'procedure',
            'record', 'repeat', 'return', 'then', 'to', 'true', 'type', 'until',
            'var', 'while', 'asm']


def tokenize(s):
    """
    Tokenizer: generates an iterator that yields the tokens of s.

    Based on the tokenizer example in the Python re module documentation.
    """
    tok_spec = [
        ('REAL', r'\d+\.\d+'),
        ('HEXNUMBER', r'0x[\da-fA-F]+'),
        ('NUMBER', r'\d+'),
        ('ID', r'[A-Za-z][A-Za-z\d_]*'),
        ('NEWLINE', r'\n'),
        ('SKIP', r'[ \t]'),
        ('COMMENTS', r'{.*}'),
        # 'leesteken' is Dutch for punctuation mark: operators and delimiters.
        ('LEESTEKEN', r':=|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<'),
        ('STRING', r"'.*?'")
    ]
    # Combine all token patterns into one master regex with named groups.
    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
    gettok = re.compile(tok_re).match
    line = 1
    pos = line_start = 0
    mo = gettok(s)
    while mo is not None:
        typ = mo.lastgroup
        val = mo.group(typ)
        if typ == 'NEWLINE':
            # Track line starts so that column numbers can be computed.
            line_start = pos
            line += 1
        elif typ == 'COMMENTS':
            pass
        elif typ != 'SKIP':
            if typ == 'ID':
                # Identifiers that are reserved words get the keyword as type.
                if val in keywords:
                    typ = val
            elif typ == 'LEESTEKEN':
                typ = val
            elif typ == 'NUMBER':
                val = int(val)
            elif typ == 'HEXNUMBER':
                # Hexadecimal literals are converted and re-tagged as NUMBER.
                val = int(val[2:], 16)
                typ = 'NUMBER'
            elif typ == 'REAL':
                val = float(val)
            elif typ == 'STRING':
                # Strip the surrounding quotes from string literals.
                val = val[1:-1]
            yield Token(typ, val, line, mo.start() - line_start)
        pos = mo.end()
        mo = gettok(s, pos)
    if pos != len(s):
        col = pos - line_start
        raise CompilerException('Unexpected character {0}'.format(s[pos]), line, col)
    yield Token('END', '', line, 0)
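
# A minimal usage sketch: feed a small, hypothetical source fragment through
# tokenize() and print the resulting tokens. Because of the relative import
# above, run it as part of its package (e.g. "python -m <package>.lexer")
# rather than as a standalone script.
if __name__ == '__main__':
    sample = "var x: integer;\nx := 0x1F + 2.5; { a comment }\n"
    for tok in tokenize(sample):
        print(tok)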