191
|
1 import re
|
|
2 import pyyacc
|
|
3 from ppci import Token, CompilerError, SourceLocation
|
159
|
4
|
|
5 # Different instruction sets:
|
|
class InstructionSet:
    """Marker base class for target instruction sets."""
    pass
|
|
8
|
|
class X86(InstructionSet):
    """The x86 instruction set (placeholder, no encodings yet)."""
    pass
|
|
11
|
|
12 # Generic assembler:
|
|
# Reserved words: an ID matching one of these is re-typed to the word
# itself inside tokenize().
keywords = ['global', 'db']
|
|
14
|
|
def tokenize(s):
    """
    Tokenizer, generates an iterator that
    returns tokens!

    Keywords and punctuation are yielded with their own text as the token
    type; NUMBER/HEXNUMBER/REAL literals are converted to int/float values;
    STRING tokens are yielded with the surrounding quotes stripped.

    Raises CompilerError when an unrecognized character is encountered.

    This GREAT example was taken from python re doc page!
    """
    tok_spec = [
        ('REAL', r'\d+\.\d+'),
        ('HEXNUMBER', r'0x[\da-fA-F]+'),
        ('NUMBER', r'\d+'),
        ('ID', r'[A-Za-z][A-Za-z\d_]*'),
        # Fix: the loop below handles a 'NEWLINE' token, but no such pattern
        # existed, so the branch was dead and any '\n' in the input raised
        # "Unexpected character". Single-line input behaves as before.
        ('NEWLINE', r'\n'),
        ('SKIP', r'[ \t]'),
        ('LEESTEKEN', r':=|[\.,=:\-+*\[\]/\(\)]|>=|<=|<>|>|<'),
        ('STRING', r"'.*?'")
    ]
    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
    gettok = re.compile(tok_re).match
    line = 1
    pos = line_start = 0
    mo = gettok(s)
    while mo is not None:
        typ = mo.lastgroup
        val = mo.group(typ)
        if typ == 'NEWLINE':
            # Columns restart at 0 after the newline character.
            line_start = mo.end()
            line += 1
        elif typ == 'COMMENTS':
            # NOTE(review): no COMMENTS pattern exists in tok_spec, so this
            # branch is currently unreachable — kept for a future comment rule.
            pass
        elif typ != 'SKIP':
            if typ == 'ID':
                if val in keywords:
                    typ = val
            elif typ == 'LEESTEKEN':
                typ = val
            elif typ == 'NUMBER':
                val = int(val)
            elif typ == 'HEXNUMBER':
                # Normalize hex literals to plain NUMBER tokens.
                val = int(val[2:], 16)
                typ = 'NUMBER'
            elif typ == 'REAL':
                val = float(val)
            elif typ == 'STRING':
                # Strip the enclosing single quotes.
                val = val[1:-1]
            col = mo.start() - line_start
            loc = SourceLocation(line, col, 0)   # TODO retrieve length?
            yield Token(typ, val, loc)
        pos = mo.end()
        mo = gettok(s, pos)
    if pos != len(s):
        # Nothing matched at pos: report the offending character.
        col = pos - line_start
        loc = SourceLocation(line, col, 0)
        raise CompilerError('Unexpected character {0}'.format(s[pos]), loc)
|
159
|
68
|
|
class Lexer:
    """Wraps the tokenize() generator with one-token lookahead."""

    def __init__(self, src):
        # Prime the stream so Peak is valid immediately.
        self.tokens = tokenize(src)
        self.curTok = next(self.tokens)

    def eat(self):
        """Consume the current token, advance, and return the consumed one."""
        consumed = self.curTok
        self.curTok = next(self.tokens)
        return consumed

    @property
    def Peak(self):
        """The current, not-yet-consumed token."""
        return self.curTok
|
|
80
|
191
|
class Assembler:
    """Generic assembler: builds a pyyacc parser for one assembly line."""

    def __init__(self):
        # Construct a parser given a grammar:
        g = pyyacc.Grammar(['ID', 'NUMBER', ',', '[', ']', ':', '+', '-', pyyacc.EPS])

        g.add_production('asmline', ['label', 'instruction', 'operands'])
        g.add_production('label', ['ID', ':'])
        g.add_production('label', [pyyacc.EPS])  # label is optional
        g.add_production('instruction', ['ID'])
        g.add_production('operands', ['operand'])
        g.add_production('operands', ['operands', ',', 'operand'])
        g.add_production('operand', ['expression'])
        g.add_production('operand', ['[', 'expression', ']'])
        g.add_production('expression', ['term'])
        g.add_production('expression', ['expression', 'addop', 'term'])
        g.add_production('addop', ['-'])
        g.add_production('addop', ['+'])
        g.add_production('term', ['factor'])
        g.add_production('factor', ['ID'])
        g.add_production('factor', ['NUMBER'])
        # TODO: expand grammar
        g.start_symbol = 'asmline'

        self.p = g.genParser()

    def assemble(self, asmsrc):
        """
        Assemble a multi-line source text line by line.

        Fix: the previous body called an undefined name ``Parser`` and
        always raised NameError; it now reuses assembleLine per line.
        Blank lines are skipped (the grammar requires an instruction ID).
        """
        instructions = []
        for srcline in asmsrc.split('\n'):
            if srcline.strip():
                instructions.append(self.assembleLine(srcline))
        return instructions

    def assembleLine(self, line):
        """
        Assemble a single source line.
        Do not take newlines into account.
        Returns the parser's result (previously discarded).
        """
        tokens = tokenize(line)
        return self.p.parse(tokens)

    def assembleAst(self, at):
        """ Assemble a parsed asm line """
        pass
|
|
123
|
|
124
|