changeset 159:5e1dd04cb61c

Added attempt to assembler
author Windel Bouwman
date Fri, 08 Mar 2013 17:16:22 +0100
parents 9683a4cd848f
children 10330be89bc2
files python/asm.py python/libasm.py
diffstat 2 files changed, 136 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/asm.py	Fri Mar 08 17:16:22 2013 +0100
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+
+# Assembler
+
+import sys, argparse
+import pdb
+
+import libasm
+
# Command-line driver: parse arguments, assemble the source file, print result.
parser = argparse.ArgumentParser(description="Assembler")
parser.add_argument('sourcefile', type=argparse.FileType('r'),
                    help='the source file to assemble')
args = parser.parse_args()
# (fix: removed leftover pdb.set_trace() that halted every invocation
#  in the debugger before argument parsing could finish)

a = libasm.Assembler()
# argparse's FileType already opened the file for us; read it all and close.
obj = a.assemble(args.sourcefile.read())
args.sourcefile.close()

print('object:', obj)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/python/libasm.py	Fri Mar 08 17:16:22 2013 +0100
@@ -0,0 +1,116 @@
+import collections, re
+
+# Different instruction sets:
class InstructionSet:
   """Base class marker for a target instruction set; targets subclass this."""
   pass
+
class X86(InstructionSet):
   """x86 instruction set (placeholder -- no instructions defined yet)."""
   pass
+
+# Generic assembler:
+
class SourceLocation:
   """A single position (offset) in the assembler source text."""
   def __init__(self, x):
      # x: the position value; stored as-is.
      self.pos = x
   def __repr__(self):
      # Added for debuggability; does not affect existing callers.
      return 'SourceLocation({0})'.format(self.pos)
+
class SourceRange:
   """A span between two source positions (p1 .. p2)."""
   def __init__(self, p1, p2):
      # p1, p2: start and end positions of the range.
      self.p1 = p1
      self.p2 = p2
   def __repr__(self):
      # Added for debuggability; does not affect existing callers.
      return 'SourceRange({0}, {1})'.format(self.p1, self.p2)
+
# Token is used in the lexical analyzer:
Token = collections.namedtuple('Token', 'typ val row col')

# Identifiers that are promoted to their own token type:
keywords = ['global', 'db']

def tokenize(s):
    """Generate Token tuples for assembler source string *s*.

    Based on the tokenizer example from the Python ``re`` documentation.
    NEWLINE and SKIP matches are consumed but not yielded; a final
    ('END', '', row, 0) token is always yielded.

    Raises on the first character that matches no token pattern.
    """
    # Order matters: REAL before NUMBER, HEXNUMBER before NUMBER, so the
    # longest/most specific pattern wins.
    tok_spec = [
        ('REAL', r'\d+\.\d+'),
        ('HEXNUMBER', r'0x[\da-fA-F]+'),
        ('NUMBER', r'\d+'),
        ('ID', r'[A-Za-z][A-Za-z\d_]*'),
        ('NEWLINE', r'\n'),
        ('SKIP', r'[ \t]'),
        ('LEESTEKEN', r':=|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<'),
        ('STRING', r"'.*?'")
    ]
    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
    # (fix: removed debug print(tok_re) that spammed stdout on every call)
    gettok = re.compile(tok_re).match
    line = 1
    pos = line_start = 0
    mo = gettok(s)
    while mo is not None:
        typ = mo.lastgroup
        val = mo.group(typ)
        if typ == 'NEWLINE':
            # fix: track the position *after* the newline so the first token
            # on every line gets col 0, consistent with line 1 (the original
            # used `pos`, pointing at the newline itself -- off by one).
            line_start = mo.end()
            line += 1
        elif typ != 'SKIP':
            # (removed dead 'COMMENTS' branch: no such group in tok_spec)
            if typ == 'ID':
                if val in keywords:
                    typ = val  # keywords become their own token type
            elif typ == 'LEESTEKEN':
                typ = val  # punctuation tokens are typed by their text
            elif typ == 'NUMBER':
                val = int(val)
            elif typ == 'HEXNUMBER':
                val = int(val[2:], 16)
                typ = 'NUMBER'  # hex numbers normalize to plain NUMBER
            elif typ == 'REAL':
                val = float(val)
            elif typ == 'STRING':
                val = val[1:-1]  # strip the surrounding quotes
            yield Token(typ, val, line, mo.start() - line_start)
        pos = mo.end()
        mo = gettok(s, pos)
    if pos != len(s):
        col = pos - line_start
        # NOTE(review): CompilerException is not defined in this file --
        # as written this raises NameError at runtime; presumably the class
        # lives elsewhere in the project.  Confirm before relying on it.
        raise CompilerException('Unexpected character {0}'.format(s[pos]), line, col)
    yield Token('END', '', line, 0)
+
class Lexer:
   """Pull-based token stream: wraps tokenize() and hands out one token at a time."""
   def __init__(self, src):
      self.tokens = tokenize(src)
      self.curTok = next(self.tokens)

   def eat(self):
      """Return the current token and advance to the next one."""
      consumed = self.curTok
      self.curTok = next(self.tokens)
      return consumed

   @property
   def Peak(self):
      """The current token, without consuming it ('Peak' sic -- name kept for callers)."""
      return self.curTok
+
class Parser:
   """Parses the token stream from a Lexer into instructions.

   NOTE(review): this class looks unfinished -- parse() only terminates via
   StopIteration escaping from eat(), parseLine() returns None, and
   eatComments is referenced but not defined on this class.  Confirm
   intended behavior before relying on it.
   """
   def __init__(self, lxr):
      # lxr: a Lexer providing eat() and the Peak property.
      self.lxr = lxr
   def parse(self):
      # Consume tokens line by line; loop ends only when the lexer's
      # underlying generator is exhausted and eat() raises StopIteration.
      t = self.lxr.eat()

      while True:
         ins = self.parseLine()
         print(ins)
         t = self.lxr.eat()
   def parseLine(self):
      # Parse one source line: a label, then an optional comment part.
      self.parseLabel()
      if self.lxr.Peak == ';':
         # NOTE(review): eatComments is not defined anywhere in this file;
         # reaching this branch raises AttributeError.
         self.eatComments()
   def parseLabel(self):
      # Consumes a single token; the result is currently discarded.
      i = self.lxr.eat()
+
class Assembler:
   """Front-end object: drives the Lexer and Parser over a source string."""
   def assemble(self, asmsrc):
      """Assemble *asmsrc* and return whatever the parser produced."""
      print('assembling', asmsrc)
      lexer = Lexer(asmsrc)
      return Parser(lexer).parse()
+