Mercurial > lcfOS
diff python/c3/lexer.py @ 148:e5263f74b287
Added c3 language frontend initial parser
author | Windel Bouwman |
---|---|
date | Fri, 01 Mar 2013 10:24:01 +0100 |
parents | |
children | 74241ca312cc |
line wrap: on
line diff
"""
Lexical analyzer part. Splits the input character stream into tokens.
"""

import collections
import re

from ppci.errors import CompilerException, SourceLocation

# Token as produced by the lexical analyzer: a type tag, the parsed
# value, and the SourceLocation where the lexeme was found.
Token = collections.namedtuple('Token', 'typ val loc')

# Reserved words. An ID whose text matches one of these is re-tagged
# with the keyword itself as its token type.
keywords = ['and', 'or', 'not', 'true', 'false',
            'else', 'if', 'while', 'return',
            'public', 'function', 'var', 'type',
            'import', 'package']


def tokenize(s):
    """
    Generator that splits the input string *s* into Tokens.

    Yields one Token per lexeme, skipping whitespace and '//' line
    comments, and finishes with a single 'END' token. Raises
    CompilerException on the first character that does not start any
    known token.

    This GREAT example was taken from the python re doc page!
    """
    tok_spec = [
        ('REAL', r'\d+\.\d+'),
        ('HEXNUMBER', r'0x[\da-fA-F]+'),   # must precede NUMBER
        ('NUMBER', r'\d+'),
        ('ID', r'[A-Za-z][A-Za-z\d_]*'),
        ('NEWLINE', r'\n'),
        ('SKIP', r'[ \t]'),
        ('COMMENTS', r'//.*'),
        ('LEESTEKEN', r'==|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<|{|}'),
        ('STRING', r"'.*?'")
    ]
    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
    gettok = re.compile(tok_re).match
    line = 1
    pos = line_start = 0
    mo = gettok(s)
    while mo is not None:
        typ = mo.lastgroup
        val = mo.group(typ)
        if typ == 'NEWLINE':
            # The next line starts at the character AFTER the '\n'.
            # Bug fix: the original used 'line_start = pos' (the
            # position of the newline itself), which made columns
            # 1-based on line >= 2 but 0-based on line 1. Columns are
            # now 0-based on every line.
            line_start = mo.end()
            line += 1
        elif typ == 'COMMENTS' or typ == 'SKIP':
            # Comments and whitespace produce no tokens.
            pass
        else:
            if typ == 'ID':
                if val in keywords:
                    # Keywords are their own token type.
                    typ = val
            elif typ == 'LEESTEKEN':
                # Punctuation tokens use the lexeme itself as type.
                typ = val
            elif typ == 'NUMBER':
                val = int(val)
            elif typ == 'HEXNUMBER':
                # Normalize hex literals to plain NUMBER tokens.
                val = int(val[2:], 16)
                typ = 'NUMBER'
            elif typ == 'REAL':
                val = float(val)
            elif typ == 'STRING':
                # Strip the surrounding single quotes.
                val = val[1:-1]
            loc = SourceLocation(line, mo.start() - line_start)
            yield Token(typ, val, loc)
        pos = mo.end()
        mo = gettok(s, pos)
    if pos != len(s):
        # Lexing stalled before the end of input: s[pos] starts no
        # known token. Bug fix: the original clobbered 'pos' with the
        # line number before indexing s[pos], so it reported the wrong
        # character (or crashed with IndexError). Report the actual
        # offending character; the second argument is the line number,
        # as originally intended.
        raise CompilerException(
            'Unexpected character {0}'.format(s[pos]), line)
    # Consistency fix: every other token carries a SourceLocation in
    # 'loc'; the original END token carried a bare int line number.
    yield Token('END', '', SourceLocation(line, pos - line_start))