Mercurial > lcfOS
diff python/ks/lexer.py @ 146:91af0e40f868
Moved several files
author | Windel Bouwman |
---|---|
date | Fri, 22 Feb 2013 10:31:58 +0100 |
parents | python/ppci/frontends/ks/lexer.py@af0d7913677a |
children | 4fd075e8259c |
line wrap: on
line diff
"""
Lexical analyzer part. Splits the input character stream into tokens.
"""

import collections
import re

try:
    # Normal in-package import.
    from ...core.errors import CompilerException
except ImportError:
    # Fallback so the lexer can also be imported standalone (outside the
    # package), e.g. for testing.  Callers raise it as (message, row, col).
    class CompilerException(Exception):
        """Lexical error: args are (message, row, col)."""


# Token emitted by the lexical analyzer: token type, value, and 1-based
# source row plus 0-based column.
Token = collections.namedtuple('Token', 'typ val row col')

# Reserved words of the language.  An ID whose text is in this list is
# emitted with typ equal to the keyword itself (e.g. typ == 'while').
keywords = ['and', 'array', 'begin', 'by', 'case', 'const', 'div', 'do',
            'else', 'elsif', 'end', 'false', 'for', 'if', 'import', 'in',
            'is', 'mod', 'module', 'nil', 'not', 'of', 'or', 'pointer',
            'procedure', 'record', 'repeat', 'return', 'then', 'to', 'true',
            'type', 'until', 'var', 'while', 'asm']


def tokenize(s):
    """Generate Token tuples for the source string *s*.

    Yields one Token per lexeme; whitespace and '{...}' comments are
    discarded, keywords and punctuation get their own typ, numeric and
    string literals are converted to Python values, and a final
    Token('END', '', row, 0) terminates the stream.

    Raises CompilerException(message, row, col) on the first character
    that matches no token pattern.

    Based on the tokenizer example in the Python `re` documentation.
    """
    tok_spec = [
        ('REAL', r'\d+\.\d+'),
        ('HEXNUMBER', r'0x[\da-fA-F]+'),    # must precede NUMBER
        ('NUMBER', r'\d+'),
        ('ID', r'[A-Za-z][A-Za-z\d_]*'),
        ('NEWLINE', r'\n'),
        # NOTE(review): '\r' is not skipped, so CRLF input raises an
        # "Unexpected character" error — confirm input is LF-normalized.
        ('SKIP', r'[ \t]'),
        # Non-greedy: stop at the FIRST '}' (the greedy '{.*}' swallowed
        # '{a} x {b}' as one comment).  Multi-line comments are still not
        # supported because '.' does not match '\n' — TODO if needed.
        ('COMMENTS', r'{.*?}'),
        (':= first so it wins over ":"'.__class__ and 'LEESTEKEN',
         r':=|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<'),
        ('STRING', r"'.*?'"),
    ]
    tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
    gettok = re.compile(tok_re).match
    line = 1
    pos = line_start = 0
    mo = gettok(s)
    while mo is not None:
        typ = mo.lastgroup
        val = mo.group(typ)
        if typ == 'NEWLINE':
            # Start of the next line is just past the '\n'.  (Was
            # 'line_start = pos', the position of the '\n' itself, which
            # made columns 1-based after the first line but 0-based on it.)
            line_start = mo.end()
            line += 1
        elif typ == 'COMMENTS':
            pass                        # comments are discarded
        elif typ != 'SKIP':
            if typ == 'ID':
                if val in keywords:
                    typ = val           # keyword token: typ == its text
            elif typ == 'LEESTEKEN':
                typ = val               # punctuation token: typ == symbol
            elif typ == 'NUMBER':
                val = int(val)
            elif typ == 'HEXNUMBER':
                val = int(val[2:], 16)  # strip '0x', parse base 16
                typ = 'NUMBER'
            elif typ == 'REAL':
                val = float(val)
            elif typ == 'STRING':
                val = val[1:-1]         # strip the surrounding quotes
            yield Token(typ, val, line, mo.start() - line_start)
        pos = mo.end()
        mo = gettok(s, pos)
    if pos != len(s):
        # No pattern matched at 'pos': report the offending character.
        col = pos - line_start
        raise CompilerException(
            'Unexpected character {0}'.format(s[pos]), line, col)
    yield Token('END', '', line, 0)