lcfOS Mercurial repository: python/ppci/c3/lexer.py @ changeset 393:6ae782a085e0
Changeset description: Added init program
| field | value |
|---|---|
| author | Windel Bouwman |
| date | Sat, 17 May 2014 21:17:40 +0200 |
| parents | 173e20a47fda |
| children | fb3c1f029b30 |
line source
import re
from ppci import CompilerError, SourceLocation, Token
from baselex import BaseLexer

""" Lexical analyzer part. Splits the input character stream into tokens. """

keywords = ['and', 'or', 'not', 'true', 'false',
            'else', 'if', 'while', 'for', 'return',
            'switch', 'case', 'default',
            'function', 'var', 'type', 'const',
            'struct', 'cast', 'sizeof',
            'import', 'module']


class Lexer:
    """ Generates a sequence of tokens from an input stream """
    def __init__(self, diag):
        self.diag = diag

    def lex(self, source):
        return self.tokenize(source)

    def tokenize(self, input_file):
        """ Tokenizer: generates an iterator that yields tokens.

            Input is a file-like object.

            This example was taken from the Python re documentation page.
        """
        filename = input_file.name if hasattr(input_file, 'name') else ''
        s = input_file.read()
        input_file.close()
        self.diag.addSource(filename, s)
        # Token specification. Order matters: re alternation takes the first
        # alternative that matches, so REAL and HEXNUMBER must precede NUMBER.
        # 'LEESTEKEN' is Dutch for punctuation mark (operators and separators).
        tok_spec = [
            ('REAL', r'\d+\.\d+'),
            ('HEXNUMBER', r'0x[\da-fA-F]+'),
            ('NUMBER', r'\d+'),
            ('ID', r'[A-Za-z][A-Za-z\d_]*'),
            ('NEWLINE', r'\n'),
            ('SKIP', r'[ \t]'),
            ('COMMENTS', r'//.*'),
            ('LONGCOMMENTBEGIN', r'\/\*'),
            ('LONGCOMMENTEND', r'\*\/'),
            ('LEESTEKEN', r'==|->|<<|>>|!=|\+\+|[\.,=:;\-+*\[\]/\(\)]|>=|<=|<>|>|<|{|}|&|\^|\|'),
            ('STRING', r'".*?"')
            ]
        # Combine the specification into one master regex with named groups.
        tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
        gettok = re.compile(tok_re).match
        line = 1
        pos = line_start = 0
        mo = gettok(s)
        incomment = False
        while mo is not None:
            typ = mo.lastgroup
            val = mo.group(typ)
            if typ == 'NEWLINE':
                line_start = pos
                line += 1
            elif typ == 'COMMENTS':
                pass
            elif typ == 'LONGCOMMENTBEGIN':
                incomment = True
            elif typ == 'LONGCOMMENTEND':
                incomment = False
            elif typ == 'SKIP':
                pass
            elif incomment:
                pass    # Wait until we are not in a comment section
            else:
                if typ == 'ID':
                    if val in keywords:
                        typ = val
                elif typ == 'LEESTEKEN':
                    typ = val
                elif typ == 'NUMBER':
                    val = int(val)
                elif typ == 'HEXNUMBER':
                    val = int(val[2:], 16)
                    typ = 'NUMBER'
                elif typ == 'REAL':
                    val = float(val)
                elif typ == 'STRING':
                    val = val[1:-1]
                loc = SourceLocation(filename, line, mo.start() - line_start,
                                     mo.end() - mo.start())
                yield Token(typ, val, loc)
            pos = mo.end()
            mo = gettok(s, pos)
        if pos != len(s):
            col = pos - line_start
            loc = SourceLocation(filename, line, col, 1)
            raise CompilerError('Unexpected: "{0}"'.format(s[pos]), loc)
        loc = SourceLocation(filename, line, 0, 0)
        yield Token('END', '', loc)
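
For reference, a minimal usage sketch of this lexer (not part of the file above). It assumes the module is importable as ppci.c3.lexer, based on the file's location in the repository, and it substitutes a hypothetical StubDiag object for the project's real diagnostics class, since the code above only ever calls diag.addSource(filename, source).

```python
import io
from ppci.c3.lexer import Lexer  # assumed import path, derived from the file location


class StubDiag:
    """Hypothetical stand-in for the diagnostics object; the lexer
    only calls addSource(filename, source) on it."""
    def addSource(self, filename, source):
        pass


# io.StringIO has no 'name' attribute, so the lexer records an empty filename.
source = io.StringIO('var int answer = 0x2A; // a line comment\n')
for token in Lexer(StubDiag()).tokenize(source):
    print(token)
```

On that input the generator should yield a 'var' keyword token, an ID token for int, an ID token for answer, punctuation tokens for '=' and ';', a NUMBER token with value 42 from the hex literal, and finally the END token; the // comment and whitespace are skipped.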