# HG changeset patch
# User Windel Bouwman
# Date 1398613239 -7200
# Node ID 173e20a47fda0f5790fdd9f82b71da65798e1eb5
# Parent  0c44e494ef58599aab1eccf288f9295a035782f2
Added linker description loader

diff -r 0c44e494ef58 -r 173e20a47fda examples/c3/stm32f4.mmap
--- a/examples/c3/stm32f4.mmap	Sun Apr 27 12:24:21 2014 +0200
+++ b/examples/c3/stm32f4.mmap	Sun Apr 27 17:40:39 2014 +0200
@@ -1,7 +1,9 @@
-{
-  "code": "0x08000000",
-  "data": "0x20000000"
+MEMORY flash LOCATION=0x08000000 SIZE=0x10000 {
+    SECTION(code)
 }

+MEMORY ram LOCATION=0x20000000 SIZE=0x10000 {
+    SECTION(data)
+}


diff -r 0c44e494ef58 -r 173e20a47fda examples/qemu_a9_hello/qemu.mmap
--- a/examples/qemu_a9_hello/qemu.mmap	Sun Apr 27 12:24:21 2014 +0200
+++ b/examples/qemu_a9_hello/qemu.mmap	Sun Apr 27 17:40:39 2014 +0200
@@ -1,5 +1,9 @@
-{
-  "code": "0x60010000",
-  "data": "0x60020000"
+MEMORY flash LOCATION=0x60010000 SIZE=0x10000 {
+    SECTION(code)
 }
+
+MEMORY ram LOCATION=0x60020000 SIZE=0x10000 {
+    SECTION(data)
+}
+

diff -r 0c44e494ef58 -r 173e20a47fda kernel/arch/qemu_vexpress/vexpressA9.mmap
--- a/kernel/arch/qemu_vexpress/vexpressA9.mmap	Sun Apr 27 12:24:21 2014 +0200
+++ b/kernel/arch/qemu_vexpress/vexpressA9.mmap	Sun Apr 27 17:40:39 2014 +0200
@@ -1,7 +1,10 @@
-{
-  "code": "0x10000",
-  "mem_tables": "0x60000",
-  "data": "0x20000"
+MEMORY image LOCATION=0x10000 SIZE=0x10000 {
+    SECTION(reset)
+    SECTION(code)
 }

+MEMORY ram LOCATION=0x20000 SIZE=0x10000 {
+    SECTION(data)
+}
+

diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/__init__.py
--- a/python/ppci/__init__.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/__init__.py	Sun Apr 27 17:40:39 2014 +0200
@@ -17,3 +17,11 @@

 def same_dir(full_path, filename):
     return os.path.join(os.path.dirname(os.path.abspath(full_path)), filename)
+
+
+def make_num(txt):
+    if txt.startswith('0x'):
+        return int(txt[2:], 16)
+    else:
+        return int(txt)
+
diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/buildfunctions.py
--- a/python/ppci/buildfunctions.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/buildfunctions.py	Sun Apr 27 17:40:39 2014 +0200
@@ -12,11 +12,12 @@
 from .codegen import CodeGenerator
 from .transform import CleanPass, RemoveAddZero
 from .linker import Linker
+from .layout import Layout, load_layout
 from .target.target_list import targets
 from .outstream import BinaryAndLoggingStream
 from .objectfile import ObjectFile, load_object
 from . import DiagnosticsManager, CompilerError
-
+from .tasks import TaskError

 def fix_target(tg):
     """ Try to return an instance of the Target class """
@@ -49,6 +50,19 @@
     raise TaskError('Cannot use {} as objectfile'.format(o))


+def fix_layout(l):
+    if isinstance(l, Layout):
+        return l
+    elif hasattr(l, 'read'):
+        # Assume file handle
+        return load_layout(l)
+    elif isinstance(l, str):
+        with open(l, 'r') as f:
+            return load_layout(f)
+    else:
+        raise TaskError('Cannot use {} as layout'.format(l))
+
+
 def assemble(source, target):
     """ Invoke the assembler on the given source, returns an object containing
         the output. """
@@ -111,6 +125,7 @@
 def link(objects, layout):
     """ Links the iterable of objects into one using the given layout """
     objects = list(map(fix_object, objects))
+    layout = fix_layout(layout)
     linker = Linker()
     output_obj = linker.link(objects, layout)
     return output_obj
diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/buildtasks.py
--- a/python/ppci/buildtasks.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/buildtasks.py	Sun Apr 27 17:40:39 2014 +0200
@@ -5,7 +5,6 @@
 """

 import logging
-import json

 from .tasks import Task, TaskError, register_task
 from .buildfunctions import c3compile, link, assemble
@@ -78,31 +77,11 @@
             output.save(f)


-def make_num(txt):
-    if txt.startswith('0x'):
-        return int(txt[2:], 16)
-    else:
-        return int(txt)
-
-
-def load_layout(filename):
-    """ Load a linker layout file which contains directives where sections
-        must be placed into memory. """
-    try:
-        with open(filename, 'r') as f:
-            layout = json.load(f)
-    except OSError as e:
-        raise TaskError(str(e))
-    for s in layout:
-        layout[s] = make_num(layout[s])
-    return layout
-
-
 @register_task("link")
 class LinkTask(Task):
     """ Link together a collection of object files """
     def run(self):
-        layout = load_layout(self.relpath(self.get_argument('layout')))
+        layout = self.relpath(self.get_argument('layout'))
         objects = self.open_file_set(self.get_argument('objects'))
         output_file = self.relpath(self.get_argument('output'))
diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/c3/lexer.py
--- a/python/ppci/c3/lexer.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/c3/lexer.py	Sun Apr 27 17:40:39 2014 +0200
@@ -1,6 +1,6 @@
 import re
 from ppci import CompilerError, SourceLocation, Token
-import baselex
+from baselex import BaseLexer

 """
  Lexical analyzer part. Splits the input character stream into tokens.
diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/layout.py
--- a/python/ppci/layout.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/layout.py	Sun Apr 27 17:40:39 2014 +0200
@@ -1,58 +1,131 @@
+from baselex import BaseLexer
+import pyyacc
+from . import make_num
+
 class Layout:
     def __init__(self):
         self.mems = []

+    def add_memory(self, memory):
+        self.mems.append(memory)
+
     def __eq__(self, other):
         return self.mems == other.mems

+    def __repr__(self):
+        return str(self.mems)
+

 class Memory:
-    def __init__(self, address=0x0):
+    def __init__(self, name):
         self.inputs = []
-        self.address = address
+        self.name = name
+        self.location = 0x0
         self.size = 0x0

     def add_input(self, inp):
         assert isinstance(inp, Input)
         self.inputs.append(inp)

+    def __repr__(self):
+        return 'MEM {} loc={:08X} size={:08X}'.format(self.name, self.location, self.size) + str(self.inputs)
+
+    def __eq__(self, other):
+        return str(self) == str(other)


 class Input:
     pass


-class SectionInput(Input):
+class Section(Input):
     def __init__(self, section_name):
         self.section_name = section_name

+    def __repr__(self):
+        return 'Section({})'.format(self.section_name)

-def load_layout(f):
-    return deserialize(json.load(f))
+
+class Align(Input):
+    def __init__(self, alignment):
+        self.alignment = alignment
+
+    def __repr__(self):
+        return 'Align({})'.format(self.alignment)


-def make_int(txt):
-    if txt.startswith('0x'):
-        return int(txt[2:], 16)
-    else:
-        return int(txt)
+class LayoutLexer(BaseLexer):
+    def __init__(self):
+        tok_spec = [
+            ('HEXNUMBER', r'0x[\da-fA-F]+', self.handle_number),
+            ('NUMBER', r'\d+', self.handle_number),
+            ('ID', r'[A-Za-z][A-Za-z\d_]*', self.handle_id),
+            ('SKIP', r'[ \t\r\n]', None),
+            ('LEESTEKEN', r':=|[\.,=:\-+*\[\]/\(\)]|>=|<=|<>|>|<|}|{', lambda typ, val: (val, val)),
+            ('STRING', r"'.*?'", lambda typ, val: (typ, val[1:-1])),
+        ]
+        super().__init__(tok_spec)
+        self.kws = ['MEMORY', 'ALIGN', 'LOCATION','SECTION','SIZE']
+
+    def handle_id(self, typ, val):
+        if val in self.kws:
+            typ = val
+        return typ, val
+
+    def handle_number(self, typ, val):
+        val = make_num(val)
+        typ = 'NUMBER'
+        return typ, val


 class LayoutParser:
-    def __init__(self):
-        toks = ['ID', '{', '}', 'MEMORY', 'ALIGN', '.', pyyacc.EPS, pyyacc.EOF]
+    def __init__(self, kws):
+        toks = ['ID', 'NUMBER', '{', '}', '.', ':', '=', '(', ')', pyyacc.EPS, pyyacc.EOF] + kws
         g = pyyacc.Grammar(toks)
-        g.add_production('layout', ['MEMORY', '{', 'input_list', '}'])
-        g.add_production('input_list', ['MEMORY', '{', 'input_list', '}'])
+        g.add_production('layout', ['mem_list'])
+        g.add_one_or_more('mem', 'mem_list')
+        g.add_production('mem', ['MEMORY', 'ID', 'LOCATION', '=', 'NUMBER', 'SIZE', '=', 'NUMBER', '{', 'input_list', '}'], self.handle_mem)
+        g.add_one_or_more('input', 'input_list')
+        g.add_production('input', ['ALIGN', '(', 'NUMBER', ')'], self.handle_align)
+        g.add_production('input', ['SECTION', '(', 'ID', ')'], self.handle_section)
+
+        g.start_symbol = 'layout'
+        self.p = g.generate_parser()
+
+    def parse(self, lexer, layout):
+        self.layout = layout
+        self.p.parse(lexer)
+
+    def handle_mem(self, mem_tag, mem_name, loc_tag, eq1, loc, size_tag, eq2, size, lbrace, inps, rbrace):
+        m = Memory(mem_name.val)
+        m.size = size.val
+        m.location = loc.val
+        for inp in inps:
+            m.add_input(inp)
+        self.layout.add_memory(m)
+
+    def handle_align(self, align_tag, lbrace, alignment, rbrace):
+        return Align(alignment.val)
+
+    def handle_section(self, section_tag, lbrace, section_name, rbrace):
+        return Section(section_name.val)


-def deserialize(d):
-    layout = Layout()
-    for mem_node in d['memories']:
-        m = Memory()
-        m.address = make_int(mem_node['address'])
-        m.size = make_int(mem_node['size'])
-        for input_node in mem_node['inputs']:
-            pass
-    return layout
+class LayoutLoader:
+    def __init__(self):
+        self.lexer = LayoutLexer()
+        self.parser = LayoutParser(self.lexer.kws)
+    def load_layout(self, f):
+        layout = Layout()
+        self.lexer.feed(f.read())    # TODO: perhaps the read is better in the lexer?
+        self.parser.parse(self.lexer, layout)
+        return layout
+
+# Single definition:
+_lloader = LayoutLoader()
+
+
+def load_layout(f):
+    return _lloader.load_layout(f)
+
diff -r 0c44e494ef58 -r 173e20a47fda python/ppci/linker.py
--- a/python/ppci/linker.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/ppci/linker.py	Sun Apr 27 17:40:39 2014 +0200
@@ -3,6 +3,8 @@
 from .objectfile import ObjectFile
 from . import CompilerError
 from .bitfun import encode_imm32
+from .layout import Layout, Section
+

 def align(x, m):
     while ((x % m) != 0):
@@ -138,12 +140,15 @@
     def link(self, objs, layout):
         assert type(objs) is list
+        assert type(layout) is Layout

         # Create new object file to store output:
         self.dst = ObjectFile()

         # Create sections with address:
-        for section_name, address in layout.items():
-            self.dst.get_section(section_name).address = address
+        for mem in layout.mems:
+            for inp in mem.inputs:
+                if type(inp) is Section:
+                    self.dst.get_section(inp.section_name).address = mem.location

         # First copy all sections into output sections:
         for iobj in objs:
diff -r 0c44e494ef58 -r 173e20a47fda python/pyyacc.py
--- a/python/pyyacc.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/pyyacc.py	Sun Apr 27 17:40:39 2014 +0200
@@ -110,6 +110,19 @@
         self.nonterminals.add(name)
         self._first = None  # Invalidate cached version

+    def add_one_or_more(self, element_nonterm, list_nonterm):
+        """ Helper to add the rule
+            lst: elem
+            lst: lst elem
+        """
+        def a(el):
+            return [el]
+        def b(ls, el):
+            ls.append(el)
+            return ls
+        self.add_production(list_nonterm, [element_nonterm], a)
+        self.add_production(list_nonterm, [list_nonterm, element_nonterm], b)
+
     def productionsForName(self, name):
         """ Retrieve all productions for a non terminal """
         return [p for p in self.productions if p.name == name]
diff -r 0c44e494ef58 -r 173e20a47fda python/yacc.py
--- a/python/yacc.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/python/yacc.py	Sun Apr 27 17:40:39 2014 +0200
@@ -43,81 +43,42 @@
 import io
 import logging
 from pyyacc import Grammar
+from baselex import BaseLexer
+from ppci import Token


-class XaccLexer:
+class XaccLexer(BaseLexer):
     def __init__(self):
-        pass
-
-    def feed(self, txt):
-        # Create a regular expression for the lexing part:
         tok_spec = [
-            ('ID', r'[A-Za-z][A-Za-z\d_]*'),
-            ('STRING', r"'[^']*'"),
-            ('BRACEDCODE', r"\{[^\}]*\}"),
-            ('OTHER', r'[:;\|]'),
-            ('SKIP', r'[ ]')
+            ('ID', r'[A-Za-z][A-Za-z\d_]*', lambda typ, val: (typ, val)),
+            ('STRING', r"'[^']*'", lambda typ, val: ('ID', val[1:-1])),
+            ('BRACEDCODE', r"\{[^\}]*\}", lambda typ, val: (typ, val)),
+            ('OTHER', r'[:;\|]', lambda typ, val: (val, val)),
+            ('SKIP', r'[ ]', None)
         ]
-        tok_re = '|'.join('(?P<%s>%s)' % pair for pair in tok_spec)
-        gettok = re.compile(tok_re).match
-
-        lines = txt.split('\n')
+        super().__init__(tok_spec)

-        def tokenize_line(line):
-            """ Generator that splits up a line into tokens """
-            mo = gettok(line)
-            pos = 0
-            while mo:
-                typ = mo.lastgroup
-                val = mo.group(typ)
-                if typ == 'ID':
-                    yield (typ, val)
-                elif typ == 'STRING':
-                    typ = 'ID'
-                    yield (typ, val[1:-1])
-                elif typ == 'OTHER':
-                    typ = val
-                    yield (typ, val)
-                elif typ == 'BRACEDCODE':
-                    yield (typ, val)
-                elif typ == 'SKIP':
-                    pass
+    def tokenize(self, txt):
+        lines = txt.split('\n')
+        section = 0
+        for line in lines:
+            line = line.strip()
+            if not line:
+                continue  # Skip empty lines
+            if line == '%%':
+                section += 1
+                yield Token('%%', '%%')
+                continue
+            if section == 0:
+                if line.startswith('%tokens'):
+                    yield Token('%tokens', '%tokens')
+                    for tk in super().tokenize(line[7:]):
+                        yield tk
                 else:
-                    raise NotImplementedError(str(typ))
-                pos = mo.end()
-                mo = gettok(line, pos)
-            if len(line) != pos:
-                raise ParseError('Lex fault at {}'.format(line))
-
-        def tokenize():
-            section = 0
-            for line in lines:
-                line = line.strip()
-                if not line:
-                    continue  # Skip empty lines
-                if line == '%%':
-                    section += 1
-                    yield('%%', '%%')
-                    continue
-                if section == 0:
-                    if line.startswith('%tokens'):
-                        yield('%tokens', '%tokens')
-                        for tk in tokenize_line(line[7:]):
-                            yield tk
-                    else:
-                        yield ('HEADER', line)
-                elif section == 1:
-                    for tk in tokenize_line(line):
-                        yield tk
-            yield ('eof', 'eof')
-        self.tokens = tokenize()
-        self.token = self.tokens.__next__()
-
-    def next_token(self):
-        t = self.token
-        if t[0] != 'eof':
-            self.token = self.tokens.__next__()
-        return t
+                    yield Token('HEADER', line)
+            elif section == 1:
+                for tk in super().tokenize(line):
+                    yield tk


 class ParseError(Exception):
@@ -129,17 +90,23 @@
     We could have made an generated parser, but that would yield a chicken
     egg issue. """
-    def __init__(self, lexer):
+    def __init__(self):
+        pass
+
+    def prepare_peak(self, lexer):
         self.lexer = lexer
+        self.look_ahead = self.lexer.next_token()

     @property
     def Peak(self):
         """ Sneak peak to the next token in line """
-        return self.lexer.token[0]
+        return self.look_ahead.typ

     def next_token(self):
         """ Take the next token """
-        return self.lexer.next_token()
+        token = self.look_ahead
+        self.look_ahead = self.lexer.next_token()
+        return token

     def consume(self, typ):
         """ Eat next token of type typ or raise an exception """
@@ -155,27 +122,27 @@
             return True
         return False

-    def parse_grammar(self):
+    def parse_grammar(self, lexer):
         """ Entry parse function into recursive descent parser """
+        self.prepare_peak(lexer)
         # parse header
-        headers = []
+        self.headers = []
         terminals = []
         while self.Peak in ['HEADER', '%tokens']:
             if self.Peak == '%tokens':
                 self.consume('%tokens')
                 while self.Peak == 'ID':
-                    terminals.append(self.consume('ID')[1])
+                    terminals.append(self.consume('ID').val)
             else:
-                headers.append(self.consume('HEADER')[1])
+                self.headers.append(self.consume('HEADER').val)
         self.consume('%%')
-        self.headers = headers
         self.grammar = Grammar(terminals)
-        while self.Peak != 'eof':
+        while self.Peak != 'EOF':
             self.parse_rule()
         return self.grammar

     def parse_symbol(self):
-        return self.consume('ID')[1]
+        return self.consume('ID').val

     def parse_rhs(self):
         """ Parse the right hand side of a rule definition """
@@ -183,7 +150,7 @@
         while self.Peak not in [';', 'BRACEDCODE', '|']:
             symbols.append(self.parse_symbol())
         if self.Peak == 'BRACEDCODE':
-            action = self.consume('BRACEDCODE')[1]
+            action = self.consume('BRACEDCODE').val
             action = action[1:-1].strip()
         else:
             action = None
@@ -295,12 +262,12 @@

     # Construction of generator parts:
     lexer = XaccLexer()
-    parser = XaccParser(lexer)
+    parser = XaccParser()
     generator = XaccGenerator()

     # Sequence source through the generator parts:
     lexer.feed(src)
-    grammar = parser.parse_grammar()
+    grammar = parser.parse_grammar(lexer)
     generator.generate(grammar, parser.headers, args.output)
diff -r 0c44e494ef58 -r 173e20a47fda test/m3_bare/m3bare.mmap
--- a/test/m3_bare/m3bare.mmap	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/m3_bare/m3bare.mmap	Sun Apr 27 17:40:39 2014 +0200
@@ -1,5 +1,10 @@
-{
-"code": "0x0",
-"data": "0x20000000"
+MEMORY flash LOCATION=0x0 SIZE=0x10000 {
+    SECTION(code)
 }
+
+MEMORY ram LOCATION=0x20000000 SIZE=0x10000 {
+    SECTION(data)
+}
+
+

diff -r 0c44e494ef58 -r 173e20a47fda test/testarmasm.py
--- a/test/testarmasm.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testarmasm.py	Sun Apr 27 17:40:39 2014 +0200
@@ -1,8 +1,10 @@
 import unittest
+import io
 from ppci.outstream import BinaryOutputStream
 from ppci.objectfile import ObjectFile
 from testasm import AsmTestCaseBase
 from ppci.target.target_list import arm_target
+from ppci.layout import load_layout


 class ArmAssemblerTestCase(AsmTestCaseBase):
@@ -11,7 +13,7 @@
         self.t = arm_target
         self.obj = ObjectFile()
         self.ostream = BinaryOutputStream(self.obj)
-        self.ostream.select_section('.text')
+        self.ostream.select_section('code')
         self.assembler = arm_target.assembler

     def testMovImm(self):
@@ -122,7 +124,13 @@
         """ Link code at 0x10000 and check if symbol was correctly patched """
         self.feed('ldr r8, =a')
         self.feed('a:')
-        self.check('04801fe5 04000100', {'.text':0x10000})
+        spec = """
+            MEMORY flash LOCATION=0x10000 SIZE=0x10000 {
+              SECTION(code)
+            }
+        """
+        layout = load_layout(io.StringIO(spec))
+        self.check('04801fe5 04000100', layout)

     def testCmp(self):
         self.feed('cmp r4, r11')
diff -r 0c44e494ef58 -r 173e20a47fda test/testasm.py
--- a/test/testasm.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testasm.py	Sun Apr 27 17:40:39 2014 +0200
@@ -7,6 +7,7 @@
 from ppci.outstream import BinaryOutputStream
 from ppci.target.basetarget import Label
 from ppci.buildfunctions import link
+from ppci.layout import Layout


 class AssemblerLexingCase(unittest.TestCase):
@@ -60,10 +61,10 @@

     def feed(self, line):
         self.assembler.assemble(line, self.ostream)

-    def check(self, hexstr, layout={}):
+    def check(self, hexstr, layout=Layout()):
         self.assembler.flush()
         self.obj = link([self.obj], layout)
-        data = bytes(self.obj.get_section('.text').data)
+        data = bytes(self.obj.get_section('code').data)
         self.assertSequenceEqual(bytes.fromhex(hexstr), data)
diff -r 0c44e494ef58 -r 173e20a47fda test/testbintools.py
--- a/test/testbintools.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testbintools.py	Sun Apr 27 17:40:39 2014 +0200
@@ -8,6 +8,7 @@
 from ppci.tasks import TaskRunner, TaskError
 from ppci.buildtasks import EmptyTask
 from ppci.buildfunctions import link
+from ppci import layout


 class TaskTestCase(unittest.TestCase):
@@ -59,7 +60,7 @@
         o1.add_relocation('undefined_sym', 0, 'rel8', '.text')
         o2 = ObjectFile()
         with self.assertRaises(CompilerError):
-            o3 = link([o1, o2], {})
+            o3 = link([o1, o2], layout.Layout())

     def testDuplicateSymbol(self):
         o1 = ObjectFile()
@@ -69,7 +70,7 @@
         o2.get_section('.text')
         o2.add_symbol('a', 0, '.text')
         with self.assertRaises(CompilerError):
-            o3 = link([o1, o2], {})
+            o3 = link([o1, o2], layout.Layout())

     def testRel8Relocation(self):
         o1 = ObjectFile()
@@ -78,7 +79,7 @@
         o2 = ObjectFile()
         o2.get_section('.text').add_data(bytes([0]*100))
         o2.add_symbol('a', 24, '.text')
-        o3 = link([o1, o2], {})
+        o3 = link([o1, o2], layout.Layout())

     def testSymbolValues(self):
         o1 = ObjectFile()
@@ -87,27 +88,35 @@
         o2 = ObjectFile()
         o2.get_section('.text').add_data(bytes([0]*100))
         o2.add_symbol('a', 2, '.text')
-        o3 = link([o1, o2], {})
+        o3 = link([o1, o2], layout.Layout())
         self.assertEqual(110, o3.find_symbol('a').value)
         self.assertEqual(24, o3.find_symbol('b').value)
         self.assertEqual(208, o3.get_section('.text').Size)

     def testMemoryLayout(self):
-        memory_layout = {'.text': 0x08000000, '.data':0x20000000}
+        spec = """
+            MEMORY flash LOCATION=0x08000000 SIZE=0x3000 {
+              SECTION(code)
+            }
+            MEMORY flash LOCATION=0x20000000 SIZE=0x3000 {
+              SECTION(data)
+            }
+        """
+        memory_layout = layout.load_layout(io.StringIO(spec))
         o1 = ObjectFile()
-        o1.get_section('.text').add_data(bytes([0]*108))
-        o1.add_symbol('b', 24, '.text')
+        o1.get_section('code').add_data(bytes([0]*108))
+        o1.add_symbol('b', 24, 'code')
         o2 = ObjectFile()
-        o2.get_section('.text').add_data(bytes([0]*100))
-        o2.get_section('.data').add_data(bytes([0]*100))
-        o2.add_symbol('a', 2, '.data')
-        o2.add_symbol('c', 2, '.text')
+        o2.get_section('code').add_data(bytes([0]*100))
+        o2.get_section('data').add_data(bytes([0]*100))
+        o2.add_symbol('a', 2, 'data')
+        o2.add_symbol('c', 2, 'code')
         o3 = link([o1, o2], memory_layout)
         self.assertEqual(0x20000000+2, o3.find_symbol('a').value)
         self.assertEqual(0x08000000+24, o3.find_symbol('b').value)
         self.assertEqual(0x08000000+110, o3.find_symbol('c').value)
-        self.assertEqual(208, o3.get_section('.text').Size)
-        self.assertEqual(100, o3.get_section('.data').Size)
+        self.assertEqual(208, o3.get_section('code').Size)
+        self.assertEqual(100, o3.get_section('data').Size)


 class ObjectFileTestCase(unittest.TestCase):
@@ -142,6 +151,25 @@
         self.assertEqual(o3, o1)


+class LayoutFileTestCase(unittest.TestCase):
+    def testLayout1(self):
+        spec = """
+            MEMORY flash LOCATION=0x1000 SIZE=0x3000 {
+              SECTION(code)
+              ALIGN(4)
+            }
+        """
+        layout1 = layout.load_layout(io.StringIO(spec))
+        layout2 = layout.Layout()
+        m = layout.Memory('flash')
+        m.location = 0x1000
+        m.size = 0x3000
+        m.add_input(layout.Section('code'))
+        m.add_input(layout.Align(4))
+        layout2.add_memory(m)
+        self.assertEqual(layout2, layout1)
+
+
 if __name__ == '__main__':
     unittest.main()
     sys.exit()
diff -r 0c44e494ef58 -r 173e20a47fda test/testmsp430asm.py
--- a/test/testmsp430asm.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testmsp430asm.py	Sun Apr 27 17:40:39 2014 +0200
@@ -12,7 +12,7 @@
         self.t = msp430target
         self.obj = ObjectFile()
         self.ostream = BinaryOutputStream(self.obj)
-        self.ostream.select_section('.text')
+        self.ostream.select_section('code')
         self.assembler = msp430target.assembler

     def testMov(self):
diff -r 0c44e494ef58 -r 173e20a47fda test/testsamples.py
--- a/test/testsamples.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testsamples.py	Sun Apr 27 17:40:39 2014 +0200
@@ -24,6 +24,17 @@
 """


+arch_mmap = """
+MEMORY image LOCATION=0x10000 SIZE=0x10000 {
+    SECTION(reset)
+    SECTION(code)
+}
+
+MEMORY ram LOCATION=0x20000 SIZE=0x10000 {
+    SECTION(data)
+}
+"""
+
 class Samples:
     def testPrint(self):
         snippet = """
@@ -136,8 +147,7 @@
                           relpath('..', 'kernel', 'src', 'io.c3'),
                           io.StringIO(modarchcode),
                           io.StringIO(src)], [], 'arm')
-        layout = {'code': 0x10000, 'data': 0x20000}
-        o3 = link([o1, o2], layout)
+        o3 = link([o1, o2], io.StringIO(arch_mmap))

         sample_filename = 'testsample.bin'
         with open(sample_filename, 'wb') as f:
@@ -152,6 +162,9 @@
         self.assertEqual(expected_output, res)


+# TODO: test samples on thumb target..
+
+
 if __name__ == '__main__':
     unittest.main()
diff -r 0c44e494ef58 -r 173e20a47fda test/testthumbasm.py
--- a/test/testthumbasm.py	Sun Apr 27 12:24:21 2014 +0200
+++ b/test/testthumbasm.py	Sun Apr 27 17:40:39 2014 +0200
@@ -10,7 +10,7 @@
         self.t = thumb_target
         self.obj = ObjectFile()
         self.ostream = BinaryOutputStream(self.obj)
-        self.ostream.select_section('.text')
+        self.ostream.select_section('code')
         self.assembler = thumb_target.assembler

     def testMovImm8(self):
diff -r 0c44e494ef58 -r 173e20a47fda user/app.mmap
--- a/user/app.mmap	Sun Apr 27 12:24:21 2014 +0200
+++ b/user/app.mmap	Sun Apr 27 17:40:39 2014 +0200
@@ -1,6 +1,9 @@
-{
-  "code": "0x0",
-  "data":"0x20000000"
+MEMORY flash LOCATION=0x0 SIZE=0x10000 {
+    SECTION(code)
 }

+MEMORY ram LOCATION=0x20000000 SIZE=0x10000 {
+    SECTION(data)
+}
+
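
A minimal usage sketch of the loader introduced above (not guaranteed by the changeset itself; it assumes the repository's python/ directory is on sys.path). load_layout() expects a file-like object, exactly as the updated tests pass io.StringIO:

    import io
    from ppci.layout import load_layout, Layout

    spec = """
    MEMORY flash LOCATION=0x08000000 SIZE=0x10000 {
        SECTION(code)
    }
    """

    # load_layout() feeds the text through LayoutLexer/LayoutParser
    # and returns a Layout holding one Memory per MEMORY block.
    layout = load_layout(io.StringIO(spec))
    assert isinstance(layout, Layout)
    print(layout)   # e.g. [MEM flash loc=08000000 size=00010000[Section(code)]]

Per the new fix_layout() helper in buildfunctions.py, ppci.buildfunctions.link() accepts a Layout instance, an open file handle, or a filename for its layout argument.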