ldgen: refactor generation internals

Renz Bagaporo
2021-01-27 16:00:19 +08:00
parent 922194a295
commit a41a56b5b0
4 changed files with 582 additions and 504 deletions

tools/ldgen/entity.py (new file, +222 lines)

@@ -0,0 +1,222 @@
#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import fnmatch
import os
from enum import Enum
from functools import total_ordering
from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
nums, restOfLine)
@total_ordering
class Entity():
"""
Definition of an entity which can be placed or excluded
from placement.
"""
ALL = '*'
class Specificity(Enum):
NONE = 0
ARCHIVE = 1
OBJ = 2
SYMBOL = 3
def __init__(self, archive=None, obj=None, symbol=None):
archive_spec = archive and archive != Entity.ALL
obj_spec = obj and obj != Entity.ALL
symbol_spec = symbol and symbol != Entity.ALL
if not archive_spec and not obj_spec and not symbol_spec:
self.specificity = Entity.Specificity.NONE
elif archive_spec and not obj_spec and not symbol_spec:
self.specificity = Entity.Specificity.ARCHIVE
elif archive_spec and obj_spec and not symbol_spec:
self.specificity = Entity.Specificity.OBJ
elif archive_spec and obj_spec and symbol_spec:
self.specificity = Entity.Specificity.SYMBOL
else:
raise ValueError("Invalid arguments '(%s, %s, %s)'" % (archive, obj, symbol))
self.archive = archive
self.obj = obj
self.symbol = symbol
def __eq__(self, other):
return (self.specificity.value == other.specificity.value and
self.archive == other.archive and
self.obj == other.obj and
self.symbol == other.symbol)
def __lt__(self, other):
res = False
if self.specificity.value < other.specificity.value:
res = True
elif self.specificity == other.specificity:
for s in Entity.Specificity:
a = self[s] if self[s] else ''
b = other[s] if other[s] else ''
if a != b:
res = a < b
break
else:
res = False
return res
def __hash__(self):
return hash(self.__repr__())
def __str__(self):
return '%s:%s %s' % self.__repr__()
def __repr__(self):
return (self.archive, self.obj, self.symbol)
def __getitem__(self, spec):
res = None
if spec == Entity.Specificity.ARCHIVE:
res = self.archive
elif spec == Entity.Specificity.OBJ:
res = self.obj
elif spec == Entity.Specificity.SYMBOL:
res = self.symbol
else:
res = None
return res
class EntityDB():
"""
Encapsulates an output of objdump. Contains information about the static library sections
and names
"""
__info = collections.namedtuple('__info', 'filename content')
def __init__(self):
self.sections = dict()
def add_sections_info(self, sections_info_dump):
first_line = sections_info_dump.readline()
archive_path = (Literal('In archive').suppress() +
White().suppress() +
# trim the colon and line ending characters from archive_path
restOfLine.setResultsName('archive_path').setParseAction(lambda s, loc, toks: s.rstrip(':\n\r ')))
parser = archive_path
results = None
try:
results = parser.parseString(first_line, parseAll=True)
except ParseException as p:
raise ParseException('Parsing sections info for library ' + sections_info_dump.name + ' failed. ' + p.msg)
archive = os.path.basename(results.archive_path)
self.sections[archive] = EntityDB.__info(sections_info_dump.name, sections_info_dump.read())
def _get_infos_from_file(self, info):
# {object}: file format elf32-xtensa-le
object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)
# Sections:
# Idx Name ...
section_start = Suppress(Literal('Sections:'))
section_header = Suppress(OneOrMore(Word(alphas)))
# 00 {section} 0000000 ...
# CONTENTS, ALLOC, ....
section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))
content = Group(object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
parser = Group(ZeroOrMore(content)).setResultsName('contents')
results = None
try:
results = parser.parseString(info.content, parseAll=True)
except ParseException as p:
raise ParseException('Unable to parse section info file ' + info.filename + '. ' + p.msg)
return results
def _process_archive(self, archive):
stored = self.sections[archive]
# Parse the contents of the sections file on-demand,
# save the result for later
if not isinstance(stored, dict):
parsed = self._get_infos_from_file(stored)
stored = dict()
for content in parsed.contents:
sections = list(map(lambda s: s, content.sections))
stored[content.object] = sections
self.sections[archive] = stored
def get_archives(self):
return self.sections.keys()
def get_objects(self, archive):
try:
self._process_archive(archive)
except KeyError:
return []
return self.sections[archive].keys()
def _match_obj(self, archive, obj):
objs = self.get_objects(archive)
match_objs = fnmatch.filter(objs, obj + '.o') + fnmatch.filter(objs, obj + '.*.obj') + fnmatch.filter(objs, obj + '.obj')
if len(match_objs) > 1:
raise ValueError("Multiple matches for object: '%s: %s': %s" % (archive, obj, str(match_objs)))
try:
return match_objs[0]
except IndexError:
return None
def get_sections(self, archive, obj):
obj = self._match_obj(archive, obj)
res = []
if obj:
res = self.sections[archive][obj]
return res
def _match_symbol(self, archive, obj, symbol):
sections = self.get_sections(archive, obj)
return [s for s in sections if s.endswith(symbol)]
def check_exists(self, entity):
res = True
if entity.specificity != Entity.Specificity.NONE:
if entity.specificity == Entity.Specificity.ARCHIVE:
res = entity.archive in self.get_archives()
elif entity.specificity == Entity.Specificity.OBJ:
res = self._match_obj(entity.archive, entity.obj) is not None
elif entity.specificity == Entity.Specificity.SYMBOL:
res = len(self._match_symbol(entity.archive, entity.obj, entity.symbol)) > 0
else:
res = False
return res
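The ordering defined by __lt__ is what the generation code later relies on when it sorts mappings from least to most specific. A minimal sketch (not part of the commit) of how Entity behaves; the archive, object and symbol names are hypothetical:

# Illustration only; 'libfreertos.a', 'tasks' and 'xTaskCreate' are made-up names.
from entity import Entity

lib = Entity('libfreertos.a')                          # Specificity.ARCHIVE
obj = Entity('libfreertos.a', 'tasks')                 # Specificity.OBJ
sym = Entity('libfreertos.a', 'tasks', 'xTaskCreate')  # Specificity.SYMBOL

# total_ordering sorts less specific entities first; generate_rules() relies on this
# so that parent nodes are always inserted into the rule tree before their children.
assert sorted([sym, lib, obj]) == [lib, obj, sym]
assert str(sym) == 'libfreertos.a:tasks xTaskCreate'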

tools/ldgen/generation.py

@@ -17,242 +17,256 @@
 import collections
 import fnmatch
 import itertools
-import os
-from fragments import Fragment, Mapping, Scheme, Sections
+from collections import namedtuple
+from entity import Entity
+from fragments import Mapping, Scheme, Sections
 from ldgen_common import LdGenFailure
-from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
-                       nums, restOfLine)
+from output_commands import InputSectionDesc
-class PlacementRule():
-    """
-    Encapsulates a generated placement rule placed under a target
-    """
-    DEFAULT_SPECIFICITY = 0
-    ARCHIVE_SPECIFICITY = 1
-    OBJECT_SPECIFICITY = 2
-    SYMBOL_SPECIFICITY = 3
-    class __container():
-        def __init__(self, content):
-            self.content = content
-    __metadata = collections.namedtuple('__metadata', 'excludes expansions expanded')
-    def __init__(self, archive, obj, symbol, sections, target):
-        if archive == '*':
-            archive = None
-        if obj == '*':
-            obj = None
-        self.archive = archive
-        self.obj = obj
-        self.symbol = symbol
-        self.target = target
-        self.sections = dict()
-        self.specificity = 0
-        self.specificity += 1 if self.archive else 0
-        self.specificity += 1 if (self.obj and not self.obj == '*') else 0
-        self.specificity += 1 if self.symbol else 0
-        for section in sections:
-            section_data = Sections.get_section_data_from_entry(section, self.symbol)
-            if not self.symbol:
-                for s in section_data:
-                    metadata = self.__metadata(self.__container([]), self.__container([]), self.__container(False))
-                    self.sections[s] = metadata
-            else:
-                (section, expansion) = section_data
-                if expansion:
-                    metadata = self.__metadata(self.__container([]), self.__container([expansion]), self.__container(True))
-                    self.sections[section] = metadata
-    def get_section_names(self):
-        return self.sections.keys()
-    def add_exclusion(self, other, sections_infos=None):
-        # Utility functions for this method
-        def do_section_expansion(rule, section):
-            if section in rule.get_section_names():
-                sections_in_obj = sections_infos.get_obj_sections(rule.archive, rule.obj)
-                expansions = fnmatch.filter(sections_in_obj, section)
-                return expansions
-        def remove_section_expansions(rule, section, expansions):
-            existing_expansions = self.sections[section].expansions.content
-            self.sections[section].expansions.content = [e for e in existing_expansions if e not in expansions]
-        # Exit immediately if the exclusion to be added is more general than this rule.
-        if not other.is_more_specific_rule_of(self):
-            return
-        for section in self.get_sections_intersection(other):
-            if(other.specificity == PlacementRule.SYMBOL_SPECIFICITY):
-                # If this sections has not been expanded previously, expand now and keep track.
-                previously_expanded = self.sections[section].expanded.content
-                if not previously_expanded:
-                    expansions = do_section_expansion(self, section)
-                    if expansions:
-                        self.sections[section].expansions.content = expansions
-                        self.sections[section].expanded.content = True
-                        previously_expanded = True
-                # Remove the sections corresponding to the symbol name
-                remove_section_expansions(self, section, other.sections[section].expansions.content)
-                # If it has been expanded previously but now the expansions list is empty,
-                # it means adding exclusions has exhausted the list. Remove the section entirely.
-                if previously_expanded and not self.sections[section].expansions.content:
-                    del self.sections[section]
-            else:
-                # A rule section can have multiple rule sections excluded from it. Get the
-                # most specific rule from the list, and if an even more specific rule is found,
-                # replace it entirely. Otherwise, keep appending.
-                exclusions = self.sections[section].excludes
-                exclusions_list = exclusions.content if exclusions.content is not None else []
-                exclusions_to_remove = filter(lambda r: r.is_more_specific_rule_of(other), exclusions_list)
-                remaining_exclusions = [e for e in exclusions_list if e not in exclusions_to_remove]
-                remaining_exclusions.append(other)
-                self.sections[section].excludes.content = remaining_exclusions
-    def get_sections_intersection(self, other):
-        return set(self.sections.keys()).intersection(set(other.sections.keys()))
-    def is_more_specific_rule_of(self, other):
-        if (self.specificity <= other.specificity):
-            return False
-        # Compare archive, obj and target
-        for entity_index in range(1, other.specificity + 1):
-            if self[entity_index] != other[entity_index] and other[entity_index] is not None:
-                return False
-        return True
-    def maps_same_entities_as(self, other):
-        if self.specificity != other.specificity:
-            return False
-        # Compare archive, obj and target
-        for entity_index in range(1, other.specificity + 1):
-            if self[entity_index] != other[entity_index] and other[entity_index] is not None:
-                return False
-        return True
-    def __getitem__(self, key):
-        if key == PlacementRule.ARCHIVE_SPECIFICITY:
-            return self.archive
-        elif key == PlacementRule.OBJECT_SPECIFICITY:
-            return self.obj
-        elif key == PlacementRule.SYMBOL_SPECIFICITY:
-            return self.symbol
-        else:
-            return None
-    def __str__(self):
-        sorted_sections = sorted(self.get_section_names())
-        sections_string = list()
-        for section in sorted_sections:
-            exclusions = self.sections[section].excludes.content
-            exclusion_string = None
-            if exclusions:
-                exclusion_string = ' '.join(map(lambda e: '*' + e.archive + (':' + e.obj + '.*' if e.obj else ''), exclusions))
-                exclusion_string = 'EXCLUDE_FILE(' + exclusion_string + ')'
-            else:
-                exclusion_string = ''
-            section_string = None
-            exclusion_section_string = None
-            section_expansions = self.sections[section].expansions.content
-            section_expanded = self.sections[section].expanded.content
-            if section_expansions and section_expanded:
-                section_string = ' '.join(section_expansions)
-                exclusion_section_string = section_string
-            else:
-                section_string = section
-                exclusion_section_string = exclusion_string + ' ' + section_string
-            sections_string.append(exclusion_section_string)
-        sections_string = ' '.join(sections_string)
-        archive = str(self.archive) if self.archive else ''
-        obj = (str(self.obj) + ('.*' if self.obj else '')) if self.obj else ''
-        # Handle output string generation based on information available
-        if self.specificity == PlacementRule.DEFAULT_SPECIFICITY:
-            rule_string = '*(%s)' % (sections_string)
-        elif self.specificity == PlacementRule.ARCHIVE_SPECIFICITY:
-            rule_string = '*%s:(%s)' % (archive, sections_string)
-        else:
-            rule_string = '*%s:%s(%s)' % (archive, obj, sections_string)
-        return rule_string
-    def __eq__(self, other):
-        if id(self) == id(other):
-            return True
-        def exclusions_set(exclusions):
-            exclusions_set = {(e.archive, e.obj, e.symbol, e.target) for e in exclusions}
-            return exclusions_set
-        if self.archive != other.archive:
-            return False
-        if self.obj != other.obj:
-            return False
-        if self.symbol != other.symbol:
-            return False
-        if set(self.sections.keys()) != set(other.sections.keys()):
-            return False
-        for (section, metadata) in self.sections.items():
-            self_meta = metadata
-            other_meta = other.sections[section]
-            if exclusions_set(self_meta.excludes.content) != exclusions_set(other_meta.excludes.content):
-                return False
-            if set(self_meta.expansions.content) != set(other_meta.expansions.content):
-                return False
-        return True
-    def __ne__(self, other):
-        return not self.__eq__(other)
-    def __iter__(self):
-        yield self.archive
-        yield self.obj
-        yield self.symbol
-        raise StopIteration
+class RuleNode():
+    class Section():
+        def __init__(self, target, exclusions, explicit=False):
+            self.target = target
+            self.exclusions = set(exclusions)
+            # Indicate whether this node has been created explicitly from a mapping,
+            # or simply just to create a path to the explicitly created node.
+            #
+            # For example,
+            #
+            #   lib.a
+            #       obj:sym (scheme)
+            #
+            # Nodes for lib.a and obj will be created, but only the node for
+            # sym will have been created explicitly.
+            #
+            # This is used in deciding whether or not an output command should
+            # be emitted for this node, or for exclusion rule generation.
+            self.explicit = explicit
+    def __init__(self, parent, name, sections):
+        self.children = []
+        self.parent = parent
+        self.name = name
+        self.child_node = None
+        self.entity = None
+        self.sections = dict()
+        # A node inherits the section -> target entries from
+        # its parent. This is to simplify logic, avoiding
+        # going up the parental chain to try a 'basis' rule
+        # in creating exclusions. This relies on the fact that
+        # the mappings must be inserted from least to most specific.
+        # This sort is done in generate_rules().
+        if sections:
+            for (s, v) in sections.items():
+                self.sections[s] = RuleNode.Section(v.target, [], [])
+    def add_exclusion(self, sections, exclusion):
+        self.sections[sections].exclusions.add(exclusion)
+        # Recursively create exclusions in parents
+        if self.parent:
+            self.exclude_from_parent(sections)
+    def add_sections(self, sections, target):
+        try:
+            _sections = self.sections[sections]
+            if not _sections.explicit:
+                _sections.target = target
+                _sections.explicit = True
+            else:
+                if target != _sections.target:
+                    raise GenerationException('Sections mapped to multiple targets')
+        except KeyError:
+            self.sections[sections] = RuleNode.Section(target, [], True)
+    def exclude_from_parent(self, sections):
+        self.parent.add_exclusion(sections, self.entity)
+    def add_child(self, entity):
+        child_specificity = self.entity.specificity.value + 1
+        assert(child_specificity <= Entity.Specificity.SYMBOL.value)
+        name = entity[Entity.Specificity(child_specificity)]
+        assert(name and name != Entity.ALL)
+        child = [c for c in self.children if c.name == name]
+        assert(len(child) <= 1)
+        if not child:
+            child = self.child_node(self, name, self.sections)
+            self.children.append(child)
+        else:
+            child = child[0]
+        return child
+    def get_output_commands(self):
+        commands = collections.defaultdict(list)
+        def process_commands(cmds):
+            for (target, commands_list) in cmds.items():
+                commands[target].extend(commands_list)
+        # Process the commands generated from this node
+        node_commands = self.get_node_output_commands()
+        process_commands(node_commands)
+        # Process the commands generated from this node's children
+        # recursively
+        for child in sorted(self.children, key=lambda c: c.name):
+            children_commands = child.get_output_commands()
+            process_commands(children_commands)
+        return commands
+    def add_node_child(self, entity, sections, target, sections_db):
+        child = self.add_child(entity)
+        child.insert(entity, sections, target, sections_db)
+    def get_node_output_commands(self):
+        commands = collections.defaultdict(list)
+        for sections in self.get_section_keys():
+            info = self.sections[sections]
+            if info.exclusions or info.explicit:
+                command = InputSectionDesc(self.entity, sections, info.exclusions)
+                commands[info.target].append(command)
+        return commands
+    def insert(self, entity, sections, target, sections_db):
+        if self.entity.specificity == entity.specificity:
+            if self.parent.sections[sections].target != target:
+                self.add_sections(sections, target)
+                self.exclude_from_parent(sections)
+        else:
+            self.add_node_child(entity, sections, target, sections_db)
+    def get_section_keys(self):
+        return sorted(self.sections.keys(), key=' '.join)
+class SymbolNode(RuleNode):
+    def __init__(self, parent, name, sections):
+        RuleNode.__init__(self, parent, name, sections)
+        self.entity = Entity(self.parent.parent.name, self.parent.name, self.name)
+    def insert(self, entity, sections, target, sections_db):
+        self.add_sections(sections, target)
+    def get_node_output_commands(self):
+        commands = collections.defaultdict(list)
+        for sections in self.get_section_keys():
+            info = self.sections[sections]
+            if info.explicit:
+                command = InputSectionDesc(Entity(self.parent.parent.name, self.parent.name), sections, [])
+                commands[info.target].append(command)
+        return commands
+class ObjectNode(RuleNode):
+    def __init__(self, parent, name, sections):
+        RuleNode.__init__(self, parent, name, sections)
+        self.child_node = SymbolNode
+        self.expanded_sections = dict()
+        self.entity = Entity(self.parent.name, self.name)
+    def add_node_child(self, entity, sections, target, sections_db):
+        if self.sections[sections].target != target:
+            symbol = entity.symbol
+            match_sections = None
+            obj_sections = sections_db.get_sections(self.parent.name, self.name)
+            try:
+                match_sections = self.expanded_sections[sections]
+            except KeyError:
+                match_sections = []
+                for s in sections:
+                    match_sections.extend(fnmatch.filter(obj_sections, s))
+            if match_sections:
+                remove_sections = [s.replace('.*', '.%s' % symbol) for s in sections if '.*' in s]
+                filtered_sections = [s for s in match_sections if s not in remove_sections]
+                if set(filtered_sections) != set(match_sections):  # some sections removed
+                    child = self.add_child(entity)
+                    child.insert(entity, frozenset(remove_sections), target, obj_sections)
+                    # Remember the result for node command generation
+                    self.expanded_sections[sections] = filtered_sections
+                    self.exclude_from_parent(sections)
+    def get_node_output_commands(self):
+        commands = collections.defaultdict(list)
+        for sections in self.get_section_keys():
+            info = self.sections[sections]
+            try:
+                match_sections = self.expanded_sections[sections]
+            except KeyError:
+                match_sections = []
+            if match_sections or info.explicit:
+                command_sections = match_sections if match_sections else sections
+                command = InputSectionDesc(self.entity, command_sections, [])
+                commands[info.target].append(command)
+        return commands
+    def exclude_from_parent(self, sections):
+        # Check if there is an explicit emission for the parent node, which is an archive node.
+        # If there is, make the exclusion there. If not, make the exclusion on the root node.
+        # This avoids emitting an unnecessary command and exclusions for the archive node and
+        # from the root node, respectively.
+        if self.parent.sections[sections].explicit:
+            self.parent.add_exclusion(sections, self.entity)
+        else:
+            self.parent.parent.add_exclusion(sections, self.entity)
+class ArchiveNode(RuleNode):
+    def __init__(self, parent, name, sections):
+        RuleNode.__init__(self, parent, name, sections)
+        self.child_node = ObjectNode
+        self.entity = Entity(self.name)
+class RootNode(RuleNode):
+    def __init__(self):
+        RuleNode.__init__(self, None, Entity.ALL, None)
+        self.child_node = ArchiveNode
+        self.entity = Entity('*')
+    def insert(self, entity, sections, target, sections_db):
+        if self.entity.specificity == entity.specificity:
+            self.add_sections(sections, target)
+        else:
+            self.add_node_child(entity, sections, target, sections_db)
-class GenerationModel:
+class Generation:
     """
     Implements generation of placement rules based on collected sections, scheme and mapping fragment.
     """
     DEFAULT_SCHEME = 'default'
+    # Processed mapping, scheme and section entries
+    EntityMapping = namedtuple('EntityMapping', 'entity sections_group target')
     def __init__(self, check_mappings=False, check_mapping_exceptions=None):
         self.schemes = {}
         self.sections = {}
@@ -265,23 +279,6 @@ class GenerationModel:
         else:
             self.check_mapping_exceptions = []
-    def _add_mapping_rules(self, archive, obj, symbol, scheme_name, scheme_dict, rules):
-        # Use an ordinary dictionary to raise exception on non-existing keys
-        temp_dict = dict(scheme_dict)
-        sections_bucket = temp_dict[scheme_name]
-        for (target, sections) in sections_bucket.items():
-            section_entries = []
-            for section in sections:
-                section_entries.extend(section.entries)
-            rule = PlacementRule(archive, obj, symbol, section_entries, target)
-            if rule not in rules:
-                rules.append(rule)
     def _build_scheme_dictionary(self):
         scheme_dictionary = collections.defaultdict(dict)
@@ -297,7 +294,7 @@ class GenerationModel:
                 try:
                     sections = self.sections[sections_name]
                 except KeyError:
-                    message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections + "'."
+                    message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections_name + "'."
                     raise GenerationException(message, scheme)
                 sections_in_bucket.append(sections)
@@ -327,147 +324,69 @@ class GenerationModel:
         return scheme_dictionary
-    def generate_rules(self, sections_infos):
-        scheme_dictionary = self._build_scheme_dictionary()
-        # Generate default rules
-        default_rules = list()
-        self._add_mapping_rules(None, None, None, GenerationModel.DEFAULT_SCHEME, scheme_dictionary, default_rules)
-        all_mapping_rules = collections.defaultdict(list)
-        # Generate rules based on mapping fragments
-        for mapping in self.mappings.values():
-            archive = mapping.archive
-            mapping_rules = all_mapping_rules[archive]
-            for (obj, symbol, scheme_name) in mapping.entries:
-                try:
-                    if not (obj == Mapping.MAPPING_ALL_OBJECTS and symbol is None and
-                            scheme_name == GenerationModel.DEFAULT_SCHEME):
-                        if self.check_mappings and mapping.name not in self.check_mapping_exceptions:
-                            if not obj == Mapping.MAPPING_ALL_OBJECTS:
-                                obj_sections = sections_infos.get_obj_sections(archive, obj)
-                                if not obj_sections:
-                                    message = "'%s:%s' not found" % (archive, obj)
-                                    raise GenerationException(message, mapping)
-                                if symbol:
-                                    obj_sym = fnmatch.filter(obj_sections, '*%s' % symbol)
-                                    if not obj_sym:
-                                        message = "'%s:%s %s' not found" % (archive, obj, symbol)
-                                        raise GenerationException(message, mapping)
-                        self._add_mapping_rules(archive, obj, symbol, scheme_name, scheme_dictionary, mapping_rules)
-                except KeyError:
-                    message = GenerationException.UNDEFINED_REFERENCE + " to scheme '" + scheme_name + "'."
-                    raise GenerationException(message, mapping)
-        # Detect rule conflicts
-        for mapping_rules in all_mapping_rules.items():
-            self._detect_conflicts(mapping_rules)
-        # Add exclusions
-        for mapping_rules in all_mapping_rules.values():
-            self._create_exclusions(mapping_rules, default_rules, sections_infos)
-        placement_rules = collections.defaultdict(list)
-        # Add the default rules grouped by target
-        for default_rule in default_rules:
-            existing_rules = placement_rules[default_rule.target]
-            if default_rule.get_section_names():
-                existing_rules.append(default_rule)
-        archives = sorted(all_mapping_rules.keys())
-        for archive in archives:
-            # Add the mapping rules grouped by target
-            mapping_rules = sorted(all_mapping_rules[archive], key=lambda m: (m.specificity, str(m)))
-            for mapping_rule in mapping_rules:
-                existing_rules = placement_rules[mapping_rule.target]
-                if mapping_rule.get_section_names():
-                    existing_rules.append(mapping_rule)
-        return placement_rules
-    def _detect_conflicts(self, rules):
-        (archive, rules_list) = rules
-        for specificity in range(0, PlacementRule.OBJECT_SPECIFICITY + 1):
-            rules_with_specificity = filter(lambda r: r.specificity == specificity, rules_list)
-            for rule_a, rule_b in itertools.combinations(rules_with_specificity, 2):
-                intersections = rule_a.get_sections_intersection(rule_b)
-                if intersections and rule_a.maps_same_entities_as(rule_b):
-                    rules_string = str([str(rule_a), str(rule_b)])
-                    message = 'Rules ' + rules_string + ' map sections ' + str(list(intersections)) + ' into multiple targets.'
-                    raise GenerationException(message)
-    def _create_extra_rules(self, rules):
-        # This function generates extra rules for symbol specific rules. The reason for generating extra rules is to isolate,
-        # as much as possible, rules that require expansion. Particularly, object specific extra rules are generated.
-        rules_to_process = sorted(rules, key=lambda r: r.specificity)
-        symbol_specific_rules = list(filter(lambda r: r.specificity == PlacementRule.SYMBOL_SPECIFICITY, rules_to_process))
-        extra_rules = dict()
-        for symbol_specific_rule in symbol_specific_rules:
-            extra_rule_candidate = {s: None for s in symbol_specific_rule.get_section_names()}
-            super_rules = filter(lambda r: symbol_specific_rule.is_more_specific_rule_of(r), rules_to_process)
-            # Take a look at the existing rules that are more general than the current symbol-specific rule.
-            # Only generate an extra rule if there is no existing object specific rule for that section
-            for super_rule in super_rules:
-                intersections = symbol_specific_rule.get_sections_intersection(super_rule)
-                for intersection in intersections:
-                    if super_rule.specificity != PlacementRule.OBJECT_SPECIFICITY:
-                        extra_rule_candidate[intersection] = super_rule
-                    else:
-                        extra_rule_candidate[intersection] = None
-            # Generate the extra rules for the symbol specific rule section, keeping track of the generated extra rules
-            for (section, section_rule) in extra_rule_candidate.items():
-                if section_rule:
-                    extra_rule = None
-                    extra_rules_key = (symbol_specific_rule.archive, symbol_specific_rule.obj, section_rule.target)
-                    try:
-                        extra_rule = extra_rules[extra_rules_key]
-                        if section not in extra_rule.get_section_names():
-                            new_rule = PlacementRule(extra_rule.archive, extra_rule.obj, extra_rule.symbol,
-                                                     list(extra_rule.get_section_names()) + [section], extra_rule.target)
-                            extra_rules[extra_rules_key] = new_rule
-                    except KeyError:
-                        extra_rule = PlacementRule(symbol_specific_rule.archive, symbol_specific_rule.obj, None, [section], section_rule.target)
-                        extra_rules[extra_rules_key] = extra_rule
-        return extra_rules.values()
-    def _create_exclusions(self, mapping_rules, default_rules, sections_info):
-        rules = list(default_rules)
-        rules.extend(mapping_rules)
-        extra_rules = self._create_extra_rules(rules)
-        mapping_rules.extend(extra_rules)
-        rules.extend(extra_rules)
-        # Sort the rules by means of how specific they are. Sort by specificity from lowest to highest
-        # * -> lib:* -> lib:obj -> lib:obj:symbol
-        sorted_rules = sorted(rules, key=lambda r: r.specificity)
-        # Now that the rules have been sorted, loop through each rule, and then loop
-        # through rules below it (higher indices), adding exclusions whenever appropriate.
-        for general_rule in sorted_rules:
-            for specific_rule in reversed(sorted_rules):
-                if (specific_rule.specificity > general_rule.specificity and
-                        specific_rule.specificity != PlacementRule.SYMBOL_SPECIFICITY) or \
-                        (specific_rule.specificity == PlacementRule.SYMBOL_SPECIFICITY and
-                         general_rule.specificity == PlacementRule.OBJECT_SPECIFICITY):
-                    general_rule.add_exclusion(specific_rule, sections_info)
+    def get_section_strs(self, section):
+        s_list = [Sections.get_section_data_from_entry(s) for s in section.entries]
+        return frozenset([item for sublist in s_list for item in sublist])
+    def _generate_entity_mappings(self, scheme_dictionary, entities):
+        entity_mappings = []
+        for mapping in self.mappings.values():
+            archive = mapping.archive
+            for (obj, symbol, scheme_name) in mapping.entries:
+                entity = Entity(archive, obj, symbol)
+                # Check the entity exists
+                if (self.check_mappings and
+                        entity.specificity.value > Entity.Specificity.ARCHIVE.value and
+                        mapping.name not in self.check_mapping_exceptions):
+                    if not entities.check_exists(entity):
+                        message = "'%s' not found" % str(entity)
+                        raise GenerationException(message, mapping)
+                # Create a placement rule for each 'section -> target' in the scheme.
+                # For example, the mapping entry
+                #
+                #   obj (scheme)
+                #
+                # is enumerated to:
+                #
+                #   obj (section1 -> target1)
+                #   obj (section2 -> target2)
+                #   ...
+                for (target, sections) in scheme_dictionary[scheme_name].items():
+                    for section in sections:
+                        entity_mappings.append(Generation.EntityMapping(entity, self.get_section_strs(section), target))
+        return entity_mappings
+    def generate_rules(self, entities):
+        scheme_dictionary = self._build_scheme_dictionary()
+        entity_mappings = self._generate_entity_mappings(scheme_dictionary, entities)
+        entity_mappings.sort(key=lambda m: m.entity)
+        # Create the rule tree root and insert the default scheme's
+        # section -> target placements first.
+        root_node = RootNode()
+        for (target, sections) in scheme_dictionary['default'].items():
+            for section in sections:
+                root_node.insert(Entity(), self.get_section_strs(section), target, entities)
+        for mapping in entity_mappings:
+            (entity, sections, target) = mapping
+            try:
+                root_node.insert(entity, sections, target, entities)
+            except ValueError as e:
+                raise GenerationException(str(e))
+        # Traverse the tree, creating the rules
+        commands = root_node.get_output_commands()
+        return commands
     def add_fragments_from_file(self, fragment_file):
         for fragment in fragment_file.fragments:
@@ -493,79 +412,6 @@ class GenerationModel:
             dict_to_append_to[fragment.name] = fragment
-class TemplateModel:
-    """
-    Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
-    final output.
-    """
-    Marker = collections.namedtuple('Marker', 'target indent rules')
-    def __init__(self, template_file):
-        self.members = []
-        self.file = os.path.realpath(template_file.name)
-        self._generate_members(template_file)
-    def _generate_members(self, template_file):
-        lines = template_file.readlines()
-        target = Fragment.IDENTIFIER
-        reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
-        pattern = White(' \t').setResultsName('indent') + reference
-        # Find the markers in the template file line by line. If line does not match marker grammar,
-        # set it as a literal to be copied as is to the output file.
-        for line in lines:
-            try:
-                parsed = pattern.parseString(line)
-                indent = parsed.indent
-                target = parsed.target
-                marker = TemplateModel.Marker(target, indent, [])
-                self.members.append(marker)
-            except ParseException:
-                # Does not match marker syntax
-                self.members.append(line)
-    def fill(self, mapping_rules):
-        for member in self.members:
-            target = None
-            try:
-                target = member.target
-                rules = member.rules
-                del rules[:]
-                rules.extend(mapping_rules[target])
-            except KeyError:
-                message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
-                raise GenerationException(message)
-            except AttributeError:
-                pass
-    def write(self, output_file):
-        # Add information that this is a generated file.
-        output_file.write('/* Automatically generated file; DO NOT EDIT */\n')
-        output_file.write('/* Espressif IoT Development Framework Linker Script */\n')
-        output_file.write('/* Generated from: %s */\n' % self.file)
-        output_file.write('\n')
-        # Do the text replacement
-        for member in self.members:
-            try:
-                indent = member.indent
-                rules = member.rules
-                for rule in rules:
-                    generated_line = ''.join([indent, str(rule), '\n'])
-                    output_file.write(generated_line)
-            except AttributeError:
-                output_file.write(member)
 class GenerationException(LdGenFailure):
     """
     Exception for linker script generation failures such as undefined references/ failure to
@@ -583,90 +429,3 @@ class GenerationException(LdGenFailure):
             return "%s\nIn fragment '%s' defined in '%s'." % (self.message, self.fragment.name, self.fragment.path)
         else:
             return self.message
-class SectionsInfo(dict):
-    """
-    Encapsulates an output of objdump. Contains information about the static library sections
-    and names
-    """
-    __info = collections.namedtuple('__info', 'filename content')
-    def __init__(self):
-        self.sections = dict()
-    def add_sections_info(self, sections_info_dump):
-        first_line = sections_info_dump.readline()
-        archive_path = (Literal('In archive').suppress() +
-                        White().suppress() +
-                        # trim the colon and line ending characters from archive_path
-                        restOfLine.setResultsName('archive_path').setParseAction(lambda s, loc, toks: s.rstrip(':\n\r ')))
-        parser = archive_path
-        results = None
-        try:
-            results = parser.parseString(first_line, parseAll=True)
-        except ParseException as p:
-            raise ParseException('Parsing sections info for library ' + sections_info_dump.name + ' failed. ' + p.msg)
-        archive = os.path.basename(results.archive_path)
-        self.sections[archive] = SectionsInfo.__info(sections_info_dump.name, sections_info_dump.read())
-    def _get_infos_from_file(self, info):
-        # {object}: file format elf32-xtensa-le
-        object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)
-        # Sections:
-        # Idx Name ...
-        section_start = Suppress(Literal('Sections:'))
-        section_header = Suppress(OneOrMore(Word(alphas)))
-        # 00 {section} 0000000 ...
-        #    CONTENTS, ALLOC, ....
-        section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
-            Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))
-        content = Group(object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
-        parser = Group(ZeroOrMore(content)).setResultsName('contents')
-        results = None
-        try:
-            results = parser.parseString(info.content, parseAll=True)
-        except ParseException as p:
-            raise ParseException('Unable to parse section info file ' + info.filename + '. ' + p.msg)
-        return results
-    def get_obj_sections(self, archive, obj):
-        res = []
-        try:
-            stored = self.sections[archive]
-            # Parse the contents of the sections file on-demand,
-            # save the result for later
-            if not isinstance(stored, dict):
-                parsed = self._get_infos_from_file(stored)
-                stored = dict()
-                for content in parsed.contents:
-                    sections = list(map(lambda s: s, content.sections))
-                    stored[content.object] = sections
-                self.sections[archive] = stored
-            try:
-                res = stored[obj + '.o']
-            except KeyError:
-                try:
-                    res = stored[obj + '.c.obj']
-                except KeyError:
-                    try:
-                        res = stored[obj + '.cpp.obj']
-                    except KeyError:
-                        res = stored[obj + '.S.obj']
-        except KeyError:
-            pass
-        return res
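To make the refactor concrete, here is a hedged sketch (not part of the commit) of how the new rule tree behaves, assuming the classes work as shown in the diff above; the archive, object, section and target names are illustrative only:

# Hypothetical walk-through of the rule tree; 'libfoo.a', 'bar', 'flash_text'
# and 'iram0_text' are made-up names.
from entity import Entity, EntityDB
from generation import RootNode

entities = EntityDB()  # normally populated via add_sections_info() from objdump output
root = RootNode()

# Default scheme placement: all '.text' input sections go to the 'flash_text' target.
root.insert(Entity(), frozenset(['.text', '.text.*']), 'flash_text', entities)

# A mapping fragment entry such as 'libfoo.a:bar (noflash)' arrives as an insertion
# for an OBJ-specificity entity with a different target.
root.insert(Entity('libfoo.a', 'bar'), frozenset(['.text', '.text.*']), 'iram0_text', entities)

# Walking the tree yields InputSectionDesc commands grouped by target; the root's
# 'flash_text' command now carries an exclusion for libfoo.a:bar, while the object
# node emits its own command under 'iram0_text'.
for target, cmds in root.get_output_commands().items():
    print(target, [str(c) for c in cmds])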

tools/ldgen/ldgen.py

@@ -24,9 +24,11 @@ import sys
 import tempfile
 from io import StringIO
+from entity import EntityDB
 from fragments import FragmentFile
-from generation import GenerationModel, SectionsInfo, TemplateModel
+from generation import Generation
 from ldgen_common import LdGenFailure
+from linker_script import LinkerScript
 from pyparsing import ParseException, ParseFatalException
 from sdkconfig import SDKConfig
@@ -125,7 +127,7 @@ def main():
     check_mapping_exceptions = None
     try:
-        sections_infos = SectionsInfo()
+        sections_infos = EntityDB()
         for library in libraries_file:
             library = library.strip()
             if library:
@@ -133,7 +135,7 @@ def main():
                 dump.name = library
                 sections_infos.add_sections_info(dump)
-        generation_model = GenerationModel(check_mapping, check_mapping_exceptions)
+        generation_model = Generation(check_mapping, check_mapping_exceptions)
         _update_environment(args)  # assign args.env and args.env_file to os.environ
@@ -151,7 +153,7 @@ def main():
         mapping_rules = generation_model.generate_rules(sections_infos)
-        script_model = TemplateModel(input_file)
+        script_model = LinkerScript(input_file)
         script_model.fill(mapping_rules)
         with tempfile.TemporaryFile('w+') as output:
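For context, a hedged end-to-end sketch of the pipeline that main() wires together after this change; the objdump binary, library and file names are illustrative, and fragment files would additionally be registered through Generation.add_fragments_from_file():

# Illustrative only; not part of the commit.
import subprocess
from io import StringIO

from entity import EntityDB
from generation import Generation
from linker_script import LinkerScript

sections_infos = EntityDB()
dump = StringIO(subprocess.check_output(['xtensa-esp32-elf-objdump', '-h', 'libmain.a']).decode())
dump.name = 'libmain.a'  # add_sections_info() reads the .name attribute for error messages
sections_infos.add_sections_info(dump)

generation_model = Generation()
# ... generation_model.add_fragments_from_file(fragment_file) for each parsed fragment file ...

mapping_rules = generation_model.generate_rules(sections_infos)

with open('sections.ld.in') as template, open('sections.ld', 'w') as output:
    script_model = LinkerScript(template)
    script_model.fill(mapping_rules)
    script_model.write(output)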

tools/ldgen/linker_script.py (new file, +95 lines)

@@ -0,0 +1,95 @@
#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import os
from fragments import Fragment
from generation import GenerationException
from pyparsing import ParseException, Suppress, White
class LinkerScript:
"""
Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
final output.
"""
Marker = collections.namedtuple('Marker', 'target indent rules')
def __init__(self, template_file):
self.members = []
self.file = os.path.realpath(template_file.name)
self._generate_members(template_file)
def _generate_members(self, template_file):
lines = template_file.readlines()
target = Fragment.IDENTIFIER
reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
pattern = White(' \t').setResultsName('indent') + reference
# Find the markers in the template file line by line. If line does not match marker grammar,
# set it as a literal to be copied as is to the output file.
for line in lines:
try:
parsed = pattern.parseString(line)
indent = parsed.indent
target = parsed.target
marker = LinkerScript.Marker(target, indent, [])
self.members.append(marker)
except ParseException:
# Does not match marker syntax
self.members.append(line)
def fill(self, mapping_rules):
for member in self.members:
target = None
try:
target = member.target
rules = member.rules
del rules[:]
rules.extend(mapping_rules[target])
except KeyError:
message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
raise GenerationException(message)
except AttributeError:
pass
def write(self, output_file):
# Add information that this is a generated file.
output_file.write('/* Automatically generated file; DO NOT EDIT */\n')
output_file.write('/* Espressif IoT Development Framework Linker Script */\n')
output_file.write('/* Generated from: %s */\n' % self.file)
output_file.write('\n')
# Do the text replacement
for member in self.members:
try:
indent = member.indent
rules = member.rules
for rule in rules:
generated_line = ''.join([indent, str(rule), '\n'])
output_file.write(generated_line)
except AttributeError:
output_file.write(member)
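A small, hypothetical template excerpt helps illustrate what _generate_members() treats as a marker: a line consisting of leading whitespace followed by mapping[<target>] becomes a Marker, and every other line is copied through verbatim by write(). The template text, target name and rule string below are made up for illustration:

# Illustrative sketch, not part of the commit.
import sys
from io import StringIO

from linker_script import LinkerScript

template = StringIO(
    '.iram0.text :\n'
    '{\n'
    '    mapping[iram0_text]\n'
    '} > iram0_0_seg\n'
)
template.name = 'sections.ld.in'

script = LinkerScript(template)
# fill() expects a dict of target name -> list of rule objects; str() is called on
# each rule when writing, so plain strings suffice for this sketch.
script.fill({'iram0_text': ['*libfoo.a:bar.*(.text .text.*)']})
script.write(sys.stdout)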