Mirror of https://github.com/espressif/esp-idf.git

Merge branch 'refactor/ldgen_generation_changes' into 'master'

ldgen: internal data structure change

See merge request espressif/esp-idf!11494
@@ -72,8 +72,10 @@ test_ldgen_on_host:
   extends: .host_test_template
   script:
     - cd tools/ldgen/test
-    - ./test_fragments.py
-    - ./test_generation.py
+    - ${IDF_PATH}/tools/ci/multirun_with_pyenv.sh ./test_fragments.py
+    - ${IDF_PATH}/tools/ci/multirun_with_pyenv.sh ./test_generation.py
+    - ${IDF_PATH}/tools/ci/multirun_with_pyenv.sh ./test_entity.py
+    - ${IDF_PATH}/tools/ci/multirun_with_pyenv.sh ./test_output_commands.py
   variables:
     LC_ALL: C.UTF-8
@@ -88,8 +88,10 @@ tools/kconfig_new/test/confserver/test_confserver.py
 tools/kconfig_new/test/gen_kconfig_doc/test_kconfig_out.py
 tools/kconfig_new/test/gen_kconfig_doc/test_target_visibility.py
 tools/ldgen/ldgen.py
+tools/ldgen/test/test_entity.py
 tools/ldgen/test/test_fragments.py
 tools/ldgen/test/test_generation.py
+tools/ldgen/test/test_output_commands.py
 tools/mass_mfg/mfg_gen.py
 tools/mkdfu.py
 tools/mkuf2.py
222 tools/ldgen/entity.py Normal file
@@ -0,0 +1,222 @@
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import collections
import fnmatch
import os
from enum import Enum
from functools import total_ordering

from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
                       nums, restOfLine)


@total_ordering
class Entity():
    """
    Definition of an entity which can be placed or excluded
    from placement.
    """

    ALL = '*'

    class Specificity(Enum):
        NONE = 0
        ARCHIVE = 1
        OBJ = 2
        SYMBOL = 3

    def __init__(self, archive=None, obj=None, symbol=None):
        archive_spec = archive and archive != Entity.ALL
        obj_spec = obj and obj != Entity.ALL
        symbol_spec = symbol and symbol != Entity.ALL

        if not archive_spec and not obj_spec and not symbol_spec:
            self.specificity = Entity.Specificity.NONE
        elif archive_spec and not obj_spec and not symbol_spec:
            self.specificity = Entity.Specificity.ARCHIVE
        elif archive_spec and obj_spec and not symbol_spec:
            self.specificity = Entity.Specificity.OBJ
        elif archive_spec and obj_spec and symbol_spec:
            self.specificity = Entity.Specificity.SYMBOL
        else:
            raise ValueError("Invalid arguments '(%s, %s, %s)'" % (archive, obj, symbol))

        self.archive = archive
        self.obj = obj
        self.symbol = symbol

    def __eq__(self, other):
        return (self.specificity.value == other.specificity.value and
                self.archive == other.archive and
                self.obj == other.obj and
                self.symbol == other.symbol)

    def __lt__(self, other):
        res = False
        if self.specificity.value < other.specificity.value:
            res = True
        elif self.specificity == other.specificity:
            for s in Entity.Specificity:
                a = self[s] if self[s] else ''
                b = other[s] if other[s] else ''

                if a != b:
                    res = a < b
                    break
        else:
            res = False
        return res

    def __hash__(self):
        return hash(self.__repr__())

    def __str__(self):
        return '%s:%s %s' % self.__repr__()

    def __repr__(self):
        return (self.archive, self.obj, self.symbol)

    def __getitem__(self, spec):
        res = None
        if spec == Entity.Specificity.ARCHIVE:
            res = self.archive
        elif spec == Entity.Specificity.OBJ:
            res = self.obj
        elif spec == Entity.Specificity.SYMBOL:
            res = self.symbol
        else:
            res = None
        return res


class EntityDB():
    """
    Encapsulates an output of objdump. Contains information about the static library sections
    and names
    """

    __info = collections.namedtuple('__info', 'filename content')

    def __init__(self):
        self.sections = dict()

    def add_sections_info(self, sections_info_dump):
        first_line = sections_info_dump.readline()

        archive_path = (Literal('In archive').suppress() +
                        White().suppress() +
                        # trim the colon and line ending characters from archive_path
                        restOfLine.setResultsName('archive_path').setParseAction(lambda s, loc, toks: s.rstrip(':\n\r ')))
        parser = archive_path

        results = None

        try:
            results = parser.parseString(first_line, parseAll=True)
        except ParseException as p:
            raise ParseException('Parsing sections info for library ' + sections_info_dump.name + ' failed. ' + p.msg)

        archive = os.path.basename(results.archive_path)
        self.sections[archive] = EntityDB.__info(sections_info_dump.name, sections_info_dump.read())

    def _get_infos_from_file(self, info):
        # {object}: file format elf32-xtensa-le
        object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)

        # Sections:
        # Idx Name ...
        section_start = Suppress(Literal('Sections:'))
        section_header = Suppress(OneOrMore(Word(alphas)))

        # 00 {section} 0000000 ...
        # CONTENTS, ALLOC, ....
        section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
            Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))

        content = Group(object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
        parser = Group(ZeroOrMore(content)).setResultsName('contents')

        results = None

        try:
            results = parser.parseString(info.content, parseAll=True)
        except ParseException as p:
            raise ParseException('Unable to parse section info file ' + info.filename + '. ' + p.msg)

        return results

    def _process_archive(self, archive):
        stored = self.sections[archive]

        # Parse the contents of the sections file on-demand,
        # save the result for later
        if not isinstance(stored, dict):
            parsed = self._get_infos_from_file(stored)
            stored = dict()
            for content in parsed.contents:
                sections = list(map(lambda s: s, content.sections))
                stored[content.object] = sections
            self.sections[archive] = stored

    def get_archives(self):
        return self.sections.keys()

    def get_objects(self, archive):
        try:
            self._process_archive(archive)
        except KeyError:
            return []

        return self.sections[archive].keys()

    def _match_obj(self, archive, obj):
        objs = self.get_objects(archive)
        match_objs = fnmatch.filter(objs, obj + '.o') + fnmatch.filter(objs, obj + '.*.obj') + fnmatch.filter(objs, obj + '.obj')

        if len(match_objs) > 1:
            raise ValueError("Multiple matches for object: '%s: %s': %s" % (archive, obj, str(match_objs)))

        try:
            return match_objs[0]
        except IndexError:
            return None

    def get_sections(self, archive, obj):
        obj = self._match_obj(archive, obj)
        res = []
        if obj:
            res = self.sections[archive][obj]
        return res

    def _match_symbol(self, archive, obj, symbol):
        sections = self.get_sections(archive, obj)
        return [s for s in sections if s.endswith(symbol)]

    def check_exists(self, entity):
        res = True

        if entity.specificity != Entity.Specificity.NONE:
            if entity.specificity == Entity.Specificity.ARCHIVE:
                res = entity.archive in self.get_archives()
            elif entity.specificity == Entity.Specificity.OBJ:
                res = self._match_obj(entity.archive, entity.obj) is not None
            elif entity.specificity == Entity.Specificity.SYMBOL:
                res = len(self._match_symbol(entity.archive, entity.obj, entity.symbol)) > 0
            else:
                res = False

        return res
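A short usage sketch of the new module (illustrative only, not part of the commit; the archive, object, and symbol names are hypothetical, and the objdump-style dump is abridged to the format add_sections_info() and _get_infos_from_file() parse):

from io import StringIO
from entity import Entity, EntityDB

# Specificity increases from archive to object to symbol; total_ordering
# sorts entities by specificity first, then by their name fields.
lib = Entity('libfreertos.a')                                     # Specificity.ARCHIVE
obj = Entity('libfreertos.a', 'croutine')                         # Specificity.OBJ
sym = Entity('libfreertos.a', 'croutine', 'prvCheckDelayedList')  # Specificity.SYMBOL
assert sorted([sym, lib, obj]) == [lib, obj, sym]
assert Entity().specificity == Entity.Specificity.NONE

# EntityDB consumes an 'objdump -h'-style dump; .name is set on the
# stream, just as ldgen.py does before calling add_sections_info().
dump = StringIO(
    'In archive /build/esp-idf/freertos/libfreertos.a:\n'
    '\n'
    'croutine.c.obj: file format elf32-xtensa-le\n'
    '\n'
    'Sections:\n'
    'Idx Name Size VMA LMA File off Algn\n'
    '0 .text.prvCheckDelayedList 000000ac 00000000 00000000 00000130 2**2\n'
    'CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE\n'
)
dump.name = 'libfreertos.a.txt'

db = EntityDB()
db.add_sections_info(dump)
# The sections dump is parsed lazily on first lookup; a symbol-level
# entity exists if some section of the matched object ends with its name.
assert db.check_exists(sym)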
@@ -1,5 +1,5 @@
 #
-# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
+# Copyright 2021 Espressif Systems (Shanghai) CO LTD
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
 #
-# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
+# Copyright 2021 Espressif Systems (Shanghai) CO LTD
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,242 +17,256 @@
import collections
import fnmatch
import itertools
import os
from collections import namedtuple

from fragments import Fragment, Mapping, Scheme, Sections
from entity import Entity
from fragments import Mapping, Scheme, Sections
from ldgen_common import LdGenFailure
from pyparsing import (Group, Literal, OneOrMore, ParseException, SkipTo, Suppress, White, Word, ZeroOrMore, alphas,
nums, restOfLine)
from output_commands import InputSectionDesc


class PlacementRule():
"""
Encapsulates a generated placement rule placed under a target
"""
class RuleNode():

DEFAULT_SPECIFICITY = 0
ARCHIVE_SPECIFICITY = 1
OBJECT_SPECIFICITY = 2
SYMBOL_SPECIFICITY = 3
class Section():

class __container():
def __init__(self, content):
self.content = content
def __init__(self, target, exclusions, explicit=False):
self.target = target
self.exclusions = set(exclusions)

__metadata = collections.namedtuple('__metadata', 'excludes expansions expanded')
# Indicate whether this node has been created explicitly from a mapping,
# or simply just to create a path to the explicitly created node.
#
# For example,
#
# lib.a
# obj:sym (scheme)
#
# Nodes for lib.a and obj will be created, but only the node for
# sym will have been created explicitly.
#
# This is used in deciding whether or not an output command should
# be emitted for this node, or for exclusion rule generation.
self.explicit = explicit

def __init__(self, archive, obj, symbol, sections, target):
if archive == '*':
archive = None
def __init__(self, parent, name, sections):
self.children = []
self.parent = parent
self.name = name
self.child_node = None
self.entity = None

if obj == '*':
obj = None

self.archive = archive
self.obj = obj
self.symbol = symbol
self.target = target
self.sections = dict()

self.specificity = 0
self.specificity += 1 if self.archive else 0
self.specificity += 1 if (self.obj and not self.obj == '*') else 0
self.specificity += 1 if self.symbol else 0
# A node inherits the section -> target entries from
# its parent. This is to simplify logic, avoiding
# going up the parental chain to try a 'basis' rule
# in creating exclusions. This relies on the fact that
# the mappings must be inserted from least to most specific.
# This sort is done in generate_rules().
if sections:
for (s, v) in sections.items():
self.sections[s] = RuleNode.Section(v.target, [], [])

for section in sections:
section_data = Sections.get_section_data_from_entry(section, self.symbol)
def add_exclusion(self, sections, exclusion):
self.sections[sections].exclusions.add(exclusion)

if not self.symbol:
for s in section_data:
metadata = self.__metadata(self.__container([]), self.__container([]), self.__container(False))
self.sections[s] = metadata
# Recursively create exclusions in parents
if self.parent:
self.exclude_from_parent(sections)

def add_sections(self, sections, target):
try:
_sections = self.sections[sections]
if not _sections.explicit:
_sections.target = target
_sections.explicit = True
else:
(section, expansion) = section_data
if expansion:
metadata = self.__metadata(self.__container([]), self.__container([expansion]), self.__container(True))
self.sections[section] = metadata
if target != _sections.target:
raise GenerationException('Sections mapped to multiple targets')
except KeyError:
self.sections[sections] = RuleNode.Section(target, [], True)

def get_section_names(self):
return self.sections.keys()
def exclude_from_parent(self, sections):
self.parent.add_exclusion(sections, self.entity)

def add_exclusion(self, other, sections_infos=None):
# Utility functions for this method
def do_section_expansion(rule, section):
if section in rule.get_section_names():
sections_in_obj = sections_infos.get_obj_sections(rule.archive, rule.obj)
expansions = fnmatch.filter(sections_in_obj, section)
return expansions
def add_child(self, entity):
child_specificity = self.entity.specificity.value + 1
assert(child_specificity <= Entity.Specificity.SYMBOL.value)
name = entity[Entity.Specificity(child_specificity)]
assert(name and name != Entity.ALL)

def remove_section_expansions(rule, section, expansions):
existing_expansions = self.sections[section].expansions.content
self.sections[section].expansions.content = [e for e in existing_expansions if e not in expansions]
child = [c for c in self.children if c.name == name]
assert(len(child) <= 1)

# Exit immediately if the exclusion to be added is more general than this rule.
if not other.is_more_specific_rule_of(self):
return

for section in self.get_sections_intersection(other):
if(other.specificity == PlacementRule.SYMBOL_SPECIFICITY):
# If this sections has not been expanded previously, expand now and keep track.
previously_expanded = self.sections[section].expanded.content
if not previously_expanded:
expansions = do_section_expansion(self, section)
if expansions:
self.sections[section].expansions.content = expansions
self.sections[section].expanded.content = True
previously_expanded = True

# Remove the sections corresponding to the symbol name
remove_section_expansions(self, section, other.sections[section].expansions.content)

# If it has been expanded previously but now the expansions list is empty,
# it means adding exclusions has exhausted the list. Remove the section entirely.
if previously_expanded and not self.sections[section].expanded.content:
del self.sections[section]
else:
# A rule section can have multiple rule sections excluded from it. Get the
# most specific rule from the list, and if an even more specific rule is found,
# replace it entirely. Otherwise, keep appending.
exclusions = self.sections[section].excludes
exclusions_list = exclusions.content if exclusions.content is not None else []
exclusions_to_remove = filter(lambda r: r.is_more_specific_rule_of(other), exclusions_list)

remaining_exclusions = [e for e in exclusions_list if e not in exclusions_to_remove]
remaining_exclusions.append(other)

self.sections[section].excludes.content = remaining_exclusions

def get_sections_intersection(self, other):
return set(self.sections.keys()).intersection(set(other.sections.keys()))

def is_more_specific_rule_of(self, other):
if (self.specificity <= other.specificity):
return False

# Compare archive, obj and target
for entity_index in range(1, other.specificity + 1):
if self[entity_index] != other[entity_index] and other[entity_index] is not None:
return False

return True

def maps_same_entities_as(self, other):
if self.specificity != other.specificity:
return False

# Compare archive, obj and target
for entity_index in range(1, other.specificity + 1):
if self[entity_index] != other[entity_index] and other[entity_index] is not None:
return False

return True

def __getitem__(self, key):
if key == PlacementRule.ARCHIVE_SPECIFICITY:
return self.archive
elif key == PlacementRule.OBJECT_SPECIFICITY:
return self.obj
elif key == PlacementRule.SYMBOL_SPECIFICITY:
return self.symbol
if not child:
child = self.child_node(self, name, self.sections)
self.children.append(child)
else:
return None
child = child[0]

def __str__(self):
sorted_sections = sorted(self.get_section_names())
return child

sections_string = list()
def get_output_commands(self):
commands = collections.defaultdict(list)

for section in sorted_sections:
exclusions = self.sections[section].excludes.content
def process_commands(cmds):
for (target, commands_list) in cmds.items():
commands[target].extend(commands_list)

exclusion_string = None
# Process the commands generated from this node
node_commands = self.get_node_output_commands()
process_commands(node_commands)

if exclusions:
exclusion_string = ' '.join(map(lambda e: '*' + e.archive + (':' + e.obj + '.*' if e.obj else ''), exclusions))
exclusion_string = 'EXCLUDE_FILE(' + exclusion_string + ')'
else:
exclusion_string = ''
# Process the commands generated from this node's children
# recursively
for child in sorted(self.children, key=lambda c: c.name):
children_commands = child.get_output_commands()
process_commands(children_commands)

section_string = None
exclusion_section_string = None
return commands

section_expansions = self.sections[section].expansions.content
section_expanded = self.sections[section].expanded.content
def add_node_child(self, entity, sections, target, sections_db):
child = self.add_child(entity)
child.insert(entity, sections, target, sections_db)

if section_expansions and section_expanded:
section_string = ' '.join(section_expansions)
exclusion_section_string = section_string
else:
section_string = section
exclusion_section_string = exclusion_string + ' ' + section_string
def get_node_output_commands(self):
commands = collections.defaultdict(list)

sections_string.append(exclusion_section_string)
for sections in self.get_section_keys():
info = self.sections[sections]
if info.exclusions or info.explicit:
command = InputSectionDesc(self.entity, sections, info.exclusions)
commands[info.target].append(command)

sections_string = ' '.join(sections_string)
return commands

archive = str(self.archive) if self.archive else ''
obj = (str(self.obj) + ('.*' if self.obj else '')) if self.obj else ''

# Handle output string generation based on information available
if self.specificity == PlacementRule.DEFAULT_SPECIFICITY:
rule_string = '*(%s)' % (sections_string)
elif self.specificity == PlacementRule.ARCHIVE_SPECIFICITY:
rule_string = '*%s:(%s)' % (archive, sections_string)
def insert(self, entity, sections, target, sections_db):
if self.entity.specificity == entity.specificity:
if self.parent.sections[sections].target != target:
self.add_sections(sections, target)
self.exclude_from_parent(sections)
else:
rule_string = '*%s:%s(%s)' % (archive, obj, sections_string)
self.add_node_child(entity, sections, target, sections_db)

return rule_string

def __eq__(self, other):
if id(self) == id(other):
return True

def exclusions_set(exclusions):
exclusions_set = {(e.archive, e.obj, e.symbol, e.target) for e in exclusions}
return exclusions_set

if self.archive != other.archive:
return False

if self.obj != other.obj:
return False

if self.symbol != other.symbol:
return False

if set(self.sections.keys()) != set(other.sections.keys()):
return False

for (section, metadata) in self.sections.items():

self_meta = metadata
other_meta = other.sections[section]

if exclusions_set(self_meta.excludes.content) != exclusions_set(other_meta.excludes.content):
return False

if set(self_meta.expansions.content) != set(other_meta.expansions.content):
return False

return True

def __ne__(self, other):
return not self.__eq__(other)

def __iter__(self):
yield self.archive
yield self.obj
yield self.symbol
raise StopIteration
def get_section_keys(self):
return sorted(self.sections.keys(), key=' '.join)


class GenerationModel:
class SymbolNode(RuleNode):

def __init__(self, parent, name, sections):
RuleNode.__init__(self, parent, name, sections)
self.entity = Entity(self.parent.parent.name, self.parent.name, self.name)

def insert(self, entity, sections, target, sections_db):
self.add_sections(sections, target)

def get_node_output_commands(self):
commands = collections.defaultdict(list)

for sections in self.get_section_keys():
info = self.sections[sections]
if info.explicit:
command = InputSectionDesc(Entity(self.parent.parent.name, self.parent.name), sections, [])
commands[info.target].append(command)

return commands


class ObjectNode(RuleNode):

def __init__(self, parent, name, sections):
RuleNode.__init__(self, parent, name, sections)
self.child_node = SymbolNode
self.expanded_sections = dict()
self.entity = Entity(self.parent.name, self.name)

def add_node_child(self, entity, sections, target, sections_db):
if self.sections[sections].target != target:
symbol = entity.symbol
match_sections = None

obj_sections = sections_db.get_sections(self.parent.name, self.name)

try:
match_sections = self.expanded_sections[sections]
except KeyError:
match_sections = []
for s in sections:
match_sections.extend(fnmatch.filter(obj_sections, s))

if match_sections:
remove_sections = [s.replace('.*', '.%s' % symbol) for s in sections if '.*' in s]
filtered_sections = [s for s in match_sections if s not in remove_sections]

if set(filtered_sections) != set(match_sections):  # some sections removed
child = self.add_child(entity)
child.insert(entity, frozenset(remove_sections), target, obj_sections)

# Remember the result for node command generation
self.expanded_sections[sections] = filtered_sections
self.exclude_from_parent(sections)

def get_node_output_commands(self):
commands = collections.defaultdict(list)

for sections in self.get_section_keys():
info = self.sections[sections]

try:
match_sections = self.expanded_sections[sections]
except KeyError:
match_sections = []

if match_sections or info.explicit:
command_sections = match_sections if match_sections else sections
command = InputSectionDesc(self.entity, command_sections, [])
commands[info.target].append(command)

return commands

def exclude_from_parent(self, sections):
# Check if there is an explicit emission for the parent node, which is an archive node.
# If there is, make the exclusion there. If not, make the exclusion on the root node.
# This is to avoid emitting unnecessary commands and exclusions for the archive node and
# from the root node, respectively.
if self.parent.sections[sections].explicit:
self.parent.add_exclusion(sections, self.entity)
else:
self.parent.parent.add_exclusion(sections, self.entity)


class ArchiveNode(RuleNode):

def __init__(self, parent, name, sections):
RuleNode.__init__(self, parent, name, sections)
self.child_node = ObjectNode
self.entity = Entity(self.name)


class RootNode(RuleNode):
def __init__(self):
RuleNode.__init__(self, None, Entity.ALL, None)
self.child_node = ArchiveNode
self.entity = Entity('*')

def insert(self, entity, sections, target, sections_db):
if self.entity.specificity == entity.specificity:
self.add_sections(sections, target)
else:
self.add_node_child(entity, sections, target, sections_db)


class Generation:
"""
Implements generation of placement rules based on collected sections, scheme and mapping fragment.
"""

DEFAULT_SCHEME = 'default'

# Processed mapping, scheme and section entries
EntityMapping = namedtuple('EntityMapping', 'entity sections_group target')

def __init__(self, check_mappings=False, check_mapping_exceptions=None):
self.schemes = {}
self.sections = {}
@@ -265,23 +279,6 @@ class GenerationModel:
else:
self.check_mapping_exceptions = []

def _add_mapping_rules(self, archive, obj, symbol, scheme_name, scheme_dict, rules):
# Use an ordinary dictionary to raise exception on non-existing keys
temp_dict = dict(scheme_dict)

sections_bucket = temp_dict[scheme_name]

for (target, sections) in sections_bucket.items():
section_entries = []

for section in sections:
section_entries.extend(section.entries)

rule = PlacementRule(archive, obj, symbol, section_entries, target)

if rule not in rules:
rules.append(rule)

def _build_scheme_dictionary(self):
scheme_dictionary = collections.defaultdict(dict)

@@ -297,7 +294,7 @@ class GenerationModel:
try:
sections = self.sections[sections_name]
except KeyError:
message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections + "'."
message = GenerationException.UNDEFINED_REFERENCE + " to sections '" + sections_name + "'."
raise GenerationException(message, scheme)

sections_in_bucket.append(sections)
@@ -327,147 +324,69 @@ class GenerationModel:

return scheme_dictionary

def generate_rules(self, sections_infos):
scheme_dictionary = self._build_scheme_dictionary()
def get_section_strs(self, section):
s_list = [Sections.get_section_data_from_entry(s) for s in section.entries]
return frozenset([item for sublist in s_list for item in sublist])

# Generate default rules
default_rules = list()
self._add_mapping_rules(None, None, None, GenerationModel.DEFAULT_SCHEME, scheme_dictionary, default_rules)
def _generate_entity_mappings(self, scheme_dictionary, entities):
entity_mappings = []

all_mapping_rules = collections.defaultdict(list)

# Generate rules based on mapping fragments
for mapping in self.mappings.values():
archive = mapping.archive
mapping_rules = all_mapping_rules[archive]

for (obj, symbol, scheme_name) in mapping.entries:
try:
if not (obj == Mapping.MAPPING_ALL_OBJECTS and symbol is None and
scheme_name == GenerationModel.DEFAULT_SCHEME):
if self.check_mappings and mapping.name not in self.check_mapping_exceptions:
if not obj == Mapping.MAPPING_ALL_OBJECTS:
obj_sections = sections_infos.get_obj_sections(archive, obj)
if not obj_sections:
message = "'%s:%s' not found" % (archive, obj)
raise GenerationException(message, mapping)
entity = Entity(archive, obj, symbol)

if symbol:
obj_sym = fnmatch.filter(obj_sections, '*%s' % symbol)
if not obj_sym:
message = "'%s:%s %s' not found" % (archive, obj, symbol)
raise GenerationException(message, mapping)
# Check the entity exists
if (self.check_mappings and
entity.specificity.value > Entity.Specificity.ARCHIVE.value and
mapping.name not in self.check_mapping_exceptions):
if not entities.check_exists(entity):
message = "'%s' not found" % str(entity)
raise GenerationException(message, mapping)

self._add_mapping_rules(archive, obj, symbol, scheme_name, scheme_dictionary, mapping_rules)
except KeyError:
message = GenerationException.UNDEFINED_REFERENCE + " to scheme '" + scheme_name + "'."
raise GenerationException(message, mapping)
# Create a placement rule for each 'section -> target' in the scheme.
#
# For example, the mapping entry:
#
# obj (scheme)
#
# is enumerated to:
#
# obj (section1 -> target1)
# obj (section2 -> target2)
# ...
for (target, sections) in scheme_dictionary[scheme_name].items():
for section in sections:
entity_mappings.append(Generation.EntityMapping(entity, self.get_section_strs(section), target))

# Detect rule conflicts
for mapping_rules in all_mapping_rules.items():
self._detect_conflicts(mapping_rules)
return entity_mappings

# Add exclusions
for mapping_rules in all_mapping_rules.values():
self._create_exclusions(mapping_rules, default_rules, sections_infos)
def generate_rules(self, entities):
scheme_dictionary = self._build_scheme_dictionary()

placement_rules = collections.defaultdict(list)
entity_mappings = self._generate_entity_mappings(scheme_dictionary, entities)

# Add the default rules grouped by target
for default_rule in default_rules:
existing_rules = placement_rules[default_rule.target]
if default_rule.get_section_names():
existing_rules.append(default_rule)
entity_mappings.sort(key=lambda m: m.entity)

archives = sorted(all_mapping_rules.keys())
# Create root nodes dictionary for the default scheme, whose
# key is the target name and value is a list of the root nodes for that target.
root_node = RootNode()
for (target, sections) in scheme_dictionary['default'].items():
for section in sections:
root_node.insert(Entity(), self.get_section_strs(section), target, entities)

for archive in archives:
# Add the mapping rules grouped by target
mapping_rules = sorted(all_mapping_rules[archive], key=lambda m: (m.specificity, str(m)))
for mapping_rule in mapping_rules:
existing_rules = placement_rules[mapping_rule.target]
if mapping_rule.get_section_names():
existing_rules.append(mapping_rule)
for mapping in entity_mappings:
(entity, sections, target) = mapping
try:
root_node.insert(entity, sections, target, entities)
except ValueError as e:
raise GenerationException(str(e))

return placement_rules
# Traverse the tree, creating the rules
commands = root_node.get_output_commands()

def _detect_conflicts(self, rules):
(archive, rules_list) = rules

for specificity in range(0, PlacementRule.OBJECT_SPECIFICITY + 1):
rules_with_specificity = filter(lambda r: r.specificity == specificity, rules_list)

for rule_a, rule_b in itertools.combinations(rules_with_specificity, 2):
intersections = rule_a.get_sections_intersection(rule_b)

if intersections and rule_a.maps_same_entities_as(rule_b):
rules_string = str([str(rule_a), str(rule_b)])
message = 'Rules ' + rules_string + ' map sections ' + str(list(intersections)) + ' into multiple targets.'
raise GenerationException(message)

def _create_extra_rules(self, rules):
# This function generates extra rules for symbol specific rules. The reason for generating extra rules is to isolate,
# as much as possible, rules that require expansion. Particularly, object specific extra rules are generated.
rules_to_process = sorted(rules, key=lambda r: r.specificity)
symbol_specific_rules = list(filter(lambda r: r.specificity == PlacementRule.SYMBOL_SPECIFICITY, rules_to_process))

extra_rules = dict()

for symbol_specific_rule in symbol_specific_rules:
extra_rule_candidate = {s: None for s in symbol_specific_rule.get_section_names()}

super_rules = filter(lambda r: symbol_specific_rule.is_more_specific_rule_of(r), rules_to_process)

# Take a look at the existing rules that are more general than the current symbol-specific rule.
# Only generate an extra rule if there is no existing object specific rule for that section
for super_rule in super_rules:
intersections = symbol_specific_rule.get_sections_intersection(super_rule)
for intersection in intersections:
if super_rule.specificity != PlacementRule.OBJECT_SPECIFICITY:
extra_rule_candidate[intersection] = super_rule
else:
extra_rule_candidate[intersection] = None

# Generate the extra rules for the symbol specific rule section, keeping track of the generated extra rules
for (section, section_rule) in extra_rule_candidate.items():
if section_rule:
extra_rule = None
extra_rules_key = (symbol_specific_rule.archive, symbol_specific_rule.obj, section_rule.target)

try:
extra_rule = extra_rules[extra_rules_key]

if section not in extra_rule.get_section_names():
new_rule = PlacementRule(extra_rule.archive, extra_rule.obj, extra_rule.symbol,
list(extra_rule.get_section_names()) + [section], extra_rule.target)
extra_rules[extra_rules_key] = new_rule
except KeyError:
extra_rule = PlacementRule(symbol_specific_rule.archive, symbol_specific_rule.obj, None, [section], section_rule.target)
extra_rules[extra_rules_key] = extra_rule

return extra_rules.values()

def _create_exclusions(self, mapping_rules, default_rules, sections_info):
rules = list(default_rules)
rules.extend(mapping_rules)

extra_rules = self._create_extra_rules(rules)

mapping_rules.extend(extra_rules)
rules.extend(extra_rules)

# Sort the rules by means of how specific they are. Sort by specificity from lowest to highest
# * -> lib:* -> lib:obj -> lib:obj:symbol
sorted_rules = sorted(rules, key=lambda r: r.specificity)
# Now that the rules have been sorted, loop through each rule, and then loop
# through rules below it (higher indices), adding exclusions whenever appropriate.
for general_rule in sorted_rules:
for specific_rule in reversed(sorted_rules):
if (specific_rule.specificity > general_rule.specificity and
specific_rule.specificity != PlacementRule.SYMBOL_SPECIFICITY) or \
(specific_rule.specificity == PlacementRule.SYMBOL_SPECIFICITY and
general_rule.specificity == PlacementRule.OBJECT_SPECIFICITY):
general_rule.add_exclusion(specific_rule, sections_info)
return commands

def add_fragments_from_file(self, fragment_file):
for fragment in fragment_file.fragments:
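The hunk above interleaves the removed PlacementRule/GenerationModel code with the new RuleNode tree. As a rough, self-contained sketch of the new idea only (this is not the module's API; the class and names below are invented for illustration): mappings are inserted from least to most specific, a node is created per archive/object/symbol path component, and each deeper node carves itself out of its ancestor's placement via exclusions:

# Hypothetical, heavily simplified model of the RuleNode tree.
class Node:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent
        self.children = {}
        self.exclusions = set()  # names carved out of this node's command

    def insert(self, path):
        # path, e.g. ['libfreertos.a', 'croutine'], least to most specific
        if not path:
            return self
        child = self.children.setdefault(path[0], Node(path[0], self))
        self.exclusions.add(path[0])  # parent no longer places child's input
        return child.insert(path[1:])

root = Node('*')
root.insert(['libfreertos.a', 'croutine'])
# Conceptually, root's command now carries an EXCLUDE_FILE(...) for the
# archive, and the croutine node emits '*libfreertos.a:croutine.*(...)'
# itself. The real implementation additionally tracks per-section targets,
# explicit vs. implicit nodes, and where exclusions are attached.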
@@ -493,79 +412,6 @@ class GenerationModel:
        dict_to_append_to[fragment.name] = fragment


class TemplateModel:
    """
    Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
    final output.
    """

    Marker = collections.namedtuple('Marker', 'target indent rules')

    def __init__(self, template_file):
        self.members = []
        self.file = os.path.realpath(template_file.name)

        self._generate_members(template_file)

    def _generate_members(self, template_file):
        lines = template_file.readlines()

        target = Fragment.IDENTIFIER
        reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
        pattern = White(' \t').setResultsName('indent') + reference

        # Find the markers in the template file line by line. If line does not match marker grammar,
        # set it as a literal to be copied as is to the output file.
        for line in lines:
            try:
                parsed = pattern.parseString(line)

                indent = parsed.indent
                target = parsed.target

                marker = TemplateModel.Marker(target, indent, [])

                self.members.append(marker)
            except ParseException:
                # Does not match marker syntax
                self.members.append(line)

    def fill(self, mapping_rules):
        for member in self.members:
            target = None
            try:
                target = member.target
                rules = member.rules

                del rules[:]

                rules.extend(mapping_rules[target])
            except KeyError:
                message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
                raise GenerationException(message)
            except AttributeError:
                pass

    def write(self, output_file):
        # Add information that this is a generated file.
        output_file.write('/* Automatically generated file; DO NOT EDIT */\n')
        output_file.write('/* Espressif IoT Development Framework Linker Script */\n')
        output_file.write('/* Generated from: %s */\n' % self.file)
        output_file.write('\n')

        # Do the text replacement
        for member in self.members:
            try:
                indent = member.indent
                rules = member.rules

                for rule in rules:
                    generated_line = ''.join([indent, str(rule), '\n'])
                    output_file.write(generated_line)
            except AttributeError:
                output_file.write(member)


class GenerationException(LdGenFailure):
    """
    Exception for linker script generation failures such as undefined references/ failure to
@@ -583,90 +429,3 @@ class GenerationException(LdGenFailure):
            return "%s\nIn fragment '%s' defined in '%s'." % (self.message, self.fragment.name, self.fragment.path)
        else:
            return self.message


class SectionsInfo(dict):
    """
    Encapsulates an output of objdump. Contains information about the static library sections
    and names
    """

    __info = collections.namedtuple('__info', 'filename content')

    def __init__(self):
        self.sections = dict()

    def add_sections_info(self, sections_info_dump):
        first_line = sections_info_dump.readline()

        archive_path = (Literal('In archive').suppress() +
                        White().suppress() +
                        # trim the colon and line ending characters from archive_path
                        restOfLine.setResultsName('archive_path').setParseAction(lambda s, loc, toks: s.rstrip(':\n\r ')))
        parser = archive_path

        results = None

        try:
            results = parser.parseString(first_line, parseAll=True)
        except ParseException as p:
            raise ParseException('Parsing sections info for library ' + sections_info_dump.name + ' failed. ' + p.msg)

        archive = os.path.basename(results.archive_path)
        self.sections[archive] = SectionsInfo.__info(sections_info_dump.name, sections_info_dump.read())

    def _get_infos_from_file(self, info):
        # {object}: file format elf32-xtensa-le
        object_line = SkipTo(':').setResultsName('object') + Suppress(restOfLine)

        # Sections:
        # Idx Name ...
        section_start = Suppress(Literal('Sections:'))
        section_header = Suppress(OneOrMore(Word(alphas)))

        # 00 {section} 0000000 ...
        # CONTENTS, ALLOC, ....
        section_entry = Suppress(Word(nums)) + SkipTo(' ') + Suppress(restOfLine) + \
            Suppress(ZeroOrMore(Word(alphas) + Literal(',')) + Word(alphas))

        content = Group(object_line + section_start + section_header + Group(OneOrMore(section_entry)).setResultsName('sections'))
        parser = Group(ZeroOrMore(content)).setResultsName('contents')

        results = None

        try:
            results = parser.parseString(info.content, parseAll=True)
        except ParseException as p:
            raise ParseException('Unable to parse section info file ' + info.filename + '. ' + p.msg)

        return results

    def get_obj_sections(self, archive, obj):
        res = []
        try:
            stored = self.sections[archive]

            # Parse the contents of the sections file on-demand,
            # save the result for later
            if not isinstance(stored, dict):
                parsed = self._get_infos_from_file(stored)
                stored = dict()
                for content in parsed.contents:
                    sections = list(map(lambda s: s, content.sections))
                    stored[content.object] = sections
                self.sections[archive] = stored

            try:
                res = stored[obj + '.o']
            except KeyError:
                try:
                    res = stored[obj + '.c.obj']
                except KeyError:
                    try:
                        res = stored[obj + '.cpp.obj']
                    except KeyError:
                        res = stored[obj + '.S.obj']
        except KeyError:
            pass

        return res
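Worth noting: the removed get_obj_sections() above resolved an object name by trying hard-coded suffixes ('.o', '.c.obj', '.cpp.obj', '.S.obj') in nested try/except blocks, while the new EntityDB._match_obj() (in entity.py above) matches with fnmatch patterns and rejects ambiguous results. A small illustration, with hypothetical candidate names:

import fnmatch

candidates = ['croutine.c.obj', 'timers.o', 'port.cpp.obj']
obj = 'croutine'
matches = (fnmatch.filter(candidates, obj + '.o') +
           fnmatch.filter(candidates, obj + '.*.obj') +
           fnmatch.filter(candidates, obj + '.obj'))
assert matches == ['croutine.c.obj']
# Had more than one name matched, _match_obj() would raise ValueError
# instead of silently picking one, as the old nested lookups could.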
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
+# Copyright 2021 Espressif Systems (Shanghai) CO LTD
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,9 +24,11 @@ import sys
 import tempfile
 from io import StringIO

+from entity import EntityDB
 from fragments import FragmentFile
-from generation import GenerationModel, SectionsInfo, TemplateModel
+from generation import Generation
 from ldgen_common import LdGenFailure
+from linker_script import LinkerScript
 from pyparsing import ParseException, ParseFatalException
 from sdkconfig import SDKConfig

@@ -125,7 +127,7 @@ def main():
     check_mapping_exceptions = None

     try:
-        sections_infos = SectionsInfo()
+        sections_infos = EntityDB()
         for library in libraries_file:
             library = library.strip()
             if library:
@@ -133,7 +135,7 @@ def main():
                 dump.name = library
                 sections_infos.add_sections_info(dump)

-        generation_model = GenerationModel(check_mapping, check_mapping_exceptions)
+        generation_model = Generation(check_mapping, check_mapping_exceptions)

         _update_environment(args)  # assign args.env and args.env_file to os.environ

@@ -151,7 +153,7 @@ def main():

         mapping_rules = generation_model.generate_rules(sections_infos)

-        script_model = TemplateModel(input_file)
+        script_model = LinkerScript(input_file)
         script_model.fill(mapping_rules)

         with tempfile.TemporaryFile('w+') as output:
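Pieced together from the hunks above, the refactored main() flow is roughly as follows (a condensed sketch; argument handling and fragment collection are omitted, libraries_file/input_file/check_mapping/check_mapping_exceptions come from ldgen.py's argument parsing, and get_objdump_output() is a hypothetical stand-in for the objdump invocation):

from io import StringIO
from entity import EntityDB
from generation import Generation
from linker_script import LinkerScript

sections_infos = EntityDB()
for library in libraries_file:
    library = library.strip()
    if library:
        dump = StringIO(get_objdump_output(library))  # hypothetical helper
        dump.name = library
        sections_infos.add_sections_info(dump)

generation_model = Generation(check_mapping, check_mapping_exceptions)
# ... generation_model.add_fragments_from_file(...) per fragment file ...
mapping_rules = generation_model.generate_rules(sections_infos)

script_model = LinkerScript(input_file)
script_model.fill(mapping_rules)
# script_model.write(...) then produces the final linker script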
@@ -1,5 +1,5 @@
 #
-# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
+# Copyright 2021 Espressif Systems (Shanghai) CO LTD
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
95 tools/ldgen/linker_script.py Normal file
@@ -0,0 +1,95 @@
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import collections
import os

from fragments import Fragment
from generation import GenerationException
from pyparsing import ParseException, Suppress, White


class LinkerScript:
    """
    Encapsulates a linker script template file. Finds marker syntax and handles replacement to generate the
    final output.
    """

    Marker = collections.namedtuple('Marker', 'target indent rules')

    def __init__(self, template_file):
        self.members = []
        self.file = os.path.realpath(template_file.name)

        self._generate_members(template_file)

    def _generate_members(self, template_file):
        lines = template_file.readlines()

        target = Fragment.IDENTIFIER
        reference = Suppress('mapping') + Suppress('[') + target.setResultsName('target') + Suppress(']')
        pattern = White(' \t').setResultsName('indent') + reference

        # Find the markers in the template file line by line. If line does not match marker grammar,
        # set it as a literal to be copied as is to the output file.
        for line in lines:
            try:
                parsed = pattern.parseString(line)

                indent = parsed.indent
                target = parsed.target

                marker = LinkerScript.Marker(target, indent, [])

                self.members.append(marker)
            except ParseException:
                # Does not match marker syntax
                self.members.append(line)

    def fill(self, mapping_rules):
        for member in self.members:
            target = None
            try:
                target = member.target
                rules = member.rules

                del rules[:]

                rules.extend(mapping_rules[target])
            except KeyError:
                message = GenerationException.UNDEFINED_REFERENCE + " to target '" + target + "'."
                raise GenerationException(message)
            except AttributeError:
                pass

    def write(self, output_file):
        # Add information that this is a generated file.
        output_file.write('/* Automatically generated file; DO NOT EDIT */\n')
        output_file.write('/* Espressif IoT Development Framework Linker Script */\n')
        output_file.write('/* Generated from: %s */\n' % self.file)
        output_file.write('\n')

        # Do the text replacement
        for member in self.members:
            try:
                indent = member.indent
                rules = member.rules

                for rule in rules:
                    generated_line = ''.join([indent, str(rule), '\n'])
                    output_file.write(generated_line)
            except AttributeError:
                output_file.write(member)
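An illustrative usage sketch for the class above (not from the commit; the template text and target name are invented, and anything with a str() works as a rule, so a plain string stands in for an InputSectionDesc here):

from io import StringIO
from linker_script import LinkerScript

template = StringIO(
    '.flash.text :\n'
    '{\n'
    '    mapping[flash_text]\n'
    '}\n'
)
template.name = 'template.ld'  # LinkerScript records the template path

script = LinkerScript(template)
script.fill({'flash_text': ['*(.literal .text .literal.* .text.*)']})

out = StringIO()
script.write(out)
# Output: the generated-file banner comments, then the template with the
# 'mapping[flash_text]' marker replaced by the rule at the same indent:
#     .flash.text :
#     {
#         *(.literal .text .literal.* .text.*)
#     }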
78 tools/ldgen/output_commands.py Normal file
@@ -0,0 +1,78 @@
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from entity import Entity


class InputSectionDesc():

    def __init__(self, entity, sections, exclusions=None):
        assert(entity.specificity != Entity.Specificity.SYMBOL)

        self.entity = entity
        self.sections = set(sections)

        self.exclusions = set()

        if exclusions:
            assert(not [e for e in exclusions if e.specificity == Entity.Specificity.SYMBOL or
                        e.specificity == Entity.Specificity.NONE])
            self.exclusions = set(exclusions)
        else:
            self.exclusions = set()

    def __str__(self):
        if self.sections:
            exclusion_strings = []

            for exc in sorted(self.exclusions):
                if exc.specificity == Entity.Specificity.ARCHIVE:
                    exc_string = '*%s' % (exc.archive)
                else:
                    exc_string = '*%s:%s.*' % (exc.archive, exc.obj)

                exclusion_strings.append(exc_string)

            section_strings = []

            if exclusion_strings:
                exclusion_string = 'EXCLUDE_FILE(%s)' % ' '.join(exclusion_strings)

                for section in sorted(self.sections):
                    section_strings.append('%s %s' % (exclusion_string, section))
            else:
                for section in sorted(self.sections):
                    section_strings.append(section)

            sections_string = '(%s)' % ' '.join(section_strings)
        else:
            sections_string = '( )'

        command = None

        if self.entity.specificity == Entity.Specificity.NONE:
            command = '*%s' % (sections_string)
        elif self.entity.specificity == Entity.Specificity.ARCHIVE:
            command = '*%s:%s' % (self.entity.archive, sections_string)
        else:
            command = '*%s:%s.*%s' % (self.entity.archive, self.entity.obj, sections_string)

        return command

    def __eq__(self, other):
        return (self.entity == other.entity and
                self.sections == other.sections and
                self.exclusions == other.exclusions)
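How the class renders the three entity specificity levels into GNU ld input-section syntax (illustrative only; archive/object names are hypothetical):

from entity import Entity
from output_commands import InputSectionDesc

sections = ['.literal', '.text', '.literal.*', '.text.*']

print(InputSectionDesc(Entity('*'), sections))
# *(.literal .literal.* .text .text.*)

print(InputSectionDesc(Entity('libfreertos.a'), sections))
# *libfreertos.a:(.literal .literal.* .text .text.*)

print(InputSectionDesc(Entity('libfreertos.a', 'croutine'), sections,
                       exclusions=[Entity('libfreertos.a', 'timers')]))
# *libfreertos.a:croutine.*(EXCLUDE_FILE(*libfreertos.a:timers.*) .literal ...)
# Note that the EXCLUDE_FILE(...) expression is repeated before every
# section in the sorted section list.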
@@ -1,5 +1,5 @@
 #
-# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
+# Copyright 2021 Espressif Systems (Shanghai) CO LTD
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -343,6 +343,111 @@ Idx Name Size VMA LMA File off Algn
|
||||
49 .xt.prop 00000408 00000000 00000000 00002f3e 2**0
|
||||
CONTENTS, RELOC, READONLY
|
||||
|
||||
port.cpp.obj: file format elf32-xtensa-le
|
||||
|
||||
Sections:
|
||||
Idx Name Size VMA LMA File off Algn
|
||||
0 .literal.pxPortInitialiseStack 00000018 00000000 00000000 00000034 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
1 .literal.xPortStartScheduler 00000014 00000000 00000000 0000004c 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
2 .literal.xPortSysTickHandler 00000008 00000000 00000000 00000060 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
3 .literal.vPortYieldOtherCore 00000004 00000000 00000000 00000068 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
4 .literal.vPortReleaseTaskMPUSettings 00000004 00000000 00000000 0000006c 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
5 .literal.xPortInIsrContext 00000008 00000000 00000000 00000070 2**2
|
||||
CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
|
||||
6 .iram1.literal 00000004 00000000 00000000 00000078 2**2
|
||||
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
  7 .literal.vPortAssertIfInISR 00000018 00000000 00000000 0000007c 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
  8 .literal.vPortCPUInitializeMutex 00000004 00000000 00000000 00000094 2**2
      CONTENTS, ALLOC, LOAD, READONLY, CODE
  9 .literal.vPortCPUAcquireMutex 00000030 00000000 00000000 00000098 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 10 .literal.vPortCPUAcquireMutexTimeout 00000030 00000000 00000000 000000c8 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 11 .literal.vPortCPUReleaseMutex 00000028 00000000 00000000 000000f8 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 12 .literal.vPortSetStackWatchpoint 00000008 00000000 00000000 00000120 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 13 .text 00000000 00000000 00000000 00000128 2**0
      CONTENTS, ALLOC, LOAD, READONLY, CODE
 14 .data 00000000 00000000 00000000 00000128 2**0
      CONTENTS, ALLOC, LOAD, DATA
 15 .bss 00000000 00000000 00000000 00000128 2**0
      ALLOC
 16 .text.pxPortInitialiseStack 00000086 00000000 00000000 00000128 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 17 .text.vPortEndScheduler 00000005 00000000 00000000 000001b0 2**2
      CONTENTS, ALLOC, LOAD, READONLY, CODE
 18 .text.xPortStartScheduler 0000002e 00000000 00000000 000001b8 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 19 .text.xPortSysTickHandler 00000016 00000000 00000000 000001e8 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 20 .text.vPortYieldOtherCore 0000000e 00000000 00000000 00000200 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 21 .text.vPortStoreTaskMPUSettings 00000013 00000000 00000000 00000210 2**2
      CONTENTS, ALLOC, LOAD, READONLY, CODE
 22 .text.vPortReleaseTaskMPUSettings 0000000e 00000000 00000000 00000224 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 23 .text.xPortInIsrContext 00000026 00000000 00000000 00000234 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 24 .iram1 0000001a 00000000 00000000 0000025c 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 25 .rodata.str1.4 0000013b 00000000 00000000 00000278 2**2
      CONTENTS, ALLOC, LOAD, READONLY, DATA
 26 .text.vPortAssertIfInISR 00000025 00000000 00000000 000003b4 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 27 .text.vPortCPUInitializeMutex 0000000e 00000000 00000000 000003dc 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 28 .text.vPortCPUAcquireMutex 00000088 00000000 00000000 000003ec 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 29 .text.vPortCPUAcquireMutexTimeout 000000ab 00000000 00000000 00000474 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 30 .text.vPortCPUReleaseMutex 00000061 00000000 00000000 00000520 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 31 .text.vPortSetStackWatchpoint 0000001a 00000000 00000000 00000584 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 32 .text.xPortGetTickRateHz 00000008 00000000 00000000 000005a0 2**2
      CONTENTS, ALLOC, LOAD, READONLY, CODE
 33 .rodata.__func__$5264 00000029 00000000 00000000 000005a8 2**2
      CONTENTS, ALLOC, LOAD, READONLY, DATA
 34 .rodata.__func__$5259 00000029 00000000 00000000 000005d4 2**2
      CONTENTS, ALLOC, LOAD, READONLY, DATA
 35 .rodata.__FUNCTION__$5243 00000013 00000000 00000000 00000600 2**2
      CONTENTS, ALLOC, LOAD, READONLY, DATA
 36 .bss.port_interruptNesting 00000008 00000000 00000000 00000614 2**2
      ALLOC
 37 .bss.port_xSchedulerRunning 00000008 00000000 00000000 00000614 2**2
      ALLOC
 38 .debug_frame 00000190 00000000 00000000 00000614 2**2
      CONTENTS, RELOC, READONLY, DEBUGGING
 39 .debug_info 00000e78 00000000 00000000 000007a4 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 40 .debug_abbrev 00000404 00000000 00000000 0000161c 2**0
      CONTENTS, READONLY, DEBUGGING
 41 .debug_loc 000005f1 00000000 00000000 00001a20 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 42 .debug_aranges 00000098 00000000 00000000 00002011 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 43 .debug_ranges 000000a0 00000000 00000000 000020a9 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 44 .debug_line 000005fb 00000000 00000000 00002149 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 45 .debug_str 0000071f 00000000 00000000 00002744 2**0
      CONTENTS, READONLY, DEBUGGING
 46 .comment 0000003b 00000000 00000000 00002e63 2**0
      CONTENTS, READONLY
 47 .xtensa.info 00000038 00000000 00000000 00002e9e 2**0
      CONTENTS, READONLY
 48 .xt.lit 00000068 00000000 00000000 00002ed6 2**0
      CONTENTS, RELOC, READONLY
 49 .xt.prop 00000408 00000000 00000000 00002f3e 2**0
      CONTENTS, RELOC, READONLY

portasm.S.obj: file format elf32-xtensa-le

Sections:
39 tools/ldgen/test/data/test_entity/libfreertos.a.txt Normal file
@@ -0,0 +1,39 @@
In archive /home/user/build/esp-idf/freertos/libfreertos.a:

croutine.c.obj: file format elf32-xtensa-le

Sections:
Idx Name Size VMA LMA File off Algn
  0 .literal.prvCheckPendingReadyList 00000018 00000000 00000000 00000034 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
  1 .literal.prvCheckDelayedList 0000002c 00000000 00000000 0000004c 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE

croutine.cpp.obj: file format elf32-xtensa-le

Sections:
Idx Name Size VMA LMA File off Algn
  9 .text.prvCheckPendingReadyList 00000056 00000000 00000000 000000d8 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
 10 .text.prvCheckDelayedList 000000ac 00000000 00000000 00000130 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE

croutine.S.obj: file format elf32-xtensa-le

Sections:
Idx Name Size VMA LMA File off Algn
 26 .debug_frame 000000a0 00000000 00000000 00000394 2**2
      CONTENTS, RELOC, READONLY, DEBUGGING
 27 .debug_info 000006b8 00000000 00000000 00000434 2**0
      CONTENTS, RELOC, READONLY, DEBUGGING
 28 .debug_abbrev 00000233 00000000 00000000 00000aec 2**0
      CONTENTS, READONLY, DEBUGGING

timers.o: file format elf32-xtensa-le

Sections:
Idx Name Size VMA LMA File off Algn
  0 .literal.prvGetNextExpireTime 00000004 00000000 00000000 00000034 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
  1 .literal.prvInsertTimerInActiveList 00000010 00000000 00000000 00000038 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
@@ -1,4 +1,4 @@
In archive /home/user/ãóç+ěščřžýáíé/build/esp-idf/freertos/libsections_parse.a:
In archive /home/user/ãóç+ěščřžýáíé/build/esp-idf/freertos/ěščřžýáíé.a:

croutine.c.obj: file format elf32-littleriscv

@@ -11,9 +11,9 @@ Idx Name Size VMA LMA File off Algn
  2 .bss 00000000 00000000 00000000 00000034 2**0
      ALLOC

FreeRTOS-openocd.c.obj: file format elf32-xtensa-le // 'F' should not get included in match for 'CONTENTS, ALLOC, LOAD ...' prior
FreeRTOS-ěščřžýáíé.c.obj: file format elf32-xtensa-le // 'F' should not get included in match for 'CONTENTS, ALLOC, LOAD ...' prior

Sections:
Idx Name Size VMA LMA File off Algn
  0 .literal.prvCheckPendingReadyList 00000018 00000000 00000000 00000034 2**2
  0 .literal.ěščřžýáíé 00000018 00000000 00000000 00000034 2**2
      CONTENTS, ALLOC, LOAD, RELOC, READONLY, CODE
251 tools/ldgen/test/test_entity.py Executable file
@@ -0,0 +1,251 @@
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2018-2020 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import unittest

try:
    from entity import Entity, EntityDB
except ImportError:
    sys.path.append('../')
    from entity import Entity, EntityDB


class EntityTest(unittest.TestCase):

    def test_create_none(self):
        entity = Entity(Entity.ALL)
        self.assertEqual(Entity.Specificity.NONE, entity.specificity)

        entity = Entity(None)
        self.assertEqual(Entity.Specificity.NONE, entity.specificity)

        entity = Entity()
        self.assertEqual(Entity.Specificity.NONE, entity.specificity)

    def test_create_archive(self):
        entity = Entity('libfreertos.a')
        self.assertEqual(Entity.Specificity.ARCHIVE, entity.specificity)

        entity = Entity('libfreertos.a', Entity.ALL, Entity.ALL)
        self.assertEqual(Entity.Specificity.ARCHIVE, entity.specificity)

        entity = Entity('libfreertos.a', None, None)
        self.assertEqual(Entity.Specificity.ARCHIVE, entity.specificity)

        entity = Entity('libfreertos.a', Entity.ALL, None)
        self.assertEqual(Entity.Specificity.ARCHIVE, entity.specificity)

        entity = Entity('libfreertos.a', None, Entity.ALL)
        self.assertEqual(Entity.Specificity.ARCHIVE, entity.specificity)

    def test_create_obj(self):
        entity = Entity('libfreertos.a', 'croutine')
        self.assertEqual(Entity.Specificity.OBJ, entity.specificity)

        entity = Entity('libfreertos.a', 'croutine', Entity.ALL)
        self.assertEqual(Entity.Specificity.OBJ, entity.specificity)

        entity = Entity('libfreertos.a', 'croutine', None)
        self.assertEqual(Entity.Specificity.OBJ, entity.specificity)

    def test_create_symbol(self):
        entity = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')
        self.assertEqual(Entity.Specificity.SYMBOL, entity.specificity)

    def test_create_invalid(self):
        with self.assertRaises(ValueError):
            Entity(None, 'croutine')

        with self.assertRaises(ValueError):
            Entity(Entity.ALL, 'croutine')

        with self.assertRaises(ValueError):
            Entity(None, None, 'prvCheckPendingReadyList')

        with self.assertRaises(ValueError):
            Entity(Entity.ALL, Entity.ALL, 'prvCheckPendingReadyList')

        with self.assertRaises(ValueError):
            Entity(None, Entity.ALL, 'prvCheckPendingReadyList')

        with self.assertRaises(ValueError):
            Entity(Entity.ALL, None, 'prvCheckPendingReadyList')

        with self.assertRaises(ValueError):
            Entity('libfreertos.a', None, 'prvCheckPendingReadyList')

        with self.assertRaises(ValueError):
            Entity('libfreertos.a', Entity.ALL, 'prvCheckPendingReadyList')

    def test_compare_different_specificity(self):
        # Different specificity: NONE < ARCHIVE < OBJ < SYMBOL
        entity_a = Entity()
        entity_b = Entity('libfreertos.a')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a')
        entity_b = Entity('libfreertos.a', 'croutine')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine')
        entity_b = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity(Entity.ALL)
        entity_b = Entity('libfreertos.a')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', Entity.ALL)
        entity_b = Entity('libfreertos.a', 'croutine')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine', Entity.ALL)
        entity_b = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')
        self.assertLess(entity_a, entity_b)

    def test_compare_equal(self):
        # Compare equal specificities and members
        entity_a = Entity()
        entity_b = Entity()
        self.assertEqual(entity_a, entity_b)

        entity_a = Entity('libfreertos.a')
        entity_b = Entity('libfreertos.a')
        self.assertEqual(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine')
        entity_b = Entity('libfreertos.a', 'croutine')
        self.assertEqual(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')
        entity_b = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')
        self.assertEqual(entity_a, entity_b)

    def test_compare_none_vs_all(self):
        # Two entities might have the same specificity whether
        # Entity.ALL is used or not specified; the latter is
        # considered less than the former.
        entity_a = Entity()
        entity_b = Entity(Entity.ALL)
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a')
        entity_b = Entity('libfreertos.a', Entity.ALL, Entity.ALL)
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine')
        entity_b = Entity('libfreertos.a', 'croutine', Entity.ALL)
        self.assertLess(entity_a, entity_b)

    def test_compare_same_specificity(self):
        # Test that entities will be compared alphabetically
        # when the specificities are the same.
        entity_a = Entity('libfreertos_a.a')
        entity_b = Entity('libfreertos_b.a')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos_b.a', 'croutine_a')
        entity_b = Entity('libfreertos_a.a', 'croutine_b')
        self.assertLess(entity_b, entity_a)

        entity_a = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList_a')
        entity_b = Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList_b')
        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine_b', 'prvCheckPendingReadyList_a')
        entity_b = Entity('libfreertos.a', 'croutine_a', 'prvCheckPendingReadyList_b')
        self.assertLess(entity_b, entity_a)

        entity_a = Entity('libfreertos_a.a', 'croutine_b', 'prvCheckPendingReadyList_a')
        entity_b = Entity('libfreertos_b.a', 'croutine_a', 'prvCheckPendingReadyList_b')
        self.assertLess(entity_a, entity_b)

    def test_compare_all_non_character(self):
        # Test that Entity.ALL is not treated as an
        # ordinary character in comparisons.
        entity_a = Entity(Entity.ALL)
        entity_b = Entity(chr(ord(Entity.ALL[0]) - 1))

        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', Entity.ALL)
        entity_b = Entity('libfreertos.a', chr(ord(Entity.ALL[0]) - 1))

        self.assertLess(entity_a, entity_b)

        entity_a = Entity('libfreertos.a', 'croutine', '*')
        entity_b = Entity('libfreertos.a', 'croutine', chr(ord(Entity.ALL[0]) - 1))

        self.assertLess(entity_a, entity_b)


class EntityDBTest(unittest.TestCase):

    def setUp(self):
        self.entities = EntityDB()

        with open('data/test_entity/libfreertos.a.txt') as objdump:
            self.entities.add_sections_info(objdump)

    def test_get_archives(self):
        archives = self.entities.get_archives()
        self.assertEqual(set(archives), set(['libfreertos.a']))

    def test_get_objs(self):
        objs = self.entities.get_objects('libfreertos.a')
        self.assertEqual(set(objs), set(['croutine.S.obj', 'croutine.c.obj', 'croutine.cpp.obj', 'timers.o']))

    def test_get_sections(self):
        # Needs disambiguation between possible matches: croutine.S, croutine.c, croutine.cpp
        with self.assertRaises(ValueError):
            self.entities.get_sections('libfreertos.a', 'croutine')

        # Test disambiguation works
        sections = self.entities.get_sections('libfreertos.a', 'croutine.c')
        expected = set(['.literal.prvCheckPendingReadyList', '.literal.prvCheckDelayedList'])
        self.assertEqual(set(sections), expected)

        sections = self.entities.get_sections('libfreertos.a', 'croutine.S')
        expected = set(['.debug_frame', '.debug_info', '.debug_abbrev'])
        self.assertEqual(set(sections), expected)

        # Test .o extension works
        sections = self.entities.get_sections('libfreertos.a', 'timers')
        expected = set(['.literal.prvGetNextExpireTime', '.literal.prvInsertTimerInActiveList'])
        self.assertEqual(set(sections), expected)

    def test_parsing(self):
        # Tests parsing objdump with the following:
        #
        # - non-ascii characters
        # - different architecture string
        # - different column entries for each section
        # - unexpected 'comments'
        with open('data/test_entity/parse_test.txt') as objdump:
            self.entities.add_sections_info(objdump)

        sections = self.entities.get_sections('ěščřžýáíé.a', 'croutine')
        self.assertEqual(set(sections), set(['.text', '.data', '.bss']))

        sections = self.entities.get_sections('ěščřžýáíé.a', 'FreeRTOS-ěščřžýáíé')
        self.assertEqual(set(sections), set(['.literal.ěščřžýáíé']))


if __name__ == '__main__':
    unittest.main()
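The comparisons exercised by EntityTest above pin down a total order: specificity decides first (NONE < ARCHIVE < OBJ < SYMBOL), an unspecified member sorts before Entity.ALL, Entity.ALL sorts before any concrete name, and concrete names compare alphabetically. A minimal sketch of a sort key with those properties follows; it is an illustration of the expected ordering only, not the actual entity.py implementation:

def _member_key(value):
    # None sorts first, Entity.ALL ('*') next, concrete names last
    # (alphabetically); this keeps '*' from being compared as a character.
    if value is None:
        return (0, '')
    if value == Entity.ALL:
        return (1, '')
    return (2, value)

def entity_sort_key(entity):
    # Specificity dominates; ties fall through to the members in order.
    return (entity.specificity.value,
            _member_key(entity.archive),
            _member_key(entity.obj),
            _member_key(entity.symbol))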
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
# Copyright 2018-2019 Espressif Systems (Shanghai) PTE LTD
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
File diff suppressed because it is too large
141 tools/ldgen/test/test_output_commands.py Executable file
@@ -0,0 +1,141 @@
#!/usr/bin/env python
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import sys
import unittest

try:
    from output_commands import InputSectionDesc
except ImportError:
    sys.path.append('../')
    from output_commands import InputSectionDesc

from entity import Entity

SECTIONS = ['.text', '.text.*', '.literal', '.literal.*']

FREERTOS = Entity('libfreertos.a')
CROUTINE = Entity('libfreertos.a', 'croutine')


class InputSectionDescTest(unittest.TestCase):

    def test_output_00(self):
        # Test default (catch-all) command
        expected = '*(.literal .literal.* .text .text.*)'

        desc = InputSectionDesc(Entity(), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity(Entity.ALL), SECTIONS)
        self.assertEqual(expected, str(desc))

    def test_output_01(self):
        # Test library placement command
        expected = '*libfreertos.a:(.literal .literal.* .text .text.*)'

        desc = InputSectionDesc(Entity('libfreertos.a'), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity('libfreertos.a', Entity.ALL), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity('libfreertos.a', None, Entity.ALL), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity('libfreertos.a', Entity.ALL, Entity.ALL), SECTIONS)
        self.assertEqual(expected, str(desc))

    def test_output_02(self):
        # Test object placement command
        expected = '*libfreertos.a:croutine.*(.literal .literal.* .text .text.*)'

        desc = InputSectionDesc(Entity('libfreertos.a', 'croutine'), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity('libfreertos.a', 'croutine'), SECTIONS)
        self.assertEqual(expected, str(desc))

        desc = InputSectionDesc(Entity('libfreertos.a', 'croutine', Entity.ALL), SECTIONS)
        self.assertEqual(expected, str(desc))

        # Disambiguated placement
        expected = '*libfreertos.a:croutine.c.*(.literal .literal.* .text .text.*)'

        desc = InputSectionDesc(Entity('libfreertos.a', 'croutine.c'), SECTIONS)
        self.assertEqual(expected, str(desc))

    def test_output_03(self):
        # Invalid entity specification
        with self.assertRaises(AssertionError):
            InputSectionDesc(Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList'), SECTIONS)

        with self.assertRaises(AssertionError):
            InputSectionDesc(Entity('libfreertos.a', 'croutine'), SECTIONS, [Entity()])

        with self.assertRaises(AssertionError):
            InputSectionDesc(Entity('libfreertos.a', 'croutine'), SECTIONS, [Entity('libfreertos.a', 'croutine', 'prvCheckPendingReadyList')])

    def test_output_04(self):
        # Test exclusions

        # Library
        expected = ('*libfreertos.a:croutine.*'
                    '(EXCLUDE_FILE(*libfreertos.a) '
                    '.literal EXCLUDE_FILE(*libfreertos.a) '
                    '.literal.* EXCLUDE_FILE(*libfreertos.a) '
                    '.text EXCLUDE_FILE(*libfreertos.a) .text.*)')
        desc = InputSectionDesc(CROUTINE, SECTIONS, [FREERTOS])
        self.assertEqual(expected, str(desc))

        # Object
        expected = ('*libfreertos.a:croutine.*'
                    '(EXCLUDE_FILE(*libfreertos.a:croutine.*) '
                    '.literal EXCLUDE_FILE(*libfreertos.a:croutine.*) '
                    '.literal.* EXCLUDE_FILE(*libfreertos.a:croutine.*) '
                    '.text EXCLUDE_FILE(*libfreertos.a:croutine.*) .text.*)')
        desc = InputSectionDesc(CROUTINE, SECTIONS, [CROUTINE])
        self.assertEqual(expected, str(desc))

        # Multiple exclusions
        expected = ('*libfreertos.a:croutine.*'
                    '(EXCLUDE_FILE(*libfreertos.a *libfreertos.a:croutine.*) '
                    '.literal EXCLUDE_FILE(*libfreertos.a *libfreertos.a:croutine.*) '
                    '.literal.* EXCLUDE_FILE(*libfreertos.a *libfreertos.a:croutine.*) '
                    '.text EXCLUDE_FILE(*libfreertos.a *libfreertos.a:croutine.*) .text.*)')
        desc = InputSectionDesc(CROUTINE, SECTIONS, [FREERTOS, CROUTINE])
        self.assertEqual(expected, str(desc))

        # Disambiguated exclusion
        expected = ('*libfreertos.a:croutine.*'
                    '(EXCLUDE_FILE(*libfreertos.a:croutine.c.*) '
                    '.literal EXCLUDE_FILE(*libfreertos.a:croutine.c.*) '
                    '.literal.* EXCLUDE_FILE(*libfreertos.a:croutine.c.*) '
                    '.text EXCLUDE_FILE(*libfreertos.a:croutine.c.*) .text.*)')
        desc = InputSectionDesc(CROUTINE, SECTIONS, [Entity('libfreertos.a', 'croutine.c')])
        self.assertEqual(expected, str(desc))

    def test_output_05(self):
        # Test empty sections
        expected = '*libfreertos.a:croutine.*( )'

        desc = InputSectionDesc(Entity('libfreertos.a', 'croutine'), [])
        self.assertEqual(expected, str(desc))


if __name__ == '__main__':
    unittest.main()
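The expected strings above follow GNU LD input section description syntax: a placement prefix ('*', '*archive:', or '*archive:object.*'), then a parenthesised, alphabetically sorted section list, with each section optionally guarded by EXCLUDE_FILE(...) naming the excluded entities. The sketch below reproduces that string assembly for illustration, reusing Entity from entity.py; it is a guess at the shape of the output, not the actual output_commands.py code:

def _entity_str(entity, placement=False):
    # An archive used inside EXCLUDE_FILE has no trailing ':', but as a
    # placement prefix it does (e.g. '*libfreertos.a:').
    if entity.specificity == Entity.Specificity.NONE:
        return '*'
    if entity.specificity == Entity.Specificity.ARCHIVE:
        return '*%s%s' % (entity.archive, ':' if placement else '')
    return '*%s:%s.*' % (entity.archive, entity.obj)

def _desc_str(entity, sections, exclusions=()):
    excl = ''
    if exclusions:
        excl = 'EXCLUDE_FILE(%s) ' % ' '.join(_entity_str(e) for e in exclusions)
    # An empty section list renders as '( )', matching test_output_05.
    body = ' '.join(excl + s for s in sorted(sections)) or ' '
    return '%s(%s)' % (_entity_str(entity, placement=True), body)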
15 tools/test_apps/build_system/ldgen_test/CMakeLists.txt Normal file
@@ -0,0 +1,15 @@
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.5)

include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(ldgen_test)

idf_build_get_property(python PYTHON)
idf_build_get_property(elf EXECUTABLE)

add_custom_command(
    TARGET ${elf}
    POST_BUILD
    COMMAND ${python} ${CMAKE_CURRENT_LIST_DIR}/check_placements.py ${CMAKE_OBJDUMP} $<TARGET_FILE:${elf}>
)
5 tools/test_apps/build_system/ldgen_test/README.txt Normal file
@@ -0,0 +1,5 @@
Runs a build test to check ldgen places libraries, objects and symbols
correctly as specified in the linker fragments. Specifically, this app
tests the placement for the main component, as specified in `main/linker.lf`.
The Python script that performs the checks, `check_placements.py`, automatically
runs after the app is built.
64 tools/test_apps/build_system/ldgen_test/check_placements.py Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env python
#
# Copyright 2020 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Check placements in this test app for main
# specified in main/linker.lf

import argparse
import subprocess

from pyparsing import LineEnd, Literal, ParseException, SkipTo, Word, alphanums, hexnums

argparser = argparse.ArgumentParser()

argparser.add_argument('objdump')
argparser.add_argument('elf')

args = argparser.parse_args()

contents = subprocess.check_output([args.objdump, '-t', args.elf]).decode()


def check_location(symbol, expected):
    pattern = Word(alphanums + '._').setResultsName('actual') + Word(hexnums) + Literal(symbol) + LineEnd()
    pattern = SkipTo(pattern) + pattern

    try:
        results = pattern.parseString(contents)
    except ParseException:
        print("check placement fail: '%s' was not found" % (symbol))
        exit(1)

    if results.actual != expected:
        print("check placement fail: '%s' was placed in '%s', not in '%s'" % (symbol, results.actual, expected))
        exit(1)

    print("check placement pass: '%s' was successfully placed in '%s'" % (symbol, results.actual))


# src1:func1 (noflash) - explicit mapping for func1 using 'noflash' scheme
check_location('func1', '.iram0.text')

# src1:func2 (rtc) - explicit mapping for func2 using 'rtc' scheme
check_location('func2', '.rtc.text')

# src1 (default) - only func3 in src1 remains that has not been
# mapped using a different scheme
check_location('func3', '.flash.text')

# * (noflash) - no explicit mapping for src2
check_location('func4', '.iram0.text')
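check_location() above matches, anywhere in the `objdump -t` symbol table, a line ending in a section name, a hex size field, and the symbol itself. A hypothetical sample line, with address, flags and size made up for illustration:

# sample = '40080400 l     F .iram0.text 00000012 func1'
# pattern.parseString(sample) would bind results.actual to '.iram0.text',
# which check_location() then compares against the expected section name.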
3 tools/test_apps/build_system/ldgen_test/main/CMakeLists.txt Normal file
@@ -0,0 +1,3 @@
idf_component_register(SRCS "src1.c" "src2.c" "test_main.c"
                       INCLUDE_DIRS "."
                       LDFRAGMENTS "linker.lf")
7 tools/test_apps/build_system/ldgen_test/main/linker.lf Normal file
@@ -0,0 +1,7 @@
[mapping:main]
archive: libmain.a
entries:
    * (noflash)
    src1 (default)
    src1:func1 (noflash)
    src1:func2 (rtc)
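Read together with check_placements.py above, the entries illustrate the precedence the test relies on: the most specific matching entry decides where a section is placed. An annotated summary (comments are illustrative, schemes as in the fragment):

# * (noflash)          -> everything in libmain.a, unless overridden below
# src1 (default)       -> src1 falls back to the default scheme (hence func3)
# src1:func1 (noflash) -> symbol-level entry overrides 'src1 (default)'
# src1:func2 (rtc)     -> symbol-level entry overrides 'src1 (default)'
# src2 has no entry of its own, so '* (noflash)' applies (hence func4)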
16 tools/test_apps/build_system/ldgen_test/main/src1.c Normal file
@@ -0,0 +1,16 @@
#include <stdio.h>

void func1(void)
{
    printf("Hello from func1!\n");
}

void func2(void)
{
    printf("Hello from func2!\n");
}

void func3(void)
{
    printf("Hello from func3!\n");
}
6 tools/test_apps/build_system/ldgen_test/main/src2.c Normal file
@@ -0,0 +1,6 @@
#include <stdio.h>

void func4(void)
{
    printf("Hello from func4!\n");
}
14 tools/test_apps/build_system/ldgen_test/main/test_main.c Normal file
@@ -0,0 +1,14 @@

extern void func1(void);
extern void func2(void);
extern void func3(void);
extern void func4(void);


void app_main(void)
{
    func1();
    func2();
    func3();
    func4();
}