forked from espressif/arduino-esp32
v2.0.0 Add support for ESP32S2 and update ESP-IDF to 4.4 (#4996)
This is very much still work in progress and much more will change before the final 2.0.0. Some APIs have changed. New libraries have been added. LittleFS is included.

Co-authored-by: Seon Rozenblum <seonr@3sprockets.com>
Co-authored-by: Me No Dev <me-no-dev@users.noreply.github.com>
Co-authored-by: geeksville <kevinh@geeksville.com>
Co-authored-by: Mike Dunston <m_dunston@comcast.net>
Co-authored-by: Unexpected Maker <seon@unexpectedmaker.com>
Co-authored-by: Seon Rozenblum <seonr@3sprockets.com>
Co-authored-by: microDev <70126934+microDev1@users.noreply.github.com>
Co-authored-by: tobozo <tobozo@users.noreply.github.com>
Co-authored-by: bobobo1618 <bobobo1618@users.noreply.github.com>
Co-authored-by: lorol <lorolouis@gmail.com>
Co-authored-by: geeksville <kevinh@geeksville.com>
Co-authored-by: Limor "Ladyada" Fried <limor@ladyada.net>
Co-authored-by: Sweety <switi.mhaiske@espressif.com>
Co-authored-by: Loick MAHIEUX <loick111@gmail.com>
Co-authored-by: Larry Bernstone <lbernstone@gmail.com>
Co-authored-by: Valerii Koval <valeros@users.noreply.github.com>
Co-authored-by: 快乐的我531 <2302004040@qq.com>
Co-authored-by: chegewara <imperiaonline4@gmail.com>
Co-authored-by: Clemens Kirchgatterer <clemens@1541.org>
Co-authored-by: Aron Rubin <aronrubin@gmail.com>
Co-authored-by: Pete Lewis <601236+lewispg228@users.noreply.github.com>
@@ -20,19 +20,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import print_function, division
-from __future__ import unicode_literals
+from __future__ import division, print_function, unicode_literals
+
 import argparse
+import binascii
+import errno
+import hashlib
 import os
 import re
 import struct
 import sys
-import hashlib
-import binascii
-import errno

 MAX_PARTITION_LENGTH = 0xC00  # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
-MD5_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14  # The first 2 bytes are like magic numbers for MD5 sum
+MD5_PARTITION_BEGIN = b'\xEB\xEB' + b'\xFF' * 14  # The first 2 bytes are like magic numbers for MD5 sum
 PARTITION_TABLE_SIZE = 0x1000  # Size of partition table

 MIN_PARTITION_SUBTYPE_APP_OTA = 0x10
@@ -44,26 +44,26 @@ APP_TYPE = 0x00
 DATA_TYPE = 0x01

 TYPES = {
-    "app": APP_TYPE,
-    "data": DATA_TYPE,
+    'app': APP_TYPE,
+    'data': DATA_TYPE,
 }

 # Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
 SUBTYPES = {
     APP_TYPE: {
-        "factory": 0x00,
-        "test": 0x20,
+        'factory': 0x00,
+        'test': 0x20,
     },
     DATA_TYPE: {
-        "ota": 0x00,
-        "phy": 0x01,
-        "nvs": 0x02,
-        "coredump": 0x03,
-        "nvs_keys": 0x04,
-        "efuse": 0x05,
-        "esphttpd": 0x80,
-        "fat": 0x81,
-        "spiffs": 0x82,
+        'ota': 0x00,
+        'phy': 0x01,
+        'nvs': 0x02,
+        'coredump': 0x03,
+        'nvs_keys': 0x04,
+        'efuse': 0x05,
+        'esphttpd': 0x80,
+        'fat': 0x81,
+        'spiffs': 0x82,
     },
 }

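For context, the TYPES and SUBTYPES maps above mirror the Type and SubType columns of a partition CSV. A minimal sketch of exercising the parser in this file from Python follows, assuming the script is importable as gen_esp32part (the file name is not shown in this diff) and using an illustrative layout rather than any project default:

    # Sketch only: the module name "gen_esp32part" and the layout below are assumptions.
    import gen_esp32part

    CSV = """
    # Name,    Type, SubType, Offset,   Size,     Flags
    nvs,       data, nvs,     0x9000,   0x5000,
    otadata,   data, ota,     0xe000,   0x2000,
    app0,      app,  ota_0,   0x10000,  0x140000,
    spiffs,    data, spiffs,  0x150000, 0x2B0000,
    """

    table = gen_esp32part.PartitionTable.from_csv(CSV)
    table.verify()            # checks alignment, overlaps and unique names
    blob = table.to_binary()  # 32-byte entries plus MD5 marker, padded to 0xC00 bytes
    print(table.to_csv())     # round-trips back to CSV form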
@@ -103,14 +103,14 @@ class PartitionTable(list):

         for line_no in range(len(lines)):
             line = expand_vars(lines[line_no]).strip()
-            if line.startswith("#") or len(line) == 0:
+            if line.startswith('#') or len(line) == 0:
                 continue
             try:
                 res.append(PartitionDefinition.from_csv(line, line_no + 1))
             except InputError as e:
-                raise InputError("Error at line %d: %s" % (line_no + 1, e))
+                raise InputError('Error at line %d: %s' % (line_no + 1, e))
             except Exception:
-                critical("Unexpected error parsing CSV line %d: %s" % (line_no + 1, line))
+                critical('Unexpected error parsing CSV line %d: %s' % (line_no + 1, line))
                 raise

         # fix up missing offsets & negative sizes
@@ -118,10 +118,10 @@ class PartitionTable(list):
         for e in res:
             if e.offset is not None and e.offset < last_end:
                 if e == res[0]:
-                    raise InputError("CSV Error: First partition offset 0x%x overlaps end of partition table 0x%x"
+                    raise InputError('CSV Error: First partition offset 0x%x overlaps end of partition table 0x%x'
                                      % (e.offset, last_end))
                 else:
-                    raise InputError("CSV Error: Partitions overlap. Partition at line %d sets offset 0x%x. Previous partition ends 0x%x"
+                    raise InputError('CSV Error: Partitions overlap. Partition at line %d sets offset 0x%x. Previous partition ends 0x%x'
                                      % (e.line_no, e.offset, last_end))
             if e.offset is None:
                 pad_to = 0x10000 if e.type == APP_TYPE else 4
@@ -166,8 +166,8 @@ class PartitionTable(list):

         for p in self:
             if p.type == ptype and p.subtype == subtype:
-                return p
-        return None
+                yield p
+        return

     def find_by_name(self, name):
         for p in self:
@@ -186,19 +186,19 @@ class PartitionTable(list):

         # print sorted duplicate partitions by name
         if len(duplicates) != 0:
-            print("A list of partitions that have the same name:")
+            print('A list of partitions that have the same name:')
             for p in sorted(self, key=lambda x:x.name):
                 if len(duplicates.intersection([p.name])) != 0:
-                    print("%s" % (p.to_csv()))
-            raise InputError("Partition names must be unique")
+                    print('%s' % (p.to_csv()))
+            raise InputError('Partition names must be unique')

         # check for overlaps
         last = None
         for p in sorted(self, key=lambda x:x.offset):
             if p.offset < offset_part_table + PARTITION_TABLE_SIZE:
-                raise InputError("Partition offset 0x%x is below 0x%x" % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
+                raise InputError('Partition offset 0x%x is below 0x%x' % (p.offset, offset_part_table + PARTITION_TABLE_SIZE))
             if last is not None and p.offset < last.offset + last.size:
-                raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset + last.size - 1))
+                raise InputError('Partition at 0x%x overlaps 0x%x-0x%x' % (p.offset, last.offset, last.offset + last.size - 1))
             last = p

     def flash_size(self):
@@ -218,7 +218,7 @@ class PartitionTable(list):
         for o in range(0,len(b),32):
             data = b[o:o + 32]
             if len(data) != 32:
-                raise InputError("Partition table length must be a multiple of 32 bytes")
+                raise InputError('Partition table length must be a multiple of 32 bytes')
             if data == b'\xFF' * 32:
                 return result  # got end marker
             if md5sum and data[:2] == MD5_PARTITION_BEGIN[:2]:  # check only the magic number part
@@ -229,26 +229,26 @@ class PartitionTable(list):
             else:
                 md5.update(data)
             result.append(PartitionDefinition.from_binary(data))
-        raise InputError("Partition table is missing an end-of-table marker")
+        raise InputError('Partition table is missing an end-of-table marker')

     def to_binary(self):
-        result = b"".join(e.to_binary() for e in self)
+        result = b''.join(e.to_binary() for e in self)
         if md5sum:
             result += MD5_PARTITION_BEGIN + hashlib.md5(result).digest()
         if len(result) >= MAX_PARTITION_LENGTH:
-            raise InputError("Binary partition table length (%d) longer than max" % len(result))
-        result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result))  # pad the sector, for signing
+            raise InputError('Binary partition table length (%d) longer than max' % len(result))
+        result += b'\xFF' * (MAX_PARTITION_LENGTH - len(result))  # pad the sector, for signing
         return result

     def to_csv(self, simple_formatting=False):
-        rows = ["# Espressif ESP32 Partition Table",
-                "# Name, Type, SubType, Offset, Size, Flags"]
+        rows = ['# ESP-IDF Partition Table',
+                '# Name, Type, SubType, Offset, Size, Flags']
         rows += [x.to_csv(simple_formatting) for x in self]
-        return "\n".join(rows) + "\n"
+        return '\n'.join(rows) + '\n'


 class PartitionDefinition(object):
-    MAGIC_BYTES = b"\xAA\x50"
+    MAGIC_BYTES = b'\xAA\x50'

     ALIGNMENT = {
         APP_TYPE: 0x10000,
@@ -258,15 +258,15 @@ class PartitionDefinition(object):
     # dictionary maps flag name (as used in CSV flags list, property name)
     # to bit set in flags words in binary format
     FLAGS = {
-        "encrypted": 0
+        'encrypted': 0
     }

     # add subtypes for the 16 OTA slot values ("ota_XX, etc.")
     for ota_slot in range(NUM_PARTITION_SUBTYPE_APP_OTA):
-        SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = MIN_PARTITION_SUBTYPE_APP_OTA + ota_slot
+        SUBTYPES[TYPES['app']]['ota_%d' % ota_slot] = MIN_PARTITION_SUBTYPE_APP_OTA + ota_slot

     def __init__(self):
-        self.name = ""
+        self.name = ''
         self.type = None
         self.subtype = None
         self.offset = None
@@ -276,8 +276,8 @@ class PartitionDefinition(object):
     @classmethod
     def from_csv(cls, line, line_no):
         """ Parse a line from the CSV """
-        line_w_defaults = line + ",,,,"  # lazy way to support default fields
-        fields = [f.strip() for f in line_w_defaults.split(",")]
+        line_w_defaults = line + ',,,,'  # lazy way to support default fields
+        fields = [f.strip() for f in line_w_defaults.split(',')]

         res = PartitionDefinition()
         res.line_no = line_no
@@ -289,7 +289,7 @@ class PartitionDefinition(object):
         if res.size is None:
             raise InputError("Size field can't be empty")

-        flags = fields[5].split(":")
+        flags = fields[5].split(':')
         for flag in flags:
             if flag in cls.FLAGS:
                 setattr(res, flag, True)
@@ -305,7 +305,7 @@ class PartitionDefinition(object):

     def __repr__(self):
         def maybe_hex(x):
-            return "0x%x" % x if x is not None else "None"
+            return '0x%x' % x if x is not None else 'None'
         return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
                                                                   maybe_hex(self.offset), maybe_hex(self.size))

@@ -328,65 +328,65 @@ class PartitionDefinition(object):
         return self.offset >= other.offset

     def parse_type(self, strval):
-        if strval == "":
+        if strval == '':
             raise InputError("Field 'type' can't be left empty.")
         return parse_int(strval, TYPES)

     def parse_subtype(self, strval):
-        if strval == "":
+        if strval == '':
             return 0  # default
         return parse_int(strval, SUBTYPES.get(self.type, {}))

     def parse_address(self, strval):
-        if strval == "":
+        if strval == '':
             return None  # PartitionTable will fill in default
         return parse_int(strval)

     def verify(self):
         if self.type is None:
-            raise ValidationError(self, "Type field is not set")
+            raise ValidationError(self, 'Type field is not set')
         if self.subtype is None:
-            raise ValidationError(self, "Subtype field is not set")
+            raise ValidationError(self, 'Subtype field is not set')
         if self.offset is None:
-            raise ValidationError(self, "Offset field is not set")
+            raise ValidationError(self, 'Offset field is not set')
         align = self.ALIGNMENT.get(self.type, 4)
         if self.offset % align:
-            raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
+            raise ValidationError(self, 'Offset 0x%x is not aligned to 0x%x' % (self.offset, align))
         if self.size % align and secure:
-            raise ValidationError(self, "Size 0x%x is not aligned to 0x%x" % (self.size, align))
+            raise ValidationError(self, 'Size 0x%x is not aligned to 0x%x' % (self.size, align))
         if self.size is None:
-            raise ValidationError(self, "Size field is not set")
+            raise ValidationError(self, 'Size field is not set')

-        if self.name in TYPES and TYPES.get(self.name, "") != self.type:
+        if self.name in TYPES and TYPES.get(self.name, '') != self.type:
             critical("WARNING: Partition has name '%s' which is a partition type, but does not match this partition's "
-                     "type (0x%x). Mistake in partition table?" % (self.name, self.type))
+                     'type (0x%x). Mistake in partition table?' % (self.name, self.type))
         all_subtype_names = []
         for names in (t.keys() for t in SUBTYPES.values()):
             all_subtype_names += names
-        if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, "") != self.subtype:
+        if self.name in all_subtype_names and SUBTYPES.get(self.type, {}).get(self.name, '') != self.subtype:
             critical("WARNING: Partition has name '%s' which is a partition subtype, but this partition has "
-                     "non-matching type 0x%x and subtype 0x%x. Mistake in partition table?" % (self.name, self.type, self.subtype))
+                     'non-matching type 0x%x and subtype 0x%x. Mistake in partition table?' % (self.name, self.type, self.subtype))

-    STRUCT_FORMAT = b"<2sBBLL16sL"
+    STRUCT_FORMAT = b'<2sBBLL16sL'

     @classmethod
     def from_binary(cls, b):
         if len(b) != 32:
-            raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
+            raise InputError('Partition definition length must be exactly 32 bytes. Got %d bytes.' % len(b))
         res = cls()
         (magic, res.type, res.subtype, res.offset,
          res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
-        if b"\x00" in res.name:  # strip null byte padding from name string
-            res.name = res.name[:res.name.index(b"\x00")]
+        if b'\x00' in res.name:  # strip null byte padding from name string
+            res.name = res.name[:res.name.index(b'\x00')]
         res.name = res.name.decode()
         if magic != cls.MAGIC_BYTES:
-            raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
+            raise InputError('Invalid magic bytes (%r) for partition definition' % magic)
         for flag,bit in cls.FLAGS.items():
             if flags & (1 << bit):
                 setattr(res, flag, True)
                 flags &= ~(1 << bit)
         if flags != 0:
-            critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
+            critical('WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?' % flags)
         return res

     def get_flags_list(self):
@@ -404,22 +404,22 @@ class PartitionDefinition(object):
     def to_csv(self, simple_formatting=False):
         def addr_format(a, include_sizes):
             if not simple_formatting and include_sizes:
-                for (val, suffix) in [(0x100000, "M"), (0x400, "K")]:
+                for (val, suffix) in [(0x100000, 'M'), (0x400, 'K')]:
                     if a % val == 0:
-                        return "%d%s" % (a // val, suffix)
-            return "0x%x" % a
+                        return '%d%s' % (a // val, suffix)
+            return '0x%x' % a

         def lookup_keyword(t, keywords):
             for k,v in keywords.items():
                 if simple_formatting is False and t == v:
                     return k
-            return "%d" % t
+            return '%d' % t

         def generate_text_flags():
             """ colon-delimited list of flags """
-            return ":".join(self.get_flags_list())
+            return ':'.join(self.get_flags_list())

-        return ",".join([self.name,
+        return ','.join([self.name,
                          lookup_keyword(self.type, TYPES),
                          lookup_keyword(self.subtype, SUBTYPES.get(self.type, {})),
                          addr_format(self.offset, False),
@@ -432,17 +432,17 @@ def parse_int(v, keywords={}):
     k/m/K/M suffixes and 'keyword' value lookup.
     """
    try:
-        for letter, multiplier in [("k", 1024), ("m", 1024 * 1024)]:
+        for letter, multiplier in [('k', 1024), ('m', 1024 * 1024)]:
             if v.lower().endswith(letter):
                 return parse_int(v[:-1], keywords) * multiplier
         return int(v, 0)
     except ValueError:
         if len(keywords) == 0:
-            raise InputError("Invalid field value %s" % v)
+            raise InputError('Invalid field value %s' % v)
         try:
             return keywords[v.lower()]
         except KeyError:
-            raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
+            raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ', '.join(keywords)))


 def main():
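The parse_int() helper above accepts any base via int(v, 0), optional k/K and m/M size suffixes, and keyword lookup against a dict such as TYPES. A few illustrative values (same assumed module name as above):

    from gen_esp32part import TYPES, parse_int

    parse_int('0x10000')     # 65536
    parse_int('64K')         # 64 * 1024
    parse_int('2M')          # 2 * 1024 * 1024
    parse_int('app', TYPES)  # keyword lookup -> APP_TYPE (0x00)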
@@ -456,11 +456,11 @@ def main():
                         nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB'])
     parser.add_argument('--disable-md5sum', help='Disable md5 checksum for the partition table', default=False, action='store_true')
     parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
-    parser.add_argument('--verify', '-v', help="Verify partition table fields (deprecated, this behaviour is "
-                                               "enabled by default and this flag does nothing.", action='store_true')
+    parser.add_argument('--verify', '-v', help='Verify partition table fields (deprecated, this behaviour is '
+                                               'enabled by default and this flag does nothing.', action='store_true')
     parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
     parser.add_argument('--offset', '-o', help='Set offset partition table', default='0x8000')
-    parser.add_argument('--secure', help="Require app partitions to be suitable for secure boot", action='store_true')
+    parser.add_argument('--secure', help='Require app partitions to be suitable for secure boot', action='store_true')
     parser.add_argument('input', help='Path to CSV or binary file to parse.', type=argparse.FileType('rb'))
     parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
                         nargs='?', default='-')
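Taken together, these options imply invocations along the lines of "python gen_esp32part.py --flash-size 4MB partitions.csv partitions.bin" to convert a CSV table to its binary form, or the reverse when the input file starts with the 0xAA 0x50 magic bytes; the script's file name is not shown in this diff, so gen_esp32part.py is an assumption.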
@@ -474,19 +474,19 @@ def main():
     input = args.input.read()
     input_is_binary = input[0:2] == PartitionDefinition.MAGIC_BYTES
     if input_is_binary:
-        status("Parsing binary partition input...")
+        status('Parsing binary partition input...')
         table = PartitionTable.from_binary(input)
     else:
         input = input.decode()
-        status("Parsing CSV input...")
+        status('Parsing CSV input...')
         table = PartitionTable.from_csv(input)

     if not args.no_verify:
-        status("Verifying table...")
+        status('Verifying table...')
         table.verify()

     if args.flash_size:
-        size_mb = int(args.flash_size.replace("MB", ""))
+        size_mb = int(args.flash_size.replace('MB', ''))
         size = size_mb * 1024 * 1024  # flash memory uses honest megabytes!
         table_size = table.flash_size()
         if size < table_size:
@@ -526,7 +526,7 @@ class InputError(RuntimeError):
 class ValidationError(InputError):
     def __init__(self, partition, message):
         super(ValidationError, self).__init__(
-            "Partition %s invalid: %s" % (partition.name, message))
+            'Partition %s invalid: %s' % (partition.name, message))


 if __name__ == '__main__':