Mirror of https://github.com/espressif/esp-idf.git (synced 2025-07-30 02:37:19 +02:00)
refactor(partition_table): Refactoring of code style
@@ -3,7 +3,7 @@
# parttool is used to perform partition level operations - reading,
# writing, erasing and getting info about the partition.
#
# SPDX-FileCopyrightText: 2018-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2018-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
@@ -30,8 +30,7 @@ def status(msg):
print(msg)


class _PartitionId():

class _PartitionId:
def __init__(self, name=None, p_type=None, subtype=None, part_list=None):
self.name = name
self.type = p_type
@@ -40,13 +39,11 @@ class _PartitionId():


class PartitionName(_PartitionId):

def __init__(self, name):
_PartitionId.__init__(self, name=name)


class PartitionType(_PartitionId):

def __init__(self, p_type, subtype, part_list=None):
_PartitionId.__init__(self, p_type=p_type, subtype=subtype, part_list=part_list)

@@ -54,10 +51,20 @@ class PartitionType(_PartitionId):
PARTITION_BOOT_DEFAULT = _PartitionId()


class ParttoolTarget():

def __init__(self, port=None, baud=None, partition_table_offset=PARTITION_TABLE_OFFSET, primary_bootloader_offset=None, recovery_bootloader_offset=None,
partition_table_file=None, esptool_args=[], esptool_write_args=[], esptool_read_args=[], esptool_erase_args=[]):
class ParttoolTarget:
def __init__(
self,
port=None,
baud=None,
partition_table_offset=PARTITION_TABLE_OFFSET,
primary_bootloader_offset=None,
recovery_bootloader_offset=None,
partition_table_file=None,
esptool_args=[],
esptool_write_args=[],
esptool_read_args=[],
esptool_erase_args=[],
):
self.port = port
self.baud = baud

@@ -92,7 +99,9 @@ class ParttoolTarget():
temp_file.close()

try:
self._call_esptool(['read_flash', str(partition_table_offset), str(gen.MAX_PARTITION_LENGTH), temp_file.name])
self._call_esptool(
['read_flash', str(partition_table_offset), str(gen.MAX_PARTITION_LENGTH), temp_file.name]
)
with open(temp_file.name, 'rb') as f:
partition_table = gen.PartitionTable.from_binary(f.read())
finally:
@@ -144,13 +153,13 @@ class ParttoolTarget():

def erase_partition(self, partition_id):
partition = self.get_partition_info(partition_id)
self._call_esptool(['erase_region', str(partition.offset), str(partition.size)] + self.esptool_erase_args)
self._call_esptool(['erase_region', str(partition.offset), str(partition.size)] + self.esptool_erase_args)

def read_partition(self, partition_id, output):
partition = self.get_partition_info(partition_id)
self._call_esptool(['read_flash', str(partition.offset), str(partition.size), output] + self.esptool_read_args)

def write_partition(self, partition_id, input, ignore_readonly=False):
def write_partition(self, partition_id, input, ignore_readonly=False): # noqa: A002
partition = self.get_partition_info(partition_id)

if partition.readonly and not ignore_readonly:
@@ -158,8 +167,8 @@ class ParttoolTarget():

self.erase_partition(partition_id)

with open(input, 'rb') as input_file:
content_len = len(input_file.read())
with open(input, 'rb') as f:
content_len = len(f.read())

if content_len > partition.size:
raise Exception('Input file size exceeds partition size')
@@ -167,7 +176,7 @@ class ParttoolTarget():
self._call_esptool(['write_flash', str(partition.offset), input] + self.esptool_write_args)


def _write_partition(target, partition_id, input, ignore_readonly=False):
def _write_partition(target, partition_id, input, ignore_readonly=False): # noqa: A002
target.write_partition(partition_id, input, ignore_readonly)
partition = target.get_partition_info(partition_id)
status("Written contents of file '{}' at offset 0x{:x}".format(input, partition.offset))
@@ -176,8 +185,11 @@ def _write_partition(target, partition_id, input, ignore_readonly=False):
def _read_partition(target, partition_id, output):
target.read_partition(partition_id, output)
partition = target.get_partition_info(partition_id)
status("Read partition '{}' contents from device at offset 0x{:x} to file '{}'"
.format(partition.name, partition.offset, output))
status(
"Read partition '{}' contents from device at offset 0x{:x} to file '{}'".format(
partition.name, partition.offset, output
)
)


def _erase_partition(target, partition_id):
@@ -205,7 +217,7 @@ def _get_partition_info(target, partition_id, info):
'offset': '0x{:x}'.format(p.offset),
'size': '0x{:x}'.format(p.size),
'encrypted': '{}'.format(p.encrypted),
'readonly': '{}'.format(p.readonly)
'readonly': '{}'.format(p.readonly),
}
for i in info:
infos += [info_dict[i]]
@@ -224,19 +236,29 @@ def main():
parser.add_argument('--esptool-args', help='additional main arguments for esptool', nargs='+')
parser.add_argument('--esptool-write-args', help='additional subcommand arguments when writing to flash', nargs='+')
parser.add_argument('--esptool-read-args', help='additional subcommand arguments when reading flash', nargs='+')
parser.add_argument('--esptool-erase-args', help='additional subcommand arguments when erasing regions of flash', nargs='+')
parser.add_argument(
'--esptool-erase-args', help='additional subcommand arguments when erasing regions of flash', nargs='+'
)

# By default the device attached to the specified port is queried for the partition table. If a partition table file
# is specified, that is used instead.
parser.add_argument('--port', '-p', help='port where the target device of the command is connected to; the partition table is sourced from this device \
when the partition table file is not defined')
parser.add_argument(
'--port',
'-p',
help='port where the target device of the command is connected to; the partition table is sourced from '
'this device when the partition table file is not defined',
)
parser.add_argument('--baud', '-b', help='baudrate to use', type=int)

parser.add_argument('--partition-table-offset', '-o', help='offset to read the partition table from', type=str)
parser.add_argument('--primary-bootloader-offset', help='offset for primary bootloader', type=str)
parser.add_argument('--recovery-bootloader-offset', help='offset for recovery bootloader', type=str)
parser.add_argument('--partition-table-file', '-f', help='file (CSV/binary) to read the partition table from; \
overrides device attached to specified port as the partition table source when defined')
parser.add_argument(
'--partition-table-file',
'-f',
help='file (CSV/binary) to read the partition table from; '
'overrides device attached to specified port as the partition table source when defined',
)

partition_selection_parser = argparse.ArgumentParser(add_help=False)

@@ -246,31 +268,54 @@ def main():

partition_selection_args.add_argument('--partition-name', '-n', help='name of the partition')
partition_selection_args.add_argument('--partition-type', '-t', help='type of the partition')
partition_selection_args.add_argument('--partition-boot-default', '-d', help='select the default boot partition \
using the same fallback logic as the IDF bootloader', action='store_true')
partition_selection_args.add_argument(
'--partition-boot-default',
'-d',
help='select the default boot partition \
using the same fallback logic as the IDF bootloader',
action='store_true',
)

partition_selection_parser.add_argument('--partition-subtype', '-s', help='subtype of the partition')
partition_selection_parser.add_argument('--extra-partition-subtypes', help='Extra partition subtype entries', nargs='*')
partition_selection_parser.add_argument(
'--extra-partition-subtypes', help='Extra partition subtype entries', nargs='*'
)

subparsers = parser.add_subparsers(dest='operation', help='run parttool -h for additional help')

# Specify the supported operations
read_part_subparser = subparsers.add_parser('read_partition', help='read partition from device and dump contents into a file',
parents=[partition_selection_parser])
read_part_subparser = subparsers.add_parser(
'read_partition',
help='read partition from device and dump contents into a file',
parents=[partition_selection_parser],
)
read_part_subparser.add_argument('--output', help='file to dump the read partition contents to')

write_part_subparser = subparsers.add_parser('write_partition', help='write contents of a binary file to partition on device',
parents=[partition_selection_parser])
write_part_subparser = subparsers.add_parser(
'write_partition',
help='write contents of a binary file to partition on device',
parents=[partition_selection_parser],
)
write_part_subparser.add_argument('--input', help='file whose contents are to be written to the partition offset')
write_part_subparser.add_argument('--ignore-readonly', help='Ignore read-only attribute', action='store_true')

subparsers.add_parser('erase_partition', help='erase the contents of a partition on the device', parents=[partition_selection_parser])
subparsers.add_parser(
'erase_partition', help='erase the contents of a partition on the device', parents=[partition_selection_parser]
)

print_partition_info_subparser = subparsers.add_parser('get_partition_info', help='get partition information', parents=[partition_selection_parser])
print_partition_info_subparser.add_argument('--info', help='type of partition information to get',
choices=['name', 'type', 'subtype', 'offset', 'size', 'encrypted', 'readonly'],
default=['offset', 'size'], nargs='+')
print_partition_info_subparser.add_argument('--part_list', help='Get a list of partitions suitable for a given type', action='store_true')
print_partition_info_subparser = subparsers.add_parser(
'get_partition_info', help='get partition information', parents=[partition_selection_parser]
)
print_partition_info_subparser.add_argument(
'--info',
help='type of partition information to get',
choices=['name', 'type', 'subtype', 'offset', 'size', 'encrypted', 'readonly'],
default=['offset', 'size'],
nargs='+',
)
print_partition_info_subparser.add_argument(
'--part_list', help='Get a list of partitions suitable for a given type', action='store_true'
)

args = parser.parse_args()
quiet = args.quiet
@@ -291,8 +336,10 @@ def main():
elif args.partition_boot_default:
partition_id = PARTITION_BOOT_DEFAULT
else:
raise RuntimeError('Partition to operate on should be defined using --partition-name OR \
partition-type,--partition-subtype OR partition-boot-default')
raise RuntimeError(
'Partition to operate on should be defined using --partition-name OR \
partition-type,--partition-subtype OR partition-boot-default'
)

# Prepare the device to perform operation on
target_args = {}
@@ -333,18 +380,18 @@ def main():
target = ParttoolTarget(**target_args)

# Create the operation table and execute the operation
common_args = {'target':target, 'partition_id':partition_id}
common_args = {'target': target, 'partition_id': partition_id}
parttool_ops = {
'erase_partition': (_erase_partition, []),
'read_partition': (_read_partition, ['output']),
'write_partition': (_write_partition, ['input', 'ignore_readonly']),
'get_partition_info': (_get_partition_info, ['info'])
'get_partition_info': (_get_partition_info, ['info']),
}

(op, op_args) = parttool_ops[args.operation]

for op_arg in op_args:
common_args.update({op_arg:vars(args)[op_arg]})
common_args.update({op_arg: vars(args)[op_arg]})

if quiet:
# If exceptions occur, suppress and exit quietly
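
For context, a minimal sketch (not part of this diff) of how the ParttoolTarget API reformatted above is typically driven from Python. The serial port, partition name, and output file below are illustrative placeholders, not values taken from the commit.

```python
# Illustrative only: exercise the parttool.py API shown in the diff above.
# '/dev/ttyUSB0', 'nvs' and 'nvs_dump.bin' are hypothetical placeholder values.
from parttool import ParttoolTarget, PartitionName

target = ParttoolTarget(port='/dev/ttyUSB0', baud=115200)    # reads the partition table from the attached device
nvs = target.get_partition_info(PartitionName('nvs'))        # resolve a partition by name
print(hex(nvs.offset), hex(nvs.size))                        # offset/size fields used throughout the diff
target.read_partition(PartitionName('nvs'), 'nvs_dump.bin')  # dump the partition contents to a file
```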
@@ -1,5 +1,5 @@
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import csv
import io
@@ -26,38 +26,32 @@ factory,0,2,65536,1048576,
LONGER_BINARY_TABLE = b''
# type 0x00, subtype 0x00,
# offset 64KB, size 1MB
LONGER_BINARY_TABLE += b'\xAA\x50\x00\x00' + \
b'\x00\x00\x01\x00' + \
b'\x00\x00\x10\x00' + \
b'factory\0' + (b'\0' * 8) + \
b'\x00\x00\x00\x00'
LONGER_BINARY_TABLE += (
b'\xaa\x50\x00\x00' + b'\x00\x00\x01\x00' + b'\x00\x00\x10\x00' + b'factory\0' + (b'\0' * 8) + b'\x00\x00\x00\x00'
)
# type 0x01, subtype 0x20,
# offset 0x110000, size 128KB
LONGER_BINARY_TABLE += b'\xAA\x50\x01\x20' + \
b'\x00\x00\x11\x00' + \
b'\x00\x02\x00\x00' + \
b'data' + (b'\0' * 12) + \
b'\x00\x00\x00\x00'
LONGER_BINARY_TABLE += (
b'\xaa\x50\x01\x20' + b'\x00\x00\x11\x00' + b'\x00\x02\x00\x00' + b'data' + (b'\0' * 12) + b'\x00\x00\x00\x00'
)
# type 0x10, subtype 0x00,
# offset 0x150000, size 1MB
LONGER_BINARY_TABLE += b'\xAA\x50\x10\x00' + \
b'\x00\x00\x15\x00' + \
b'\x00\x10\x00\x00' + \
b'second' + (b'\0' * 10) + \
b'\x00\x00\x00\x00'
LONGER_BINARY_TABLE += (
b'\xaa\x50\x10\x00' + b'\x00\x00\x15\x00' + b'\x00\x10\x00\x00' + b'second' + (b'\0' * 10) + b'\x00\x00\x00\x00'
)
# MD5 checksum
LONGER_BINARY_TABLE += b'\xEB\xEB' + b'\xFF' * 14
LONGER_BINARY_TABLE += b'\xeb\xeb' + b'\xff' * 14
LONGER_BINARY_TABLE += b'\xf9\xbd\x06\x1b\x45\x68\x6f\x86\x57\x1a\x2c\xd5\x2a\x1d\xa6\x5b'
# empty partition
LONGER_BINARY_TABLE += b'\xFF' * 32
LONGER_BINARY_TABLE += b'\xff' * 32


def _strip_trailing_ffs(binary_table):
"""
Strip all FFs down to the last 32 bytes (terminating entry)
"""
while binary_table.endswith(b'\xFF' * 64):
binary_table = binary_table[0:len(binary_table) - 32]
while binary_table.endswith(b'\xff' * 64):
binary_table = binary_table[0 : len(binary_table) - 32]
return binary_table


@@ -143,10 +137,10 @@ otherapp, app, factory,, 1M
t = gen_esp32part.PartitionTable.from_csv(csv)
# 'first'
self.assertEqual(t[0].offset, 0x010000) # 64KB boundary as it's an app image
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
self.assertEqual(t[0].size, 0x100000) # Size specified in CSV
# 'second'
self.assertEqual(t[1].offset, 0x110000) # prev offset+size
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
self.assertEqual(t[1].size, 0x100000) # Size specified in CSV
# 'minidata'
self.assertEqual(t[2].offset, 0x210000)
# 'otherapp'
@@ -162,7 +156,7 @@ second, data, 0x15, , 1M
t.verify()
# 'first'
self.assertEqual(t[0].offset, 0x10000) # in CSV
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
self.assertEqual(t[0].size, 0x200000 - t[0].offset) # Up to 2M
# 'second'
self.assertEqual(t[1].offset, 0x200000) # prev offset+size

@@ -188,7 +182,9 @@ first, app, ota_0, 0x200000, 1M
csv = """
bootloader, bootloader, primary, N/A, N/A
"""
with self.assertRaisesRegex(gen_esp32part.InputError, 'Primary bootloader offset is not defined. Please use --primary-bootloader-offset'):
with self.assertRaisesRegex(
gen_esp32part.InputError, 'Primary bootloader offset is not defined. Please use --primary-bootloader-offset'
):
gen_esp32part.PartitionTable.from_csv(csv)

def test_bootloader_and_part_table_partitions(self):
@@ -234,12 +230,12 @@ first, 0x30, 0xEE, 0x100400, 0x300000
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 64 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2]) # magic
self.assertEqual(b'\xaa\x50', tb[0:2]) # magic
self.assertEqual(b'\x30\xee', tb[2:4]) # type, subtype
eo, es = struct.unpack('<LL', tb[4:12])
self.assertEqual(eo, 0x100400) # offset
self.assertEqual(es, 0x300000) # size
self.assertEqual(b'\xEB\xEB' + b'\xFF' * 14, tb[32:48])
self.assertEqual(b'\xeb\xeb' + b'\xff' * 14, tb[32:48])
self.assertEqual(b'\x43\x03\x3f\x33\x40\x87\x57\x51\x69\x83\x9b\x40\x61\xb1\x27\x26', tb[48:64])

def test_multiple_entries(self):
@@ -250,8 +246,8 @@ second,0x31, 0xEF, , 0x100000
t = gen_esp32part.PartitionTable.from_csv(csv)
tb = _strip_trailing_ffs(t.to_binary())
self.assertEqual(len(tb), 96 + 32)
self.assertEqual(b'\xAA\x50', tb[0:2])
self.assertEqual(b'\xAA\x50', tb[32:34])
self.assertEqual(b'\xaa\x50', tb[0:2])
self.assertEqual(b'\xaa\x50', tb[32:34])

def test_encrypted_flag(self):
csv = """
@@ -278,14 +274,14 @@ storage2, data, undefined, , 12k,
"""
t = gen_esp32part.PartitionTable.from_csv(csv_txt)
t.verify()
self.assertEqual(t[1].name, 'otadata')
self.assertEqual(t[1].type, 1)
self.assertEqual(t[1].name, 'otadata')
self.assertEqual(t[1].type, 1)
self.assertEqual(t[1].subtype, 0)
self.assertEqual(t[6].name, 'storage')
self.assertEqual(t[6].type, 1)
self.assertEqual(t[6].name, 'storage')
self.assertEqual(t[6].type, 1)
self.assertEqual(t[6].subtype, 0x06)
self.assertEqual(t[7].name, 'storage2')
self.assertEqual(t[7].type, 1)
self.assertEqual(t[7].name, 'storage2')
self.assertEqual(t[7].type, 1)
self.assertEqual(t[7].subtype, 0x06)


@@ -343,12 +339,14 @@ class BinaryParserTests(Py23TestCase):
def test_parse_one_entry(self):
# type 0x30, subtype 0xee,
# offset 1MB, size 2MB
entry = b'\xAA\x50\x30\xee' + \
b'\x00\x00\x10\x00' + \
b'\x00\x00\x20\x00' + \
b'0123456789abc\0\0\0' + \
b'\x00\x00\x00\x00' + \
b'\xFF' * 32
entry = (
b'\xaa\x50\x30\xee'
+ b'\x00\x00\x10\x00'
+ b'\x00\x00\x20\x00'
+ b'0123456789abc\0\0\0'
+ b'\x00\x00\x00\x00'
+ b'\xff' * 32
)
# verify that parsing 32 bytes as a table
# or as a single Definition are the same thing
t = gen_esp32part.PartitionTable.from_binary(entry)
@@ -362,7 +360,7 @@ class BinaryParserTests(Py23TestCase):
self.assertEqual(e.type, 0x30)
self.assertEqual(e.subtype, 0xEE)
self.assertEqual(e.offset, 0x100000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.size, 0x200000)
self.assertEqual(e.name, '0123456789abc')

def test_multiple_entries(self):
@@ -383,25 +381,17 @@ class BinaryParserTests(Py23TestCase):
self.assertEqual(round_trip, LONGER_BINARY_TABLE)

def test_bad_magic(self):
bad_magic = b'OHAI' + \
b'\x00\x00\x10\x00' + \
b'\x00\x00\x20\x00' + \
b'0123456789abc\0\0\0' + \
b'\x00\x00\x00\x00'
bad_magic = b'OHAI' + b'\x00\x00\x10\x00' + b'\x00\x00\x20\x00' + b'0123456789abc\0\0\0' + b'\x00\x00\x00\x00'
with self.assertRaisesRegex(gen_esp32part.InputError, 'Invalid magic bytes'):
gen_esp32part.PartitionTable.from_binary(bad_magic)

def test_bad_length(self):
bad_length = b'OHAI' + \
b'\x00\x00\x10\x00' + \
b'\x00\x00\x20\x00' + \
b'0123456789'
bad_length = b'OHAI' + b'\x00\x00\x10\x00' + b'\x00\x00\x20\x00' + b'0123456789'
with self.assertRaisesRegex(gen_esp32part.InputError, '32 bytes'):
gen_esp32part.PartitionTable.from_binary(bad_length)


class CSVOutputTests(Py23TestCase):

def _readcsv(self, source_str):
return list(csv.reader(source_str.split('\n')))

@@ -443,7 +433,6 @@ class CSVOutputTests(Py23TestCase):


class CommandLineTests(Py23TestCase):

def test_basic_cmdline(self):
try:
binpath = tempfile.mktemp()
@@ -454,8 +443,9 @@ class CommandLineTests(Py23TestCase):
f.write(LONGER_BINARY_TABLE)

# run gen_esp32part.py to convert binary file to CSV
output = subprocess.check_output([sys.executable, '../gen_esp32part.py',
binpath, csvpath], stderr=subprocess.STDOUT)
output = subprocess.check_output(
[sys.executable, '../gen_esp32part.py', binpath, csvpath], stderr=subprocess.STDOUT
)
# reopen the CSV and check the generated binary is identical
self.assertNotIn(b'WARNING', output)
with open(csvpath, 'r') as f:
@@ -463,8 +453,9 @@ class CommandLineTests(Py23TestCase):
self.assertEqual(_strip_trailing_ffs(from_csv.to_binary()), LONGER_BINARY_TABLE)

# run gen_esp32part.py to convert the CSV to binary again
output = subprocess.check_output([sys.executable, '../gen_esp32part.py',
csvpath, binpath], stderr=subprocess.STDOUT)
output = subprocess.check_output(
[sys.executable, '../gen_esp32part.py', csvpath, binpath], stderr=subprocess.STDOUT
)
self.assertNotIn(b'WARNING', output)
# assert that file reads back as identical
with open(binpath, 'rb') as f:
@@ -481,13 +472,14 @@ class CommandLineTests(Py23TestCase):


class VerificationTests(Py23TestCase):

def _run_genesp32(self, csvcontents, args):
csvpath = tempfile.mktemp()
with open(csvpath, 'w') as f:
f.write(csvcontents)
try:
output = subprocess.check_output([sys.executable, '../gen_esp32part.py', csvpath] + args, stderr=subprocess.STDOUT)
output = subprocess.check_output(
[sys.executable, '../gen_esp32part.py', csvpath] + args, stderr=subprocess.STDOUT
)
return output.strip()
except subprocess.CalledProcessError as e:
return e.output.strip()
@@ -504,14 +496,15 @@ ota_1, app, ota_1, , 0x100800
return self._run_genesp32(sample_csv, args)

# Failure case 1, incorrect ota_1 partition size
self.assertEqual(rge(['-q']),
b'Partition ota_1 invalid: Size 0x100800 is not aligned to 0x1000')
self.assertEqual(rge(['-q']), b'Partition ota_1 invalid: Size 0x100800 is not aligned to 0x1000')
# Failure case 2, incorrect ota_0 partition size
self.assertEqual(rge(['-q', '--secure', 'v1']),
b'Partition ota_0 invalid: Size 0x101000 is not aligned to 0x10000')
self.assertEqual(
rge(['-q', '--secure', 'v1']), b'Partition ota_0 invalid: Size 0x101000 is not aligned to 0x10000'
)
# Failure case 3, incorrect ota_1 partition size with Secure Boot V2
self.assertEqual(rge(['-q', '--secure', 'v2']),
b'Partition ota_1 invalid: Size 0x100800 is not aligned to 0x1000')
self.assertEqual(
rge(['-q', '--secure', 'v2']), b'Partition ota_1 invalid: Size 0x100800 is not aligned to 0x1000'
)

def test_bad_alignment(self):
csv = """
@@ -540,7 +533,9 @@ nvs, data, nvs, 0x0000, 0x6000,
phy_init, data, phy, , 0x1000,
factory, app, factory, , 1M,
"""
with self.assertRaisesRegex(gen_esp32part.InputError, r'CSV Error at line 3: Partitions overlap. Partition sets offset 0x0'):
with self.assertRaisesRegex(
gen_esp32part.InputError, r'CSV Error at line 3: Partitions overlap. Partition sets offset 0x0'
):
gen_esp32part.PartitionTable.from_csv(csv)

def test_only_one_otadata(self):
@@ -611,15 +606,16 @@ factory, app, factory, 0x10000, 20M


class PartToolTests(Py23TestCase):

def _run_parttool(self, csvcontents, args):
csvpath = tempfile.mktemp()
with open(csvpath, 'w') as f:
f.write(csvcontents)
try:
output = subprocess.check_output([sys.executable, '../parttool.py', '-q', '--partition-table-file',
csvpath, 'get_partition_info'] + args,
stderr=subprocess.STDOUT)
output = subprocess.check_output(
[sys.executable, '../parttool.py', '-q', '--partition-table-file', csvpath, 'get_partition_info']
+ args,
stderr=subprocess.STDOUT,
)
self.assertNotIn(b'WARNING', output)
return output.strip()
finally:
@@ -640,42 +636,77 @@ nvs_key2, data, nvs_keys, 0x119000, 0x1000, encrypted
def rpt(args):
return self._run_parttool(csv, args)

self.assertEqual(rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'offset']), b'0x9000')
self.assertEqual(rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'size']), b'0x4000')
self.assertEqual(rpt(['--partition-name', 'otadata', '--info', 'offset']), b'0xd000')
self.assertEqual(rpt(['--partition-boot-default', '--info', 'offset']), b'0x10000')
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'offset']), b'0x9000')
rpt(
[
'--partition-type',
'data',
'--partition-subtype',
'nvs',
'--info',
'name',
'offset',
'size',
'encrypted',
]
),
b'nvs 0x9000 0x4000 False',
)
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'size']), b'0x4000')
self.assertEqual(
rpt(['--partition-name', 'otadata', '--info', 'offset']), b'0xd000')
self.assertEqual(
rpt(['--partition-boot-default', '--info', 'offset']), b'0x10000')
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'name', 'offset', 'size', 'encrypted']),
b'nvs 0x9000 0x4000 False')
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'name', 'offset', 'size', 'encrypted', '--part_list']),
b'nvs 0x9000 0x4000 False nvs1_user 0x110000 0x4000 False nvs2_user 0x114000 0x4000 False')
rpt(
[
'--partition-type',
'data',
'--partition-subtype',
'nvs',
'--info',
'name',
'offset',
'size',
'encrypted',
'--part_list',
]
),
b'nvs 0x9000 0x4000 False nvs1_user 0x110000 0x4000 False nvs2_user 0x114000 0x4000 False',
)
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'name', '--part_list']),
b'nvs nvs1_user nvs2_user')
b'nvs nvs1_user nvs2_user',
)
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs_keys', '--info', 'name', '--part_list']),
b'nvs_key1 nvs_key2')
b'nvs_key1 nvs_key2',
)
self.assertEqual(rpt(['--partition-name', 'nvs', '--info', 'encrypted']), b'False')
self.assertEqual(rpt(['--partition-name', 'nvs1_user', '--info', 'encrypted']), b'False')
self.assertEqual(rpt(['--partition-name', 'nvs2_user', '--info', 'encrypted']), b'False')
self.assertEqual(rpt(['--partition-name', 'nvs_key1', '--info', 'encrypted']), b'True')
self.assertEqual(rpt(['--partition-name', 'nvs_key2', '--info', 'encrypted']), b'True')
self.assertEqual(
rpt(['--partition-name', 'nvs', '--info', 'encrypted']), b'False')
rpt(
[
'--partition-type',
'data',
'--partition-subtype',
'nvs_keys',
'--info',
'name',
'encrypted',
'--part_list',
]
),
b'nvs_key1 True nvs_key2 True',
)
self.assertEqual(
rpt(['--partition-name', 'nvs1_user', '--info', 'encrypted']), b'False')
self.assertEqual(
rpt(['--partition-name', 'nvs2_user', '--info', 'encrypted']), b'False')
self.assertEqual(
rpt(['--partition-name', 'nvs_key1', '--info', 'encrypted']), b'True')
self.assertEqual(
rpt(['--partition-name', 'nvs_key2', '--info', 'encrypted']), b'True')
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs_keys', '--info', 'name', 'encrypted', '--part_list']),
b'nvs_key1 True nvs_key2 True')
self.assertEqual(
rpt(['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'name', 'encrypted', '--part_list']),
b'nvs False nvs1_user False nvs2_user False')
rpt(
['--partition-type', 'data', '--partition-subtype', 'nvs', '--info', 'name', 'encrypted', '--part_list']
),
b'nvs False nvs1_user False nvs2_user False',
)

def test_fallback(self):
csv = """
@@ -690,13 +721,13 @@ ota_1, app, ota_1, , 1M
return self._run_parttool(csv, args)

self.assertEqual(
rpt(['--partition-type', 'app', '--partition-subtype', 'ota_1', '--info', 'offset']), b'0x130000')
self.assertEqual(
rpt(['--partition-boot-default', '--info', 'offset']), b'0x30000') # ota_0
rpt(['--partition-type', 'app', '--partition-subtype', 'ota_1', '--info', 'offset']), b'0x130000'
)
self.assertEqual(rpt(['--partition-boot-default', '--info', 'offset']), b'0x30000') # ota_0
csv_mod = csv.replace('ota_0', 'ota_2')
self.assertEqual(
self._run_parttool(csv_mod, ['--partition-boot-default', '--info', 'offset']),
b'0x130000') # now default is ota_1
self._run_parttool(csv_mod, ['--partition-boot-default', '--info', 'offset']), b'0x130000'
) # now default is ota_1


if __name__ == '__main__':
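
For reference, the binary-to-CSV-and-back round trip that CommandLineTests exercises above can also be run by hand; a minimal sketch follows, with 'table.bin' and 'table.csv' as placeholder file names.

```python
# Illustrative only: the gen_esp32part.py round trip used by CommandLineTests above.
# File names are placeholders; run from the directory containing gen_esp32part.py.
import subprocess
import sys

subprocess.check_call([sys.executable, 'gen_esp32part.py', 'table.bin', 'table.csv'])  # binary -> CSV
subprocess.check_call([sys.executable, 'gen_esp32part.py', 'table.csv', 'table.bin'])  # CSV -> binary
```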