forked from espressif/esp-idf
Compare commits
312 Commits
Commit SHA1 list:

```
8d3d3fdd4a  c4973908e4  3bb43d5f03  209d454b5f  b385fe0586  487c4b6725  667846cba1  ccbb261c35
f35fd2a6b0  5682a5ef36  b66af09b75  9b5841f17c  103de7acdc  c2b39f4a5f  2bd198d180  5b2fee13cd
6ceedabb27  dac71d688a  dc304fb3af  6ae56b61cf  6698be57c8  ef9dbff112  561f8ff513  945d2e697c
94c4f32df0  ab62202eb9  063c23deff  3868307efd  e0c834285f  e99b4e85b9  20ab122f80  98dd235819
94ec3c8e53  7535dbc454  aeb4d8e3c2  ba13d282dd  806d23b17b  e410f4268e  8de29499ce  ffab6084f0
f58c664e2b  b42ba1b0a5  67ba85650d  0a97cb62ef  594e1b5e44  10a5cfc900  e4a840d8cf  7a32ae363e
fddc905fa3  574dd08085  7705126287  bf4184a049  212222a9e0  6d3f81aa77  489e98cfb7  8d0d7972f3
e6acfedd3c  0f5cae0218  712bd1d773  4e36ede3bf  2f2aa41ea7  089438139d  e468a105d8  894ddea353
fed3c3ebf0  f8515688cb  b9dab23437  5632385d5f  cfcb22fe16  d6e3943233  b563219f93  d881fcd380
058a38a141  ffe6af44c1  1fca253a65  486ff50eac  cf69dfa458  501640514a  a80e687f8c  bc90b89e2b
a1cc202b70  062ba57d0f  14baac302f  af211fcf0c  5ee7233b5b  48c7afde27  d3bab0d7f0  c9ed467e26
f0eb023f17  8944b90a06  9e4ec90a1e  e4357d9cf3  db17ffef00  7f2a9f0359  ecdeea9a85  cfce9e1c72
dfce994b54  1b78dc2deb  37a4b0e933  8fab9ffb95  354137a313  0399c8ecaf  a12e7fa638  3278f755d2
30281166b1  9bd227f8c5  80423d638d  998e18ed69  d0e553358a  0cdb8b4888  c1e4ebf2bc  71ca4c50b0
56d82dd5ce  c4a6eef841  05b5671fc4  d3646ca59f  0cb7b27f8b  2319e8a0eb  bc12970dde  7a863cca7c
9c42b6194e  3511109b6e  acff7e58d2  0f45323879  48ea4bc7f4  69686ae8a9  28cb5624ef  c82adcae1b
c574ad90df  1d03398a64  daf1d05576  816a98c3ad  ebfc47c34b  681805b6c7  392b0b89e3  229be8c025
e4fb19000a  0e640c61bd  ce64bcb81f  5e48c2bfef  55c179a599  1181b65f7d  3b116ae0cb  54b595ed51
cf2600adef  f7eecfcc67  eac9eb36d1  ec05f3af8f  9bfb2f0cab  963fa0fd3a  8856cc055a  fab14106c8
154294e79f  e1d965e4b9  673d4679ce  95f6f88aa7  621794abb5  f918cb185d  ab22836859  9539d44158
b6b8af498c  194b1324d2  7a030ff8d6  d9a0f9d443  3a6be05945  d1c536258c  5a1247246c  45758b6be0
6183de959a  e9a4eb57b9  5894e15f6b  891c1f4a2b  f5c962d8b2  6aff7125cb  c68fd9d545  d6c06ed0b5
8baa6b9d8b  3923a2be85  e7dc749e2f  df93f672e3  4a55009f3e  dfcff0a5bf  ff3566e40b  63eb620d99
93c18bb2b4  3c6bce1d81  90b9c42dc0  c5f63bf701  ced95c7fb0  309fb23ffc  e9a230c20c  b0c7f28aed
7bc1e7608c  449ce1bad9  424a5e2705  d8f311c980  9b0c252e67  2a55629556  b523660199  150be549eb
10898a33ed  dddfc61411  404a6b3782  29f999361e  32eeac0b12  7d3e8998d8  e276b98fe4  de7381b77e
645d9b9590  2ad618e068  eb408e50c4  60da98ee0b  314f6f371c  6c69d5e6fd  90ea0bb7e8  6ba817038c
e54f8a96a0  2f9edfebac  27c28eb1e6  6f339ff7a5  2cc07d0c4c  da81b97e17  15e65aad1b  281874d380
3f3d8746fc  b278deabdb  9065498a5a  012f5c608d  c695a4e468  755b163d4e  b19b8702fd  bd2ff0613d
b6ebbb5662  38d58f1c2e  ecc6080117  e589cad07e  44764222a5  b6ee7f699c  9b57d4aa0d  60a642b31c
48fda0f27b  33ca8874d4  c884931b0b  41baf59287  87f7d1875d  545c7e5cdd  ef6fe211b8  50637f638f
c4b861ad65  492b926d50  b5f8cf0f03  c4bb528c61  a0bdee0c9c  c015dd6c41  5afafb0050  ff008d2be3
84bfc96f08  0448ee9685  42e411dafc  fbff8eb95b  c778951547  a0776b2f21  ae30d1bc7b  28e4162dd2
242f8ea743  5c1506f796  103559153f  d340088993  e8fbd6e288  50b710d267  f4554c81fc  96be8f2efa
033124be14  b4c1bdb11b  acc9b871d1  cb9be8c0c4  8388e1be54  5960e7419d  21912b95f4  3085eb7ec6
8ed44ace4b  58accf05cf  6f90393f22  66fe94f816  df6adbd5bf  ce3ccc18fa  36d6e4e2c7  a4d45a0a4d
38170d465c  ccea4a0f8f  954c0981d8  de79de1c26  194e1835c2  17ac80867b  05605920ae  121e5a7847
031ab556a9  4837f93968  717b1697df  b5942dc400  72d4de442a  485e254719  0ebae99ab0  7e488b0c6b
3a4cf72f30  6e0f905761  489f5efbd1  3a70e61477  043ef32651  eae3b45170  a1c79bbc7b  cc46b5054a
```
```ini
@@ -19,19 +19,16 @@ trim_trailing_whitespace = false
indent_style = tab
indent_size = 2

[*/freertos/**]
indent_style = tab
indent_size = 4

[{*/freertos/**.S,**/FreeRTOSConfig.h}]
indent_style = space
indent_size = 4

[*.pem]
insert_final_newline = false

[*.py]
max_line_length = 119

[{*.cmake,CMakeLists.txt}]
indent_style = space
indent_size = 4
max_line_length = 120

[{*.sh,*.yml,*.yaml}]
indent_size = 2

[*.ini]
indent_size = 2
```
.flake8 (deleted, 165 lines)

```ini
[flake8]

select =
    # Full lists are given in order to suppress all errors from other plugins
    # Full list of pyflakes error codes:
    F401,  # module imported but unused
    F402,  # import module from line N shadowed by loop variable
    F403,  # 'from module import *' used; unable to detect undefined names
    F404,  # future import(s) name after other statements
    F405,  # name may be undefined, or defined from star imports: module
    F406,  # 'from module import *' only allowed at module level
    F407,  # an undefined __future__ feature name was imported
    F601,  # dictionary key name repeated with different values
    F602,  # dictionary key variable name repeated with different values
    F621,  # too many expressions in an assignment with star-unpacking
    F622,  # two or more starred expressions in an assignment (a, *b, *c = d)
    F631,  # assertion test is a tuple, which is always True
    F701,  # a break statement outside of a while or for loop
    F702,  # a continue statement outside of a while or for loop
    F703,  # a continue statement in a finally block in a loop
    F704,  # a yield or yield from statement outside of a function
    F705,  # a return statement with arguments inside a generator
    F706,  # a return statement outside of a function/method
    F707,  # an except: block as not the last exception handler
    F721, F722,  # doctest syntax error; syntax error in forward type annotation
    F811,  # redefinition of unused name from line N
    F812,  # list comprehension redefines name from line N
    F821,  # undefined name
    F822,  # undefined name in __all__
    F823,  # local variable referenced before assignment
    F831,  # duplicate argument name in function definition
    F841,  # local variable is assigned to but never used
    F901,  # raise NotImplemented should be raise NotImplementedError

    # Full list of pycodestyle violations:
    E101,  # indentation contains mixed spaces and tabs
    E111,  # indentation is not a multiple of four
    E112,  # expected an indented block
    E113,  # unexpected indentation
    E114,  # indentation is not a multiple of four (comment)
    E115,  # expected an indented block (comment)
    E116,  # unexpected indentation (comment)
    E121,  # continuation line under-indented for hanging indent
    E122,  # continuation line missing indentation or outdented
    E123,  # closing bracket does not match indentation of opening bracket's line
    E124,  # closing bracket does not match visual indentation
    E125,  # continuation line with same indent as next logical line
    E126,  # continuation line over-indented for hanging indent
    E127,  # continuation line over-indented for visual indent
    E128,  # continuation line under-indented for visual indent
    E129,  # visually indented line with same indent as next logical line
    E131,  # continuation line unaligned for hanging indent
    E133,  # closing bracket is missing indentation
    E201,  # whitespace after '('
    E202,  # whitespace before ')'
    E203,  # whitespace before ':'
    E211,  # whitespace before '('
    E221,  # multiple spaces before operator
    E222,  # multiple spaces after operator
    E223,  # tab before operator
    E224,  # tab after operator
    E225,  # missing whitespace around operator
    E226,  # missing whitespace around arithmetic operator
    E227,  # missing whitespace around bitwise or shift operator
    E228,  # missing whitespace around modulo operator
    E231,  # missing whitespace after ',', ';', or ':'
    E241,  # multiple spaces after ','
    E242,  # tab after ','
    E251,  # unexpected spaces around keyword / parameter equals
    E261,  # at least two spaces before inline comment
    E262,  # inline comment should start with '# '
    E265,  # block comment should start with '# '
    E266,  # too many leading '#' for block comment
    E271,  # multiple spaces after keyword
    E272,  # multiple spaces before keyword
    E273,  # tab after keyword
    E274,  # tab before keyword
    E275,  # missing whitespace after keyword
    E301,  # expected 1 blank line, found 0
    E302,  # expected 2 blank lines, found 0
    E303,  # too many blank lines
    E304,  # blank lines found after function decorator
    E305,  # expected 2 blank lines after end of function or class
    E306,  # expected 1 blank line before a nested definition
    E401,  # multiple imports on one line
    E402,  # module level import not at top of file
    E501,  # line too long (82 > 79 characters)
    E502,  # the backslash is redundant between brackets
    E701,  # multiple statements on one line (colon)
    E702,  # multiple statements on one line (semicolon)
    E703,  # statement ends with a semicolon
    E704,  # multiple statements on one line (def)
    E711,  # comparison to None should be 'if cond is None:'
    E712,  # comparison to True should be 'if cond is True:' or 'if cond:'
    E713,  # test for membership should be 'not in'
    E714,  # test for object identity should be 'is not'
    E721,  # do not compare types, use 'isinstance()'
    E722,  # do not use bare except, specify exception instead
    E731,  # do not assign a lambda expression, use a def
    E741,  # do not use variables named 'l', 'O', or 'I'
    E742,  # do not define classes named 'l', 'O', or 'I'
    E743,  # do not define functions named 'l', 'O', or 'I'
    E901,  # SyntaxError or IndentationError
    E902,  # IOError
    W191,  # indentation contains tabs
    W291,  # trailing whitespace
    W292,  # no newline at end of file
    W293,  # blank line contains whitespace
    W391,  # blank line at end of file
    W503,  # line break before binary operator
    W504,  # line break after binary operator
    W505,  # doc line too long (82 > 79 characters)
    W601,  # .has_key() is deprecated, use 'in'
    W602,  # deprecated form of raising exception
    W603,  # '<>' is deprecated, use '!='
    W604,  # backticks are deprecated, use 'repr()'
    W605,  # invalid escape sequence 'x'
    W606,  # 'async' and 'await' are reserved keywords starting with Python 3.7

    # Full list of flake8 violations
    E999,  # failed to compile a file into an Abstract Syntax Tree for the plugins that require it

    # Full list of mccabe violations
    C901  # complexity value provided by the user

ignore =
    E221,  # multiple spaces before operator
    E231,  # missing whitespace after ',', ';', or ':'
    E241,  # multiple spaces after ','
    W503,  # line break before binary operator
    W504   # line break after binary operator

max-line-length = 160

show_source = True

statistics = True

exclude =
    .git,
    __pycache__,
    # submodules
    components/bootloader/subproject/components/micro-ecc/micro-ecc,
    components/bt/host/nimble/nimble,
    components/cmock/CMock,
    components/json/cJSON,
    components/mbedtls/mbedtls,
    components/openthread/openthread,
    components/unity/unity,
    components/spiffs/spiffs,
    # autogenerated scripts
    components/protocomm/python/constants_pb2.py,
    components/protocomm/python/sec0_pb2.py,
    components/protocomm/python/sec1_pb2.py,
    components/protocomm/python/sec2_pb2.py,
    components/protocomm/python/session_pb2.py,
    components/wifi_provisioning/python/wifi_ctrl_pb2.py,
    components/wifi_provisioning/python/wifi_scan_pb2.py,
    components/wifi_provisioning/python/wifi_config_pb2.py,
    components/wifi_provisioning/python/wifi_constants_pb2.py,
    components/esp_local_ctrl/python/esp_local_ctrl_pb2.py,

per-file-ignores =
    # Sphinx conf.py files use star imports to setup config variables
    docs/conf_common.py: F405
```
.github/ISSUE_TEMPLATE/01_build_install_bug.yml (deleted, 105 lines)

```yaml
name: Installation or build bug report
description: Report installation or build bugs
labels: ['Type: Bug']
body:
  - type: checkboxes
    id: checklist
    attributes:
      label: Answers checklist.
      description: Before submitting a new issue, please follow the checklist and try to find the answer.
      options:
        - label: I have read the documentation [ESP-IDF Programming Guide](https://docs.espressif.com/projects/esp-idf/en/latest/) and the issue is not addressed there.
          required: true
        - label: I have updated my IDF branch (master or release) to the latest version and checked that the issue is present there.
          required: true
        - label: I have searched the issue tracker and not found a similar issue.
          required: true
  - type: input
    id: idf_version
    attributes:
      label: IDF version.
      description: On which IDF version does this issue occur? Run `git describe --tags` to find it.
      placeholder: ex. v3.2-dev-1148-g96cd3b75c
    validations:
      required: true
  - type: dropdown
    id: operating_system
    attributes:
      label: Operating System used.
      multiple: false
      options:
        - Windows
        - Linux
        - macOS
    validations:
      required: true
  - type: dropdown
    id: build
    attributes:
      label: How did you build your project?
      multiple: false
      options:
        - Command line with Make
        - Command line with CMake
        - Command line with idf.py
        - Eclipse IDE
        - CLion IDE
        - VS Code IDE
        - Other (please specify in More Information)
    validations:
      required: true
  - type: dropdown
    id: windows_comand_line
    attributes:
      label: If you are using Windows, please specify command line type.
      multiple: false
      options:
        - PowerShell
        - CMD
    validations:
      required: false
  - type: textarea
    id: expected
    attributes:
      label: What is the expected behavior?
      description: Please provide a clear and concise description of the expected behavior.
      placeholder: I expected it to...
    validations:
      required: true
  - type: textarea
    id: actual
    attributes:
      label: What is the actual behavior?
      description: Please describe the actual behavior.
      placeholder: Instead it...
    validations:
      required: true
  - type: textarea
    id: steps
    attributes:
      label: Steps to reproduce.
      description: 'How do you trigger this bug? Please walk us through it step by step. If this is a build bug, please attach the sdkconfig file (from your project folder). Please attach your code here.'
      value: |
        1. Step
        2. Step
        3. Step
        ...
    validations:
      required: true
  - type: textarea
    id: debug_logs
    attributes:
      label: Build or installation Logs.
      description: Build or installation log goes here; it should contain the backtrace, as well as the reset source if it is a crash.
      placeholder: Your log goes here.
      render: plain
    validations:
      required: false
  - type: textarea
    id: more-info
    attributes:
      label: More Information.
      description: Do you have any other information from investigating this?
      placeholder: ex. I tried on my friend's Windows 10 PC and the command works there.
    validations:
      required: false
```
.github/ISSUE_TEMPLATE/02_runtime_bug.yml (deleted, 133 lines)

```yaml
name: Runtime bug report
description: Report runtime bugs
labels: ['Type: Bug']
body:
  - type: checkboxes
    id: checklist
    attributes:
      label: Answers checklist.
      description: Before submitting a new issue, please follow the checklist and try to find the answer.
      options:
        - label: I have read the documentation [ESP-IDF Programming Guide](https://docs.espressif.com/projects/esp-idf/en/latest/) and the issue is not addressed there.
          required: true
        - label: I have updated my IDF branch (master or release) to the latest version and checked that the issue is present there.
          required: true
        - label: I have searched the issue tracker and not found a similar issue.
          required: true
  - type: input
    id: idf_version
    attributes:
      label: IDF version.
      description: On which IDF version does this issue occur? Run `git describe --tags` to find it.
      placeholder: ex. v3.2-dev-1148-g96cd3b75c
    validations:
      required: true
  - type: input
    id: chip_revision
    attributes:
      label: Espressif SoC revision.
      description: On which Espressif SoC revision does your application run? Run `esptool chip_id` to find it.
      placeholder: ex. ESP32-C3 (QFN32) (revision v0.3)
    validations:
      required: true
  - type: dropdown
    id: operating_system
    attributes:
      label: Operating System used.
      multiple: false
      options:
        - Windows
        - Linux
        - macOS
    validations:
      required: true
  - type: dropdown
    id: build
    attributes:
      label: How did you build your project?
      multiple: false
      options:
        - Command line with Make
        - Command line with CMake
        - Command line with idf.py
        - Eclipse IDE
        - CLion IDE
        - VS Code IDE
        - Other (please specify in More Information)
    validations:
      required: true
  - type: dropdown
    id: windows_comand_line
    attributes:
      label: If you are using Windows, please specify command line type.
      multiple: false
      options:
        - PowerShell
        - CMD
    validations:
      required: false
  - type: input
    id: devkit
    attributes:
      label: Development Kit.
      description: On which Development Kit does this issue occur?
      placeholder: ex. ESP32-Wrover-Kit v2 | Custom Board | QEMU
    validations:
      required: true
  - type: dropdown
    id: power_supply
    attributes:
      label: Power Supply used.
      multiple: false
      options:
        - USB
        - External 5V
        - External 3.3V
        - Battery
    validations:
      required: true
  - type: textarea
    id: expected
    attributes:
      label: What is the expected behavior?
      description: Please provide a clear and concise description of the expected behavior.
      placeholder: I expected it to...
    validations:
      required: true
  - type: textarea
    id: actual
    attributes:
      label: What is the actual behavior?
      description: Please describe the actual behavior.
      placeholder: Instead it...
    validations:
      required: true
  - type: textarea
    id: steps
    attributes:
      label: Steps to reproduce.
      description: 'How do you trigger this bug? Please walk us through it step by step. Please attach your code here.'
      value: |
        1. Step
        2. Step
        3. Step
        ...
    validations:
      required: true
  - type: textarea
    id: debug_logs
    attributes:
      label: Debug Logs.
      description: Debug log goes here; it should contain the backtrace, as well as the reset source if it is a crash.
      placeholder: Your log goes here.
      render: plain
    validations:
      required: false
  - type: textarea
    id: more-info
    attributes:
      label: More Information.
      description: Do you have any other information from investigating this?
      placeholder: ex. I tried on my friend's Windows 10 PC and the command works there.
    validations:
      required: false
```
.github/ISSUE_TEMPLATE/03_feature_request.yml (deleted, 34 lines)

```yaml
name: Feature request
description: Suggest an idea for this project.
labels: ['Type: Feature Request']
body:
  - type: markdown
    attributes:
      value: |
        * We welcome any ideas or feature requests! It's helpful if you can explain exactly why the feature would be useful.
        * There are usually some outstanding feature requests in the [existing issues list](https://github.com/espressif/esp-idf/labels/Type%3A%20Feature%20Request), feel free to add comments to them.
        * If you would like to contribute, please read the [contributions guide](https://docs.espressif.com/projects/esp-idf/en/stable/esp32/contribute/index.html).
  - type: textarea
    id: problem-related
    attributes:
      label: Is your feature request related to a problem?
      description: Please provide a clear and concise description of what the problem is.
      placeholder: ex. I'm always frustrated when ...
  - type: textarea
    id: solution
    attributes:
      label: Describe the solution you'd like.
      description: Please provide a clear and concise description of what you want to happen.
      placeholder: ex. When connecting to an Espressif chip ...
  - type: textarea
    id: alternatives
    attributes:
      label: Describe alternatives you've considered.
      description: Please provide a clear and concise description of any alternative solutions or features you've considered.
      placeholder: ex. Choosing another approach wouldn't work, because ...
  - type: textarea
    id: context
    attributes:
      label: Additional context.
      description: Please add any other context or screenshots about the feature request here.
      placeholder: ex. This would work only when ...
```
.github/ISSUE_TEMPLATE/04_other_issue.yml (deleted, 23 lines)

```yaml
name: General issue report
description: File an issue report
body:
  - type: checkboxes
    id: checklist
    attributes:
      label: Answers checklist.
      description: Before submitting a new issue, please follow the checklist and try to find the answer.
      options:
        - label: I have read the documentation [ESP-IDF Programming Guide](https://docs.espressif.com/projects/esp-idf/en/latest/) and the issue is not addressed there.
          required: true
        - label: I have updated my IDF branch (master or release) to the latest version and checked that the issue is present there.
          required: true
        - label: I have searched the issue tracker and not found a similar issue.
          required: true
  - type: textarea
    id: issue
    attributes:
      label: General issue report
      description: Your issue report goes here.
      placeholder: ex. How do I run...
    validations:
      required: true
```
.github/ISSUE_TEMPLATE/config.yml (deleted, 14 lines)

```yaml
blank_issues_enabled: true
contact_links:
  - name: ESP-IDF Programming Guide
    url: https://docs.espressif.com/projects/esp-idf/en/latest/
    about: Documentation for configuring and using ESP-IDF
  - name: Espressif documentation page
    url: https://www.espressif.com/en/support/download/documents
    about: Hardware documentation (datasheets, Technical Reference Manual, etc)
  - name: Forum
    url: https://esp32.com
    about: For questions about using ESP-IDF and/or ESP32 series chips. Please submit all questions starting "How do I..." here.
  - name: Hardware-related services
    url: https://www.espressif.com/en/products/hardware-services
    about: Espressif service providing hardware design and certification support
```
.github/dangerjs/.gitignore (deleted, 5 lines)

```
# Transpiled JavaScript (if any)
dist

# Installed dependencies
node_modules
```
.github/dangerjs/README.md (deleted, 47 lines)

# DangerJS pull request automatic review tool - GitHub

## Implementation

The main development is done in the Espressif GitLab project; the Espressif [GitHub project espressif/esp-idf](https://github.com/espressif/esp-idf) is only a public mirror. Therefore, all changes and updates to the DangerJS files (`.github/dangerjs`) must be made via an MR in the **GitLab** repository by an Espressif engineer.

When adding a new Danger rule or updating an existing one, it can be a good idea to test it on the developer's fork of the GitHub project. This way, the new feature can be tested using a GitHub action without concern of damaging Espressif's GitHub repository.

Danger for Espressif GitHub is implemented in TypeScript. This makes the code more readable and robust than plain JavaScript. Compilation to JavaScript code (using `tsc`) is not necessary; Danger handles TypeScript natively.

A good practice is to store each Danger rule in a separate module, and then import these modules into the main Danger file `.github/dangerjs/dangerfile.ts` (see how this is done for the currently present modules when adding a new one).
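For illustration only (this is not a file from this diff), a new rule module might look like the sketch below. The module name `prTitleTooLong`, the threshold, and the message text are invented; only the `declare const` pattern and the `danger.github.pr` DSL mirror the real modules that appear later in this diff:

```ts
// Hypothetical module ".github/dangerjs/prTitleTooLong.ts" (illustration only).
import { DangerDSLType, DangerResults } from "danger";
declare const danger: DangerDSLType;
declare const warn: (message: string, results?: DangerResults) => void;

/**
 * Warn if the pull request title is unreasonably long.
 *
 * @dangerjs WARN
 */
export default function (): void {
    const maxTitleLength: number = 72; // assumed threshold, not from the real rules
    const prTitle: string = danger.github.pr.title;

    if (prTitle.length > maxTitleLength) {
        warn(
            `The PR title is ${prTitle.length} characters long; consider shortening it.`
        );
    }
}
```

Such a module would then be imported and called from `dangerfile.ts` alongside the existing rules (e.g. `import prTitleTooLong from "./prTitleTooLong";`).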
If the Danger module (new check/rule) uses an external NPM module (e.g. `axios`), be sure to add this dependency to `.github/dangerjs/package.json` and also update `.github/dangerjs/package-lock.json`.

In the GitHub action, `danger` is not installed globally (nor are its dependencies); the `npx` call is used to start the `danger` checks in CI.

## Adding new Danger rule

For local development you can use the following strategy.

#### Install dependencies

```sh
cd .github/dangerjs
npm install
```

(If the IDE still shows compiler/typing errors, reload the IDE window.)

#### Add new code as needed or make updates

#### Test locally

Danger rules can be tested locally (without running the GitHub action pipeline). To do this, you first have to export the ENV variables used by Danger in the local terminal:

```sh
export GITHUB_TOKEN='**************************************'
```

Then you can call Danger by:

```sh
cd .github/dangerjs

danger pr https://github.com/espressif/esp-idf/pull/<number_of_pull_request>
```

The result will be displayed in your terminal.
.github/dangerjs/dangerfile.ts (deleted, 48 lines)

```ts
import { DangerResults } from "danger";
declare const results: DangerResults;
declare const message: (message: string, results?: DangerResults) => void;
declare const markdown: (message: string, results?: DangerResults) => void;

// Import modules with danger rules
// (Modules with checks are stored in ".github/dangerjs/<module_name>.ts". To import them, use a path relative to "dangerfile.ts")
import prCommitsTooManyCommits from "./prCommitsTooManyCommits";
import prDescription from "./prDescription";
import prTargetBranch from "./prTargetBranch";
import prInfoContributor from "./prInfoContributor";
import prCommitMessage from "./prCommitMessage";

async function runDangerRules(): Promise<void> {
    // Message to contributor about review and merge process
    const prInfoContributorMessage: string = await prInfoContributor();
    markdown(prInfoContributorMessage);

    // Run danger checks
    prCommitsTooManyCommits();
    prDescription();
    prTargetBranch();
    prCommitMessage();

    // Add success log if no issues
    const dangerFails: number = results.fails.length;
    const dangerWarns: number = results.warnings.length;
    const dangerInfos: number = results.messages.length;
    if (!dangerFails && !dangerWarns && !dangerInfos) {
        return message("Good Job! All checks are passing!");
    }

    // Add retry link
    addRetryLink();
}

runDangerRules();

function addRetryLink(): void {
    const serverUrl: string | undefined = process.env.GITHUB_SERVER_URL;
    const repoName: string | undefined = process.env.GITHUB_REPOSITORY;
    const runId: string | undefined = process.env.GITHUB_RUN_ID;

    const retryLinkUrl: string = `${serverUrl}/${repoName}/actions/runs/${runId}`;
    const retryLink: string = `<sub>:repeat: You can re-run automatic PR checks by retrying the <a href="${retryLinkUrl}">DangerJS action</a></sub>`;

    markdown(retryLink);
}
```
.github/dangerjs/package-lock.json (generated, deleted, 1999 lines; diff suppressed because it is too large)
.github/dangerjs/package.json (deleted, 18 lines)

```json
{
    "name": "dangerjs-github",
    "description": "GitHub PR reviewing with DangerJS",
    "main": "dangerfile.ts",
    "keywords": [],
    "author": "",
    "license": "ISC",
    "dependencies": {
        "axios": "^1.3.3",
        "danger": "^11.2.3",
        "request": "^2.88.2",
        "sync-request": "^6.1.0",
        "typescript": "^5.0.3"
    },
    "devDependencies": {
        "@types/node": "^18.15.11"
    }
}
```
.github/dangerjs/prCommitMessage.ts (deleted, 67 lines)

```ts
import { DangerDSLType, DangerResults } from "danger";
declare const danger: DangerDSLType;
declare const warn: (message: string, results?: DangerResults) => void;

interface Commit {
    message: string;
}

/**
 * Check if commit messages are sufficiently descriptive (not too short).
 *
 * Search for commit messages that appear to be automatically generated or temporary messages and report them.
 *
 * @dangerjs WARN
 */
export default function (): void {
    const prCommits: Commit[] = danger.git.commits;

    const detectRegexes: RegExp[] = [
        /^Merge pull request #\d+ from .*/i, // Automatically generated message by GitHub
        /^Merged .+:.+ into .+/i, // Automatically generated message by GitHub
        /^Automatic merge by GitHub Action/i, // Automatically generated message by GitHub
        /^Merge branch '.*' of .+ into .+/i, // Automatically generated message by GitHub
        /^Create\s[a-zA-Z0-9_.-]+(\.[a-zA-Z0-9]{1,4})?(?=\s|$)/, // Automatically generated message by GitHub using UI
        /^Delete\s[a-zA-Z0-9_.-]+(\.[a-zA-Z0-9]{1,4})?(?=\s|$)/, // Automatically generated message by GitHub using UI
        /^Update\s[a-zA-Z0-9_.-]+(\.[a-zA-Z0-9]{1,4})?(?=\s|$)/, // Automatically generated message by GitHub using UI
        /^Initial commit/i, // Automatically generated message by GitHub
        /^WIP.*/i, // Message starts with prefix "WIP"
        /^Cleaned.*/i, // Message starts with "Cleaned", probably temporary
        /^Test:.*/i, // Message starts with "test" prefix, probably temporary
        /clean ?up/i, // Message contains "clean up", probably temporary
        /^[^A-Za-z0-9\s].*/, // Message starts with special characters
    ];

    let partMessages: string[] = [];

    for (const commit of prCommits) {
        const commitMessage: string = commit.message;
        const commitMessageTitle: string = commit.message.split("\n")[0];

        // Check if the commit message matches any regex from "detectRegexes"
        if (detectRegexes.some((regex) => commitMessage.match(regex))) {
            partMessages.push(
                `- the commit message \`${commitMessageTitle}\` appears to be a temporary or automatically generated message`
            );
            continue;
        }

        // Check if the commit message is not too short
        const shortCommitMessageThreshold: number = 20; // commit message is considered too short below this number of characters
        if (commitMessage.length < shortCommitMessageThreshold) {
            partMessages.push(
                `- the commit message \`${commitMessageTitle}\` may not be sufficiently descriptive`
            );
        }
    }

    // Create report
    if (partMessages.length) {
        partMessages.sort();
        let dangerMessage = `\nSome issues found for the commit messages in this MR:\n${partMessages.join(
            "\n"
        )}
        \nPlease consider updating these commit messages.`;
        warn(dangerMessage);
    }
}
```
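As an aside (not part of the deleted file), the self-contained snippet below exercises two of the detection regexes above against invented sample messages, to show what the rule would flag:

```ts
// Standalone illustration: two of the detection regexes from prCommitMessage.ts
// run against invented sample commit messages.
const detectRegexes: RegExp[] = [
    /^Merge pull request #\d+ from .*/i, // auto-generated merge message by GitHub
    /^WIP.*/i,                           // message starts with the "WIP" prefix
];

const samples: string[] = [
    "Merge pull request #123 from user/branch", // flagged: GitHub merge message
    "WIP: trying things",                       // flagged: WIP prefix
    "esp_wifi: fix crash on deinit",            // passes the regex checks
];

for (const msg of samples) {
    const flagged = detectRegexes.some((regex) => regex.test(msg));
    console.log(`${flagged ? "flagged" : "ok     "}  ${msg}`);
}
```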
.github/dangerjs/prCommitsTooManyCommits.ts (deleted, 19 lines)

```ts
import { DangerDSLType, DangerResults } from "danger";
declare const danger: DangerDSLType;
declare const message: (message: string, results?: DangerResults) => void;

/**
 * Check that the pull request does not have an excessive number of commits (suggest squashing).
 *
 * @dangerjs INFO
 */
export default function (): void {
    const tooManyCommitThreshold: number = 2; // above this number of commits, squashing commits is suggested
    const prCommits: number = danger.github.commits.length;

    if (prCommits > tooManyCommitThreshold) {
        return message(
            `You might consider squashing your ${prCommits} commits (simplifying branch history).`
        );
    }
}
```
.github/dangerjs/prDescription.ts (deleted, 19 lines)

```ts
import { DangerDSLType, DangerResults } from "danger";
declare const danger: DangerDSLType;
declare const warn: (message: string, results?: DangerResults) => void;

/**
 * Check if the pull request has a sufficiently accurate description
 *
 * @dangerjs WARN
 */
export default function (): void {
    const prDescription: string = danger.github.pr.body;
    const shortPrDescriptionThreshold: number = 100; // Description is considered too short below this number of characters

    if (prDescription.length < shortPrDescriptionThreshold) {
        return warn(
            "The PR description looks very brief, please check if more details can be added."
        );
    }
}
```
.github/dangerjs/prInfoContributor.ts (deleted, 58 lines)

```ts
import { DangerDSLType } from "danger";
declare const danger: DangerDSLType;

interface Contributor {
    login?: string;
}

const authorLogin = danger.github.pr.user.login;
const messageKnownContributor: string = `
***
👋 **Hi ${authorLogin}**, thank you for another contribution to the \`espressif/esp-idf\` project!

If the change is approved and passes the tests in our internal git repository, it will appear in this public GitHub repository on the next sync.
***
`;

const messageFirstContributor: string = `
***
👋 **Welcome ${authorLogin}**, thank you for your first contribution to the \`espressif/esp-idf\` project!

📘 Please check the [Contributions Guide](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/contribute/index.html#contributions-guide) for the contribution checklist, information regarding code and documentation style, testing and other topics.

🖊️ Please also make sure you have **read and signed** the [Contributor License Agreement for the espressif/esp-idf project](https://cla-assistant.io/espressif/esp-idf).

#### Pull request review and merge process you can expect
Espressif develops the ESP-IDF project in an internal repository (GitLab). We do welcome contributions in the form of bug reports, feature requests and pull requests via this public GitHub repository.

1. An internal issue has been created for the PR; we assign it to the relevant engineer
2. They review the PR and either approve it or ask you for changes or clarifications
3. Once the GitHub PR is approved, we synchronize it into our internal git repository
4. In the internal git repository we do the final review, collect approvals from core owners and make sure all the automated tests are passing
    - At this point we may do some adjustments to the proposed change, or extend it by adding tests or documentation.
5. If the change is approved and passes the tests it is merged into the \`master\` branch
6. On the next sync from the internal git repository the merged change will appear in this public GitHub repository

***
`;

/**
 * Check whether the author of the pull request is a known or a first-time contributor, and add a message to the PR with information about the review and merge process.
 */
export default async function (): Promise<string> {
    const contributors = await danger.github.api.repos.listContributors({
        owner: danger.github.thisPR.owner,
        repo: danger.github.thisPR.repo,
    });

    const contributorsData: Contributor[] = contributors.data;
    const knownContributors: (string | undefined)[] = contributorsData.map(
        (contributor: Contributor) => contributor.login
    );

    if (knownContributors.includes(authorLogin)) {
        return messageKnownContributor;
    } else {
        return messageFirstContributor;
    }
}
```
.github/dangerjs/prTargetBranch.ts (deleted, 19 lines)

```ts
import { DangerDSLType, DangerResults } from "danger";
declare const danger: DangerDSLType;
declare const fail: (message: string, results?: DangerResults) => void;

/**
 * Check if the target branch is "master"
 *
 * @dangerjs FAIL
 */
export default function (): void {
    const prTargetBranch: string = danger.github?.pr?.base?.ref;

    if (prTargetBranch !== "master") {
        return fail(`
        The target branch for this pull request should be \`master\`.\n
        If you would like to add this feature to the release branch, please state this in the PR description and we will consider backporting it.
        `);
    }
}
```
.github/dangerjs/tsconfig.json (deleted, 17 lines)

```json
{
    "compilerOptions": {
        "module": "commonjs",
        "moduleResolution": "node",
        "esModuleInterop": true,
        "target": "es6",
        "noImplicitAny": true,
        "noUnusedParameters": true,
        "strictNullChecks": true,
        "sourceMap": true,
        "removeComments": true,
        "outDir": "./dist"
    },
    "include": [
        "./*.ts"
    ]
}
```
.github/dependabot.yml (deleted, 15 lines)

```yaml
version: 2
updates:
  - package-ecosystem: "all"
    directory: "/"
    schedule:
      interval: "weekly"
    ignore:
      - directory: ".gitlab/dangerjs"
        patterns:
          - "package-lock.json"
      - directory: ".github/dangerjs"
        patterns:
          - "package-lock.json"
    # Disable "version updates" (keep only "security updates")
    open-pull-requests-limit: 0
```
.github/workflows/dangerjs.yml (deleted, 36 lines)

```yaml
name: DangerJS Pull Request review

on:
  pull_request_target:
    types: [opened, edited, reopened, synchronize]

permissions:
  pull-requests: write
  contents: write

jobs:
  danger-check:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: .github/dangerjs
    steps:
      - name: Check out PR head
        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup NodeJS environment
        uses: actions/setup-node@v3
        with:
          node-version: 18
          cache: npm
          cache-dependency-path: .github/dangerjs/package-lock.json

      - name: Install DangerJS dependencies
        run: npm install

      - name: Run DangerJS
        run: npx danger ci --failOnErrors -v
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/docker.yml (deleted, 87 lines)

```yaml
name: docker

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}

on:
  push:
    branches:
      - 'master'
      - 'release/*'
    tags:
      - 'v*.*'

env:
  # Platforms to build the image for
  BUILD_PLATFORMS: linux/amd64,linux/arm64
  DOCKERHUB_REPO: ${{ github.repository_owner }}/idf

jobs:
  docker:
    # Disable the job in forks
    if: ${{ github.repository_owner == 'espressif' }}

    runs-on: ubuntu-latest
    steps:
      # Depending on the branch/tag, set CLONE_BRANCH_OR_TAG variable (used in the Dockerfile
      # as a build arg) and TAG_NAME (used when tagging the image).
      #
      # The following 3 steps cover the alternatives (tag, release branch, master branch):
      - name: Set variables (tags)
        if: ${{ github.ref_type == 'tag' }}
        run: |
          echo "CLONE_BRANCH_OR_TAG=$GITHUB_REF_NAME" >> $GITHUB_ENV
          echo "TAG_NAME=$GITHUB_REF_NAME" >> $GITHUB_ENV
      - name: Set variables (release branches)
        if: ${{ github.ref_type == 'branch' && startsWith(github.ref_name, 'release/') }}
        run: |
          echo "CLONE_BRANCH_OR_TAG=$GITHUB_REF_NAME" >> $GITHUB_ENV
          echo "TAG_NAME=release-${GITHUB_REF_NAME##release/}" >> $GITHUB_ENV
      - name: Set variables (main branch)
        if: ${{ github.ref_type == 'branch' && github.ref_name == 'master' }}
        run: |
          echo "CLONE_BRANCH_OR_TAG=master" >> $GITHUB_ENV
          echo "TAG_NAME=latest" >> $GITHUB_ENV

      # Display the variables set above, just in case.
      - name: Check variables
        run: |
          echo "CLONE_BRANCH_OR_TAG: $CLONE_BRANCH_OR_TAG"
          echo "CHECKOUT_REF: $CHECKOUT_REF"
          echo "TAG_NAME: $TAG_NAME"

      # The following steps are the standard boilerplate from
      # https://github.com/marketplace/actions/build-and-push-docker-images
      - name: Checkout
        uses: actions/checkout@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up QEMU for multiarch builds
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build and push
        uses: docker/build-push-action@v3
        with:
          context: tools/docker
          push: true
          tags: ${{ env.DOCKERHUB_REPO }}:${{ env.TAG_NAME }}
          platforms: ${{ env.BUILD_PLATFORMS }}
          build-args: |
            IDF_CLONE_URL=${{ github.server_url }}/${{ github.repository }}.git
            IDF_CLONE_BRANCH_OR_TAG=${{ env.CLONE_BRANCH_OR_TAG }}

      - name: Update Docker Hub repository description (master branch)
        if: ${{ github.ref_type == 'branch' && github.ref_name == 'master' }}
        uses: peter-evans/dockerhub-description@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          # Token based authentication is not supported here:
          # https://github.com/peter-evans/dockerhub-description/issues/10
          # https://github.com/docker/roadmap/issues/115#issuecomment-891694974
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
          repository: ${{ env.DOCKERHUB_REPO }}
          readme-filepath: ./tools/docker/README.md
```
.github/workflows/issue_comment.yml (deleted, 23 lines)

```yaml
name: Sync issue comments to JIRA

# This workflow will be triggered when a new issue comment is created (including PR comments)
on: issue_comment

# Limit to a single concurrent run for workflows which can create Jira issues.
# The same concurrency group is used in new_issues.yml
concurrency: jira_issues

jobs:
  sync_issue_comments_to_jira:
    name: Sync Issue Comments to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Sync issue comments to JIRA
        uses: espressif/github-actions/sync_issues_to_jira@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}
```
.github/workflows/new_issues.yml (deleted, 23 lines)

```yaml
name: Sync issues to Jira

# This workflow will be triggered when a new issue is opened
on: issues

# Limit to a single concurrent run for workflows which can create Jira issues.
# The same concurrency group is used in issue_comment.yml
concurrency: jira_issues

jobs:
  sync_issues_to_jira:
    name: Sync issues to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Sync GitHub issues to Jira project
        uses: espressif/github-actions/sync_issues_to_jira@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}
```
.github/workflows/new_prs.yml (deleted, 28 lines)

```yaml
name: Sync remaining PRs to Jira

# This workflow will be triggered every hour, to sync remaining PRs (i.e. PRs with zero comments) to the Jira project
# Note that PRs can also get synced when a new PR comment is created
on:
  schedule:
    - cron: "0 * * * *"

# Limit to a single concurrent run for workflows which can create Jira issues.
# The same concurrency group is used in issue_comment.yml
concurrency: jira_issues

jobs:
  sync_prs_to_jira:
    name: Sync PRs to Jira
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Sync PRs to Jira project
        uses: espressif/github-actions/sync_issues_to_jira@master
        with:
          cron_job: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          JIRA_PASS: ${{ secrets.JIRA_PASS }}
          JIRA_PROJECT: IDFGH
          JIRA_URL: ${{ secrets.JIRA_URL }}
          JIRA_USER: ${{ secrets.JIRA_USER }}
```
.github/workflows/pr_approved.yml (deleted, 23 lines)

```yaml
name: Sync approved PRs to internal codebase
on:
  pull_request_target:
    types: [labeled]

jobs:
  sync_prs_to_internal_codebase:
    name: GitHub PR to Internal Codebase Sync
    runs-on: ubuntu-latest
    if: (github.event.label.name == 'PR-Sync-Merge') ||
        (github.event.label.name == 'PR-Sync-Rebase') ||
        (github.event.label.name == 'PR-Sync-Update')
    steps:
      - uses: actions/checkout@v2
      - name: Sync approved PRs to internal codebase
        uses: espressif/github-actions/github_pr_to_internal_pr@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITLAB_URL: ${{ secrets.GITLAB_URL }}
          GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
          GIT_CONFIG_NAME: ${{ secrets.GIT_CONFIG_NAME }}
          GIT_CONFIG_EMAIL: ${{ secrets.GIT_CONFIG_EMAIL }}
          JIRA_PROJECT: IDFGH
```
.github/workflows/pre_commit_check.yml (deleted, 42 lines)

```yaml
name: Check pre-commit rules

on:
  pull_request:
    types: [opened, reopened, synchronize]

permissions:
  contents: read

jobs:
  pre_commit_check:
    runs-on: ubuntu-latest
    env:
      SKIP: "cleanup-ignore-lists"  # Comma-separated string of ignored pre-commit check IDs
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Fetch head and base refs
        # This is necessary for pre-commit to check the changes in the PR branch
        run: |
          git fetch origin ${{ github.base_ref }}:base_ref
          git fetch origin pull/${{ github.event.pull_request.number }}/head:pr_ref
      - name: Set up Python environment
        uses: actions/setup-python@master
        with:
          python-version: v3.8
      - name: Install python packages
        run: |
          pip install pre-commit
          pre-commit install-hooks
      - name: Run pre-commit and check for any changes
        run: |
          echo "Commits being checked:"
          git log --oneline --no-decorate base_ref..pr_ref
          echo ""
          if ! pre-commit run --from-ref base_ref --to-ref pr_ref --show-diff-on-failure ; then
            echo ""
            echo "::notice::It looks like the commits in this PR have been made without having pre-commit hooks installed."
            echo "::notice::Please see https://docs.espressif.com/projects/esp-idf/en/latest/esp32/contribute/install-pre-commit-hook.html for instructions."
            echo ""
            exit 1
          fi
```
.github/workflows/release_zips.yml (deleted, 17 lines)

```yaml
name: Create zip file with recursive source clone for release

on:
  push:
    tags:
      - v*

jobs:
  release_zips:
    name: Create release zip file
    runs-on: ubuntu-20.04
    steps:
      - name: Create a recursive clone source zip
        uses: espressif/github-actions/release_zips@master
        env:
          RELEASE_PROJECT_NAME: ESP-IDF
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (modified)

```
@@ -18,94 +18,31 @@ GPATH
# eclipse setting
.settings

# MacOS directory files
.DS_Store

# cache dir
.cache/

# Components Unit Test Apps files
components/**/build/
components/**/build_*_*/
components/**/sdkconfig
components/**/sdkconfig.old

# Example project files
examples/**/build/
examples/**/build_esp*_*/
examples/**/sdkconfig
examples/**/sdkconfig.old
examples/**/build

# Doc build artifacts
docs/_build/
docs/doxygen_sqlite3.db

# Downloaded font files
docs/_static/DejaVuSans.ttf
docs/_static/NotoSansSC-Regular.otf
docs/doxygen-warning-log.txt
docs/sphinx-warning-log.txt
docs/sphinx-warning-log-sanitized.txt
docs/xml/
docs/man/

# Unit test app files
tools/unit-test-app/sdkconfig
tools/unit-test-app/sdkconfig.old
tools/unit-test-app/build
tools/unit-test-app/build_*_*/
tools/unit-test-app/builds
tools/unit-test-app/output
tools/unit-test-app/test_configs

# Unit Test CMake compile log folder
log_ut_cmake

# test application build files
tools/test_apps/**/build/
tools/test_apps/**/build_*_*/
tools/test_apps/**/sdkconfig
tools/test_apps/**/sdkconfig.old

TEST_LOGS
# AWS IoT Examples require device-specific certs/keys
examples/protocols/aws_iot/*/main/certs/*.pem.*

# gcov coverage reports
*.gcda
*.gcno
coverage.info
coverage_report/

test_multi_heap_host

# VS Code Settings
.vscode/

# VIM files
*.swp
*.swo

# Sublime Text files
*.sublime-project
*.sublime-workspace

# Clion IDE CMake build & config
.idea/
cmake-build-*/

# Results for the checking of the Python coding style and static analysis
.mypy_cache
flake8_output.txt

# ESP-IDF default build directory name
build

# lock files for examples and components
dependencies.lock

# managed_components for examples
managed_components

# pytest log
pytest_embedded_log/
list_job_*.txt
size_info.txt

# clang config (for LSP)
.clangd

# Vale
.vale/styles/*
```
.gitlab-ci.yml (1062 lines changed; diff suppressed because it is too large)
```
@@ -1,6 +0,0 @@
# For the syntax of this file, see:
#
# https://docs.gitlab.com/ee/user/project/code_owners.html#the-syntax-of-code-owners-files
#

* @esp-idf-codeowners/all-maintainers
```
@@ -1,325 +0,0 @@
# IDF CI

- [IDF CI](#idf-ci)
  - [General Workflow](#general-workflow)
  - [What if Expected Jobs ARE NOT Created?](#what-if-expected-jobs-are-not-created)
  - [MR labels for additional jobs](#mr-labels-for-additional-jobs)
    - [Supported MR Labels](#supported-mr-labels)
    - [How to trigger a `detached` pipeline without pushing new commits?](#how-to-trigger-a-detached-pipeline-without-pushing-new-commits)
  - [How to Develop With `rules.yml`?](#how-to-develop-with-rulesyml)
    - [General Concepts](#general-concepts)
    - [How to Add a New `Job`?](#how-to-add-a-new-job)
    - [How to Add a New `Rules` Template?](#how-to-add-a-new-rules-template)
    - [How to Add a New `if` Anchor?](#how-to-add-a-new-if-anchor)
    - [Naming Rules](#naming-rules)
      - [Common Naming Rules](#common-naming-rules)
      - [`if` Anchors Naming Rules](#if-anchors-naming-rules)
      - [`rules` Template Naming Rules](#rules-template-naming-rules)
  - [Reusable Shell Script `tools/ci/utils.sh`](#reusable-shell-script-toolsciutilssh)
    - [Functions](#functions)
      - [CI Job Related](#ci-job-related)
      - [Shell Script Related](#shell-script-related)
  - [Manifest File to Control the Build/Test apps](#manifest-file-to-control-the-buildtest-apps)
    - [Grammar](#grammar)
    - [Special Rules](#special-rules)
  - [Upload/Download Artifacts to Internal Minio Server](#uploaddownload-artifacts-to-internal-minio-server)
    - [Users Without Access to Minio](#users-without-access-to-minio)
    - [Users With Access to Minio](#users-with-access-to-minio)
      - [Env Vars for Minio](#env-vars-for-minio)
      - [Artifacts Types and File Patterns](#artifacts-types-and-file-patterns)
      - [Upload](#upload)
      - [Download](#download)
## General Workflow

1. Push to a remote branch.
2. Create an MR and choose the related labels (not required).
3. A `detached` pipeline will be created.
4. If you push a new commit, a new pipeline will be created automatically.
## What if Expected Jobs ARE NOT Created?

1. Check the file patterns.

   If you find a job that is not running as expected given the changed files, a commit improving the `pattern` will be appreciated.

2. Add MR labels to run additional tests. Currently this is only necessary for `target-test` jobs; please use these labels as sparingly as possible. Our final goal is to remove all the labels and let the file changes decide everything!
## MR labels for additional jobs

### Supported MR Labels

- `build`
- `build_docs`
- `component_ut[_esp32/esp32s2/...]`
- `custom_test[_esp32/esp32s2/...]`
- `docker`
- `docs`
- `docs_full`: triggers a full docs build, regardless of the files changed
- `example_test[_esp32/esp32s2/...]`
- `fuzzer_test`
- `host_test`
- `integration_test`
- `iperf_stress_test`
- `macos`
- `macos_test`
- `nvs_coverage`
- `submodule`
- `windows`

There are also two general labels, which are not recommended since they trigger a lot of jobs:

- `target_test`: includes all targets for `example_test`, `custom_test`, `component_ut`, `integration_test`
- `all_test`: includes all test labels
### How to trigger a `detached` pipeline without pushing new commits?

Go to the MR web page -> `Pipelines` tab -> click the `Run pipeline` button.

In very rare cases this tab will not show up, because no merge_request pipeline has been created before. Use the web API instead:

```shell
curl -X POST --header "PRIVATE-TOKEN: [YOUR PERSONAL ACCESS TOKEN]" [GITLAB_SERVER]/api/v4/projects/103/merge_requests/[MERGE_REQUEST_IID]/pipelines
```
## How to Develop With `rules.yml`?

### General Concepts

- `pattern`: Defined in an array. A GitLab job will be created if the changed files in this MR match one of the patterns. For example:

  ```yaml
  .patterns-python-files: &patterns-python-files
    - "**/*.py"
  ```

- `label`: Defined in an `if` clause, similar to the previous bot command. A GitLab job will be created if the pipeline variables contain a variable in the `BOT_LABEL_xxx` format (DEPRECATED) or if the corresponding label is included in the MR labels. For example:

  ```yaml
  .if-label-build_docs: &if-label-build_docs
    if: '$BOT_LABEL_BUILD_DOCS || $CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*build_docs(?:,[^,\n\r]+)*$/i'
  ```

- `rule`: A combination of various patterns and labels. It is used via the GitLab YAML `extends` keyword to tell GitLab under which conditions a job will be created. For example:

  ```yaml
  .rules:build:docs:
    rules:
      - <<: *if-protected
      - <<: *if-label-build_docs
      - <<: *if-label-docs
      - <<: *if-dev-push
        changes: *patterns-docs
  ```

An example of a GitLab job using `extends`:

```yaml
check_docs_lang_sync:
  extends:
    - .pre_check_template
    - .rules:build:docs
  script:
    - cd docs
    - ./check_lang_folder_sync.sh
```
### How to Add a New `Job`?

Check if there's a suitable `.rules:<rules-you-need>` template:

1. If there is, put it in the job's `extends`. All done, now you can close this window. (`extends` can be an array or a string.)
2. If there isn't:
   1. Check [How to Add a New `Rules` Template?](#how-to-add-a-new-rules-template) and create a suitable one.
   2. Follow step 1.
### How to Add a New `Rules` Template?

Check if this rule is related to `labels` or `patterns`:

1. If it is, please refer to [dependencies/README.md](./dependencies/README.md) and add the new rules by auto-generation.
2. If it isn't, please continue reading.

Check if there's a suitable `.if-<if-anchor-you-need>` anchor:

1. If there is, create a rule following the [`rules` Template Naming Rules](#rules-template-naming-rules). For detailed information, please refer to the GitLab documentation on [`rules: if`](https://docs.gitlab.com/ee/ci/yaml/README.html#rulesif). Here's an example:

   ```yaml
   .rules:patterns:python-files:
     rules:
       - <<: *if-protected
       - <<: *if-dev-push
         changes: *patterns-python-files
   ```

2. If there isn't:

   1. Check [How to Add a New `if` Anchor?](#how-to-add-a-new-if-anchor) and create a suitable one.
   2. Follow step 1.
### How to Add a New `if` Anchor?

Create an `if` anchor following the [`if` Anchors Naming Rules](#if-anchors-naming-rules). For detailed information about how to write the condition clause, please refer to the GitLab documentation on [`only/except (advanced)`](https://docs.gitlab.com/ee/ci/yaml/README.html#onlyexcept-advanced). Here's an example:

```yaml
.if-schedule: &if-schedule
  if: '$CI_PIPELINE_SOURCE == "schedule"'
```
### Naming Rules

#### Common Naming Rules

If a phrase has multiple words, use `_` to concatenate them.

> e.g. `regular_test`

If a name has multiple phrases, use `-` to concatenate them.

> e.g. `regular_test-example_test`
#### `if` Anchors Naming Rules

- if it's a label: `.if-label-<label_name>`
- if it's a ref: `.if-ref-<ref_name>`
- if it's a branch: `.if-branch-<branch_name>`
- if it's a tag: `.if-tag-<tag_name>`
- if it's a multi-type combination: `.if-ref-<release_name>-branch-<branch_name>`

**Common Phrases/Abbreviations**

- `no_label`

  `$BOT_TRIGGER_WITH_LABEL == null`

- `protected`

  `($CI_COMMIT_REF_NAME == "master" || $CI_COMMIT_BRANCH =~ /^release\/v/ || $CI_COMMIT_TAG =~ /^v\d+\.\d+(\.\d+)?($|-)/)`

- `target_test`

  a combination of `example_test`, `custom_test`, `component_ut`, `integration_test` and all targets
#### `rules` Template Naming Rules

- if it's tag related: `.rules:tag:<tag_1>-<tag_2>`
- if it's label related: `.rules:labels:<label_1>-<label_2>`
- if it's test related: `.rules:test:<test_type>`
- if it's build related: `.rules:build:<build_type>`
- if it's pattern related: `.rules:patterns:<patterns>`
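For instance, a minimal sketch of how these conventions compose (the anchor names below are illustrative and not copied from the real `rules.yml`; `host_test` is one of the supported labels):

```yaml
# illustrative sketch only: a test-related rules template named per the convention above
.rules:test:host_test:
  rules:
    - <<: *if-protected
    - <<: *if-label-host_test          # assumed .if-label-<label_name> anchor
    - <<: *if-dev-push
      changes: *patterns-host_test     # assumed pattern anchor, pre-defined in rules.yml
```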
## Reusable Shell Script `tools/ci/utils.sh`

This file collects all the reusable shell snippets as small functions. If you want to set `before_script: []` for your job, you can set `extends: .before_script_slim` instead; it will only run `source tools/ci/utils.sh`.

If you're developing CI shell scripts, you can use these functions without `source`-ing the file yourself; it is already included in all `before_script` templates.

To run these commands locally in a shell script, place `source tools/ci/utils.sh` at its very beginning.
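For illustration, a minimal job sketch that opts into the slim setup described above (the job name and script are hypothetical; `info` and `run_cmd` are the helper functions listed in the next section):

```yaml
# hypothetical job: skips the heavy before_script but still gets utils.sh sourced
quick_pre_check:
  extends:
    - .before_script_slim   # only runs `source tools/ci/utils.sh`
  script:
    - info "utils.sh helpers such as info/run_cmd are available here"
    - run_cmd echo "done"
```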
### Functions

#### CI Job Related

- `add_gitlab_ssh_keys`
- `add_github_ssh_keys`
- `add_doc_server_ssh_keys`
- `fetch_submodules`
- `get_all_submodules`

#### Shell Script Related

- `error`: log in red color
- `warning`: log in orange color
- `info`: log in green color
- `run_cmd`: run the command and print its duration in seconds
- `retry_failed`: run the command and print its duration in seconds, retrying if it fails
## Manifest File to Control the Build/Test apps

`.build-test-rules.yml` is a manifest file that controls whether CI runs the build and test jobs for an app. The Supported Targets table in an app's `README.md` is auto-generated by `pre-commit` from the app's `.build-test-rules.yml`.

### Grammar

We're using the latest version of [idf-build-apps][idf-build-apps]. Please refer to its [documentation][manifest-doc].

[idf-build-apps]: https://github.com/espressif/idf-build-apps
[manifest-doc]: https://docs.espressif.com/projects/idf-build-apps/en/latest/manifest.html
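To give a flavor of the grammar, here is a hedged sketch of one manifest entry (the app path, targets, and reasons are made up; treat the [manifest documentation][manifest-doc] as authoritative):

```yaml
# hypothetical .build-test-rules.yml entry for one app folder
examples/foo/bar:
  enable:
    - if: IDF_TARGET in ["esp32", "esp32c3"]
      reason: the example only supports these two targets
  disable_test:
    - if: IDF_TARGET == "esp32c3"
      temporary: true
      reason: lack of runners
```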
### Special Rules

In ESP-IDF CI, a few special rules are additionally supported to disable the check-app-dependencies feature:

- Add the MR label `BUILD_AND_TEST_ALL_APPS`
- Run in protected branches
## Upload/Download Artifacts to Internal Minio Server

### Users Without Access to Minio

If you don't have access to the internal Minio server, you can still download the artifacts from the shared link in the job log.

The log will look like this:

```shell
Pipeline ID : 587355
Job name    : build_clang_test_apps_esp32
Job ID      : 40272275
Created archive file: 40272275.zip, uploading as 587355/build_dir_without_map_and_elf_files/build_clang_test_apps_esp32/40272275.zip
Please download the archive file includes build_dir_without_map_and_elf_files from [INTERNAL_URL]
```
### Users With Access to Minio

#### Env Vars for Minio

Minio takes these env vars to connect to the server:

- `IDF_S3_SERVER`
- `IDF_S3_ACCESS_KEY`
- `IDF_S3_SECRET_KEY`
- `IDF_S3_BUCKET`
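As an illustration, these could be supplied in a job definition like so (the values below are placeholders; real credentials normally come from protected CI variables or the runner configuration):

```yaml
# placeholder values only; do not commit real credentials
variables:
  IDF_S3_SERVER: "https://minio.internal.example:9000"
  IDF_S3_BUCKET: "idf-artifacts"
  # IDF_S3_ACCESS_KEY and IDF_S3_SECRET_KEY are injected by the CI environment
```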
#### Artifacts Types and File Patterns

The artifact types and their corresponding file patterns are defined in `tools/ci/artifacts_handler.py`, inside `ArtifactType` and `TYPE_PATTERNS_DICT`.
#### Upload

```shell
python tools/ci/artifacts_handler.py upload
```

will upload the files that match the file patterns to the Minio object storage under the name:

`<pipeline_id>/<artifact_type>/<job_name>/<job_id>.zip`

For example, job 39043328 will upload these four files:

- `575500/map_and_elf_files/build_pytest_examples_esp32/39043328.zip`
- `575500/build_dir_without_map_and_elf_files/build_pytest_examples_esp32/39043328.zip`
- `575500/logs/build_pytest_examples_esp32/39043328.zip`
- `575500/size_reports/build_pytest_examples_esp32/39043328.zip`
#### Download

You may run

```shell
python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id>
```

to download all files of the pipeline, or

```shell
python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id> --job_name <job_name_or_pattern>
```

to download all files with the specified job name or pattern, or

```shell
python tools/ci/artifacts_handler.py download --pipeline_id <pipeline_id> --job_name <job_name_or_pattern> --type <artifact_type> <artifact_type> ...
```

to download all files with the specified job name or pattern and artifact type(s).

You can check the full documentation with `python tools/ci/artifacts_handler.py download -h`.
@@ -1,585 +0,0 @@
.build_template:
  stage: build
  extends:
    - .after_script:build:ccache
  image: $ESP_ENV_IMAGE
  tags:
    - build
    # build only on shiny servers since shiny storage server is at the same location
    - shiny
  variables:
    # Enable ccache for all build jobs. See configure_ci_environment.sh for more ccache related settings.
    IDF_CCACHE_ENABLE: "1"
  dependencies: []

.build_cmake_template:
  extends:
    - .build_template
    - .before_script:build
    - .after_script:build:ccache
  dependencies: # set dependencies to null to avoid missing artifacts issue
  needs:
    - job: fast_template_app
      artifacts: false
    - pipeline_variables
  artifacts:
    paths:
      # The other artifacts patterns are defined under tools/ci/artifacts_handler.py
      # Now we're uploading/downloading the binary files from our internal storage server
      #
      # keep the log file to help debug
      - "**/build*/build_log.txt"
      # keep the size info to help track the binary size
      - size_info.txt
      - "**/build*/size.json"
    when: always
    expire_in: 4 days
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
      -t $IDF_TARGET
      --copy-sdkconfig
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --extra-preserve-dirs
      examples/bluetooth/esp_ble_mesh/ble_mesh_console
      examples/bluetooth/hci/controller_hci_uart_esp32
      examples/wifi/iperf
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    # for detailed documents, please refer to .gitlab/ci/README.md#uploaddownload-artifacts-to-internal-minio-server
    - python tools/ci/artifacts_handler.py upload

.build_cmake_clang_template:
  extends:
    - .build_cmake_template
  variables:
    IDF_TOOLCHAIN: clang
    TEST_BUILD_OPTS_EXTRA: ""
    TEST_DIR: tools/test_apps/system/cxx_pthread_bluetooth
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
      -t $IDF_TARGET
      --copy-sdkconfig
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
      $TEST_BUILD_OPTS_EXTRA
    - python tools/ci/artifacts_handler.py upload

.build_pytest_template:
  extends:
    - .build_cmake_template
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
      -t $IDF_TARGET
      -m \"not host_test\"
      --pytest-apps
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/artifacts_handler.py upload

.build_pytest_no_jtag_template:
  extends:
    - .build_cmake_template
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
      -t $IDF_TARGET
      -m \"not host_test and not jtag\"
      --pytest-apps
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/artifacts_handler.py upload

.build_pytest_jtag_template:
  extends:
    - .build_cmake_template
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $TEST_DIR -v
      -t $IDF_TARGET
      -m \"not host_test and jtag\"
      --pytest-apps
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/artifacts_handler.py upload

build_pytest_examples_esp32:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32
  parallel: 6
  variables:
    IDF_TARGET: esp32
    TEST_DIR: examples

build_pytest_examples_esp32s2:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32s2
  parallel: 3
  variables:
    IDF_TARGET: esp32s2
    TEST_DIR: examples

build_pytest_examples_esp32s3:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32s3
  parallel: 4
  variables:
    IDF_TARGET: esp32s3
    TEST_DIR: examples

build_pytest_examples_esp32c3:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32c3
  parallel: 4
  variables:
    IDF_TARGET: esp32c3
    TEST_DIR: examples

build_pytest_examples_esp32c2:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32c2
  parallel: 2
  variables:
    IDF_TARGET: esp32c2
    TEST_DIR: examples

build_pytest_examples_esp32c6:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32c6
  parallel: 2
  variables:
    IDF_TARGET: esp32c6
    TEST_DIR: examples

build_pytest_examples_esp32h2:
  extends:
    - .build_pytest_no_jtag_template
    - .rules:build:example_test-esp32h2
  parallel: 2
  variables:
    IDF_TARGET: esp32h2
    TEST_DIR: examples

build_pytest_examples_jtag: # for all targets
  extends:
    - .build_pytest_jtag_template
    - .rules:build:example_test
  variables:
    IDF_TARGET: all
    TEST_DIR: examples

build_pytest_components_esp32:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32
  parallel: 5
  variables:
    IDF_TARGET: esp32
    TEST_DIR: components

build_pytest_components_esp32s2:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32s2
  parallel: 4
  variables:
    IDF_TARGET: esp32s2
    TEST_DIR: components

build_pytest_components_esp32s3:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32s3
  parallel: 4
  variables:
    IDF_TARGET: esp32s3
    TEST_DIR: components

build_pytest_components_esp32c3:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32c3
  parallel: 4
  variables:
    IDF_TARGET: esp32c3
    TEST_DIR: components

build_pytest_components_esp32c2:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32c2
  parallel: 3
  variables:
    IDF_TARGET: esp32c2
    TEST_DIR: components

build_pytest_components_esp32c6:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32c6
  parallel: 3
  variables:
    IDF_TARGET: esp32c6
    TEST_DIR: components

build_pytest_components_esp32h2:
  extends:
    - .build_pytest_template
    - .rules:build:component_ut-esp32h2
  parallel: 4
  variables:
    IDF_TARGET: esp32h2
    TEST_DIR: components

build_only_components_apps:
  extends:
    - .build_cmake_template
    - .rules:build:component_ut
  parallel: 5
  script:
    - set_component_ut_vars
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py $COMPONENT_UT_DIRS -v
      -t all
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/artifacts_handler.py upload

build_pytest_test_apps_esp32:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32
  variables:
    IDF_TARGET: esp32
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32s2:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32s2
  variables:
    IDF_TARGET: esp32s2
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32s3:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32s3
  parallel: 2
  variables:
    IDF_TARGET: esp32s3
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32c3:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32c3
  variables:
    IDF_TARGET: esp32c3
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32c2:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32c2
  variables:
    IDF_TARGET: esp32c2
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32c6:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32c6
  variables:
    IDF_TARGET: esp32c6
    TEST_DIR: tools/test_apps

build_pytest_test_apps_esp32h2:
  extends:
    - .build_pytest_template
    - .rules:build:custom_test-esp32h2
  variables:
    IDF_TARGET: esp32h2
    TEST_DIR: tools/test_apps

build_only_tools_test_apps:
  extends:
    - .build_cmake_template
    - .rules:build:custom_test
  parallel: 9
  script:
    # CI specific options start from "--parallel-count xxx". could ignore when running locally
    - run_cmd python tools/ci/ci_build_apps.py tools/test_apps -v
      -t all
      --parallel-count ${CI_NODE_TOTAL:-1}
      --parallel-index ${CI_NODE_INDEX:-1}
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/artifacts_handler.py upload

.build_template_app_template:
  extends:
    - .build_template
    - .before_script:build
  variables:
    LOG_PATH: "${CI_PROJECT_DIR}/log_template_app"
    BUILD_PATH: "${CI_PROJECT_DIR}/build_template_app"
    BUILD_DIR: "${BUILD_PATH}/@t/@w"
    BUILD_LOG_CMAKE: "${LOG_PATH}/cmake_@t_@w.txt"
    BUILD_COMMAND_ARGS: ""
  artifacts:
    when: always
    paths:
      - log_template_app/*
      - size_info.txt
      - build_template_app/**/size.json
    expire_in: 1 week
  script:
    # Set the variable for 'esp-idf-template' testing
    - ESP_IDF_TEMPLATE_GIT=${ESP_IDF_TEMPLATE_GIT:-"https://github.com/espressif/esp-idf-template.git"}
    - retry_failed git clone ${ESP_IDF_TEMPLATE_GIT}
    # Try to use the same branch name for esp-idf-template that we're
    # using on esp-idf. If it doesn't exist then just stick to the default branch
    - python $CHECKOUT_REF_SCRIPT esp-idf-template esp-idf-template
    - export PATH="$IDF_PATH/tools:$PATH"
    # Only do the default cmake build for each target, remaining part are done in the build_template_app job
    - tools/ci/build_template_app.sh ${BUILD_COMMAND_ARGS}

# build-related-pre-check-jobs ------------------------------------------------
# Build at least one project for each target at earliest stage to reduce build cost for obvious failing commits
fast_template_app:
  extends:
    - .build_template_app_template
    - .rules:build:target_test
  stage: pre_check
  variables:
    BUILD_COMMAND_ARGS: "-p"
#------------------------------------------------------------------------------

build_examples_cmake_esp32:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32
  parallel: 8
  variables:
    IDF_TARGET: esp32
    TEST_DIR: examples

build_examples_cmake_esp32s2:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32s2
  parallel: 7
  variables:
    IDF_TARGET: esp32s2
    TEST_DIR: examples

build_examples_cmake_esp32s3:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32s3
  parallel: 11
  variables:
    IDF_TARGET: esp32s3
    TEST_DIR: examples

build_examples_cmake_esp32c2:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32c2
  parallel: 7
  variables:
    IDF_TARGET: esp32c2
    TEST_DIR: examples

build_examples_cmake_esp32c3:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32c3
  parallel: 9
  variables:
    IDF_TARGET: esp32c3
    TEST_DIR: examples

build_examples_cmake_esp32c6:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32c6
  parallel: 11
  variables:
    IDF_TARGET: esp32c6
    TEST_DIR: examples

build_examples_cmake_esp32h2:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32h2
  parallel: 9
  variables:
    IDF_TARGET: esp32h2
    TEST_DIR: examples

build_examples_cmake_esp32p4:
  extends:
    - .build_cmake_template
    - .rules:build:example_test-esp32p4
  parallel: 4
  variables:
    IDF_TARGET: esp32p4
    TEST_DIR: examples

build_clang_test_apps_esp32:
  extends:
    - .build_cmake_clang_template
    - .rules:build:custom_test-esp32
  variables:
    IDF_TARGET: esp32

build_clang_test_apps_esp32s2:
  extends:
    - .build_cmake_clang_template
    - .rules:build:custom_test-esp32s2
  variables:
    IDF_TARGET: esp32s2

build_clang_test_apps_esp32s3:
  extends:
    - .build_cmake_clang_template
    - .rules:build:custom_test-esp32s3
  variables:
    IDF_TARGET: esp32s3

.build_clang_test_apps_riscv:
  extends:
    - .build_cmake_clang_template
  variables:
    # For RISCV clang generates '.linker-options' sections of type 'llvm_linker_options' in asm files.
    # See (https://llvm.org/docs/Extensions.html#linker-options-section-linker-options).
    # Binutils gas ignores them with warning.
    # TODO: LLVM-112, Use integrated assembler.
    TEST_BUILD_OPTS_EXTRA: "--ignore-warning-str 'Warning: unrecognized section type'"

build_clang_test_apps_esp32c3:
  extends:
    - .build_clang_test_apps_riscv
    - .rules:build:custom_test-esp32c3
  variables:
    IDF_TARGET: esp32c3

build_clang_test_apps_esp32c2:
  extends:
    - .build_clang_test_apps_riscv
    - .rules:build:custom_test-esp32c2
  variables:
    IDF_TARGET: esp32c2

build_clang_test_apps_esp32c6:
  extends:
    - .build_clang_test_apps_riscv
    - .rules:build:custom_test-esp32c6
  # TODO: c6 builds fail in master due to missing headers
  allow_failure: true
  variables:
    IDF_TARGET: esp32c6

.test_build_system_template:
  stage: host_test
  extends:
    - .build_template
    - .rules:build:check
  needs:
    - job: fast_template_app
      artifacts: false
      optional: true
  script:
    - ${IDF_PATH}/tools/ci/test_configure_ci_environment.sh
    - cd ${IDF_PATH}/tools/test_build_system
    - python ${IDF_PATH}/tools/ci/get_known_failure_cases_file.py
    - pytest --parallel-count ${CI_NODE_TOTAL:-1} --parallel-index ${CI_NODE_INDEX:-1}
      --work-dir ${CI_PROJECT_DIR}/test_build_system --junitxml=${CI_PROJECT_DIR}/XUNIT_RESULT.xml
      --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}

pytest_build_system:
  extends: .test_build_system_template
  parallel: 3
  artifacts:
    paths:
      - XUNIT_RESULT.xml
      - test_build_system
    when: always
    expire_in: 2 days
    reports:
      junit: XUNIT_RESULT.xml

pytest_build_system_macos:
  extends:
    - .test_build_system_template
    - .before_script:build:macos
    - .rules:build:macos
  tags:
    - macos_shell
  parallel: 3
  artifacts:
    paths:
      - XUNIT_RESULT.xml
      - test_build_system
    when: always
    expire_in: 2 days
    reports:
      junit: XUNIT_RESULT.xml

build_docker:
  extends:
    - .before_script:minimal
    - .rules:build:docker
  stage: host_test
  needs: []
  image: espressif/docker-builder:1
  tags:
    - build_docker_amd64_brno
  variables:
    DOCKER_TMP_IMAGE_NAME: "idf_tmp_image"
  script:
    - export LOCAL_CI_REPOSITORY_URL=$CI_REPOSITORY_URL
    - if [ -n "$LOCAL_GITLAB_HTTPS_HOST" ]; then export LOCAL_CI_REPOSITORY_URL="https://gitlab-ci-token:${CI_JOB_TOKEN}@${LOCAL_GITLAB_HTTPS_HOST}/${CI_PROJECT_PATH}"; fi
    - if [ -n "$LOCAL_GIT_MIRROR" ]; then export LOCAL_CI_REPOSITORY_URL="${LOCAL_GIT_MIRROR}/${CI_PROJECT_PATH}"; fi
    - echo "Using repository at $LOCAL_CI_REPOSITORY_URL"
    - export DOCKER_BUILD_ARGS="--build-arg IDF_CLONE_URL=${LOCAL_CI_REPOSITORY_URL} --build-arg IDF_CLONE_BRANCH_OR_TAG=${CI_COMMIT_REF_NAME} --build-arg IDF_CHECKOUT_REF=${CI_COMMIT_TAG:-$PIPELINE_COMMIT_SHA}"
    # Build
    - docker build --tag ${DOCKER_TMP_IMAGE_NAME} ${DOCKER_BUILD_ARGS} tools/docker/
    # We can't mount $PWD/examples/get-started/blink into the container, see https://gitlab.com/gitlab-org/gitlab-ce/issues/41227.
    # The workaround mentioned there works, but leaves around directories which need to be cleaned up manually.
    # Therefore, build a copy of the example located inside the container.
    - docker run --rm --workdir /opt/esp/idf/examples/get-started/blink ${DOCKER_TMP_IMAGE_NAME} idf.py build

# This job builds template app with permutations of targets and optimization levels
build_template_app:
  extends:
    - .build_template_app_template
    - .rules:build
  stage: host_test
  needs:
    - job: fast_template_app
      artifacts: false
@@ -1,341 +0,0 @@
#####################
# Default Variables #
#####################
stages:
  - upload_cache
  - pre_check
  - build
  - assign_test
  - target_test
  - host_test
  - build_doc
  - test_deploy
  - deploy
  - post_deploy

variables:
  # System environment

  # Common parameters for the 'make' during CI tests
  MAKEFLAGS: "-j5 --no-keep-going"

  # GitLab-CI environment
  # Thanks to pack-objects cache, clone strategy should behave faster than fetch
  # so we pick "clone" as default git strategy
  # Shiny runners by default remove the CI_PROJECT_DIR every time at the beginning of one job
  # and clone with a --depth=1
  # Brew runners will fetch from locally mirror first, and cache the local CI_PROJECT_DIR
  # In conclusion
  # - set GIT_STRATEGY: "clone" to shiny runners
  # - set GIT_STRATEGY: "fetch" to brew runners
  GIT_STRATEGY: clone
  # we will download archive for each submodule instead of clone.
  # we don't do "recursive" when fetch submodule as they're not used in CI now.
  GIT_SUBMODULE_STRATEGY: none
  # since we're using merged-result pipelines, the last commit should work for most cases
  GIT_DEPTH: 1
  # --no-recurse-submodules: we use cache for submodules
  # --prune --prune-tags: in case remote branch or tag is force pushed
  GIT_FETCH_EXTRA_FLAGS: "--no-recurse-submodules --prune --prune-tags"
  # we're using .cache folder for caches
  GIT_CLEAN_FLAGS: -ffdx -e .cache/
  LATEST_GIT_TAG: v5.2.1

  SUBMODULE_FETCH_TOOL: "tools/ci/ci_fetch_submodule.py"
  # by default we will fetch all submodules
  # jobs can overwrite this variable to only fetch submodules they required
  # set to "none" if don't need to fetch submodules
  SUBMODULES_TO_FETCH: "all"
  # tell build system do not check submodule update as we download archive instead of clone
  IDF_SKIP_CHECK_SUBMODULES: 1

  IDF_PATH: "$CI_PROJECT_DIR"
  V: "0"
  CHECKOUT_REF_SCRIPT: "$CI_PROJECT_DIR/tools/ci/checkout_project_ref.py"
  PYTHON_VER: 3.8.17

  # Docker images
  ESP_ENV_IMAGE: "${CI_DOCKER_REGISTRY}/esp-env-v5.2:2"
  ESP_IDF_DOC_ENV_IMAGE: "${CI_DOCKER_REGISTRY}/esp-idf-doc-env-v5.2:2-1"
  QEMU_IMAGE: "${CI_DOCKER_REGISTRY}/qemu-v5.2:2-20230522"
  TARGET_TEST_ENV_IMAGE: "${CI_DOCKER_REGISTRY}/target-test-env-v5.2:2"

  SONARQUBE_SCANNER_IMAGE: "${CI_DOCKER_REGISTRY}/sonarqube-scanner:5"
  PRE_COMMIT_IMAGE: "${CI_DOCKER_REGISTRY}/esp-idf-pre-commit:1"

  # target test repo parameters
  TEST_ENV_CONFIG_REPO: "https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/qa/ci-test-runner-configs.git"

  # cache python dependencies
  PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"

  # Set this variable to the branch of idf-constraints repo in order to test a custom Python constraint file. The
  # branch name must be without the remote part ("origin/"). Keep the variable empty in order to use the constraint
  # file from https://dl.espressif.com/dl/esp-idf.
  CI_PYTHON_CONSTRAINT_BRANCH: ""

  # Update the filename for a specific ESP-IDF release. It is used only with CI_PYTHON_CONSTRAINT_BRANCH.
  CI_PYTHON_CONSTRAINT_FILE: "espidf.constraints.v5.2.txt"

  # Set this variable to repository name of a Python tool you wish to install and test in the context of ESP-IDF CI.
  # Keep the variable empty when not used.
  CI_PYTHON_TOOL_REPO: ""

  # Set this variable to the branch of a Python tool repo specified in CI_PYTHON_TOOL_REPO. The
  # branch name must be without the remote part ("origin/"). Keep the variable empty when not used.
  # This is used only if CI_PYTHON_TOOL_REPO is not empty.
  CI_PYTHON_TOOL_BRANCH: ""

  # Set this variable to specify the file name for the known failure cases.
  KNOWN_FAILURE_CASES_FILE_NAME: "5.2.txt"

  IDF_CI_BUILD: 1

################################################
# `before_script` and `after_script` Templates #
################################################
.common_before_scripts: &common-before_scripts |
  source tools/ci/utils.sh
  is_based_on_commits $REQUIRED_ANCESTOR_COMMITS

  if [[ -n "$IDF_DONT_USE_MIRRORS" ]]; then
    export IDF_MIRROR_PREFIX_MAP=
  fi

  if echo "$CI_MERGE_REQUEST_LABELS" | egrep "(^|,)include_nightly_run(,|$)"; then
    export INCLUDE_NIGHTLY_RUN="1"
    export NIGHTLY_RUN="1"
  fi

  # configure cmake related flags
  source tools/ci/configure_ci_environment.sh

  # add extra python packages
  export PYTHONPATH="$IDF_PATH/tools:$IDF_PATH/tools/esp_app_trace:$IDF_PATH/components/partition_table:$IDF_PATH/tools/ci/python_packages:$PYTHONPATH"

.setup_tools_and_idf_python_venv: &setup_tools_and_idf_python_venv |
  # must use after setup_tools_except_target_test
  # otherwise the export.sh won't work properly

  # download constraint file for dev
  if [[ -n "$CI_PYTHON_CONSTRAINT_BRANCH" ]]; then
    wget -O /tmp/constraint.txt --header="Authorization:Bearer ${ESPCI_TOKEN}" ${GITLAB_HTTP_SERVER}/api/v4/projects/2581/repository/files/${CI_PYTHON_CONSTRAINT_FILE}/raw?ref=${CI_PYTHON_CONSTRAINT_BRANCH}
    mkdir -p ~/.espressif
    mv /tmp/constraint.txt ~/.espressif/${CI_PYTHON_CONSTRAINT_FILE}
  fi

  # Mirror
  if [[ -n "$IDF_DONT_USE_MIRRORS" ]]; then
    export IDF_MIRROR_PREFIX_MAP=
  fi

  # install latest python packages
  # target test jobs
  if [[ "${CI_JOB_STAGE}" == "target_test" ]]; then
    run_cmd bash install.sh --enable-ci --enable-pytest
  elif [[ "${CI_JOB_STAGE}" == "build_doc" ]]; then
    run_cmd bash install.sh --enable-ci --enable-docs
  elif [[ "${CI_JOB_STAGE}" == "build" ]]; then
    run_cmd bash install.sh --enable-ci --enable-pytest
  else
    if ! echo "${CI_JOB_NAME}" | egrep ".*pytest.*"; then
      run_cmd bash install.sh --enable-ci
    else
      run_cmd bash install.sh --enable-ci --enable-pytest
    fi
  fi

  # Install esp-clang if necessary
  if [[ "$IDF_TOOLCHAIN" == "clang" ]]; then
    $IDF_PATH/tools/idf_tools.py --non-interactive install esp-clang
  fi

  # Since the version 3.21 CMake passes source files and include dirs to ninja using absolute paths.
  # Needed for pytest junit reports.
  $IDF_PATH/tools/idf_tools.py --non-interactive install cmake

  source ./export.sh

  # Custom clang
  if [[ ! -z "$CI_CLANG_DISTRO_URL" ]]; then
    echo "Using custom clang from ${CI_CLANG_DISTRO_URL}"
    wget $CI_CLANG_DISTRO_URL
    ARCH_NAME=$(basename $CI_CLANG_DISTRO_URL)
    tar -x -f $ARCH_NAME
    export PATH=$PWD/esp-clang/bin:$PATH
  fi

  # Custom OpenOCD
  if [[ ! -z "$OOCD_DISTRO_URL" && "$CI_JOB_STAGE" == "target_test" ]]; then
    echo "Using custom OpenOCD from ${OOCD_DISTRO_URL}"
    wget $OOCD_DISTRO_URL
    ARCH_NAME=$(basename $OOCD_DISTRO_URL)
    tar -x -f $ARCH_NAME
    export OPENOCD_SCRIPTS=$PWD/openocd-esp32/share/openocd/scripts
    export PATH=$PWD/openocd-esp32/bin:$PATH
  fi

  if [[ -n "$CI_PYTHON_TOOL_REPO" ]]; then
    git clone --quiet --depth=1 -b ${CI_PYTHON_TOOL_BRANCH} https://gitlab-ci-token:${ESPCI_TOKEN}@${GITLAB_HTTPS_HOST}/espressif/${CI_PYTHON_TOOL_REPO}.git
    pip install ./${CI_PYTHON_TOOL_REPO}
    rm -rf ${CI_PYTHON_TOOL_REPO}
  fi

.show_ccache_statistics: &show_ccache_statistics |
  # Show ccache statistics if enabled globally
  test "$CI_CCACHE_STATS" == 1 && test -n "$(which ccache)" && ccache --show-stats || true

.upload_failed_job_log_artifacts: &upload_failed_job_log_artifacts |
  if [ $CI_JOB_STATUS = "failed" ]; then
    python tools/ci/artifacts_handler.py upload --type logs
  fi

.before_script:minimal:
  before_script:
    - *common-before_scripts

.before_script:build:macos:
  before_script:
    - *common-before_scripts
    # On macOS, these tools need to be installed
    - export IDF_TOOLS_PATH="${HOME}/.espressif_runner_${CI_RUNNER_ID}_${CI_CONCURRENT_ID}"
    - $IDF_PATH/tools/idf_tools.py --non-interactive install cmake ninja
    # This adds tools (compilers) and the version-specific Python environment to PATH
    - *setup_tools_and_idf_python_venv
    - fetch_submodules

.before_script:build:
  before_script:
    - *common-before_scripts
    - *setup_tools_and_idf_python_venv
    - add_gitlab_ssh_keys
    - fetch_submodules
    - export EXTRA_CFLAGS=${PEDANTIC_CFLAGS}
    - export EXTRA_CXXFLAGS=${PEDANTIC_CXXFLAGS}

.after_script:build:ccache:
  after_script:
    - *show_ccache_statistics
    - *upload_failed_job_log_artifacts

##############################
# Git Strategy Job Templates #
##############################
.git_init: &git_init |
  mkdir -p "${CI_PROJECT_DIR}"
  cd "${CI_PROJECT_DIR}"
  git init

.git_fetch_from_mirror_url_if_exists: &git_fetch_from_mirror_url_if_exists |
  # check if set mirror
  if [ -n "${LOCAL_GITLAB_HTTPS_HOST:-}" ] && [ -n "${ESPCI_TOKEN:-}" ]; then
    MIRROR_REPO_URL="https://bot:${ESPCI_TOKEN}@${LOCAL_GITLAB_HTTPS_HOST}/${CI_PROJECT_PATH}"
  elif [ -n "${LOCAL_GIT_MIRROR:-}" ]; then
    MIRROR_REPO_URL="${LOCAL_GIT_MIRROR}/${CI_PROJECT_PATH}"
  fi

  # fetch from mirror first if set
  if [ -n "${MIRROR_REPO_URL:-}" ]; then
    if git remote -v | grep origin; then
      git remote set-url origin "${MIRROR_REPO_URL}"
    else
      git remote add origin "${MIRROR_REPO_URL}"
    fi
    # mirror url may fail with authentication issue
    git fetch origin --no-recurse-submodules || true
  fi

  # set remote url to CI_REPOSITORY_URL
  if git remote -v | grep origin; then
    git remote set-url origin "${CI_REPOSITORY_URL}"
  else
    git remote add origin "${CI_REPOSITORY_URL}"
  fi

.git_checkout_fetch_head: &git_checkout_fetch_head |
  git checkout FETCH_HEAD
  git clean ${GIT_CLEAN_FLAGS}

# git diff requires two commits, with different CI env var
#
# By default, we use git strategy "clone" with depth 1 to speed up the clone process.
# But for jobs requires running `git diff`, we need to fetch more commits to get the correct diffs.
#
# Since there's no way to get the correct git_depth before the job starts,
# we can't set `GIT_DEPTH` in the job definition.
#
# Set git strategy to "none" and fetch manually instead.
.before_script:fetch:git_diff:
  variables:
    GIT_STRATEGY: none
  before_script:
    - *git_init
    - *git_fetch_from_mirror_url_if_exists
    - |
      # merged results pipelines, by default
      if [[ -n $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA ]]; then
        git fetch origin $CI_MERGE_REQUEST_DIFF_BASE_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        git fetch origin $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        export GIT_DIFF_OUTPUT=$(git diff --name-only $CI_MERGE_REQUEST_DIFF_BASE_SHA $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA)
      # merge request pipelines, when the mr got conflicts
      elif [[ -n $CI_MERGE_REQUEST_DIFF_BASE_SHA ]]; then
        git fetch origin $CI_MERGE_REQUEST_DIFF_BASE_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        git fetch origin $CI_COMMIT_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        export GIT_DIFF_OUTPUT=$(git diff --name-only $CI_MERGE_REQUEST_DIFF_BASE_SHA $CI_COMMIT_SHA)
      # other pipelines, like the protected branches pipelines
      elif [[ "$CI_COMMIT_BEFORE_SHA" != "0000000000000000000000000000000000000000" ]]; then
        git fetch origin $CI_COMMIT_BEFORE_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        git fetch origin $CI_COMMIT_SHA --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
        export GIT_DIFF_OUTPUT=$(git diff --name-only $CI_COMMIT_BEFORE_SHA $CI_COMMIT_SHA)
      else
        # pipeline source could be web, scheduler, etc.
        git fetch origin $CI_COMMIT_SHA --depth=2 ${GIT_FETCH_EXTRA_FLAGS}
        export GIT_DIFF_OUTPUT=$(git diff --name-only $CI_COMMIT_SHA~1 $CI_COMMIT_SHA)
      fi
    - *git_checkout_fetch_head
    - *common-before_scripts
    - *setup_tools_and_idf_python_venv
    - add_gitlab_ssh_keys

# target test runners may locate in different places
# for runners set git mirror, we fetch from the mirror first, then fetch the HEAD commit
.before_script:fetch:target_test:
  variables:
    GIT_STRATEGY: none
  before_script:
    - *git_init
    - *git_fetch_from_mirror_url_if_exists
    - git fetch origin "${CI_COMMIT_SHA}" --depth=1 ${GIT_FETCH_EXTRA_FLAGS}
    - *git_checkout_fetch_head
    - *common-before_scripts
    - *setup_tools_and_idf_python_venv
    - add_gitlab_ssh_keys
    # no submodules

#############
# `default` #
#############
default:
  cache:
    # pull only for most of the use cases since it's cache dir.
    # Only set "push" policy for "upload_cache" stage jobs
    - key: pip-cache
      paths:
        - .cache/pip
      policy: pull
    - key: submodule-cache
      paths:
        - .cache/submodule_archives
      policy: pull
  before_script:
    - *common-before_scripts
    - *setup_tools_and_idf_python_venv
    - add_gitlab_ssh_keys
    - fetch_submodules
  retry:
    max: 2
    when:
      # In case of a runner failure we could hop to another one, or a network error could go away.
      - runner_system_failure
      # Job execution timeout may be caused by a network issue.
      - job_execution_timeout
@@ -1,18 +0,0 @@
# this file supports two keywords:
# - extra_default_build_targets:
#   besides the SUPPORTED_TARGETS in IDF,
#   enable build for the specified targets by default as well.
# - bypass_check_test_targets:
#   suppress the check_build_test_rules check-test-script warnings for the specified targets
#
# This file should ONLY be used during bringup. It should be reset to empty after the bringup process.
extra_default_build_targets:
  - esp32p4

bypass_check_test_targets:
  - esp32p4
#
# These lines would
# - enable the README.md check for esp32p4. Don't forget to add the build jobs in .gitlab/ci/build.yml
# - disable the test script check with the manifest file.
#
@@ -1,100 +0,0 @@
# How the `generate_rules.py` works

## Functionalities

This script can do only two things:

1. Auto-generate some labels/rules we need and update them in `rules.yml`
2. Generate a dependency tree graph

## Schema

This file uses only basic YAML grammar and has nothing to do with the GitLab flavor of YAML.

It has five custom keywords:

- `matrix`: An array of sub-arrays, used to replicate rules by formatting strings. You can use the format string everywhere; it will be formatted recursively.
- `labels`: An array of `labels`.
- `patterns`: An array of `patterns`. Patterns that are not defined in `rules.yml` will be ignored.
- `included_in`: An array of other `rule` names. It indicates that the `labels` and `patterns` will be included in all the specified `rules` as well.
- `deploy`: An array of strings, used to replicate rules by adding the postfix `-<item in deploy array>`. It indicates the extra `label` used in `rules`, which is explained later.

## How to use this file to generate `rules.yml`

Let's take a complicated example to help understand the process:

```yaml
"test-{0}-{1}":
  matrix:
    - [a, b]
    - [c, d]
  labels:
    - "{0}-{1}"
  patterns:
    - "{0}"
    - pattern-not-exist
  included_in:
    - build-{0}
```

1. Expand the mapping dicts defined by `matrix`.

   After this step, it will turn into 4 dicts:

   | key      | labels | patterns | included_in |
   | -------- | ------ | -------- | ----------- |
   | test-a-c | a-c    | a        | build-a     |
   | test-a-d | a-d    | a        | build-a     |
   | test-b-c | b-c    | b        | build-b     |
   | test-b-d | b-d    | b        | build-b     |

   **Advanced usage: you can overwrite a mapping by declaring it again later.** For example, if we concatenate this part to the previous example,

   ```yaml
   # ... The same as the previous example

   test-a-c:
     labels:
       - overwrite
   ```

   the `rule` `test-a-c` will be turned into:

   | key      | labels    |
   | -------- | --------- |
   | test-a-c | overwrite |

   **Mappings with the keyword `deploy` will also be replicated by adding a postfix `-<item in deploy array>` to the mapping key.**

2. Create rules by `included_in`.

   After this step, it will turn into 6 mapping dicts:

   | key      | labels   | patterns |
   | -------- | -------- | -------- |
   | test-a-c | a-c      | a        |
   | test-a-d | a-d      | a        |
   | test-b-c | b-c      | b        |
   | test-b-d | b-d      | b        |
   | build-a  | a-c, a-d | a        |
   | build-b  | b-c, b-d | b        |

3. Replace the auto-generated region in `rules.yml` with the `labels` and `rules`. Each mapping will generate a `rule` and all the required labels. `patterns` are pre-defined in `rules.yml` and can not be generated automatically. If a mapping uses an undefined `pattern`, that `pattern` will be ignored.

   - If a mapping key has the postfix `-preview`, no `if-protected-xxx` clause will be added
   - else if a mapping key has the postfix `-production`, an `if-protected-no_label` clause will be added
   - else, an `if-protected` clause will be added

## Graph

All `label` nodes are green, `pattern` nodes are cyan, and `rule` nodes are blue.

### Requirements

There are a few extra dependencies for generating the dependency tree graph; please refer to the [pygraphviz](https://github.com/pygraphviz/pygraphviz/blob/master/INSTALL.txt) documentation to install both `graphviz` and `pygraphviz`.

### CLI usage

`python $IDF_PATH/tools/ci/generate_rules.py --graph OUTPUT_PATH`
@@ -1,183 +0,0 @@
.all_targets: &all_targets
  - esp32
  - esp32s2
  - esp32s3
  - esp32c3
  - esp32c2
  - esp32c6
  - esp32h2
  - esp32p4

.target_test: &target_test
  - example_test
  - custom_test
  - component_ut

##############
# Build Jobs #
##############
"build":
  labels:
    - build
  patterns:
    - build_components
    - build_system
    - downloadable-tools
  included_in:
    - build:target_test
    - build:check

# -------------------
# Specific Build Jobs
# -------------------
"build:docker":
  labels:
    - build
    - docker
  patterns:
    - docker
    - submodule
    - build_system
    - downloadable-tools

"build:macos":
  labels:
    - build
    - macos
    - macos_test # for backward compatibility
  patterns:
    - build_system
    - build_macos
    - downloadable-tools

# ---------------------------
# Add patterns to build rules
# ---------------------------
"patterns:template-app":
  patterns:
    - build_template-app
  included_in:
    - build:target_test

"patterns:build-check":
  patterns:
    - build_check
  included_in:
    - build:check

# ---------------
# Build Test Jobs
# ---------------
"build:{0}-{1}":
  matrix:
    - *target_test
    - *all_targets
  labels:
    - build
  patterns:
    - build_components
    - build_system
    - build_target_test
    - downloadable-tools
  included_in:
    - "build:{0}"
    - build:target_test

####################
# Target Test Jobs #
####################
"test:{0}-{1}":
  matrix:
    - *target_test
    - *all_targets
  labels: # For each rule, use labels <test_type> and <test_type>-<target>
    - "{0}"
    - "{0}_{1}"
    - target_test
  patterns: # For each rule, use patterns <test_type> and build-<test_type>
    - "{0}"
    - "build-{0}"
  included_in: # Parent rules
    - "build:{0}"
    - "build:{0}-{1}"
    - build:target_test

# -------------
# Special Cases
# -------------

# To reduce the specific runners' usage.
# Do not create these jobs by default patterns on development branches
# Can be triggered by labels or related changes
"test:{0}-{1}-{2}":
  matrix:
    - *target_test
    - *all_targets
    - - wifi # pytest*wifi*
      - ethernet # pytest*ethernet*
      - sdio # pytest*sdio*
      - usb # USB Device & Host tests
      - adc # pytest*adc*
      - i154
      - flash_multi
      - ecdsa
      - nvs_encr_hmac
  patterns:
    - "{0}-{1}-{2}"
    - "{0}-{2}"
    - "target_test-{2}"
  labels:
    - "{0}_{1}"
    - "{0}"
    - target_test
  included_in:
    - "build:{0}-{1}"
    - "build:{0}"
    - build:target_test

# For example_test*flash_encryption_wifi_high_traffic jobs
# set `INCLUDE_NIGHTLY_RUN` variable when triggered on development branches
"test:example_test-{0}-include_nightly_run-rule":
  matrix:
    - - esp32
      - esp32c3
  specific_rules:
    - "if-example_test-ota-include_nightly_run-rule"
  included_in:
    - "build:example_test-{0}"
    - "build:example_test"
    - build:target_test

# For i154 runners
"test:example_test-i154":
  patterns:
    - "example_test-i154"
    - "target_test-i154"
  labels:
    - target_test
    - example_test
  included_in:
    - "build:example_test-esp32s3"
    - "build:example_test-esp32c6"
    - "build:example_test-esp32h2"
    - "build:example_test"
    - build:target_test

"test:host_test":
  labels:
    - host_test
  patterns:
    - host_test

"test:submodule":
  labels:
    - submodule
  patterns:
    - submodule

#################################
# Triggered Only By Labels Jobs #
#################################
"labels:nvs_coverage": # host_test
  labels:
    - nvs_coverage
@@ -1,61 +0,0 @@
.deploy_job_template:
  stage: deploy
  image: $ESP_ENV_IMAGE
  tags: [ deploy ]

# Check this before push_to_github
check_submodule_sync:
  extends:
    - .deploy_job_template
    - .rules:test:submodule
  stage: test_deploy
  tags: [ brew, github_sync ]
  retry: 2
  variables:
    # for brew runners, we always set GIT_STRATEGY to fetch
    GIT_STRATEGY: fetch
    SUBMODULES_TO_FETCH: "none"
    PUBLIC_IDF_URL: "https://github.com/espressif/esp-idf.git"
  dependencies: []
  script:
    - git submodule deinit --force .
    # set the default remote URL to the public one, to resolve relative submodule URLs
    - git config remote.origin.url ${PUBLIC_IDF_URL}
    # check that all submodules are correctly synced to the public repository
    - git submodule init
    - git config --get-regexp '^submodule\..*\.url$' || true
    - git submodule update --recursive
    - echo "IDF was cloned from ${PUBLIC_IDF_URL} completely"
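# Illustration (hypothetical .gitmodules entry, added for clarity): a relative
# submodule URL such as
#   [submodule "components/esp-mqtt"]
#       url = ../../espressif/esp-mqtt.git
# is resolved against remote.origin.url, so pointing origin at the public
# GitHub URL above makes `git submodule update` fetch from github.com instead
# of an internal mirror -- which is exactly what this sync check verifies.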

push_to_github:
  extends:
    - .deploy_job_template
    - .before_script:minimal
    - .rules:push_to_github
  needs:
    - check_submodule_sync
  tags: [ brew, github_sync ]
  variables:
    # for brew runners, we always set GIT_STRATEGY to fetch
    GIT_STRATEGY: fetch
    # GitHub also needs the full record of commits
    GIT_DEPTH: 0
  script:
    - add_github_ssh_keys
    - git remote remove github &>/dev/null || true
    - git remote add github git@github.com:espressif/esp-idf.git
    - tools/ci/push_to_github.sh

deploy_update_SHA_in_esp-dockerfiles:
  extends:
    - .deploy_job_template
    - .before_script:minimal
    - .rules:protected-no_label-always
  dependencies: []
  variables:
    GIT_DEPTH: 2
  tags: [ shiny, build ]
  script:
    - 'curl --header "PRIVATE-TOKEN: ${ESPCI_SCRIPTS_TOKEN}" -o create_MR_in_esp_dockerfile.sh $GITLAB_HTTP_SERVER/api/v4/projects/1260/repository/files/create_MR_in_esp_dockerfile%2Fcreate_MR_in_esp_dockerfile.sh/raw\?ref\=master'
    - chmod +x create_MR_in_esp_dockerfile.sh
    - ./create_MR_in_esp_dockerfile.sh
@@ -1,277 +0,0 @@
.patterns-docs-full: &patterns-docs-full
  - ".gitlab/ci/docs.yml"
  - "docs/**/*"
  - "**/*.rst"
  - "CONTRIBUTING.rst"
  - "**/soc_caps.h"

.patterns-docs-partial: &patterns-docs-partial
  - "components/**/*.h"
  - "components/**/Kconfig*"
  - "components/**/CMakeList.txt"
  - "components/**/sdkconfig*"
  - "tools/tools.json"
  - "tools/idf_tools.py"

.patterns-example-readme: &patterns-example-readme
  - "examples/**/*.md"

.patterns-docs-preview: &patterns-docs-preview
  - "docs/**/*"

.if-protected: &if-protected
  if: '($CI_COMMIT_REF_NAME == "master" || $CI_COMMIT_BRANCH =~ /^release\/v/ || $CI_COMMIT_TAG =~ /^v\d+\.\d+(\.\d+)?($|-)/)'

.if-protected-no_label: &if-protected-no_label
  if: '($CI_COMMIT_REF_NAME == "master" || $CI_COMMIT_BRANCH =~ /^release\/v/ || $CI_COMMIT_TAG =~ /^v\d+\.\d+(\.\d+)?($|-)/) && $BOT_TRIGGER_WITH_LABEL == null'

.if-qa-test-tag: &if-qa-test-tag
  if: '$CI_COMMIT_TAG =~ /^qa-test/'

.if-label-build_docs: &if-label-build_docs
  if: '$BOT_LABEL_BUILD_DOCS || $CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*build_docs(?:,[^,\n\r]+)*$/i'

.if-label-docs_full: &if-label-docs_full
  if: '$BOT_LABEL_DOCS_FULL || $CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*docs_full(?:,[^,\n\r]+)*$/i'

.if-dev-push: &if-dev-push
  if: '$CI_COMMIT_REF_NAME != "master" && $CI_COMMIT_BRANCH !~ /^release\/v/ && $CI_COMMIT_TAG !~ /^v\d+\.\d+(\.\d+)?($|-)/ && $CI_COMMIT_TAG !~ /^qa-test/ && ($CI_PIPELINE_SOURCE == "push" || $CI_PIPELINE_SOURCE == "merge_request_event")'

.if-schedule: &if-schedule
  if: '$CI_PIPELINE_SOURCE == "schedule"'

.doc-rules:build:docs-full:
  rules:
    - <<: *if-qa-test-tag
      when: never
    - <<: *if-schedule
    - <<: *if-label-build_docs
    - <<: *if-label-docs_full
    - <<: *if-dev-push
      changes: *patterns-docs-full

.doc-rules:build:docs-full-prod:
  rules:
    - <<: *if-qa-test-tag
      when: never
    - <<: *if-protected-no_label

.doc-rules:build:docs-partial:
  rules:
    - <<: *if-qa-test-tag
      when: never
    - <<: *if-dev-push
      changes: *patterns-docs-full
      when: never
    - <<: *if-dev-push
      changes: *patterns-docs-partial

# stage: pre_check
check_readme_links:
  extends:
    - .pre_check_template
  tags: ["build", "amd64", "internet"]
  allow_failure: true
  rules:
    - <<: *if-protected
    - <<: *if-dev-push
      changes: *patterns-example-readme
  script:
    - python ${IDF_PATH}/tools/ci/check_readme_links.py

check_docs_lang_sync:
  extends:
    - .pre_check_template
    - .doc-rules:build:docs-full
  script:
    - cd docs
    - ./check_lang_folder_sync.sh

.build_docs_template:
  image: $ESP_IDF_DOC_ENV_IMAGE
  stage: build_doc
  tags:
    - build_docs
  script:
    - if [ -n "${BREATHE_ALT_INSTALL_URL}" ]; then pip uninstall -y breathe && pip install -U ${BREATHE_ALT_INSTALL_URL}; fi
    - cd docs
    - build-docs -t $DOCTGT -bs $DOC_BUILDERS -l $DOCLANG build
  parallel:
    matrix:
      - DOCLANG: ["en", "zh_CN"]
        DOCTGT: ["esp32", "esp32s2", "esp32s3", "esp32c3", "esp32c2", "esp32c6", "esp32h2", "esp32p4"]
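# Note (added for clarity, not in the original file): this parallel matrix
# expands to 2 x 8 = 16 job instances, one per (DOCLANG, DOCTGT) pair, e.g.
#   .build_docs_template: [en, esp32], .build_docs_template: [zh_CN, esp32p4], ...
# each running build-docs once for that language/target combination.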

check_docs_gh_links:
  image: $ESP_IDF_DOC_ENV_IMAGE
  extends:
    - .pre_check_template
    - .doc-rules:build:docs-full
  script:
    - cd docs
    - build-docs gh-linkcheck

# Doc jobs have a lot of special cases, so we specify rules directly here
# instead of in dependencies.yml to simplify things
build_docs_html_full:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-full
  needs:
    - job: fast_template_app
      artifacts: false
      optional: true
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/*.txt
      - docs/_build/*/*/html/*
    expire_in: 4 days
  variables:
    DOC_BUILDERS: "html"

build_docs_html_full_prod:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-full-prod
  dependencies: [] # Stop build_docs jobs from downloading all previous jobs' artifacts
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/*.txt
      - docs/_build/*/*/html/*
    expire_in: 4 days
  variables:
    DOC_BUILDERS: "html"

build_docs_html_partial:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-partial
  needs:
    - job: fast_template_app
      artifacts: false
      optional: true
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/*.txt
      - docs/_build/*/*/html/*
    expire_in: 4 days
  variables:
    DOC_BUILDERS: "html"
  parallel:
    matrix:
      - DOCLANG: "en"
        DOCTGT: "esp32"
      - DOCLANG: "zh_CN"
        DOCTGT: "esp32p4"

build_docs_pdf:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-full
  needs:
    - job: fast_template_app
      artifacts: false
      optional: true
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/latex/*
    expire_in: 4 days
  variables:
    DOC_BUILDERS: "latex"

build_docs_pdf_prod:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-full-prod
  dependencies: [] # Stop build_docs jobs from downloading all previous jobs' artifacts
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/latex/*
    expire_in: 4 days
  variables:
    DOC_BUILDERS: "latex"

.deploy_docs_template:
  image: $ESP_IDF_DOC_ENV_IMAGE
  variables:
    DOCS_BUILD_DIR: "${IDF_PATH}/docs/_build/"
    PYTHONUNBUFFERED: 1
  stage: test_deploy
  tags:
    - deploy
    - shiny
  script:
    - add_doc_server_ssh_keys $DOCS_DEPLOY_PRIVATEKEY $DOCS_DEPLOY_SERVER $DOCS_DEPLOY_SERVER_USER
    - export GIT_VER=$(git describe --always ${PIPELINE_COMMIT_SHA} --)
    - deploy-docs

# stage: test_deploy
deploy_docs_preview:
  extends:
    - .deploy_docs_template
  rules:
    - <<: *if-label-build_docs
    - <<: *if-label-docs_full
    - <<: *if-dev-push
      changes: *patterns-docs-preview
  needs:
    - job: build_docs_html_partial
      optional: true
    - job: build_docs_html_full
      optional: true
    - job: build_docs_pdf
      optional: true
  variables:
    TYPE: "preview"
    # older branches use DOCS_DEPLOY_KEY, DOCS_SERVER, DOCS_SERVER_USER, DOCS_PATH for the preview server, so we keep these names for 'preview'
    DOCS_DEPLOY_PRIVATEKEY: "$DOCS_DEPLOY_KEY"
    DOCS_DEPLOY_SERVER: "$DOCS_SERVER"
    DOCS_DEPLOY_SERVER_USER: "$DOCS_SERVER_USER"
    DOCS_DEPLOY_PATH: "$DOCS_PATH"
    DOCS_DEPLOY_URL_BASE: "https://$DOCS_PREVIEW_SERVER_URL/docs/esp-idf"

# stage: post_deploy
deploy_docs_production:
  # The DOCS_PROD_* variables used by this job are "Protected", so these branches must all be marked "Protected" in GitLab settings
  extends:
    - .deploy_docs_template
    - .doc-rules:build:docs-full-prod
  stage: post_deploy
  dependencies: # set dependencies to null to avoid missing artifacts issue
  needs: # ensure this runs after push_to_github succeeded
    - build_docs_html_full_prod
    - build_docs_pdf_prod
    - job: push_to_github
      artifacts: false
  variables:
    TYPE: "preview"
    DOCS_DEPLOY_PRIVATEKEY: "$DOCS_PROD_DEPLOY_KEY"
    DOCS_DEPLOY_SERVER: "$DOCS_PROD_SERVER"
    DOCS_DEPLOY_SERVER_USER: "$DOCS_PROD_SERVER_USER"
    DOCS_DEPLOY_PATH: "$DOCS_PROD_PATH"
    DOCS_DEPLOY_URL_BASE: "https://docs.espressif.com/projects/esp-idf"
    DEPLOY_STABLE: 1

check_doc_links:
  extends:
    - .build_docs_template
    - .doc-rules:build:docs-full-prod
  stage: post_deploy
  needs:
    - job: deploy_docs_production
      artifacts: false
  tags: ["build", "amd64", "internet"]
  artifacts:
    when: always
    paths:
      - docs/_build/*/*/*.txt
      - docs/_build/*/*/linkcheck/*.txt
    expire_in: 1 week
  allow_failure: true
  script:
    - cd docs
    - build-docs -t $DOCTGT -l $DOCLANG linkcheck
@@ -1,346 +0,0 @@
.host_test_template:
  extends: .rules:test:host_test
  stage: host_test
  image: $ESP_ENV_IMAGE
  tags:
    - host_test
  dependencies: # set dependencies to null to avoid missing artifacts issue
  # run host_test jobs immediately, only after the upload cache jobs
  needs:
    - job: upload-pip-cache
      optional: true
      artifacts: false
    - job: upload-submodules-cache
      optional: true
      artifacts: false
    - pipeline_variables

test_nvs_on_host:
  extends: .host_test_template
  script:
    - cd components/nvs_flash/test_nvs_host
    - make test

test_nvs_coverage:
  extends:
    - .host_test_template
    - .rules:labels:nvs_coverage
  artifacts:
    paths:
      - components/nvs_flash/test_nvs_host/coverage_report
    expire_in: 1 week
  script:
    - cd components/nvs_flash/test_nvs_host
    - make coverage_report
  # the 'long' host tests take approx. 11 hours on our current runners; some margin is added here for possible CPU contention
  timeout: 18 hours

test_partition_table_on_host:
  extends: .host_test_template
  script:
    - cd components/partition_table/test_gen_esp32part_host
    - ./gen_esp32part_tests.py

test_ldgen_on_host:
  extends: .host_test_template
  script:
    - cd tools/ldgen/test
    - export PYTHONPATH=$PYTHONPATH:..
    - python -m unittest
  variables:
    LC_ALL: C.UTF-8

test_reproducible_build:
  extends: .host_test_template
  script:
    - ./tools/ci/test_reproducible_build.sh
  artifacts:
    when: on_failure
    paths:
      - "**/sdkconfig"
      - "**/build*/*.bin"
      - "**/build*/*.elf"
      - "**/build*/*.map"
      - "**/build*/flasher_args.json"
      - "**/build*/bootloader/*.bin"
      - "**/build*/partition_table/*.bin"
    expire_in: 1 week

test_spiffs_on_host:
  extends: .host_test_template
  script:
    - cd components/spiffs/test_spiffsgen/
    - ./test_spiffsgen.py

test_fatfsgen_on_host:
  extends: .host_test_template
  script:
    - cd components/fatfs/test_fatfsgen/
    - ./test_fatfsgen.py
    - ./test_wl_fatfsgen.py
    - ./test_fatfsparse.py

test_multi_heap_on_host:
  extends: .host_test_template
  script:
    - cd components/heap/test_multi_heap_host
    - ./test_all_configs.sh

test_certificate_bundle_on_host:
  extends: .host_test_template
  script:
    - cd components/mbedtls/esp_crt_bundle/test_gen_crt_bundle/
    - ./test_gen_crt_bundle.py

test_gdbstub_on_host:
  extends: .host_test_template
  script:
    - cd components/esp_gdbstub/test_gdbstub_host
    - make test

test_idf_py:
  extends: .host_test_template
  variables:
    LC_ALL: C.UTF-8
  script:
    - cd ${IDF_PATH}/tools/test_idf_py
    - ./test_idf_py.py
    - ./test_hints.py

# Test for creating the virtualenv. It must be invoked from Python, not from a virtualenv.
# Use the docker image's system python without any extra dependencies
test_idf_tools:
  extends:
    - .host_test_template
    - .before_script:minimal
  artifacts:
    when: on_failure
    paths:
      - tools/tools.new.json
      - tools/test_idf_tools/test_python_env_logs.txt
    expire_in: 1 week
  image:
    name: $ESP_ENV_IMAGE
    entrypoint: [""] # use system python3, no extra pip packages installed
  script:
    # Tools must be downloaded for testing
    - python3 ${IDF_PATH}/tools/idf_tools.py download required qemu-riscv32 qemu-xtensa
    - cd ${IDF_PATH}/tools/test_idf_tools
    - python3 -m pip install jsonschema
    - python3 ./test_idf_tools.py -v
    - python3 ./test_idf_tools_python_env.py

.test_efuse_table_on_host_template:
  extends: .host_test_template
  variables:
    IDF_TARGET: "esp32"
  artifacts:
    when: on_failure
    paths:
      - components/efuse/${IDF_TARGET}/esp_efuse_table.c
    expire_in: 1 week
  script:
    - cd ${IDF_PATH}/components/efuse/
    - ./efuse_table_gen.py -t "${IDF_TARGET}" ${IDF_PATH}/components/efuse/${IDF_TARGET}/esp_efuse_table.csv
    - git diff --exit-code -- ${IDF_TARGET}/esp_efuse_table.c || { echo "Differences found for the ${IDF_TARGET} target. Please run idf.py efuse-common-table and commit the changes."; exit 1; }
    - cd ${IDF_PATH}/components/efuse/test_efuse_host
    - ./efuse_tests.py

test_efuse_table_on_host_esp32:
  extends: .test_efuse_table_on_host_template

test_efuse_table_on_host_esp32s2:
  extends: .test_efuse_table_on_host_template
  variables:
    IDF_TARGET: esp32s2

test_efuse_table_on_host_esp32s3:
  extends: .test_efuse_table_on_host_template
  variables:
    IDF_TARGET: esp32s3

test_efuse_table_on_host_esp32c3:
  extends: .test_efuse_table_on_host_template
  variables:
    IDF_TARGET: esp32c3

test_efuse_table_on_host_esp32h2:
  extends: .test_efuse_table_on_host_template
  variables:
    IDF_TARGET: esp32h2

test_efuse_table_on_host_esp32c6:
  extends: .test_efuse_table_on_host_template
  variables:
    IDF_TARGET: esp32c6

test_logtrace_proc:
  extends: .host_test_template
  artifacts:
    when: on_failure
    paths:
      - tools/esp_app_trace/test/logtrace/output
      - tools/esp_app_trace/test/logtrace/.coverage
    expire_in: 1 week
  script:
    - cd ${IDF_PATH}/tools/esp_app_trace/test/logtrace
    - ./test.sh

test_sysviewtrace_proc:
  extends: .host_test_template
  artifacts:
    when: on_failure
    paths:
      - tools/esp_app_trace/test/sysview/output
      - tools/esp_app_trace/test/sysview/.coverage
    expire_in: 1 week
  script:
    - cd ${IDF_PATH}/tools/esp_app_trace/test/sysview
    - ./test.sh

test_mkdfu:
  extends: .host_test_template
  variables:
    LC_ALL: C.UTF-8
  script:
    - cd ${IDF_PATH}/tools/test_mkdfu
    - ./test_mkdfu.py

test_autocomplete:
  extends:
    - .host_test_template
  artifacts:
    when: on_failure
    paths:
      - ${IDF_PATH}/*.out
    expire_in: 1 week
  script:
    - ${IDF_PATH}/tools/ci/test_autocomplete.py

test_detect_python:
  extends:
    - .host_test_template
  script:
    - cd ${IDF_PATH}
    - shellcheck -s sh tools/detect_python.sh
    - shellcheck -s bash tools/detect_python.sh
    - shellcheck -s dash tools/detect_python.sh
    - "bash -c '. tools/detect_python.sh && echo Our Python: ${ESP_PYTHON?Python is not set}'"
    - "dash -c '. tools/detect_python.sh && echo Our Python: ${ESP_PYTHON?Python is not set}'"
    - "zsh -c '. tools/detect_python.sh && echo Our Python: ${ESP_PYTHON?Python is not set}'"
    - "fish -c 'source tools/detect_python.fish && echo Our Python: $ESP_PYTHON'"

test_split_path_by_spaces:
  extends: .host_test_template
  script:
    - cd ${IDF_PATH}/tools
    - python -m unittest split_paths_by_spaces.py

test_mqtt_on_host:
  extends: .host_test_template
  script:
    - cd ${IDF_PATH}/components/mqtt/esp-mqtt/host_test
    - idf.py build
    - LSAN_OPTIONS=verbosity=1:log_threads=1 build/host_mqtt_client_test.elf

test_transport_on_host:
  extends: .host_test_template
  script:
    - cd ${IDF_PATH}/components/tcp_transport/host_test
    - idf.py build
    - LSAN_OPTIONS=verbosity=1:log_threads=1 build/host_tcp_transport_test.elf

test_sockets_on_host:
  extends: .host_test_template
  script:
    # test the tcp-client example with system sockets
    - cd ${IDF_PATH}/examples/protocols/sockets/tcp_client
    - echo 'CONFIG_EXAMPLE_IPV4_ADDR="127.0.0.1"' >> sdkconfig.defaults
    - idf.py --preview set-target linux
    - idf.py build
    - timeout 5 ./build/tcp_client.elf >test.log || true
    - grep "Socket unable to connect" test.log
    # test the udp-client example with lwIP sockets
    - cd ${IDF_PATH}/examples/protocols/sockets/udp_client
    - idf.py --preview set-target linux
    - cat sdkconfig.ci.linux > sdkconfig
    - idf.py build
    - timeout 5 ./build/udp_client.elf >test.log || true
    - grep "Message sent" test.log

test_eh_frame_parser:
  extends: .host_test_template
  script:
    - cd ${IDF_PATH}/components/esp_system/test_eh_frame_parser
    - make
    - ./eh_frame_test

test_gen_soc_caps_kconfig:
  extends: .host_test_template
  script:
    - cd ${IDF_PATH}/tools/gen_soc_caps_kconfig/
    - ./test/test_gen_soc_caps_kconfig.py

test_pytest_qemu:
  extends:
    - .host_test_template
    - .before_script:build
  image: $QEMU_IMAGE
  artifacts:
    when: always
    paths:
      - XUNIT_RESULT.xml
      - pytest_embedded_log/
    reports:
      junit: XUNIT_RESULT.xml
    expire_in: 1 week
  allow_failure: true # IDFCI-1752
  parallel:
    matrix:
      - IDF_TARGET: [esp32, esp32c3]
  script:
    - run_cmd python tools/ci/ci_build_apps.py . -vv
      --target $IDF_TARGET
      --pytest-apps
      -m qemu
      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/get_known_failure_cases_file.py
    - run_cmd pytest
      --target $IDF_TARGET
      -m qemu
      --embedded-services idf,qemu
      --junitxml=XUNIT_RESULT.xml
      --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
      --app-info-filepattern \"list_job_*.txt\"
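# Local reproduction sketch (added note; assumed invocation, not part of the
# original file): the two-phase flow above can be mimicked on a dev machine with
#   python tools/ci/ci_build_apps.py . --target esp32 --pytest-apps -m qemu
#   pytest --target esp32 -m qemu --embedded-services idf,qemu
# i.e. first build the pytest apps for the target, then run the QEMU-marked
# tests against the prebuilt binaries.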

test_pytest_linux:
  extends:
    - .host_test_template
    - .before_script:build
  artifacts:
    when: always
    paths:
      - XUNIT_RESULT.xml
      - pytest_embedded_log/
      - "**/build*/build_log.txt"
    reports:
      junit: XUNIT_RESULT.xml
    expire_in: 1 week
  script:
    - run_cmd python tools/ci/ci_build_apps.py components examples tools/test_apps -vv
      --target linux
      --pytest-apps
      -m host_test
      --collect-app-info "list_job_${CI_JOB_NAME_SLUG}.txt"
      --modified-components ${MODIFIED_COMPONENTS}
      --modified-files ${MODIFIED_FILES}
    - python tools/ci/get_known_failure_cases_file.py
    - run_cmd pytest
      --target linux
      -m host_test
      --junitxml=XUNIT_RESULT.xml
      --ignore-result-files ${KNOWN_FAILURE_CASES_FILE_NAME}
      --app-info-filepattern \"list_job_*.txt\"
@@ -1,69 +0,0 @@
# generate the dynamic integration pipeline via the `idf-integration-ci` project

.patterns-integration_test: &patterns-integration_test
  # Add all possible patterns to make sure `gen_integration_pipeline` can be triggered.
  # Fine-grained control is applied while generating the pipeline;
  # see `patterns` in the `idf-integration-ci` project.
  - "components/**/*"
  - "tools/**/*"
  - ".gitlab-ci.yml"
  - ".gitlab/ci/common.yml"
  - ".gitlab/ci/integration_test.yml"
  - ".gitmodules"
  - "CMakeLists.txt"
  - "install.sh"
  - "export.sh"
  - "Kconfig"
  - "sdkconfig.rename"

# Simplify the rules
.integration_test_rules:
  rules:
    - if: '$CI_PIPELINE_SOURCE != "merge_request_event"'
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      changes: *patterns-integration_test
    # support triggering by CI labels
    - if: '$CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*target_test(?:,[^,\n\r]+)*$/i'
    - if: '$CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*integration_test(?:,[^,\n\r]+)*$/i'
    - if: '$CI_MERGE_REQUEST_LABELS =~ /^(?:[^,\n\r]+,)*build(?:,[^,\n\r]+)*$/i'
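# Example (added note, illustrative values): the label regexes above match a
# comma-separated label list that contains the label as a whole item anywhere,
# e.g. "docs,target_test,wifi" or "TARGET_TEST" (the trailing /i makes the
# match case-insensitive), but not "target_test_extra", since each list item
# must match the label exactly between commas.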

gen_integration_pipeline:
  extends:
    - .before_script:minimal
    - .integration_test_rules
  image: ${CI_INTEGRATION_ASSIGN_ENV}
  stage: assign_test
  cache: []
  tags:
    - assign_test
  variables:
    SUBMODULES_TO_FETCH: "none"
    GIT_LFS_SKIP_SMUDGE: 1
  needs:
    - job: fast_template_app
      artifacts: false
      optional: true
  artifacts:
    paths:
      - idf-integration-ci/child_pipeline/
    expire_in: 2 weeks
  script:
    - add_gitlab_ssh_keys
    - retry_failed git clone ${CI_GEN_INTEGRATION_PIPELINE_REPO} idf-integration-ci
    - python $CHECKOUT_REF_SCRIPT idf-integration-ci idf-integration-ci
    - cd idf-integration-ci
    - python tools/generate_child_pipeline.py -o child_pipeline/

child_integration_test_pipeline:
  extends:
    - .integration_test_rules
  stage: assign_test
  needs:
    - gen_integration_pipeline
  trigger:
    include:
      - artifact: idf-integration-ci/child_pipeline/pipeline.yml
        job: gen_integration_pipeline
    forward:
      yaml_variables: false
    strategy: depend
@@ -1,206 +0,0 @@
.pre_check_template:
  stage: pre_check
  image: $ESP_ENV_IMAGE
  tags:
    - host_test
  dependencies: []

check_pre_commit:
  extends:
    - .pre_check_template
    - .before_script:minimal
  image: $PRE_COMMIT_IMAGE
  needs:
    - pipeline_variables
  script:
    - fetch_submodules
    - pre-commit run --files $MODIFIED_FILES
    - pre-commit run --hook-stage post-commit validate-sbom-manifest

check_MR_style_dangerjs:
  extends:
    - .pre_check_template
  image: node:18.15.0-alpine3.16
  variables:
    DANGER_GITLAB_API_TOKEN: ${ESPCI_TOKEN}
    DANGER_GITLAB_HOST: ${GITLAB_HTTP_SERVER}
    DANGER_GITLAB_API_BASE_URL: ${GITLAB_HTTP_SERVER}/api/v4
    DANGER_JIRA_USER: ${DANGER_JIRA_USER}
    DANGER_JIRA_PASSWORD: ${DANGER_JIRA_PASSWORD}
  cache:
    # pull only for most use cases, since this is a cache dir;
    # the "push" policy is set only for "upload_cache" stage jobs
    key:
      files:
        - .gitlab/dangerjs/package-lock.json
    paths:
      - .gitlab/dangerjs/node_modules/
    policy: pull
  before_script:
    - cd .gitlab/dangerjs
    - npm install --no-progress --no-update-notifier # Install danger dependencies
  script:
    - npx danger ci --failOnErrors -v
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'

check_version:
  # Don't run this for feature/bugfix branches, so that it is possible to modify
  # esp_idf_version.h in a branch before tagging the next version.
  extends:
    - .pre_check_template
    - .rules:protected
  tags: [ brew, github_sync ]
  variables:
    # need a full clone to get the latest tag;
    # the --shallow-since=$(git log -1 --format=%as $LATEST_GIT_TAG) option is not accurate
    GIT_STRATEGY: fetch
    SUBMODULES_TO_FETCH: "none"
    GIT_DEPTH: 0
  script:
    - export IDF_PATH=$PWD
    - tools/ci/check_idf_version.sh

check_api_usage:
  extends: .pre_check_template
  script:
    - tools/ci/check_examples_rom_header.sh
    - tools/ci/check_api_violation.sh
    - tools/ci/check_examples_extra_component_dirs.sh

check_blobs:
  extends:
    - .pre_check_template
    - .rules:build:check
  variables:
    SUBMODULES_TO_FETCH: "components/esp_wifi/lib;components/esp_phy/lib;components/esp_coex/lib"
  script:
    # Check that the Wi-Fi library header files match between IDF and the version used when compiling the libraries
    - IDF_TARGET=esp32 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    - IDF_TARGET=esp32s2 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    - IDF_TARGET=esp32s3 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    - IDF_TARGET=esp32c2 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    - IDF_TARGET=esp32c3 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    - IDF_TARGET=esp32c6 $IDF_PATH/components/esp_wifi/test_md5/test_md5.sh
    # Check that the coexistence library header files match between IDF and the version used when compiling the libraries
    - IDF_TARGET=esp32 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32s2 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32s3 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32c2 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32c3 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32c6 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    - IDF_TARGET=esp32h2 $IDF_PATH/components/esp_coex/test_md5/test_md5.sh
    # Check whether the Wi-Fi, PHY and BT blobs contain references to specific symbols
    - bash $IDF_PATH/tools/ci/check_blobs.sh

check_public_headers:
  extends:
    - .pre_check_template
    - .rules:build:check
  script:
    - IDF_TARGET=esp32 python tools/ci/check_public_headers.py --jobs 4 --prefix xtensa-esp32-elf-
    - IDF_TARGET=esp32s2 python tools/ci/check_public_headers.py --jobs 4 --prefix xtensa-esp32s2-elf-
    - IDF_TARGET=esp32s3 python tools/ci/check_public_headers.py --jobs 4 --prefix xtensa-esp32s3-elf-
    - IDF_TARGET=esp32c3 python tools/ci/check_public_headers.py --jobs 4 --prefix riscv32-esp-elf-
    - IDF_TARGET=esp32c2 python tools/ci/check_public_headers.py --jobs 4 --prefix riscv32-esp-elf-
    - IDF_TARGET=esp32c6 python tools/ci/check_public_headers.py --jobs 4 --prefix riscv32-esp-elf-
    - IDF_TARGET=esp32h2 python tools/ci/check_public_headers.py --jobs 4 --prefix riscv32-esp-elf-
    - IDF_TARGET=esp32p4 python tools/ci/check_public_headers.py --jobs 4 --prefix riscv32-esp-elf-

check_chip_support_components:
  extends:
    - .pre_check_template
    - .rules:build:check
  artifacts:
    when: on_failure
    paths:
      - esp_hw_support_part.h
      - bootloader_support_part.h
    expire_in: 1 week
  script:
    - python tools/ci/check_soc_headers_leak.py
    - find ${IDF_PATH}/components/soc/*/include/soc/ -name "*_struct.h" -print0 | xargs -0 -n1 ./tools/ci/check_soc_struct_headers.py
    - tools/ci/check_esp_memory_utils_headers.sh

check_esp_err_to_name:
  extends:
    - .pre_check_template
    - .rules:build:check
  artifacts:
    when: on_failure
    paths:
      - components/esp_common/esp_err_to_name.c
    expire_in: 1 week
  script:
    - cd ${IDF_PATH}/tools/
    - ./gen_esp_err_to_name.py
    - git diff --exit-code -- ../components/esp_common/src/esp_err_to_name.c || { echo 'Differences found. Please run gen_esp_err_to_name.py and commit the changes.'; exit 1; }

check_esp_system:
  extends:
    - .pre_check_template
    - .rules:build
  script:
    - python components/esp_system/check_system_init_priorities.py

# For release tag pipelines only: make sure the tag was created with 'git tag -a' so it will update
# the version returned by 'git describe'.
# Don't forget to update the env var `LATEST_GIT_TAG` in .gitlab/ci/common.yml
check_version_tag:
  extends:
    - .pre_check_template
    - .rules:tag:release
  tags: [ brew, github_sync ]
  variables:
    # need a full clone to get the latest tag;
    # the --shallow-since=$(git log -1 --format=%as $LATEST_GIT_TAG) option is not accurate
    GIT_STRATEGY: fetch
    SUBMODULES_TO_FETCH: "none"
    GIT_DEPTH: 0
  script:
    - (git cat-file -t $CI_COMMIT_REF_NAME | grep tag) || (echo "ESP-IDF versions must be annotated tags." && exit 1)

check_artifacts_expire_time:
  extends: .pre_check_template
  script:
    # check that we have set an expiry time for all artifacts
    - python tools/ci/check_artifacts_expire_time.py

check_test_scripts_build_test_rules:
  extends:
    - .pre_check_template
    - .before_script:build
  script:
    # install the required pytest-related packages
    - run_cmd bash install.sh --enable-pytest
    - python tools/ci/check_build_test_rules.py check-test-scripts examples/ tools/test_apps components

check_configure_ci_environment_parsing:
  extends:
    - .pre_check_template
    - .before_script:build
    - .rules:build
  script:
    - cd tools/ci
    - python -m unittest ci_build_apps.py

pipeline_variables:
  extends:
    - .pre_check_template
    - .before_script:fetch:git_diff
  tags:
    - build
  script:
    - MODIFIED_FILES=$(echo "$GIT_DIFF_OUTPUT" | xargs)
    - echo "MODIFIED_FILES=$MODIFIED_FILES" >> pipeline.env
    - echo "MODIFIED_COMPONENTS=$(run_cmd python tools/ci/ci_get_mr_info.py components --modified-files $MODIFIED_FILES | xargs)" >> pipeline.env
    - |
      if echo "$CI_MERGE_REQUEST_LABELS" | egrep "(^|,)BUILD_AND_TEST_ALL_APPS(,|$)"; then
        echo "BUILD_AND_TEST_ALL_APPS=1" >> pipeline.env
      fi
    - cat pipeline.env
    - python tools/ci/artifacts_handler.py upload --type modified_files_and_components_report
  artifacts:
    reports:
      dotenv: pipeline.env
    expire_in: 4 days
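# Illustration (added note, hypothetical values): the dotenv report above makes
# variables such as
#   MODIFIED_FILES="components/esp_wifi/src/foo.c docs/index.rst"
#   MODIFIED_COMPONENTS="esp_wifi"
# available to every later job that lists pipeline_variables in its needs.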
.gitlab/ci/rules.yml: 2576 lines
File diff suppressed because it is too large
@@ -1,117 +0,0 @@
# pre_check stage
clang_tidy_check:
  extends:
    - .pre_check_template
    - .rules:patterns:clang_tidy
  artifacts:
    paths:
      - clang_tidy_reports/
    when: always
    expire_in: 1 day
  variables:
    IDF_TOOLCHAIN: clang
  script:
    - run_cmd idf_clang_tidy $(cat tools/ci/clang_tidy_dirs.txt | xargs)
      --output-path clang_tidy_reports
      --limit-file tools/ci/static-analysis-rules.yml
      --xtensa-include-dir

check_pylint:
  extends:
    - .pre_check_template
    - .rules:patterns:python-files
  needs:
    - pipeline_variables
  artifacts:
    when: always
    reports:
      codequality: pylint.json
    expire_in: 1 week
  script:
    - |
      if [ -n "$CI_MERGE_REQUEST_IID" ]; then
        export files=$(echo "$GIT_DIFF_OUTPUT" | grep ".py$" | xargs);
      else
        export files=$(git ls-files "*.py" | xargs);
      fi
    - if [ -z "$files" ]; then echo "No python files found"; exit 0; fi
    - run_cmd pylint --exit-zero --load-plugins=pylint_gitlab --output-format=gitlab-codeclimate:pylint.json $files

# build stage
# The SonarQube-related jobs are placed here for the following reason:
# there are two jobs, code_quality_check and code_quality_report.
#
# code_quality_check analyzes the code changes between your MR and the
# code stored on the SonarQube server. The analysis result is only shown in
# the comments under the MR and is not transferred to the server.
#
# code_quality_report analyzes and transfers both the newly added code
# and the analysis result to the server.
#
# It is put up front to ensure that newly merged code reaches the SonarQube
# server ASAP, in order to avoid reporting unrelated code issues.
.sonar_scan_template:
  stage: build
  extends: .pre_check_template
  image:
    name: $SONARQUBE_SCANNER_IMAGE
  before_script:
    - source tools/ci/utils.sh
    - export PYTHONPATH="$CI_PROJECT_DIR/tools:$CI_PROJECT_DIR/tools/ci/python_packages:$PYTHONPATH"
    - fetch_submodules
    # Exclude the submodules; all paths end with /**
    - submodules=$(get_all_submodules)
    # get all exclude paths specified in tools/ci/sonar_exclude_list.txt | ignore lines starting with # | xargs | replace every <space> with a <comma>
    - custom_excludes=$(cat $CI_PROJECT_DIR/tools/ci/sonar_exclude_list.txt | grep -v '^#' | xargs | sed -e 's/ /,/g')
    # Exclude the report dir as well
    - export EXCLUSIONS="$custom_excludes,$submodules"
    - export SONAR_SCANNER_OPTS="-Xmx2048m"
  variables:
    GIT_DEPTH: 0
    REPORT_PATTERN: clang_tidy_reports/*.txt
  artifacts:
    when: always
    paths:
      - $REPORT_PATTERN
    expire_in: 1 week
  dependencies: # This is not a hard dependency and may be skipped when only Python files change, so we do not use "needs" here.
    - clang_tidy_check
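# Illustration (added note, hypothetical file contents): if sonar_exclude_list.txt held
#   components/foo/**
#   # a comment line
#   examples/bar/**
# then custom_excludes would become "components/foo/**,examples/bar/**", and
# EXCLUSIONS would append every submodule path (each suffixed with /**) to it.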

code_quality_check:
  extends:
    - .sonar_scan_template
    - .rules:patterns:static-code-analysis-preview
  allow_failure: true # since the exit code indicates the code analysis result,
                      # we don't want to block CI when critical issues are found
  script:
    - export CI_MERGE_REQUEST_COMMITS=$(python ${CI_PROJECT_DIR}/tools/ci/ci_get_mr_info.py commits --src-branch ${CI_COMMIT_REF_NAME} | tr '\n' ',')
    # test whether this branch has a merge request; if not, exit 0
    - test -n "$CI_MERGE_REQUEST_IID" || exit 0
    - test -n "$CI_MERGE_REQUEST_COMMITS" || exit 0
    - sonar-scanner
      -Dsonar.analysis.mode=preview
      -Dsonar.branch.name=$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
      -Dsonar.cxx.clangtidy.reportPath=$REPORT_PATTERN
      -Dsonar.exclusions=$EXCLUSIONS
      -Dsonar.gitlab.ci_merge_request_iid=$CI_MERGE_REQUEST_IID
      -Dsonar.gitlab.commit_sha=$CI_MERGE_REQUEST_COMMITS
      -Dsonar.gitlab.merge_request_discussion=true
      -Dsonar.gitlab.ref_name=$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
      -Dsonar.host.url=$SONAR_HOST_URL
      -Dsonar.login=$SONAR_LOGIN

code_quality_report:
  extends:
    - .sonar_scan_template
    - .rules:protected
  allow_failure: true # since the exit code indicates the code analysis result,
                      # we don't want to block CI when critical issues are found
  script:
    - sonar-scanner
      -Dsonar.branch.name=$CI_COMMIT_REF_NAME
      -Dsonar.cxx.clangtidy.reportPath=$REPORT_PATTERN
      -Dsonar.exclusions=$EXCLUSIONS
      -Dsonar.gitlab.commit_sha=$PIPELINE_COMMIT_SHA
      -Dsonar.gitlab.ref_name=$CI_COMMIT_REF_NAME
      -Dsonar.host.url=$SONAR_HOST_URL
      -Dsonar.login=$SONAR_LOGIN
File diff suppressed because it is too large
@@ -1,74 +0,0 @@
# Pull only, for most cache use cases;
# the "push" policy is set only for the jobs in this file.
# The cache is updated when files matching the specified patterns change.

.upload_cache_template:
  stage: upload_cache
  image: $ESP_ENV_IMAGE

upload-pip-cache:
  extends:
    - .upload_cache_template
    - .before_script:minimal
    - .rules:patterns:python-cache
  tags:
    - $GEO
    - cache
  cache:
    key: pip-cache
    paths:
      - .cache/pip
    policy: push
  script:
    - rm -rf .cache/pip # clear old packages
    - bash install.sh --enable-ci --enable-pytest
  parallel:
    matrix:
      - GEO: [ 'shiny', 'brew' ]

upload-submodules-cache:
  extends:
    - .upload_cache_template
    - .before_script:minimal
    - .rules:patterns:submodule
  tags:
    - $GEO
    - cache
  cache:
    key: submodule-cache
    paths:
      - .cache/submodule_archives
    policy: push
  script:
    # use the default gitlab server
    - unset LOCAL_GITLAB_HTTPS_HOST
    - rm -rf .cache/submodule_archives # clear old submodule archives
    - add_gitlab_ssh_keys
    - fetch_submodules
  parallel:
    matrix:
      - GEO: [ 'shiny', 'brew' ]

upload-danger-npm-cache:
  stage: upload_cache
  image: node:18.15.0-alpine3.16
  extends:
    - .rules:patterns:dangerjs
  tags:
    - $GEO
    - cache
  cache:
    key:
      files:
        - .gitlab/dangerjs/package-lock.json
    paths:
      - .gitlab/dangerjs/node_modules/
    policy: push
  before_script:
    - echo "Skip before scripts ...."
  script:
    - cd .gitlab/dangerjs
    - npm install --no-progress --no-update-notifier
  parallel:
    matrix:
      - GEO: [ 'shiny', 'brew' ]
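# Consumer-side sketch (added note; assumed shape, not part of this file): jobs
# elsewhere reuse these caches with the default pull policy, e.g.
#   cache:
#     key: pip-cache
#     paths:
#       - .cache/pip
#     policy: pull
# so only the jobs above ever write the shared cache, keeping it consistent.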
@@ -1,172 +0,0 @@
const {
    minimumSummaryChars,
    maximumSummaryChars,
    maximumBodyLineChars,
    allowedTypes,
} = require("./mrCommitsConstants.js");
const { gptStandardModelTokens } = require("./mrCommitsConstants.js");

const { ChatPromptTemplate } = require("langchain/prompts");
const { SystemMessagePromptTemplate } = require("langchain/prompts");
const { LLMChain } = require("langchain/chains");
const { ChatOpenAI } = require("langchain/chat_models/openai");
const openAiTokenCount = require("openai-gpt-token-counter");

module.exports = async function () {
    let outputDangerMessage = `\n\nPerhaps you could use an AI-generated suggestion for your commit message. Here is one `;

    let mrDiff = await getMrGitDiff(danger.git.modified_files);
    const mrCommitMessages = getCommitMessages(danger.gitlab.commits);
    const inputPrompt = getInputPrompt();
    const inputLlmTokens = getInputLlmTokens(
        inputPrompt,
        mrDiff,
        mrCommitMessages
    );
    console.log(`Input tokens for LLM: ${inputLlmTokens}`);

    if (inputLlmTokens >= gptStandardModelTokens) {
        mrDiff = ""; // If the input exceeds the standard model's context window, drop the diff and use only the current commit messages
        outputDangerMessage += `(based only on your current commit messages, the git-diff of this MR is too big (${inputLlmTokens} tokens) for the AI models):\n\n`;
    } else {
        outputDangerMessage += `(based on your MR git-diff and your current commit messages):\n\n`;
    }

    // Generate the AI commit message
    let generatedCommitMessage = "";
    try {
        const rawCommitMessage = await createAiGitMessage(
            inputPrompt,
            mrDiff,
            mrCommitMessages
        );
        generatedCommitMessage = postProcessCommitMessage(rawCommitMessage);
    } catch (error) {
        console.error("Error in generating AI commit message: ", error);
        outputDangerMessage +=
            "\nCould not generate commit message due to an error.\n";
    }

    // Append closing statements ("Closes https://github.com/espressif/esp-idf/issues/XXX") to the generated commit message
    let closingStatements = extractClosingStatements(mrCommitMessages);
    if (closingStatements.length > 0) {
        generatedCommitMessage += "\n\n" + closingStatements;
    }

    // Add the generated git message, formatted as a markdown code block
    outputDangerMessage += `\n\`\`\`\n${generatedCommitMessage}\n\`\`\`\n`;
    outputDangerMessage +=
        "\n**NOTE: AI-generated suggestions may not always be correct, please review the suggestion before using it.**"; // Add disclaimer
    return outputDangerMessage;
};

async function getMrGitDiff(mrModifiedFiles) {
    const fileDiffs = await Promise.all(
        mrModifiedFiles.map((file) => danger.git.diffForFile(file))
    );
    return fileDiffs.map((fileDiff) => fileDiff.diff.trim()).join(" ");
}

function getCommitMessages(mrCommits) {
    return mrCommits.map((commit) => commit.message);
}

function getInputPrompt() {
    return `You are a helpful assistant that creates suggestions for a single git commit message, that the user can use to describe all the changes in their merge request.
Use the git diff: {mrDiff} and the user's current commit messages: {mrCommitMessages} to get the changes made in the commit.

Output should be a git commit message following the conventional commit format.

Output only the git commit message in the desired format, without comments and other text.

Do not include the closing statements ("Closes https://....") in the output.

Here are the strict rules you must follow:

- Avoid mentioning any JIRA tickets (e.g., "Closes JIRA-123").
- Be specific. Don't use vague terms (e.g., "some checks", "add new ones", "few changes").
- The commit message structure should be: <type><(scope/component)>: <summary>
- Types allowed: ${allowedTypes.join(", ")}
- If 'scope/component' is used, it must start with a lowercase letter.
- The 'summary' must NOT end with a period.
- The 'summary' must be between ${minimumSummaryChars} and ${maximumSummaryChars} characters long.

If a 'body' of the commit message is used:

- Each line must be no longer than ${maximumBodyLineChars} characters.
- It must be separated from the 'summary' by a blank line.

Examples of correct commit messages:

- With scope and body:
    fix(freertos): Fix startup timeout issue

    This is a text of commit message body...
    - adds support for wifi6
    - adds validations for logging script

- Without scope and body:
    ci: added target test job for ESP32-Wifi6`;
}

function getInputLlmTokens(inputPrompt, mrDiff, mrCommitMessages) {
    const mrCommitMessagesTokens = openAiTokenCount(mrCommitMessages.join(" "));
    const gitDiffTokens = openAiTokenCount(mrDiff);
    const promptTokens = openAiTokenCount(inputPrompt);
    return mrCommitMessagesTokens + gitDiffTokens + promptTokens;
}

async function createAiGitMessage(inputPrompt, mrDiff, mrCommitMessages) {
    const chat = new ChatOpenAI({ engine: "gpt-3.5-turbo", temperature: 0 });
    const chatPrompt = ChatPromptTemplate.fromPromptMessages([
        SystemMessagePromptTemplate.fromTemplate(inputPrompt),
    ]);
    const chain = new LLMChain({ prompt: chatPrompt, llm: chat });

    const response = await chain.call({
        mrDiff: mrDiff,
        mrCommitMessages: mrCommitMessages,
    });
    return response.text;
}

function postProcessCommitMessage(rawCommitMessage) {
    // Split the result into lines
    let lines = rawCommitMessage.split("\n");

    // Format each line
    for (let i = 0; i < lines.length; i++) {
        let line = lines[i].trim();

        // If the line is longer than maximumBodyLineChars, split it into multiple lines
        if (line.length > maximumBodyLineChars) {
            let newLines = [];
            while (line.length > maximumBodyLineChars) {
                let lastSpaceIndex = line.lastIndexOf(
                    " ",
                    maximumBodyLineChars
                );
                newLines.push(line.substring(0, lastSpaceIndex));
                line = line.substring(lastSpaceIndex + 1);
            }
            newLines.push(line);
            lines[i] = newLines.join("\n");
        }
    }

    // Join the lines back into a single string with a newline between each one
    return lines.join("\n");
}
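// Usage sketch (added for illustration, not part of the original file):
// postProcessCommitMessage("feat(wifi): add roaming support\n" + "word ".repeat(40))
// re-wraps the long body line at the last space before maximumBodyLineChars,
// so every line of the returned message stays within the configured limit.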

function extractClosingStatements(mrCommitMessages) {
    let closingStatements = [];
    mrCommitMessages.forEach((message) => {
        const lines = message.split("\n");
        lines.forEach((line) => {
            if (line.startsWith("Closes")) {
                closingStatements.push(line);
            }
        });
    });
    return closingStatements.join("\n");
}
@@ -1,56 +0,0 @@
let outputStatuses = [];

/**
 * Logs the status of a rule with padded formatting and stores it in the `outputStatuses` array.
 * If the rule already exists in the array, its status is updated.
 * @param message The name of the rule
 * @param status The output (exit) status of the rule
 */
function recordRuleExitStatus(message, status) {
    // Check if the rule already exists in the array
    const existingRecord = outputStatuses.find(
        (rule) => rule.message === message
    );

    if (existingRecord) {
        // Update the status of the existing rule
        existingRecord.status = status;
    } else {
        // If the rule doesn't exist, add it to the array
        outputStatuses.push({ message, status });
    }
}

/**
 * Displays all the rule output statuses stored in the `outputStatuses` array.
 * Filters out any empty lines, sorts them alphabetically, and prints the statuses
 * with a header and separator.
 * These statuses are later displayed in the CI job tracelog.
 */
function displayAllOutputStatuses() {
    const lineLength = 100;
    const sortedStatuses = outputStatuses.sort((a, b) =>
        a.message.localeCompare(b.message)
    );

    const formattedLines = sortedStatuses.map((statusObj) => {
        const paddingLength =
            lineLength - statusObj.message.length - statusObj.status.length;
        const paddedMessage = statusObj.message.padEnd(
            statusObj.message.length + paddingLength,
            "."
        );
        return `${paddedMessage} ${statusObj.status}`;
    });

    console.log(
        "DangerJS checks (rules) output states:\n" + "=".repeat(lineLength + 2)
    );
    console.log(formattedLines.join("\n"));
    console.log("=".repeat(lineLength + 2));
}
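// Example tracelog output (added for illustration; values are hypothetical):
// DangerJS checks (rules) output states:
// ======================================================================================================
// Commit messages style........................................................................ Passed
// Merge request area labels.................................................................... Failed
// ======================================================================================================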

module.exports = {
    displayAllOutputStatuses,
    recordRuleExitStatus,
};
@@ -1,51 +0,0 @@
const { displayAllOutputStatuses } = require("./configParameters.js");

/*
 * Modules with checks are stored in ".gitlab/dangerjs/<module_name>". To import them, use paths relative to "dangerfile.js".
 */
async function runChecks() {
    // Checks for the merge request title
    require("./mrTitleNoDraftOrWip.js")();

    // Checks for the merge request description
    require("./mrDescriptionLongEnough.js")();
    require("./mrDescriptionReleaseNotes.js")();
    await require("./mrDescriptionJiraLinks.js")();

    // Checks for documentation
    await require("./mrDocsTranslation.js")();

    // Checks for MR commits
    require("./mrCommitsTooManyCommits.js")();
    await require("./mrCommitsCommitMessage.js")();
    require("./mrCommitsEmail.js")();

    // Checks for MR code
    require("./mrSizeTooLarge.js")();

    // Checks for MR area labels
    await require("./mrAreaLabels.js")();

    // Checks for the source branch name
    require("./mrSourceBranchName.js")();

    // Show the status of each individual DangerJS check - visible in the CI job tracelog
    displayAllOutputStatuses();

    // Add a success log if there are no issues
    if (
        results.fails.length === 0 &&
        results.warnings.length === 0 &&
        results.messages.length === 0
    ) {
        return message("🎉 Good Job! All checks are passing!");
    }
}

runChecks();

// Add a retry link
const retryLink = `${process.env.DANGER_GITLAB_HOST}/${process.env.CI_PROJECT_PATH}/-/jobs/${process.env.CI_JOB_ID}`;
markdown(
    `***\n#### :repeat: You can enforce automatic MR checks by retrying the [DangerJS job](${retryLink})\n***`
);
@@ -1,27 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if the MR has area labels (light blue labels)
 *
 * @dangerjs WARN
 */
module.exports = async function () {
    const ruleName = "Merge request area labels";
    const projectId = 103; // ESP-IDF
    const areaLabelColor = /^#d2ebfa$/i; // match the color code (case-insensitive)
    const projectLabels = await danger.gitlab.api.Labels.all(projectId); // Get all project labels
    const areaLabels = projectLabels
        .filter((label) => areaLabelColor.test(label.color))
        .map((label) => label.name); // Keep only the area labels
    const mrLabels = danger.gitlab.mr.labels; // Get the MR labels

    if (!mrLabels.some((label) => areaLabels.includes(label))) {
        recordRuleExitStatus(ruleName, "Failed");
        return warn(
            `Please add some [area labels](${process.env.DANGER_GITLAB_HOST}/espressif/esp-idf/-/labels) to this MR.`
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
@@ -1,165 +0,0 @@
const {
    minimumSummaryChars,
    maximumSummaryChars,
    maximumBodyLineChars,
    allowedTypes,
} = require("./mrCommitsConstants.js");
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check that commit messages follow the Espressif ESP-IDF project's rules for git commit messages.
 *
 * @dangerjs WARN
 */
module.exports = async function () {
    const ruleName = "Commit messages style";
    const mrCommits = danger.gitlab.commits;
    const lint = require("@commitlint/lint").default;

    const lintingRules = {
        // rule definition: [(0-1 = off/on), (always/never = must be/mustn't be), (value)]
        "body-max-line-length": [1, "always", maximumBodyLineChars], // Max length of a body line
        "footer-leading-blank": [1, "always"], // Always have a blank line before the footer section
        "footer-max-line-length": [1, "always", maximumBodyLineChars], // Max length of a footer line
        "subject-max-length": [1, "always", maximumSummaryChars], // Max length of the "summary"
        "subject-min-length": [1, "always", minimumSummaryChars], // Min length of the "summary"
        "scope-case": [1, "always", "lower-case"], // "scope/component" must start with lower-case
        "subject-full-stop": [1, "never", "."], // "summary" must not end with a full stop (period)
        "subject-empty": [1, "never"], // "summary" is mandatory
        "type-case": [1, "always", "lower-case"], // "type/action" must start with lower-case
        "type-empty": [1, "never"], // "type/action" is mandatory
        "type-enum": [1, "always", allowedTypes], // "type/action" must be one of the allowed types
        "body-leading-blank": [1, "always"], // Always have a blank line before the body section
    };
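    // Illustration (added comment, hypothetical commit): linting "Fix stuff."
    // against these rules is expected to warn on subject-min-length (shorter
    // than minimumSummaryChars), type-empty (no "<type>:" prefix) and
    // subject-full-stop (trailing period).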
|
||||
// Switcher for AI suggestions (for poor messages)
|
||||
let generateAISuggestion = false;
|
||||
|
||||
// Search for the messages in each commit
|
||||
let issuesAllCommitMessages = [];
|
||||
|
||||
for (const commit of mrCommits) {
|
||||
        const commitMessage = commit.message;
        const commitMessageTitle = commit.title;

        let issuesSingleCommitMessage = [];
        let reportSingleCommitMessage = "";

        // Check if the commit message contains any Jira ticket references
        const jiraTicketRegex = /[A-Z0-9]+-[0-9]+/g;
        const jiraTicketMatches = commitMessage.match(jiraTicketRegex);
        if (jiraTicketMatches) {
            const jiraTicketNames = jiraTicketMatches.join(", ");
            issuesSingleCommitMessage.push(
                `- probably contains Jira ticket reference (\`${jiraTicketNames}\`). Please remove Jira tickets from commit messages.`
            );
        }

        // Lint commit messages with @commitlint (Conventional Commits style)
        const result = await lint(commit.message, lintingRules);

        for (const warning of result.warnings) {
            // Custom messages for each rule with terminology used by the Espressif conventional commits guide
            switch (warning.name) {
                case "subject-max-length":
                    issuesSingleCommitMessage.push(
                        `- *summary* appears to be too long`
                    );
                    break;
                case "type-empty":
                    issuesSingleCommitMessage.push(
                        `- *type/action* looks empty`
                    );
                    break;
                case "type-case":
                    issuesSingleCommitMessage.push(
                        `- *type/action* should start with a lowercase letter`
                    );
                    break;
                case "scope-empty":
                    issuesSingleCommitMessage.push(
                        `- *scope/component* looks empty`
                    );
                    break;
                case "scope-case":
                    issuesSingleCommitMessage.push(
                        `- *scope/component* should be lowercase without whitespace, allowed special characters are \`_\` \`/\` \`.\` \`,\` \`*\` \`-\``
                    );
                    break;
                case "subject-empty":
                    issuesSingleCommitMessage.push(`- *summary* looks empty`);
                    generateAISuggestion = true;
                    break;
                case "subject-min-length":
                    issuesSingleCommitMessage.push(
                        `- *summary* looks too short`
                    );
                    generateAISuggestion = true;
                    break;
                case "subject-case":
                    issuesSingleCommitMessage.push(
                        `- *summary* should start with a capital letter`
                    );
                    break;
                case "subject-full-stop":
                    issuesSingleCommitMessage.push(
                        `- *summary* should not end with a period (full stop)`
                    );
                    break;
                case "type-enum":
                    issuesSingleCommitMessage.push(
                        `- *type/action* should be one of [${allowedTypes
                            .map((type) => `\`${type}\``)
                            .join(", ")}]`
                    );
                    break;

                default:
                    issuesSingleCommitMessage.push(`- ${warning.message}`);
            }
        }

        if (issuesSingleCommitMessage.length) {
            reportSingleCommitMessage = `- the commit message \`"${commitMessageTitle}"\`:\n${issuesSingleCommitMessage
                .map((message) => `  ${message}`) // Indent each issue by 2 spaces
                .join("\n")}`;
            issuesAllCommitMessages.push(reportSingleCommitMessage);
        }
    }

    // Create report
    if (issuesAllCommitMessages.length) {
        issuesAllCommitMessages.sort();
        const basicTips = [
            `- correct format of commit message should be: \`<type/action>(<scope/component>): <summary>\`, for example \`fix(esp32): Fixed startup timeout issue\``,
            `- allowed types are: \`${allowedTypes.join(", ")}\``,
            `- a sufficiently descriptive message summary should be between ${minimumSummaryChars} and ${maximumSummaryChars} characters and start with an uppercase letter`,
            `- avoid Jira references in commit messages (unavailable/irrelevant for our customers)`,
            `- follow this [commit messages guide](${process.env.DANGER_GITLAB_HOST}/espressif/esp-idf/-/wikis/dev-proc/Commit-messages)`,
        ];
        let dangerMessage = `\n**Some issues found for the commit messages in this MR:**\n${issuesAllCommitMessages.join(
            "\n"
        )}
\n***
\n**Please consider updating these commit messages** - here are some basic tips:\n${basicTips.join(
            "\n"
        )}
\n \`TIP:\` You can install commit-msg pre-commit hook (\`pre-commit install -t pre-commit -t commit-msg\`) to run this check when committing.
\n***
`;

        if (generateAISuggestion) {
            // Create an AI-generated suggestion for the git commit message based on the git diff and the current commit messages
            const AImessageSuggestion =
                await require("./aiGenerateGitMessage.js")();
            dangerMessage += AImessageSuggestion;
        }

        recordRuleExitStatus(ruleName, "Failed");
        return warn(dangerMessage);
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
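For context, the `result` object consumed by the switch above comes from `@commitlint/lint`; a minimal sketch of its shape follows (the commit message, rule levels, and warning texts below are hypothetical examples, and the actual warnings depend on `lintingRules`):

// Sketch of what lint() might return for the message "fix: updated the docs."
// Shape follows @commitlint/lint: { valid, errors, warnings, input },
// where each outcome is { level, valid, name, message }.
const exampleResult = {
    valid: true, // level-1 rule hits are warnings and do not make the result invalid
    input: "fix: updated the docs.",
    errors: [],
    warnings: [
        { level: 1, valid: false, name: "subject-full-stop", message: "subject may not end with full stop '.'" },
        { level: 1, valid: false, name: "subject-case", message: "subject must not be lower-case" },
    ],
};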
@@ -1,16 +0,0 @@
module.exports = {
    gptStandardModelTokens: 4096,
    minimumSummaryChars: 20,
    maximumSummaryChars: 72,
    maximumBodyLineChars: 100,
    allowedTypes: [
        "change",
        "ci",
        "docs",
        "feat",
        "fix",
        "refactor",
        "remove",
        "revert",
    ],
};
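A hypothetical consumer of this constants module (the file name and the variable names here are assumptions for illustration) could build the commitlint rule set used by the commit-message rule above from these shared values:

// Sketch, assuming the module above lives next to the rule files as mrCommitsConstants.js
const {
    minimumSummaryChars,
    maximumSummaryChars,
    allowedTypes,
} = require("./mrCommitsConstants.js");

// commitlint rules are [level, applicability, value] tuples;
// level 1 reports a warning, which is what the switch above consumes
const lintingRules = {
    "type-enum": [1, "always", allowedTypes],
    "subject-min-length": [1, "always", minimumSummaryChars],
    "subject-max-length": [1, "always", maximumSummaryChars],
};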
@@ -1,23 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if the author is accidentally making a commit using a personal email
 *
 * @dangerjs INFO
 */
module.exports = function () {
    const ruleName = 'Commits from outside Espressif';
    const mrCommitAuthorEmails = danger.gitlab.commits.map(commit => commit.author_email);
    const mrCommitCommitterEmails = danger.gitlab.commits.map(commit => commit.committer_email);
    const emailPattern = /@espressif\.com$/;
    const filteredEmails = [...mrCommitAuthorEmails, ...mrCommitCommitterEmails].filter((email) => !emailPattern.test(email));
    if (filteredEmails.length) {
        recordRuleExitStatus(ruleName, "Failed");
        return message(
            `Some of the commits were authored or committed by developers outside Espressif: ${filteredEmails.join(', ')}. Please check if this is expected.`
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, 'Passed');
};
@@ -1,22 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check that the MR does not contain an excessive number of commits (suggest squashing)
 *
 * @dangerjs INFO
 */
module.exports = function () {
    const ruleName = 'Number of commits in merge request';
    const tooManyCommitThreshold = 2; // above this number of commits, squashing commits is suggested
    const mrCommits = danger.gitlab.commits;

    if (mrCommits.length > tooManyCommitThreshold) {
        recordRuleExitStatus(ruleName, "Passed (with suggestions)");
        return message(
            `You might consider squashing your ${mrCommits.length} commits (simplifying branch history).`
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, 'Passed');
};
@@ -1,238 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/** Check that there are valid JIRA links in MR description.
 *
 * This check extracts the "Related" section from the MR description and
 * searches for JIRA ticket references in the format "Closes [JIRA ticket key]".
 *
 * It then extracts the closing GitHub links from the corresponding JIRA tickets and
 * checks if the linked GitHub issues are still in open state.
 *
 * Finally, it checks if the required GitHub closing links are present in the MR's commit messages.
 *
 */
module.exports = async function () {
    const ruleName = 'Jira ticket references';
    const axios = require("axios");
    const mrDescription = danger.gitlab.mr.description;
    const mrCommitMessages = danger.gitlab.commits.map(
        (commit) => commit.message
    );
    const jiraTicketRegex = /[A-Z0-9]+-[0-9]+/;

    let partMessages = []; // Create a blank field for future records of individual issues

    // Parse section "Related" from MR Description
    const sectionRelated = extractSectionRelated(mrDescription);

    if (
        !sectionRelated.header || // No section Related in MR description or ...
        !jiraTicketRegex.test(sectionRelated.content) // no Jira links in section Related
    ) {
        recordRuleExitStatus(ruleName, 'Passed (with suggestions)');
        return message(
            "Please consider adding references to JIRA issues in the `Related` section of the MR description."
        );
    }

    // Get closing (only) JIRA tickets
    const jiraTickets = findClosingJiraTickets(sectionRelated.content);

    for (const ticket of jiraTickets) {
        ticket.jiraUIUrl = `https://jira.espressif.com:8443/browse/${ticket.ticketName}`;

        if (!ticket.correctFormat) {
            partMessages.push(
                `- closing ticket \`${ticket.record}\` seems to be in the wrong format (or inaccessible to Jira DangerBot). The correct format is for example \`- Closes JIRA-123\`.`
            );
        }

        // Get closing GitHub issue links from JIRA tickets
        const closingGithubLink = await getGitHubClosingLink(ticket.ticketName);
        if (closingGithubLink) {
            ticket.closingGithubLink = closingGithubLink;
        } else if (closingGithubLink === null) {
            partMessages.push(
                `- the Jira issue number [\`${ticket.ticketName}\`](${ticket.jiraUIUrl}) seems to be invalid (please check if the ticket number is correct)`
            );
            continue; // Handle unreachable JIRA tickets; skip the following checks
        } else {
            continue; // Jira ticket has no GitHub closing link; skip the following checks
        }

        // Get still open GitHub issues
        const githubIssueStatusOpen = await isGithubIssueOpen(
            ticket.closingGithubLink
        );
        ticket.isOpen = githubIssueStatusOpen;
        if (githubIssueStatusOpen === null) {
            // Handle unreachable GitHub issues
            partMessages.push(
                `- the GitHub issue [\`${ticket.closingGithubLink}\`](${ticket.closingGithubLink}) does not seem to exist on GitHub (referenced from JIRA ticket [\`${ticket.ticketName}\`](${ticket.jiraUIUrl}) )`
            );
            continue; // skip the following checks
        }

        // Search the commit messages for all GitHub closing links (from the Related section) of still open GH issues
        if (ticket.isOpen) {
            if (
                !mrCommitMessages.some((item) =>
                    item.includes(`Closes ${ticket.closingGithubLink}`)
                )
            ) {
                partMessages.push(
                    `- please add \`Closes ${ticket.closingGithubLink}\` to the commit message`
                );
            }
        }
    }

    // Create report / DangerJS check feedback if issues with Jira links found
    // (return here so the "Failed" status recorded inside createReport is not overwritten below)
    if (partMessages.length) {
        return createReport();
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, 'Passed');

    // ---------------------------------------------------------------

    /**
     * This function takes in a string mrDescription which contains a Markdown-formatted text
     * related to a Merge Request (MR) in a GitLab repository. It searches for a section titled "Related"
     * and extracts the content of that section. If the section is not found, it returns an object
     * indicating that the header and content are null. If the section is found but empty, it returns
     * an object indicating that the header is present but the content is null. If the section is found
     * with content, it returns an object indicating that the header is present, together with the content
     * of the "Related" section.
     *
     * @param {string} mrDescription - The Markdown-formatted text related to the Merge Request.
     * @returns {{
     *   header: boolean | null,
     *   content: string | null
     * }} - An object containing the header and content of the "Related" section, if present.
     */
    function extractSectionRelated(mrDescription) {
        const regexSectionRelated = /## Related([\s\S]*?)(?=## |$)/;
        const sectionRelated = mrDescription.match(regexSectionRelated);
        if (!sectionRelated) {
            return { header: null, content: null }; // Section "Related" is missing
        }

        const content = sectionRelated[1].replace(/(\r\n|\n|\r)/gm, ""); // Remove empty lines
        if (!content.length) {
            return { header: true, content: null }; // Section "Related" is present, but empty
        }

        return { header: true, content: sectionRelated[1] }; // Found section "Related" with content
    }

    /**
     * Finds all JIRA tickets that are being closed in the given sectionRelatedContent.
     * The function searches for lines that start with "- Closes" and have the format "- Closes [uppercase letters]-[numbers]".
     * @param {string} sectionRelatedContent - A string that contains lines with mentions of JIRA tickets
     * @returns {Array} An array of objects whose correctFormat property records whether the line has the correct format
     */
    function findClosingJiraTickets(sectionRelatedContent) {
        let closingTickets = [];
        const lines = sectionRelatedContent.split("\n");
        for (const line of lines) {
            if (!line.startsWith("- Closes")) {
                continue; // Not a closing-type ticket, skip
            }

            const correctJiraClosingLinkFormat = new RegExp(
                `^- Closes ${jiraTicketRegex.source}$`
            );
            const matchedJiraTicket = line.match(jiraTicketRegex);
            if (matchedJiraTicket) {
                closingTickets.push({
                    record: line,
                    ticketName: matchedJiraTicket[0],
                    correctFormat: correctJiraClosingLinkFormat.test(line),
                });
            }
        }
        return closingTickets;
    }

    /**
     * This function takes a JIRA issue key and retrieves the description from JIRA's API.
     * It then searches the description for a GitHub closing link in the format "Closes https://github.com/owner/repo/issues/123".
     * If a GitHub closing link is found, it is returned. If no GitHub closing link is found, it returns false;
     * if the JIRA ticket cannot be fetched at all, it returns null.
     * @param {string} jiraIssueKey - The key of the JIRA issue to search for the GitHub closing link.
     * @returns {Promise<string|false|null>} - A promise that resolves to the GitHub closing link if found,
     * false if the ticket has no closing link, or null if the ticket is unreachable.
     */
    async function getGitHubClosingLink(jiraIssueKey) {
        let jiraDescription = "";

        // Get JIRA ticket description content
        try {
            const response = await axios({
                url: `https://jira.espressif.com:8443/rest/api/latest/issue/${jiraIssueKey}`,
                auth: {
                    username: process.env.DANGER_JIRA_USER,
                    password: process.env.DANGER_JIRA_PASSWORD,
                },
            });
            jiraDescription = response.data.fields.description
                ? response.data.fields.description
                : ""; // if the Jira ticket has an unfilled Description, the ".description" property is missing in the API response - in that case set "jiraDescription" to an empty string
        } catch (error) {
            return null;
        }

        // Find GitHub closing link in description
        const regexClosingGhLink =
            /Closes\s+(https:\/\/github\.com\/\S+\/\S+\/issues\/\d+)/;
        const closingGithubLink = jiraDescription.match(regexClosingGhLink);

        if (closingGithubLink) {
            return closingGithubLink[1];
        } else {
            return false; // Jira issue has no GitHub closing link in description
        }
    }

    /**
     * Check if a GitHub issue linked in a merge request is still open.
     *
     * @param {string} link - The link to the GitHub issue.
     * @returns {Promise<boolean|null>} A promise that resolves to a boolean indicating if the issue is open,
     * or null if the issue could not be fetched.
     */
    async function isGithubIssueOpen(link) {
        const parsedUrl = new URL(link);
        const [owner, repo] = parsedUrl.pathname.split("/").slice(1, 3);
        const issueNumber = parsedUrl.pathname.split("/").slice(-1)[0];

        try {
            const response = await axios.get(
                `https://api.github.com/repos/${owner}/${repo}/issues/${issueNumber}`
            );
            return response.data.state === "open"; // return true if the GitHub issue is open
        } catch (error) {
            return null; // GET request to issue fails
        }
    }

    function createReport() {
        partMessages.sort();
        let dangerMessage = `Some issues found for the related JIRA tickets in this MR:\n${partMessages.join(
            "\n"
        )}`;
        recordRuleExitStatus(ruleName, "Failed");
        return warn(dangerMessage);
    }
};
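To make the parsing above concrete: given an MR description containing a section like the following (ticket keys hypothetical),

## Related

- Closes IDF-1234
- Related to IDF-5678

extractSectionRelated() returns the section body, and findClosingJiraTickets() picks up only the `- Closes IDF-1234` line, producing roughly:

// [{ record: "- Closes IDF-1234", ticketName: "IDF-1234", correctFormat: true }]

The `- Related to ...` line is ignored, because only lines starting with `- Closes` are treated as closing references.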
@@ -1,24 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if the MR has a sufficiently detailed description.
 *
 * @dangerjs WARN
 */
module.exports = function () {
    const ruleName = "Merge request sufficient description";
    const mrDescription = danger.gitlab.mr.description;
    const descriptionChunk = mrDescription.match(/^([^#]*)/)[1].trim(); // Extract all text before the first section header (i.e., the text before "## Release notes")

    const shortMrDescriptionThreshold = 50; // Description is considered too short below this number of characters

    if (descriptionChunk.length < shortMrDescriptionThreshold) {
        recordRuleExitStatus(ruleName, "Failed");
        return warn(
            "The MR description looks very brief, please check if more details can be added."
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
@@ -1,103 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if MR Description contains the mandatory section "Release notes"
 *
 * Extracts the content of the "Release notes" section from the GitLab merge request description.
 *
 * @dangerjs WARN (if section is missing, empty, or in the wrong markdown format)
 */
module.exports = function () {
    const ruleName = 'Merge request Release Notes section';
    const mrDescription = danger.gitlab.mr.description;
    const wiki_link = `${process.env.DANGER_GITLAB_HOST}/espressif/esp-idf/-/wikis/rfc/How-to-write-release-notes-properly`;

    const regexSectionReleaseNotes = /## Release notes([\s\S]*?)(?=## |$)/;
    const regexValidEntry = /^\s*[-*+]\s+.+/;
    const regexNoReleaseNotes = /no release note/i;

    const sectionReleaseNotes = mrDescription.match(regexSectionReleaseNotes);
    if (!sectionReleaseNotes) {
        recordRuleExitStatus(ruleName, "Failed");
        return warn(`The \`Release Notes\` section seems to be missing. Please check if the section header in the MR description is present and in the correct markdown format ("## Release Notes").\n\nSee [Release Notes Format Rules](${wiki_link}).`);
    }

    const releaseNotesLines = sectionReleaseNotes[1].replace(/<!--[\s\S]*?-->/g, '');

    const lines = releaseNotesLines.split("\n").filter(s => s.trim().length > 0);
    let valid_entries_found = 0;
    let no_release_notes_found = false;
    let violations = [];

    lines.forEach((line) => {
        if (line.match(regexValidEntry)) {
            valid_entries_found++;
            const error_msg = check_entry(line);
            if (error_msg) {
                violations.push(error_msg);
            }
        } else if (line.match(regexNoReleaseNotes)) {
            no_release_notes_found = true;
        }
    });

    let error_output = [];
    if (violations.length > 0) {
        error_output = [...error_output, 'Invalid release note entries:', violations.join('\n')];
    }
    if (no_release_notes_found) {
        if (valid_entries_found > 0) {
            error_output.push('A `No release notes` comment appears even though there are valid entries. Remove bullets before comments in the release notes section.');
        }
    } else {
        if (!valid_entries_found) {
            error_output.push('The `Release Notes` section seems to have no valid entries. Add bullets before valid entries, or add a `No release notes` comment to suppress this error if you mean to have no release notes.');
        }
    }

    if (error_output.length > 0) {
        // Paragraphs joined by double `\n`s.
        error_output = [...error_output, `See [Release Notes Format Guide](${wiki_link}).`].join('\n\n');
        recordRuleExitStatus(ruleName, "Failed");
        return warn(error_output);
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, 'Passed');
};

function check_entry(entry) {
    const entry_str = `- \`${entry}\``;
    const indent = "    ";

    if (entry.match(/no\s+release\s+note/i)) {
        return [entry_str, `${indent}- A \`No release notes\` comment shouldn't start with a bullet.`].join('\n');
    }

    // Remove a leading escaping backslash of the special characters, https://www.markdownguide.org/basic-syntax/#characters-you-can-escape
    const escapeCharRegex = /\\([\\`*_{}[\]<>()+#.!|-])/g;
    entry = entry.replace(escapeCharRegex, '$1');

    const regex = /^(\s*)[-*+]\s+\[([^\]]+)\]\s+(.*)$/;
    const match = regex.exec(entry);
    if (!match) {
        return [entry_str, `${indent}- Please specify the [area] to which the change belongs (see guide). If this line is just a comment, remove the bullet.`].join('\n');
    }

    // area is in match[2]
    const description = match[3].trim();
    let violations = [];

    if (match[1]) {
        violations.push(`${indent}- A release note entry should start at the beginning of the line. (Nested release notes are not allowed.)`);
    }

    if (!/^[A-Z0-9]/.test(description)) {
        violations.push(`${indent}- A release note statement should start with a capital letter or a digit.`);
    }

    if (violations.length > 0) {
        return [entry_str, ...violations].join('\n');
    }
    return null;
}
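For illustration, here is how check_entry() above judges a few hypothetical entries:

- [Peripheral Drivers/I2S] Fixed the RX overflow     → passes ([area] present, statement starts with a capital letter)
- fixed the RX overflow                              → fails (missing [area])
    - [WiFi] Added support for xxx                   → fails (nested entry; must start at the beginning of the line)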
@@ -1,280 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check the documentation files in this MR.
 *
 * Generate an object with all docs/ files found in this MR with paths to their EN/CN versions.
 *
 * For common files (both language versions exist in this MR), compare the lines of both files.
 * Ignore if the CN file is only a single-line file with an "include" reference to the EN version.
 *
 * For files that only have a CN version in this MR, add a note to the report that an EN file also needs to be created.
 *
 * For a file that only has an EN version in this MR, try loading its CN version from the target GitLab branch.
 * If its CN version doesn't exist in the repository, or it does exist but its contents are more than just
 * an "include" link to the EN version (i.e., it is a full-size file), add a note to the report.
 *
 * Create a compiled report with the docs/ files issues found and set its severity (WARN/INFO).
 * Severity is based on the presence of "needs translation: ??" labels in this MR
 *
 * @dangerjs WARN (if docs translation issues in the MR)
 * @dangerjs INFO (if docs translation issues in the MR and the user has already added translation labels).
 *           Adding translation labels "needs translation: XX" automatically notifies the Documentation team
 *
 * @dangerjs WARN (if there are no docs issues in MR, but translation labels have been added anyway)
 *
 */
module.exports = async function () {
    const ruleName = 'Documentation translation';
    let partMessages = []; // Create a blank field for future records of individual issues
    const pathProject = "espressif/esp-idf";
    const regexIncludeLink = /\.\.\sinclude::\s((\.\.\/)+)en\//;
    const allMrFiles = [
        ...danger.git.modified_files,
        ...danger.git.created_files,
        ...danger.git.deleted_files,
    ];

    const docsFilesMR = parseMrDocsFiles(allMrFiles); // Create a single object of all doc files in MR with names, paths and groups

    // Both versions (EN and CN) of document found changed in this MR
    for (const file of docsFilesMR.bothFilesInMr) {
        file.contentEn = await getContentFileInMR(file.fileEnPath); // Get content of English file
        file.linesEn = file.contentEn.split("\n").length; // Get number of lines of English file

        file.contentCn = await getContentFileInMR(file.fileCnPath); // Get content of Chinese file
        file.linesCn = file.contentCn.split("\n").length; // Get number of lines of Chinese file

        // Compare number of lines in both versions
        if (file.linesEn !== file.linesCn) {
            // Check if CN file is only a link to the EN file
            if (!regexIncludeLink.test(file.contentCn)) {
                // if not just a link ...
                partMessages.push(
                    `- please synchronize the EN and CN version of \`${file.fileName}\`. [\`${file.fileEnPath}\`](${file.fileUrlRepoEN}) has ${file.linesEn} lines; [\`${file.fileCnPath}\`](${file.fileUrlRepoCN}) has ${file.linesCn} lines.`
                );
            }
        }
    }

    // Only Chinese version of document found changed in this MR
    for (const file of docsFilesMR.onlyCnFilesInMr) {
        partMessages.push(
            `- file \`${file.fileEnPath}\` doesn't exist in this MR or in the GitLab repo. Please add \`${file.fileEnPath}\` into this MR.`
        );
    }

    // Only English version of document found in this MR
    for (const file of docsFilesMR.onlyEnFilesInMr) {
        const targetBranch = danger.gitlab.mr.target_branch;
        file.contentCn = await getContentFileInGitlab(
            file.fileCnPath,
            targetBranch
        ); // Try to fetch CN file from target branch of Gitlab repository and store content

        if (file.contentCn) {
            // File found on target branch in Gitlab repository
            if (!regexIncludeLink.test(file.contentCn)) {
                // File on Gitlab master is NOT just an ..include:: link to the EN version
                file.fileUrlRepoMasterCN = `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${targetBranch}/${file.fileCnPath}`;
                partMessages.push(
                    `- file \`${file.fileCnPath}\` was not updated in this MR, but a full document (not just a link to EN) was found unchanged in the target branch of the GitLab repository [\`${file.fileCnPath}\`](${file.fileUrlRepoMasterCN}). Please update \`${file.fileCnPath}\` in this MR.`
                );
            }
        } else {
            // File failed to fetch, probably does not exist in the target branch
            partMessages.push(
                `- file \`${file.fileCnPath}\` probably doesn't exist in this MR or in the GitLab repo. Please add \`${file.fileCnPath}\` into this MR.`
            );
        }
    }

    // Create a report with the found documentation issues
    // (createReport records the rule exit status itself, so return its result directly)
    return createReport();

    /**
     * Generates an object that represents the relationships between files in two different languages found in this MR.
     *
     * @param {string[]} allMrFiles - An array of all file paths changed in this MR.
     * @returns {Object} An object with the following properties:
     * - bothFilesInMr: An array of objects that represent files found in this MR in both languages. Each object has the following properties:
     *   - fileName: The name of the file.
     *   - fileEnPath: The path to the file in English.
     *   - fileCnPath: The path to the file in Chinese.
     *   - fileUrlRepoEN: The URL link to the MR branch path of the file in English.
     *   - fileUrlRepoCN: The URL link to the MR branch path of the file in Chinese.
     * - onlyEnFilesInMr: An array of objects that represent files found in this MR only in English. Each object has the following properties:
     *   - fileName: The name of the file.
     *   - fileEnPath: The path to the file in English.
     *   - fileCnPath: The FUTURE path to the file in Chinese.
     *   - fileUrlRepoEN: The URL link to the MR branch path of the file in English.
     *   - fileUrlRepoCN: The URL link to the MR branch path of the file in Chinese.
     * - onlyCnFilesInMr: An array of objects that represent files found in this MR only in Chinese. Each object has the following properties:
     *   - fileName: The name of the file.
     *   - fileEnPath: The FUTURE path to the file in English.
     *   - fileCnPath: The path to the file in Chinese.
     *   - fileUrlRepoEN: The URL link to the MR branch path of the file in English.
     *   - fileUrlRepoCN: The URL link to the MR branch path of the file in Chinese.
     */
    function parseMrDocsFiles(allMrFiles) {
        const path = require("path");
        const mrBranch = danger.gitlab.mr.source_branch;

        const docsEnFilesMrPath = allMrFiles.filter((file) =>
            file.startsWith("docs/en")
        ); // Filter all English doc files in MR
        const docsCnFilesMrPath = allMrFiles.filter((file) =>
            file.startsWith("docs/zh_CN")
        ); // Filter all Chinese doc files in MR

        const docsEnFileNames = docsEnFilesMrPath.map((filePath) =>
            path.basename(filePath)
        ); // Get (base) file names for English docs
        const docsCnFileNames = docsCnFilesMrPath.map((filePath) =>
            path.basename(filePath)
        ); // Get (base) file names for Chinese docs

        const bothFileNames = docsEnFileNames.filter((fileName) =>
            docsCnFileNames.includes(fileName)
        ); // Get file names that are common to both English and Chinese docs
        const onlyEnFileNames = docsEnFileNames.filter(
            (fileName) => !docsCnFileNames.includes(fileName)
        ); // Get file names that are only present in the English version
        const onlyCnFileNames = docsCnFileNames.filter(
            (fileName) => !docsEnFileNames.includes(fileName)
        ); // Get file names that are only present in the Chinese version

        return {
            bothFilesInMr: bothFileNames.map((fileName) => {
                const fileEnPath =
                    docsEnFilesMrPath[docsEnFileNames.indexOf(fileName)];
                const fileCnPath =
                    docsCnFilesMrPath[docsCnFileNames.indexOf(fileName)];

                return {
                    fileName,
                    fileEnPath,
                    fileCnPath,
                    fileUrlRepoEN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileEnPath}`,
                    fileUrlRepoCN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileCnPath}`,
                };
            }),
            onlyEnFilesInMr: onlyEnFileNames.map((fileName) => {
                const fileEnPath =
                    docsEnFilesMrPath[docsEnFileNames.indexOf(fileName)];
                const fileCnPath = fileEnPath.replace("en", "zh_CN"); // Generate future CN file path

                return {
                    fileName,
                    fileEnPath,
                    fileCnPath,
                    fileUrlRepoEN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileEnPath}`,
                    fileUrlRepoCN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileCnPath}`,
                };
            }),
            onlyCnFilesInMr: onlyCnFileNames.map((fileName) => {
                const fileCnPath =
                    docsCnFilesMrPath[docsCnFileNames.indexOf(fileName)];
                const fileEnPath = fileCnPath.replace("zh_CN", "en"); // Generate future EN file path

                return {
                    fileName,
                    fileEnPath,
                    fileCnPath,
                    fileUrlRepoEN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileEnPath}`,
                    fileUrlRepoCN: `${process.env.DANGER_GITLAB_HOST}/${pathProject}/-/blob/${mrBranch}/${fileCnPath}`,
                };
            }),
        };
    }

    /**
     * Retrieves the contents of a file from GitLab using the GitLab API.
     *
     * @param {string} filePath - The path of the file to retrieve.
     * @param {string} branch - The branch where the file is located.
     * @returns {Promise<string|null>} - The contents of the file, with any trailing new lines trimmed, or null if the file cannot be retrieved.
     */
    async function getContentFileInGitlab(filePath, branch) {
        const axios = require("axios");

        const encFilePath = encodeURIComponent(filePath);
        const encBranch = encodeURIComponent(branch);
        const urlApi = `${process.env.DANGER_GITLAB_API_BASE_URL}/projects/${danger.gitlab.mr.project_id}/repository/files/${encFilePath}/raw?ref=${encBranch}`;

        try {
            const response = await axios.get(urlApi, {
                headers: {
                    "Private-Token": process.env.DANGER_GITLAB_API_TOKEN,
                },
            });
            return response.data.trim(); // Trim trailing new line
        } catch (error) {
            return null;
        }
    }

    /**
     * Retrieves the contents of a file in a DangerJS merge request object.
     *
     * @param {string} filePath - The path of the file to retrieve.
     * @returns {Promise<string|null>} - The contents of the file, with any trailing new lines trimmed, or null if the file cannot be retrieved.
     */
    async function getContentFileInMR(filePath) {
        try {
            const content = await danger.git.diffForFile(filePath);
            const fileContentAfter = content.after.trim(); // Trim trailing new lines
            return fileContentAfter;
        } catch (error) {
            console.error(`Error while getting file content MR: ${error}`);
            return null;
        }
    }

    /**
     * Creates a compiled report for found documentation issues in the current MR and alerts the Documentation team if there are any "needs translation" labels present.
     *
     * Also reports if documentation labels have been added by mistake.
     */
    function createReport() {
        const mrLabels = danger.gitlab.mr.labels; // Get MR labels
        const regexTranslationLabel = /needs translation:/i;

        const translationLabelsPresent = mrLabels.some((label) =>
            regexTranslationLabel.test(label)
        ); // Check if any of the MR labels are "needs translation: XX"

        // No docs issues found in MR, but translation labels have been added anyway
        if (!partMessages.length && translationLabelsPresent) {
            recordRuleExitStatus(ruleName, "Failed");
            return warn(
                `Please remove the \`needs translation: XX\` labels. For documents that need to be translated from scratch, the Doc team will translate them in the future. For the current stage, we only focus on updating existing EN and CN translations to keep them in sync.`
            );
        }

        // Docs issues found in this MR
        partMessages.sort();
        let dangerMessage = `Some of the documentation files in this MR seem to have translation issues:\n${partMessages.join(
            "\n"
        )}\n`;

        if (partMessages.length) {
            if (!translationLabelsPresent) {
                dangerMessage += `
\nWhen synchronizing the EN and CN versions, please follow the [Documentation Code](https://docs.espressif.com/projects/esp-idf/zh_CN/latest/esp32/contribute/documenting-code.html#standardize-document-format). The total number of lines of EN and CN should be the same.\n
\nIf you have difficulty in providing translation, you can contact the Documentation team by adding <kbd>needs translation: CN</kbd> or <kbd>needs translation: EN</kbd> labels into this MR and retrying the Danger CI job. The documentation team will be automatically notified and will help you with the translations before the merge.\n`;
                recordRuleExitStatus(ruleName, "Failed");
                return warn(dangerMessage); // no "needs translation: XX" labels in MR; report issues as warn
            } else {
                dangerMessage += `\nTranslation labels <kbd>needs translation: CN</kbd> or <kbd>needs translation: EN</kbd> were added - this will automatically notify the Documentation team to help you with translation issues.`;
                recordRuleExitStatus(ruleName, 'Passed (with suggestions)');
                return message(dangerMessage); // "needs translation: XX" labels were found in MR and Docs team was notified; report issues as info
            }
        }

        // No docs issues found and no translation labels present; the rule has passed
        recordRuleExitStatus(ruleName, 'Passed');
    }
};
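For reference, the `regexIncludeLink` check above treats a CN document as "just a link" when its content is a single reST include of the EN version, e.g. (path hypothetical):

.. include:: ../../en/api-guides/example-doc.rst

Such single-line CN files are intentionally skipped by the line-count comparison, since they render the EN content directly.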
@@ -1,22 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if MR is too large (more than 1000 lines of changes)
 *
 * @dangerjs INFO
 */
module.exports = async function () {
    const ruleName = "Merge request size (number of changed lines)";
    const bigMrLinesOfCodeThreshold = 1000;
    const totalLines = await danger.git.linesOfCode();

    if (totalLines > bigMrLinesOfCodeThreshold) {
        recordRuleExitStatus(ruleName, "Passed (with suggestions)");
        return message(
            `This MR seems to be quite large (total lines of code: ${totalLines}), you might consider splitting it into smaller MRs`
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
@@ -1,31 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Throw Danger WARN if the branch name contains more than one slash or uppercase letters
 *
 * @dangerjs WARN
 */
module.exports = function () {
    const ruleName = "Source branch name";
    const sourceBranch = danger.gitlab.mr.source_branch;

    // Check if the source branch name contains more than one slash
    const slashCount = (sourceBranch.match(/\//g) || []).length;
    if (slashCount > 1) {
        recordRuleExitStatus(ruleName, "Failed");
        return warn(
            `The source branch name \`${sourceBranch}\` contains more than one slash. This can cause troubles with git sync. Please rename the branch.`
        );
    }

    // Check if the source branch name contains any uppercase letters
    if (sourceBranch !== sourceBranch.toLowerCase()) {
        recordRuleExitStatus(ruleName, "Failed");
        return warn(
            `The source branch name \`${sourceBranch}\` contains uppercase letters. This can cause troubles on case-insensitive file systems (macOS). Please use only lowercase letters.`
        );
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
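For illustration, a few hypothetical branch names and how the two checks above treat them:

feature/support_wifi6      → passes
feature/wifi/scan_fix      → warn (more than one slash)
feature/Support_WiFi6      → warn (contains uppercase letters)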
@@ -1,31 +0,0 @@
const { recordRuleExitStatus } = require("./configParameters.js");

/**
 * Check if the MR title contains a prefix like "WIP: ...".
 *
 * @dangerjs WARN
 */
module.exports = function () {
    const ruleName = 'Merge request not in Draft or WIP state';
    const mrTitle = danger.gitlab.mr.title;
    const regexes = [
        { prefix: "WIP", regex: /^WIP:/i },
        { prefix: "W.I.P", regex: /^W\.I\.P/i },
        { prefix: "[WIP]", regex: /^\[WIP/i },
        { prefix: "[W.I.P]", regex: /^\[W\.I\.P/i },
        { prefix: "(WIP)", regex: /^\(WIP/i },
        { prefix: "(W.I.P)", regex: /^\(W\.I\.P/i },
    ];

    for (const item of regexes) {
        if (item.regex.test(mrTitle)) {
            recordRuleExitStatus(ruleName, "Failed");
            return warn(
                `Please remove the \`${item.prefix}\` prefix from the MR title before merging this MR.`
            );
        }
    }

    // At this point, the rule has passed
    recordRuleExitStatus(ruleName, "Passed");
};
.gitlab/dangerjs/package-lock.json (generated, 2745 changed lines)
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
{
    "name": "dangerjs-esp-idf",
    "description": "Merge request automatic linter",
    "main": "dangerfile.js",
    "dependencies": {
        "danger": "^11.2.3",
        "axios": "^1.3.3",
        "langchain": "^0.0.53",
        "openai-gpt-token-counter": "^1.0.3",
        "@commitlint/lint": "^13.1.0"
    }
}
@@ -1,15 +0,0 @@
<!-- This Template states the absolute minimum for an MR.
If you want to have a more elaborate template or know why we have this structure,
please use the "Mixed Template" or consult the Wiki. -->
<!-- Add the CI labels to trigger the appropriate tests (e.g. "unit_test_esp32") --><!-- Mandatory -->
<!-- Make sure the commit message follows the Wiki about "Commit Messages" --><!-- Mandatory -->

<!-- Add description of the change here --><!-- Mandatory -->

## Related <!-- Optional -->
<!-- Related Jira issues and Github issues or write "No related issues"-->

## Release notes <!-- Mandatory -->
<!-- Either state release notes or write "No release notes" -->

<!-- ## Breaking change notes --><!-- Optional -->
@@ -1,63 +0,0 @@
_All text in italics is instructional and should be replaced by contents or removed._

## Checklist

_This entire section can be deleted if all items are checked._

* [ ] Enough information to help reviewers understand the issue, its root cause, impact, and the proposed solution
* [ ] Enough information to help reviewers understand the feature, its functional description, example, documentation, test cases, test results, feature TODO list
* [ ] The MR Title describes the change, including the component name, i.e. "lwip: Add support for IP over Pigeon"
* [ ] All related links, including JIRA, backport, submodule MR, are mentioned in the `Related` subsection.
* [ ] Any GitHub issues are linked inside the git commit message and corresponding release notes
* [ ] Add a label for the area this MR is part of
* [ ] For documentation updates, check if the labels `Docs` and `needs translation:CN` or `needs translation:EN` have been added when the other language version still needs the update. Skip adding the label if the document is not yet translated.
* [ ] Check if documents requiring translation fall under the get-started section. If yes, add the labels mentioned above. Then the documentation team will assign a translator for you. Please inform the translator to prepare the translation once your MR is ready to merge. The translation should be included in your MR to get it merged. For more information, see the documentation workflow in the Wiki.
* [ ] Any necessary "needs backport" labels are added
* [ ] Check if this is a breaking change. If it is, add notes to the `Breaking change notes` subsection below
* [ ] Release note entry if this is a new public feature, or a fix for an issue introduced in the previous release.
* [ ] The commit log is clean and ready to merge.
* [ ] All relevant CI jobs have been run, i.e. jobs which cover the code changed by the MR.

---

_For issues, put enough information here to help reviewers understand the issue, its root cause, impact, and the proposed solution._

_If this issue is a regression, specify in the `Related` subsection below, in which commit or MR it was introduced. This helps reviewers to check if the backport labels are set correctly._

---

_For features, put enough information here to help reviewers understand the feature, its functional description, example, documentation, test cases, test results, feature TODO list._

---

_Other small or non-public changes that are not expected to appear in the release notes can be mentioned here, including:_

* changes that don't apply to customers, e.g. CI fixes, test-only MRs, etc.

## Related

* Mention all related JIRA tasks (e.g. IDF-0000) to make sure they get updated.
* Mention the submodule MR, if there is one
* Mention the backport(ed) MR, if there is one

_Don't touch the subsection titles below, they will be parsed by scripts._

## Release notes (Mandatory)

_Changes made in this MR that should go into the **Release Notes** should be listed here. Please use **past tense** and *specify the area (see maintainers page of IDF internal wiki)*. If there is a subscope, include it and separate with slash (`/`). Minor changes can go to the descriptions above without a release notes entry._

_Write all the changes in a **list** (Start at the beginning of the line with `-` or `*`). If multiple changes are made, each of them should take a single line. If there is only one change to list, it should still be the only line of a list. If this MR does not need any release notes, write "No release notes" here without the `-` or `*`. e.g._

* [WiFi] Changed/fixed/updated xxx
* [WiFi] Added support of xxx
* [Peripheral Drivers/I2S] Fixed xxx (https://github.com/espressif/esp-idf/issues/xxxx)

## Breaking change notes

_Remove this subsection if not used._

_If there are any breaking changes, please mention them here, describing (1) what is no longer accepted, (2) the alternative solution, and (3) the benefits/reason. e.g._

_Please strictly follow the breaking change restriction, which means: if there is a breaking change but you are merging to non-major versions, you have to separate the breaking part out into another MR for a major version. The breaking change subsection is only accepted in MRs merging to major versions._

* [VFS/UART] Now vfs_uart_set_rts_cts accepts one more instance argument, to support configuration of different ports.
.gitmodules (vendored, 168 changed lines)
@@ -1,145 +1,39 @@
#
# All the relative URL paths are intended to be GitHub ones
# For Espressif's public projects please use '../../espressif/proj', not a '../proj'
#
# Submodules SBOM information
# ---------------------------
# Submodules, which are used directly and not forked into espressif namespace should
# contain SBOM information here. Other submodules should have the SBOM manifest file
# included in the root of their project's repository.
#
# The sbom-hash entry records the submodule's checkout SHA as presented in git-tree
# commit object. For example spiffs submodule
#
# $ git ls-tree HEAD components/spiffs/spiffs
# 160000 commit 0dbb3f71c5f6fae3747a9d935372773762baf852  components/spiffs/spiffs
#
# The hash can be also obtained with git submodule command
#
# $ git submodule status components/spiffs/spiffs
# 0dbb3f71c5f6fae3747a9d935372773762baf852 components/spiffs/spiffs (0.2-255-g0dbb3f71c5f6)
#
# The submodule SHA recorded here has to match with SHA, which is presented in git-tree.
# This is checked by CI. Also please don't forget to update the submodule version
# if you are changing the sbom-hash. This is important for SBOM generation.
[submodule "components/esp32/lib"]
    path = components/esp32/lib
    url = https://github.com/espressif/esp32-wifi-lib.git

[submodule "components/bt/controller/lib_esp32"]
    path = components/bt/controller/lib_esp32
    url = ../../espressif/esp32-bt-lib.git
[submodule "components/esptool_py/esptool"]
    path = components/esptool_py/esptool
    url = https://github.com/espressif/esptool.git

[submodule "components/bootloader/subproject/components/micro-ecc/micro-ecc"]
    path = components/bootloader/subproject/components/micro-ecc/micro-ecc
    url = ../../kmackay/micro-ecc.git
    sbom-version = 1.1
    sbom-cpe = cpe:2.3:a:micro-ecc_project:micro-ecc:{}:*:*:*:*:*:*:*
    sbom-supplier = Person: Ken MacKay
    sbom-url = https://github.com/kmackay/micro-ecc
    sbom-description = A small and fast ECDH and ECDSA implementation for 8-bit, 32-bit, and 64-bit processors
    sbom-hash = 24c60e243580c7868f4334a1ba3123481fe1aa48
[submodule "components/bt/lib"]
    path = components/bt/lib
    url = https://github.com/espressif/esp32-bt-lib.git

[submodule "components/micro-ecc/micro-ecc"]
    path = components/micro-ecc/micro-ecc
    url = https://github.com/kmackay/micro-ecc.git

[submodule "components/coap/libcoap"]
    path = components/coap/libcoap
    url = https://github.com/obgm/libcoap.git

[submodule "components/aws_iot/aws-iot-device-sdk-embedded-C"]
    path = components/aws_iot/aws-iot-device-sdk-embedded-C
    url = https://github.com/espressif/aws-iot-device-sdk-embedded-C.git

[submodule "components/nghttp/nghttp2"]
    path = components/nghttp/nghttp2
    url = https://github.com/nghttp2/nghttp2.git

[submodule "components/libsodium/libsodium"]
    path = components/libsodium/libsodium
    url = https://github.com/jedisct1/libsodium.git

[submodule "components/spiffs/spiffs"]
    path = components/spiffs/spiffs
    url = ../../pellepl/spiffs.git
    sbom-version = 0.2-255-g0dbb3f71c5f6
    sbom-supplier = Person: Peter Andersson
    sbom-url = https://github.com/pellepl/spiffs
    sbom-description = Wear-leveled SPI flash file system for embedded devices
    sbom-hash = 0dbb3f71c5f6fae3747a9d935372773762baf852
    url = https://github.com/pellepl/spiffs.git

[submodule "components/json/cJSON"]
    path = components/json/cJSON
    url = ../../DaveGamble/cJSON.git
    sbom-version = 1.7.17
    sbom-cpe = cpe:2.3:a:cjson_project:cjson:{}:*:*:*:*:*:*:*
    sbom-supplier = Person: Dave Gamble
    sbom-url = https://github.com/DaveGamble/cJSON
    sbom-description = Ultralightweight JSON parser in ANSI C
    sbom-hash = 87d8f0961a01bf09bef98ff89bae9fdec42181ee

[submodule "components/mbedtls/mbedtls"]
    path = components/mbedtls/mbedtls
    url = ../../espressif/mbedtls.git

[submodule "components/lwip/lwip"]
    path = components/lwip/lwip
    url = ../../espressif/esp-lwip.git

[submodule "components/mqtt/esp-mqtt"]
    path = components/mqtt/esp-mqtt
    url = ../../espressif/esp-mqtt.git

[submodule "components/protobuf-c/protobuf-c"]
    path = components/protobuf-c/protobuf-c
    url = ../../protobuf-c/protobuf-c.git
    sbom-version = 1.4.1
    sbom-cpe = cpe:2.3:a:protobuf-c_project:protobuf-c:{}:*:*:*:*:*:*:*
    sbom-supplier = Organization: protobuf-c community <https://groups.google.com/g/protobuf-c>
    sbom-url = https://github.com/protobuf-c/protobuf-c
    sbom-description = Protocol Buffers implementation in C
    sbom-hash = abc67a11c6db271bedbb9f58be85d6f4e2ea8389

[submodule "components/unity/unity"]
    path = components/unity/unity
    url = ../../ThrowTheSwitch/Unity.git
    sbom-version = v2.4.3-51-g7d2bf62b7e6a
    sbom-supplier = Organization: ThrowTheSwitch community <http://www.throwtheswitch.org>
    sbom-url = https://github.com/ThrowTheSwitch/Unity
    sbom-description = Simple Unit Testing for C
    sbom-hash = 7d2bf62b7e6afaf38153041a9d53c21aeeca9a25

[submodule "components/bt/host/nimble/nimble"]
    path = components/bt/host/nimble/nimble
    url = ../../espressif/esp-nimble.git

[submodule "components/esp_wifi/lib"]
    path = components/esp_wifi/lib
    url = ../../espressif/esp32-wifi-lib.git

[submodule "components/cmock/CMock"]
    path = components/cmock/CMock
    url = ../../ThrowTheSwitch/CMock.git
    sbom-version = v2.5.2-2-geeecc49ce8af
    sbom-supplier = Organization: ThrowTheSwitch community <http://www.throwtheswitch.org>
    sbom-url = https://github.com/ThrowTheSwitch/CMock
    sbom-description = CMock - Mock/stub generator for C
    sbom-hash = eeecc49ce8af123cf8ad40efdb9673e37b56230f

[submodule "components/openthread/openthread"]
    path = components/openthread/openthread
    url = ../../espressif/openthread.git

[submodule "components/bt/controller/lib_esp32c3_family"]
    path = components/bt/controller/lib_esp32c3_family
    url = ../../espressif/esp32c3-bt-lib.git

[submodule "components/esp_phy/lib"]
    path = components/esp_phy/lib
    url = ../../espressif/esp-phy-lib.git

[submodule "components/openthread/lib"]
    path = components/openthread/lib
    url = ../../espressif/esp-thread-lib.git

[submodule "components/bt/controller/lib_esp32h2/esp32h2-bt-lib"]
    path = components/bt/controller/lib_esp32h2/esp32h2-bt-lib
    url = ../../espressif/esp32h2-bt-lib.git

[submodule "components/bt/controller/lib_esp32c2/esp32c2-bt-lib"]
    path = components/bt/controller/lib_esp32c2/esp32c2-bt-lib
    url = ../../espressif/esp32c2-bt-lib.git

[submodule "components/bt/controller/lib_esp32c6/esp32c6-bt-lib"]
    path = components/bt/controller/lib_esp32c6/esp32c6-bt-lib
    url = ../../espressif/esp32c6-bt-lib.git

[submodule "components/heap/tlsf"]
    path = components/heap/tlsf
    url = ../../espressif/tlsf.git

[submodule "components/esp_coex/lib"]
    path = components/esp_coex/lib
    url = ../../espressif/esp-coex-lib.git

[submodule "components/bt/esp_ble_mesh/lib/lib"]
    path = components/bt/esp_ble_mesh/lib/lib
    url = ../../espressif/esp-ble-mesh-lib.git
    url = https://github.com/DaveGamble/cJSON.git
.mypy.ini (26 changed lines)
@@ -1,26 +0,0 @@
[mypy]

# Specifies the Python version used to parse and check the target program
python_version = 3.9

# Disallows defining functions without type annotations or with incomplete type annotations
# True => enforce type annotation in all function definitions
disallow_untyped_defs = True

# Shows a warning when returning a value with type Any from a function declared with a non-Any return type
warn_return_any = True

# Shows errors for missing return statements on some execution paths
warn_no_return = True

# Suppress error messages about imports that cannot be resolved
# True => ignore all import errors
ignore_missing_imports = True

# Disallows defining functions with incomplete type annotations
disallow_incomplete_defs = False

# Directs what to do with imports when the imported module is found as a .py file and not part of the files,
# modules and packages provided on the command line.
# SKIP -> mypy checks only the single file, not included imports
follow_imports = skip
@@ -1,225 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks

default_stages: [commit]

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
        # note: whitespace exclusions use multiline regex, see https://pre-commit.com/#regular-expressions
        # items are:
        # 1 - some file extensions
        # 2 - any file matching *test*/*expected* (for host tests, if possible use this naming pattern always)
        # 3 - any directory named 'testdata'
        # 4 - protobuf auto-generated files
        exclude: &whitespace_excludes |
          (?x)^(
              .+\.(md|rst|map|bin)|
              .+test.*\/.*expected.*|
              .+\/testdata\/.+|
              .*_pb2.py|
              .*.pb-c.h|
              .*.pb-c.c|
              .*.yuv
          )$
      - id: end-of-file-fixer
        exclude: *whitespace_excludes
      - id: check-executables-have-shebangs
      - id: mixed-line-ending
        args: ['-f=lf']
      - id: double-quote-string-fixer
      - id: no-commit-to-branch
        name: Do not use more than one slash in the branch name
        args: ['--pattern', '^[^/]*/[^/]*/']
      - id: no-commit-to-branch
        name: Do not use uppercase letters in the branch name
        args: ['--pattern', '^[^A-Z]*[A-Z]']
  - repo: https://github.com/PyCQA/flake8
    rev: 5.0.4
    hooks:
      - id: flake8
        args: ['--config=.flake8', '--tee', '--benchmark']
  - repo: https://github.com/asottile/reorder-python-imports
    rev: v3.12.0
    hooks:
      - id: reorder-python-imports
        name: Reorder Python imports
        args: [--py38-plus]
        exclude: >
          (?x)^(
              .*_pb2.py
          )$
  - repo: local
    hooks:
      - id: check-executables
        name: Check File Permissions
        entry: tools/ci/check_executables.py --action executables
        language: python
        types: [executable]
        exclude: '\.pre-commit/.+'
      - id: check-executable-list
        name: Validate executable-list.txt
        entry: tools/ci/check_executables.py --action list
        language: python
        pass_filenames: false
        always_run: true
      - id: check-kconfigs
        name: Validate Kconfig files
        entry: tools/ci/check_kconfigs.py
        language: python
        additional_dependencies:
          - esp-idf-kconfig>=1.4.2,<2.0.0
        files: '^Kconfig$|Kconfig.*$'
      - id: check-deprecated-kconfigs-options
        name: Check if any Kconfig Options Deprecated
        entry: tools/ci/check_deprecated_kconfigs.py
        language: python
        files: 'sdkconfig\.ci$|sdkconfig\.rename$|sdkconfig.*$'
      - id: cmake-lint
        name: Check CMake Files Format
        entry: cmakelint --linelength=120 --spaces=4 --filter=-whitespace/indent
        language: python
        additional_dependencies:
          - cmakelint==1.4.1
        files: 'CMakeLists.txt$|\.cmake$'
        exclude: '\/third_party\/'
      - id: check-codeowners
        name: Validate Codeowner File
        entry: tools/ci/check_codeowners.py ci-check
        language: python
        always_run: true
        files: '\.gitlab/CODEOWNERS'
        pass_filenames: false
      - id: check-rules-yml
        name: Check rules.yml all rules have at least one job applied, all rules needed exist
        entry: tools/ci/check_rules_yml.py
        language: python
        files: '\.gitlab/ci/.+\.yml|\.gitlab-ci.yml|\.gitmodules'
        pass_filenames: false
        additional_dependencies:
          - PyYAML == 5.3.1
      - id: check-generated-rules
        name: Check rules are generated (based on .gitlab/ci/dependencies/dependencies.yml)
        entry: tools/ci/generate_rules.py
        language: python
        files: '\.gitlab/ci/dependencies/.+|\.gitlab/ci/.*\.yml'
        pass_filenames: false
        additional_dependencies:
          - PyYAML == 5.3.1
      - id: mypy-check
        name: Check type annotations in python files
        entry: tools/ci/check_type_comments.py
        additional_dependencies:
          - 'mypy==0.940'
          - 'mypy-extensions==0.4.3'
          - 'types-setuptools==57.4.14'
          - 'types-PyYAML==0.1.9'
        exclude: >
          (?x)^(
              .*_pb2.py
          )$
        language: python
        types: [python]
      - id: check-requirement-files
        name: Check requirement files
        entry: tools/ci/check_requirement_files.py
        additional_dependencies:
          - 'jsonschema'
        language: python
        files: 'tools/requirements.+|tools/requirements/.+'
        pass_filenames: false
      - id: check-tools-files-patterns
        name: Check tools dir files patterns
        entry: tools/ci/check_tools_files_patterns.py
        language: python
        files: '^tools/.+'
        additional_dependencies:
          - PyYAML == 5.3.1
        pass_filenames: false
      - id: check-rules-components-patterns
        name: check patterns-build_components in rules.yml
        entry: tools/ci/check_rules_components_patterns.py
        language: python
        files: 'components/.+|.gitlab/ci/rules.yml'
        additional_dependencies:
          - PyYAML == 5.3.1
        pass_filenames: false
      - id: check-generated-soc-caps-kconfig
        name: Check soc caps kconfig files are generated (based on components/soc/IDF_TARGET/include/soc/soc_caps.h)
        entry: tools/gen_soc_caps_kconfig/gen_soc_caps_kconfig.py -d 'components/soc/*/include/soc/' 'components/esp_rom/*/' 'components/spi_flash/*/'
        language: python
        files: 'components/soc/.+/include/soc/.+_caps\.h|components/esp_rom/.+/.+_caps\.h|kconfig\.soc_caps.in|components/spi_flash/.+/.+_caps\.h'
        pass_filenames: false
        additional_dependencies:
          - pyparsing
      - id: check-all-apps-readmes
        name: Check if all apps readme files match given .build-test-rules.yml files. Modify the supported target tables
        entry: tools/ci/check_build_test_rules.py check-readmes
        language: python
        files: 'tools/test_apps/.+|examples/.+|components/.+|tools/idf_py_actions/constants.py|tools/ci/check_build_test_rules.py'
        require_serial: true
        additional_dependencies:
          - PyYAML == 5.3.1
          - idf_build_apps~=1.0
      - id: sort-build-test-rules-ymls
        name: sort .build-test-rules.yml files
        entry: tools/ci/check_build_test_rules.py sort-yaml
        language: python
        files: '\.build-test-rules\.yml'
        additional_dependencies:
          - PyYAML == 5.3.1
          - ruamel.yaml
      - id: check-build-test-rules-path-exists
        name: check path in .build-test-rules.yml exists
        entry: tools/ci/check_build_test_rules.py check-exist
        language: python
        additional_dependencies:
          - PyYAML == 5.3.1
        always_run: true
        pass_filenames: false
        require_serial: true
      - id: cleanup-ignore-lists
        name: Remove non-existing patterns from ignore lists
        entry: tools/ci/cleanup_ignore_lists.py
        language: python
        always_run: true
        require_serial: true
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: file-contents-sorter
        files: 'tools\/ci\/(executable-list\.txt|mypy_ignore_list\.txt|check_copyright_ignore\.txt)'
  - repo: https://github.com/espressif/check-copyright/
    rev: v1.0.3
    hooks:
      - id: check-copyright
        args: ['--ignore', 'tools/ci/check_copyright_ignore.txt', '--config', 'tools/ci/check_copyright_config.yaml']
  - repo: https://github.com/espressif/conventional-precommit-linter
    rev: v1.2.1
    hooks:
      - id: conventional-precommit-linter
        stages: [commit-msg]
  - repo: https://github.com/espressif/astyle_py.git
    rev: v1.0.5
    hooks:
      - id: astyle_py
        # If you are modifying astyle version, update tools/format.sh as well
        args: ['--astyle-version=3.4.7', '--rules=tools/ci/astyle-rules.yml']
  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.5
    hooks:
      - id: shellcheck
        name: shellcheck bash
        args: ['--shell', 'bash', '-x']
|
||||
files: 'install.sh|export.sh'
|
||||
- id: shellcheck
|
||||
name: shellcheck dash (export.sh)
|
||||
args: ['--shell', 'dash', '-x']
|
||||
files: 'export.sh'
|
||||
- repo: https://github.com/espressif/esp-idf-sbom.git
|
||||
rev: v0.13.0
|
||||
hooks:
|
||||
- id: validate-sbom-manifest
|
||||
stages: [post-commit]
|
||||
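The two `no-commit-to-branch` hooks above encode the branch-naming policy: at most one slash, and no uppercase letters. A quick way to sanity-check the two `--pattern` regexes is to run them directly; this is a minimal Python sketch with made-up branch names:

    import re

    # Patterns copied from the two no-commit-to-branch hooks above.
    multi_slash = re.compile(r'^[^/]*/[^/]*/')   # matches names with more than one slash
    uppercase = re.compile(r'^[^A-Z]*[A-Z]')     # matches names containing an uppercase letter

    for branch in ('feature/wifi', 'feature/wifi/scan', 'Feature/wifi'):
        blocked = bool(multi_slash.search(branch) or uppercase.search(branch))
        print(branch, '->', 'blocked' if blocked else 'allowed')

A name is blocked when either pattern matches, which mirrors how each hook rejects a commit on a matching branch.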
.pylintrc
@@ -1,641 +0,0 @@
[MAIN]

# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no

# Clear in-memory caches upon conclusion of linting. Useful if running pylint
# in a server-like mode.
clear-cache-post-run=no

# Load and enable all available extensions. Use --list-extensions to see a list
# of all available extensions.
#enable-all-extensions=

# In error mode, messages with a category besides ERROR or FATAL are
# suppressed, and no reports are done by default. Error mode is compatible with
# disabling specific errors.
#errors-only=

# Always return a 0 (non-error) status code, even if lint errors are found.
# This is primarily useful in continuous integration scripts.
#exit-zero=

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-allow-list=

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code. (This is an alternative name to extension-pkg-allow-list
# for backward compatibility.)
extension-pkg-whitelist=

# Return non-zero exit code if any of these messages/categories are detected,
# even if score is above --fail-under value. Syntax same as enable. Messages
# specified are enabled, while categories only check already-enabled messages.
fail-on=

# Specify a score threshold under which the program will exit with error.
fail-under=10

# Interpret the stdin as a python script, whose filename needs to be passed as
# the module_or_package argument.
#from-stdin=

# Files or directories to be skipped. They should be base names, not paths.
ignore=CVS

# Add files or directories matching the regular expression patterns to the
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=

# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
# Emacs file locks.
ignore-patterns=^\.#

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use, and will cap the count on Windows to
# avoid hangs.
jobs=1

# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100

# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=

# Pickle collected data for later comparisons.
persistent=yes

# Minimum Python version to use for version dependent checks. Will default to
# the version used to run pylint.
py-version=3.8

# Discover python modules and packages in the file system subtree.
recursive=no

# Add paths to the list of the source roots. Supports globbing patterns. The
# source root is an absolute path or a path relative to the current working
# directory used to determine a package namespace for modules located under the
# source root.
source-roots=

# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

# In verbose mode, extra non-checker-related info will be displayed.
#verbose=


[BASIC]

# Naming style matching correct argument names.
argument-naming-style=snake_case

# Regular expression matching correct argument names. Overrides argument-
# naming-style. If left empty, argument names will be checked with the set
# naming style.
#argument-rgx=

# Naming style matching correct attribute names.
attr-naming-style=snake_case

# Regular expression matching correct attribute names. Overrides attr-naming-
# style. If left empty, attribute names will be checked with the set naming
# style.
#attr-rgx=

# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
          bar,
          baz,
          toto,
          tutu,
          tata

# Bad variable names regexes, separated by a comma. If names match any regex,
# they will always be refused.
bad-names-rgxs=

# Naming style matching correct class attribute names.
class-attribute-naming-style=any

# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style. If left empty, class attribute names will be checked
# with the set naming style.
#class-attribute-rgx=

# Naming style matching correct class constant names.
class-const-naming-style=UPPER_CASE

# Regular expression matching correct class constant names. Overrides class-
# const-naming-style. If left empty, class constant names will be checked with
# the set naming style.
#class-const-rgx=

# Naming style matching correct class names.
class-naming-style=PascalCase

# Regular expression matching correct class names. Overrides class-naming-
# style. If left empty, class names will be checked with the set naming style.
#class-rgx=

# Naming style matching correct constant names.
const-naming-style=UPPER_CASE

# Regular expression matching correct constant names. Overrides const-naming-
# style. If left empty, constant names will be checked with the set naming
# style.
#const-rgx=

# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1

# Naming style matching correct function names.
function-naming-style=snake_case

# Regular expression matching correct function names. Overrides function-
# naming-style. If left empty, function names will be checked with the set
# naming style.
#function-rgx=

# Good variable names which should always be accepted, separated by a comma.
good-names=i,
           j,
           k,
           ex,
           Run,
           _

# Good variable names regexes, separated by a comma. If names match any regex,
# they will always be accepted.
good-names-rgxs=

# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no

# Naming style matching correct inline iteration names.
inlinevar-naming-style=any

# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style. If left empty, inline iteration names will be checked
# with the set naming style.
#inlinevar-rgx=

# Naming style matching correct method names.
method-naming-style=snake_case

# Regular expression matching correct method names. Overrides method-naming-
# style. If left empty, method names will be checked with the set naming style.
#method-rgx=

# Naming style matching correct module names.
module-naming-style=snake_case

# Regular expression matching correct module names. Overrides module-naming-
# style. If left empty, module names will be checked with the set naming style.
#module-rgx=

# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=

# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_

# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty

# Regular expression matching correct type alias names. If left empty, type
# alias names will be checked with the set naming style.
#typealias-rgx=

# Regular expression matching correct type variable names. If left empty, type
# variable names will be checked with the set naming style.
#typevar-rgx=

# Naming style matching correct variable names.
variable-naming-style=snake_case

# Regular expression matching correct variable names. Overrides variable-
# naming-style. If left empty, variable names will be checked with the set
# naming style.
#variable-rgx=


[CLASSES]

# Warn about protected attribute access inside special methods.
check-protected-access-in-special-methods=no

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
                      __new__,
                      setUp,
                      asyncSetUp,
                      __post_init__

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs


[DESIGN]

# List of regular expressions of class ancestor names to ignore when counting
# public methods (see R0903).
exclude-too-few-public-methods=

# List of qualified class names to ignore when counting class parents (see
# R0901).
ignored-parents=

# Maximum number of arguments for function / method.
max-args=5

# Maximum number of attributes for a class (see R0902).
max-attributes=7

# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5

# Maximum number of branches for function / method body.
max-branches=12

# Maximum number of locals for function / method body.
max-locals=15

# Maximum number of parents for a class (see R0901).
max-parents=7

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of return / yield statements for function / method body.
max-returns=6

# Maximum number of statements in function / method body.
max-statements=50

# Minimum number of public methods for a class (see R0903).
min-public-methods=2


[EXCEPTIONS]

# Exceptions that will emit a warning when caught.
overgeneral-exceptions=builtins.BaseException,builtins.Exception


[FORMAT]

# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=

# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$

# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
# tab).
indent-string='    '

# Maximum number of characters on a single line.
max-line-length=160

# Maximum number of lines in a module.
max-module-lines=1000

# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no

# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no


[IMPORTS]

# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=

# Allow explicit reexports by alias from a package __init__.
allow-reexport-from-package=no

# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no

# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=

# Output a graph (.gv or any supported image format) of external dependencies
# to the given file (report RP0402 must not be disabled).
ext-import-graph=

# Output a graph (.gv or any supported image format) of all (i.e. internal and
# external) dependencies to the given file (report RP0402 must not be
# disabled).
import-graph=

# Output a graph (.gv or any supported image format) of internal dependencies
# to the given file (report RP0402 must not be disabled).
int-import-graph=

# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=

# Force import order to recognize a module as part of a third party library.
known-third-party=enchant

# Couples of modules and preferred modules, separated by a comma.
preferred-modules=


[LOGGING]

# The type of string formatting that logging methods do. `old` means using %
# formatting, `new` is for `{}` formatting.
logging-format-style=old

# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging


[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
# UNDEFINED.
confidence=HIGH,
           CONTROL_FLOW,
           INFERENCE,
           INFERENCE_FAILURE,
           UNDEFINED

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=raw-checker-failed,
        bad-inline-option,
        locally-disabled,
        file-ignored,
        suppressed-message,
        useless-suppression,
        deprecated-pragma,
        use-symbolic-message-instead,
        missing-function-docstring,  # Modified since here, include this line
        missing-class-docstring,
        missing-module-docstring,
        wrong-import-order,
        invalid-name,
        too-few-public-methods,
        too-many-locals,
        ungrouped-imports,  # since we have isort in pre-commit
        no-name-in-module,  # since we have flake8 to check this
        too-many-instance-attributes

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member


[METHOD_ARGS]

# List of qualified names (i.e., library.method) which require a timeout
# parameter, e.g. 'requests.api.get,requests.api.post'.
timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request


[MISCELLANEOUS]

# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
      XXX,
      TODO

# Regular expression of note tags to take in consideration.
notes-rgx=


[REFACTORING]

# Maximum number of nested blocks for function / method body.
max-nested-blocks=5

# Complete name of functions that never return. When checking for
# inconsistent-return-statements, if a never-returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit,argparse.parse_error


[REPORTS]

# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'fatal', 'error', 'warning', 'refactor',
# 'convention', and 'info' which contain the number of messages in each
# category, as well as 'statement' which is the total number of statements
# analyzed. This score is used by the global evaluation report (RP0004).
evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))

# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
msg-template=

# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
#output-format=

# Tells whether to display a full report or only the messages.
reports=no

# Activate the evaluation score.
score=yes


[SIMILARITIES]

# Comments are removed from the similarity computation.
ignore-comments=yes

# Docstrings are removed from the similarity computation.
ignore-docstrings=yes

# Imports are removed from the similarity computation.
ignore-imports=yes

# Signatures are removed from the similarity computation.
ignore-signatures=yes

# Minimum lines number of a similarity.
min-similarity-lines=4


[SPELLING]

# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4

# Spelling dictionary name. No dictionaries are available by default: you need
# to install both the python package and the system dependency for enchant to
# work.
spelling-dict=

# List of comma separated words that should be considered directives if they
# appear at the beginning of a comment and should not be checked.
spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:

# List of comma separated words that should not be checked.
spelling-ignore-words=

# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=

# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no


[STRING]

# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=no

# This flag controls whether the implicit-str-concat should generate a warning
# on implicit string concatenation in sequences defined over several lines.
check-str-concat-over-line-jumps=no


[TYPECHECK]

# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager

# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=

# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes

# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes

# List of symbolic message names to ignore for Mixin members.
ignored-checks-for-mixins=no-member,
                          not-async-context-manager,
                          not-context-manager,
                          attribute-defined-outside-init

# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace

# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes

# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1

# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1

# Regex pattern to define which classes are considered mixins.
mixin-class-rgx=.*[Mm]ixin

# List of decorators that change the signature of a decorated function.
signature-mutators=


[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=

# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes

# List of names allowed to shadow builtins.
allowed-redefined-builtins=

# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
          _cb

# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_

# Argument names that match this expression will be ignored.
ignored-argument-names=_.*|^ignored_|^unused_

# Tells whether we should check for unused import in __init__ files.
init-import=no

# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
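The `evaluation` expression in the [REPORTS] section above is a plain Python expression evaluated with the per-category message counts, so the scoring can be reproduced by hand. A minimal sketch with hypothetical counts:

    # Hypothetical message counts; 'statement' is the number of analyzed statements.
    fatal, error, warning, refactor, convention = 0, 1, 3, 2, 4
    statement = 200

    # Same expression as the 'evaluation' option above.
    score = max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
    print(score)  # 10.0 - (14 / 200) * 10 = 9.3

Any fatal message forces the score to 0; otherwise each error costs five times as much as a warning, refactor, or convention message, scaled by the size of the code base.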
.readthedocs.yml
@@ -1,21 +0,0 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Optionally build your docs in additional formats such as PDF and ePub
formats:
  - pdf

# Optionally set the version of Python and requirements required to build your docs
python:
  version: 2.7
  install:
    - requirements: docs/requirements.txt

# We need to list all the submodules included in the documentation build by Doxygen
submodules:
  include:
    - components/mqtt/esp-mqtt
.shellcheckrc
@@ -1,5 +0,0 @@
# This is the shellcheck config file
# Files that are checked: install.sh, export.sh

# Do not complain about variables determined at runtime (IDF_PATH)
disable=SC1090
.vale.ini
@@ -1,119 +0,0 @@
###################
### Vale Config ###
###################

# This is a Vale linter configuration file.
# - Repo: esp-idf
# - Based on Default config: v0-1-1
# It lists all necessary parameters to configure Vale for your project.
# For official documentation on all config settings, see
# https://vale.sh/docs/topics/config


##############
### Global ###
##############

# This section lists core settings applying to Vale itself.


# Specify the path to external resources (e.g., styles and vocab files).
# The path value may be absolute or relative to this configuration file.
StylesPath = .vale/styles


# Specify the minimum alert severity that Vale will report.
MinAlertLevel = suggestion # "suggestion", "warning", or "error"


# Specify vocabulary for special treatment.
# Create a folder in <StylesPath>/Vocab/<name>/ and add its name here.
# The folder should contain two files:
# - accept.txt -- lists words with accepted case-sensitive spelling
# - reject.txt -- lists words whose occurrences throw an error
# Vocab = Espressif


# Specify the packages to import into your project.
# A package is a zip file containing a number of rules (style) written in YAML.
# For a list of official packages, see Package Hub at https://vale.sh/hub/
# For official documentation on packages, see
# https://vale.sh/docs/topics/packages/
# Before linting, navigate to your project and run `vale sync` to download
# the official packages specified below.
# Packages = Package1, Package2, \
#            https://example.com/path/to/package/Package.zip
Packages = Google, Microsoft, RedHat, \
           https://dl.espressif.com/dl/esp-vale-config/Espressif-latest.zip


###############
### Formats ###
###############

# This section enables association of "unknown" formats with the ones
# supported by Vale. For official documentation on supported formats, see
# https://vale.sh/docs/topics/scoping/
[formats]

# For example, treat MDX files as Markdown files.
# mdx = md


################################
### Format-specific settings ###
################################

# This section lists the settings that apply to specific file formats
# based on their glob pattern.
# Settings provided under a more specific glob pattern,
# such as [*.{md,txt}], will override those in [*].
[*.{md,rst}]


# Enable styles to activate all rules included in them.
# BasedOnStyles = Style1, Style2
BasedOnStyles = Vale, Espressif-latest


### Deactivate individual rules ###
### in enabled styles.
# Style1.Rule1 = NO
Vale.Repetition = NO
Vale.Spelling = NO
Espressif-latest.Admonitions = NO
Espressif-latest.Contractions = NO
Espressif-latest.Monospace = NO
Espressif-latest.PascalCamelCase = NO


### Change the default severity level ###
### of an activated rule.
# Choose between "suggestion", "warning", or "error".
# Style1.Rule2 = error


### Activate individual rules ###
### in non-enabled styles stored in <StylesPath>.
# Style1.Rule = YES
Google.Gender = YES
Google.GenderBias = YES
Google.Slang = YES
Google.Spacing = YES
Microsoft.DateNumbers = YES
Microsoft.Ellipses = YES
Microsoft.FirstPerson = YES
Microsoft.Hyphens = YES
Microsoft.Ordinal = YES
Microsoft.OxfordComma = YES
Microsoft.Percentages = YES
Microsoft.RangeTime = YES
Microsoft.Semicolon = YES
Microsoft.SentenceLength = YES
Microsoft.Suspended = YES
Microsoft.Units = YES
Microsoft.URLFormat = YES
Microsoft.We = YES
Microsoft.Wordiness = YES
RedHat.Contractions = YES
RedHat.RepeatedWords = YES
CMakeLists.txt
@@ -1,283 +0,0 @@
cmake_minimum_required(VERSION 3.16)
project(esp-idf C CXX ASM)

if(CMAKE_CURRENT_LIST_DIR STREQUAL CMAKE_SOURCE_DIR)
    message(FATAL_ERROR "Current directory '${CMAKE_CURRENT_LIST_DIR}' is not buildable. "
        "Change directories to one of the example projects in '${CMAKE_CURRENT_LIST_DIR}/examples' and try "
        "again.")
endif()

# Variables compile_options, c_compile_options, cxx_compile_options, compile_definitions, link_options shall
# not be unset as they may already contain flags, set by toolchain-TARGET.cmake files.

# Add the following build specifications here, since these seem to be dependent
# on config values on the root Kconfig.

if(NOT BOOTLOADER_BUILD)

    if(CONFIG_COMPILER_OPTIMIZATION_SIZE)
        list(APPEND compile_options "-Os")
        if(CMAKE_C_COMPILER_ID MATCHES "GNU")
            list(APPEND compile_options "-freorder-blocks")
        endif()
    elseif(CONFIG_COMPILER_OPTIMIZATION_DEBUG)
        list(APPEND compile_options "-Og")
        if(CMAKE_C_COMPILER_ID MATCHES "GNU" AND NOT CONFIG_IDF_TARGET_LINUX)
            list(APPEND compile_options "-fno-shrink-wrap")  # Disable shrink-wrapping to reduce binary size
        endif()
    elseif(CONFIG_COMPILER_OPTIMIZATION_NONE)
        list(APPEND compile_options "-O0")
    elseif(CONFIG_COMPILER_OPTIMIZATION_PERF)
        list(APPEND compile_options "-O2")
    endif()

else()  # BOOTLOADER_BUILD

    if(CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE)
        list(APPEND compile_options "-Os")
        if(CMAKE_C_COMPILER_ID MATCHES "GNU")
            list(APPEND compile_options "-freorder-blocks")
        endif()
    elseif(CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_DEBUG)
        list(APPEND compile_options "-Og")
        if(CMAKE_C_COMPILER_ID MATCHES "GNU" AND NOT CONFIG_IDF_TARGET_LINUX)
            list(APPEND compile_options "-fno-shrink-wrap")  # Disable shrink-wrapping to reduce binary size
        endif()
    elseif(CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_NONE)
        list(APPEND compile_options "-O0")
    elseif(CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_PERF)
        list(APPEND compile_options "-O2")
    endif()

endif()

if(CONFIG_COMPILER_CXX_EXCEPTIONS)
    list(APPEND cxx_compile_options "-fexceptions")
else()
    list(APPEND cxx_compile_options "-fno-exceptions")
endif()

if(CONFIG_COMPILER_CXX_RTTI)
    list(APPEND cxx_compile_options "-frtti")
else()
    list(APPEND cxx_compile_options "-fno-rtti")
    list(APPEND link_options "-fno-rtti")  # used to invoke the correct multilib variant (no-rtti) during linking
endif()

if(CONFIG_COMPILER_SAVE_RESTORE_LIBCALLS)
    list(APPEND compile_options "-msave-restore")
endif()

if(CMAKE_C_COMPILER_ID MATCHES "GNU")
    list(APPEND c_compile_options "-Wno-old-style-declaration")
endif()

# Clang finds some warnings in IDF code which GCC doesn't.
# All these warnings should be fixed before Clang is presented
# as a toolchain choice for users.
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
    # Clang checks Doxygen comments for being in sync with function prototype.
    # There are some inconsistencies, especially in ROM headers.
    list(APPEND compile_options "-Wno-documentation")
    # GCC allows repeated typedefs when the source and target types are the same.
    # Clang doesn't allow this. This occurs in many components due to forward
    # declarations.
    list(APPEND compile_options "-Wno-typedef-redefinition")
    # This issue is seemingly related to newlib's char type functions.
    # Fix is not clear yet.
    list(APPEND compile_options "-Wno-char-subscripts")
    # Clang seems to notice format string issues which GCC doesn't.
    list(APPEND compile_options "-Wno-format-security")
    # Logic bug in essl component
    list(APPEND compile_options "-Wno-tautological-overlap-compare")
    # Some pointer checks in mDNS component check addresses which can't be NULL
    list(APPEND compile_options "-Wno-tautological-pointer-compare")
    # Similar to the above, in tcp_transport
    list(APPEND compile_options "-Wno-pointer-bool-conversion")
    # mbedTLS md5.c triggers this warning in md5_test_buf (false positive)
    list(APPEND compile_options "-Wno-string-concatenation")
    # multiple cases of implicit conversions between unrelated enum types
    list(APPEND compile_options "-Wno-enum-conversion")
    # When IRAM_ATTR is specified both in function declaration and definition,
    # it produces different section names, since section names include __COUNTER__.
    # Occurs in multiple places.
    list(APPEND compile_options "-Wno-section")
    # Multiple cases of attributes unknown to clang, for example
    # __attribute__((optimize("-O3")))
    list(APPEND compile_options "-Wno-unknown-attributes")
    # Disable Clang warnings for atomic operations with access size
    # more than 4 bytes
    list(APPEND compile_options "-Wno-atomic-alignment")
    # several warnings in wpa_supplicant component
    list(APPEND compile_options "-Wno-unused-but-set-variable")
    # Clang also produces many -Wunused-function warnings which GCC doesn't.
    list(APPEND compile_options "-Wno-unused-function")
    # many warnings in bluedroid code
    # warning: field 'hdr' with variable sized type 'BT_HDR' not at the end of a struct or class is a GNU extension
    list(APPEND compile_options "-Wno-gnu-variable-sized-type-not-at-end")
    # several warnings in bluedroid code
    list(APPEND compile_options "-Wno-constant-logical-operand")
    # warning: '_Static_assert' with no message is a C2x extension
    list(APPEND compile_options "-Wno-c2x-extensions")
    # warning on xMPU_SETTINGS for esp32s2, which has size 0 for C and 1 for C++
    list(APPEND compile_options "-Wno-extern-c-compat")
    # warning: implicit truncation from 'int' to a one-bit wide bit-field changes value from 1 to -1
    list(APPEND compile_options "-Wno-single-bit-bitfield-constant-conversion")
endif()
# More warnings may exist in unit tests and example projects.

if(CONFIG_COMPILER_WARN_WRITE_STRINGS)
    list(APPEND compile_options "-Wwrite-strings")
endif()

if(CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE)
    list(APPEND compile_definitions "-DNDEBUG")
endif()

if(CONFIG_COMPILER_STACK_CHECK_MODE_NORM)
    list(APPEND compile_options "-fstack-protector")
elseif(CONFIG_COMPILER_STACK_CHECK_MODE_STRONG)
    list(APPEND compile_options "-fstack-protector-strong")
elseif(CONFIG_COMPILER_STACK_CHECK_MODE_ALL)
    list(APPEND compile_options "-fstack-protector-all")
endif()

if(CONFIG_COMPILER_DUMP_RTL_FILES)
    list(APPEND compile_options "-fdump-rtl-expand")
endif()

if(NOT ${CMAKE_C_COMPILER_VERSION} VERSION_LESS 8.0.0)
    if(CONFIG_COMPILER_HIDE_PATHS_MACROS)
        list(APPEND compile_options "-fmacro-prefix-map=${CMAKE_SOURCE_DIR}=.")
        list(APPEND compile_options "-fmacro-prefix-map=${IDF_PATH}=/IDF")
    endif()

    if(CONFIG_APP_REPRODUCIBLE_BUILD)
        idf_build_set_property(DEBUG_PREFIX_MAP_GDBINIT "${BUILD_DIR}/prefix_map_gdbinit")

        list(APPEND compile_options "-fdebug-prefix-map=${IDF_PATH}=/IDF")
        list(APPEND compile_options "-fdebug-prefix-map=${PROJECT_DIR}=/IDF_PROJECT")
        list(APPEND compile_options "-fdebug-prefix-map=${BUILD_DIR}=/IDF_BUILD")

        # component dirs
        idf_build_get_property(python PYTHON)
        idf_build_get_property(idf_path IDF_PATH)
        idf_build_get_property(component_dirs BUILD_COMPONENT_DIRS)

        execute_process(
            COMMAND ${python}
            "${idf_path}/tools/generate_debug_prefix_map.py"
            "${BUILD_DIR}"
            "${component_dirs}"
            OUTPUT_VARIABLE result
            RESULT_VARIABLE ret
        )
        if(NOT ret EQUAL 0)
            message(FATAL_ERROR "This is a bug. Please report to https://github.com/espressif/esp-idf/issues")
        endif()

        spaces2list(result)
        list(LENGTH component_dirs length)
        math(EXPR max_index "${length} - 1")
        foreach(index RANGE ${max_index})
            list(GET component_dirs ${index} folder)
            list(GET result ${index} after)
            list(APPEND compile_options "-fdebug-prefix-map=${folder}=${after}")
        endforeach()
    endif()
endif()

if(CONFIG_COMPILER_DISABLE_GCC12_WARNINGS)
    list(APPEND compile_options "-Wno-address"
                                "-Wno-use-after-free")
endif()

if(CONFIG_COMPILER_DISABLE_GCC13_WARNINGS)
    list(APPEND compile_options "-Wno-xor-used-as-pow")
    list(APPEND c_compile_options "-Wno-enum-int-mismatch")
    list(APPEND cxx_compile_options "-Wno-self-move"
                                    "-Wno-dangling-reference")
endif()

# GCC-specific options
if(CMAKE_C_COMPILER_ID STREQUAL "GNU")
    list(APPEND compile_options "-fstrict-volatile-bitfields")
endif()

if(CONFIG_ESP_SYSTEM_USE_EH_FRAME)
    list(APPEND compile_options "-fasynchronous-unwind-tables")
    list(APPEND link_options "-Wl,--eh-frame-hdr")
endif()

list(APPEND link_options "-fno-lto")

if(CONFIG_IDF_TARGET_LINUX AND CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin")
    list(APPEND link_options "-Wl,-dead_strip")
    list(APPEND link_options "-Wl,-warn_commons")
else()
    list(APPEND link_options "-Wl,--gc-sections")
    list(APPEND link_options "-Wl,--warn-common")
endif()

# SMP FreeRTOS user-provided minimal idle hook. This allows the user to provide
# their own copy of vApplicationMinimalIdleHook()
if(CONFIG_FREERTOS_USE_MINIMAL_IDLE_HOOK)
    list(APPEND link_options "-Wl,--wrap=vApplicationMinimalIdleHook")
endif()

# Placing jump tables in flash would cause issues with code that is required
# to be placed in IRAM
list(APPEND compile_options "-fno-jump-tables")
if(CMAKE_C_COMPILER_ID MATCHES "GNU")
    # This flag is GCC-specific.
    # Not clear yet if some other flag should be used for Clang.
    list(APPEND compile_options "-fno-tree-switch-conversion")
endif()

if(CMAKE_C_COMPILER_ID MATCHES "Clang")
    list(APPEND compile_options "-fno-use-cxa-atexit")
endif()

if(COMPILER_RT_LIB_NAME)
    list(APPEND link_options "-rtlib=${CONFIG_COMPILER_RT_LIB_NAME}")
endif()

# For the transition period from 32-bit time_t to 64-bit time_t,
# auto-detect the size of this type and set the corresponding variable.
include(CheckTypeSize)
check_type_size("time_t" TIME_T_SIZE)
if(TIME_T_SIZE)
    idf_build_set_property(TIME_T_SIZE ${TIME_T_SIZE})
else()
    message(FATAL_ERROR "Failed to determine sizeof(time_t)")
endif()

idf_build_set_property(COMPILE_OPTIONS "${compile_options}" APPEND)
idf_build_set_property(C_COMPILE_OPTIONS "${c_compile_options}" APPEND)
idf_build_set_property(CXX_COMPILE_OPTIONS "${cxx_compile_options}" APPEND)
idf_build_set_property(ASM_COMPILE_OPTIONS "${asm_compile_options}" APPEND)
idf_build_set_property(COMPILE_DEFINITIONS "${compile_definitions}" APPEND)
idf_build_set_property(LINK_OPTIONS "${link_options}" APPEND)

idf_build_get_property(build_component_targets __BUILD_COMPONENT_TARGETS)

# Add each component as a subdirectory, processing each component's CMakeLists.txt
foreach(component_target ${build_component_targets})
    __component_get_property(dir ${component_target} COMPONENT_DIR)
    __component_get_property(_name ${component_target} COMPONENT_NAME)
    __component_get_property(prefix ${component_target} __PREFIX)
    __component_get_property(alias ${component_target} COMPONENT_ALIAS)
    set(COMPONENT_NAME ${_name})
    set(COMPONENT_DIR ${dir})
    set(COMPONENT_ALIAS ${alias})
    set(COMPONENT_PATH ${dir})  # for backward compatibility only, COMPONENT_DIR is preferred
    idf_build_get_property(build_prefix __PREFIX)
    set(__idf_component_context 1)
    if(NOT prefix STREQUAL build_prefix)
        add_subdirectory(${dir} ${prefix}_${_name})
    else()
        add_subdirectory(${dir} ${_name})
    endif()
    set(__idf_component_context 0)
endforeach()
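In the `CONFIG_APP_REPRODUCIBLE_BUILD` branch above, tools/generate_debug_prefix_map.py returns one placeholder path per component directory, and the `foreach()` loop pairs them up into `-fdebug-prefix-map` flags. Conceptually the pairing is just a zip; a sketch with hypothetical directories and placeholders (the real placeholder names come from the script, not from this code):

    # Hypothetical component directories and the placeholders substituted for them.
    component_dirs = ['/home/user/esp-idf/components/esp_wifi',
                      '/home/user/project/components/app']
    placeholders = ['/IDF/components/esp_wifi', '/IDF_PROJECT/components/app']

    compile_options = [f'-fdebug-prefix-map={folder}={after}'
                       for folder, after in zip(component_dirs, placeholders)]
    print(compile_options)

Stripping absolute build paths this way keeps debug info identical across build machines, which is the point of a reproducible build.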
COMPATIBILITY.md
@@ -1,101 +0,0 @@
# Compatibility Between ESP-IDF Releases and Revisions of Espressif SoCs

* [中文版](./COMPATIBILITY_CN.md)

Espressif keeps improving the performance of its SoCs by providing new chip revisions. However, some of the improvements require special software support, and some of that software support is even mandatory for the chip revision to run normally.

This document describes the compatibility between ESP-IDF releases and Espressif SoC revisions.

NOTE: This document on release branches may be out-of-date. Check the [Compatibility file on master](https://github.com/espressif/esp-idf/blob/master/COMPATIBILITY.md) for the most accurate information.

See [Compatibility Advisory for Chip Revision Numbering Scheme](https://www.espressif.com.cn/sites/default/files/advisory_downloads/AR2022-005%20Compatibility%20Advisory%20for%20Chip%20Revision%20Numbering%20%20Scheme.pdf) on the versioning of Espressif SoC revisions.

You can run `esptool chip_id` to detect the series and revision of an SoC. See [SoC Errata](https://www.espressif.com.cn/en/support/documents/technical-documents?keys=errata) for more on how to distinguish between chip revisions and on the improvements each revision provides. Run `idf.py --version` to check the current ESP-IDF version.

## ESP-IDF Support for Different Chip Revisions

The sections below show which ESP-IDF versions each chip revision requires. Each chip revision corresponds to specific `Recommended` and `Required` versions of ESP-IDF:

- `Recommended`: the ESP-IDF version from which you can make use of all the improvements of the chip revision. When running a binary compiled with an ESP-IDF version below the `Recommended` version for a chip revision, the software may not benefit from the bugfixes/features provided by that revision, and the chip will behave almost the same as its previous revision.

- `Required`: the minimum ESP-IDF version required to run the chip revision normally. A binary compiled with a version below the `Required` one may behave unpredictably.

Even when your ESP-IDF version is higher than the `Recommended` version for the chip, so the software can already make use of all the features of the chip revision, it is still recommended to use the latest bugfix version of the release branch. The latest bugfix version fixes a number of issues and helps improve product stability.

For example, suppose we have a chip whose `Required`/`Recommended` versions on the `release/v5.1` branch are `v5.1.2`/`v5.1.4`, and the latest release on that branch is `v5.1.6`. Then the chip will not boot up, or will behave unpredictably, with ESP-IDF `v5.1`-`v5.1.1`, and the application may not make use of all the benefits of the chip when running with ESP-IDF `v5.1.2` or `v5.1.3`. Though `v5.1.4` supports the chip revision well, it is still recommended to upgrade ESP-IDF to `v5.1.6`.

### ESP32

#### v0.0, v1.0, v3.0

Supported since the initial version of ESP-IDF.

#### v1.1

To be added.

#### v2.0

To be added.

#### v3.1

To be added.

### ESP32-S2

#### v0.0

Supported since ESP-IDF v4.2.

#### v1.0

| Release branch          | Recommended | Required |
|-------------------------|-------------|----------|
| release/v4.2            | v4.2.3      | v4.2.3   |
| release/v4.3            | v4.3.3      | v4.3.3   |
| release/v4.4            | v4.4.6      | v4.4.1   |
| release/v5.0            | v5.0.4      | v5.0     |
| release/v5.1            | v5.1.2      | v5.1     |
| release/v5.2 and above  | v5.2        | v5.2     |

### ESP32-C3

#### v0.2, v0.3

Supported since ESP-IDF v4.3.

#### v0.4

To be added.

### ESP32-S3

#### v0.1

Supported since ESP-IDF v4.4.

#### v0.2

To be added.

### ESP32-C2 & ESP8684

#### v1.0

Supported since ESP-IDF v5.0.

#### v1.1

To be added.

#### v1.2

To be added.


## What If the ESP-IDF Version Is Lower than the `Required` Version?

Recent ESP-IDF versions can prevent binaries from being downloaded to, or even executed on, unsupported chips. ESP-IDF v4.4.5+, v5.0.1+, and v5.1 and above have both an esptool download check and a bootloader loading check against the chip revision, while ESP-IDF v4.3.5 has only the esptool download check.

Earlier ESP-IDF versions have no such checks; a chip whose revision is incompatible with the ESP-IDF version it runs will behave unpredictably.
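The `Required`/`Recommended` rule from the sections above reduces to two version comparisons. A minimal sketch, using the hypothetical `release/v5.1` numbers from the example paragraph:

    # Support table for one chip revision on release/v5.1 (from the example above).
    REQUIRED = (5, 1, 2)
    RECOMMENDED = (5, 1, 4)

    def classify(idf_version: tuple) -> str:
        """Classify an ESP-IDF version against one chip revision's support table."""
        if idf_version < REQUIRED:
            return 'below Required: chip may not boot or may behave unpredictably'
        if idf_version < RECOMMENDED:
            return 'runs, but not all chip-revision improvements are used'
        return 'fully supported (prefer the latest bugfix release)'

    for version in ((5, 1, 1), (5, 1, 3), (5, 1, 6)):
        print(version, '->', classify(version))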
COMPATIBILITY_CN.md
@@ -1,101 +0,0 @@
# Compatibility Between ESP-IDF Releases and Revisions of Espressif SoCs

* [English Version](./COMPATIBILITY.md)

To keep improving chip performance, Espressif releases new revisions of its chips. However, some performance improvements in new chip revisions depend on dedicated software support, and sometimes a new chip revision requires a certain software version to run normally.

This document describes the compatibility between specific ESP-IDF releases and Espressif chip revisions.

Note: the compatibility document on a release branch may not be up to date. See the [compatibility file on the master branch](https://github.com/espressif/esp-idf/blob/master/COMPATIBILITY_CN.md) for the latest information.

For how Espressif chip revisions are numbered, see the [Compatibility Advisory for Chip Revision Numbering Scheme](https://www.espressif.com/sites/default/files/advisory_downloads/AR2022-005%20%E5%85%B3%E4%BA%8E%E8%8A%AF%E7%89%87%E7%89%88%E6%9C%AC%E7%BC%96%E7%A0%81%E6%96%B9%E5%BC%8F%20%28Chip%20Revision%29%20%E7%9A%84%E5%85%BC%E5%AE%B9%E6%80%A7%E5%85%AC%E5%91%8A.pdf).

Run `esptool chip_id` to see the chip series and its revision. For more information on distinguishing chip revisions and on what each revision improves, see the [chip errata](https://www.espressif.com.cn/zh-hans/support/documents/technical-documents?keys=%E5%8B%98%E8%AF%AF%E8%A1%A8). Run `idf.py --version` to see the current ESP-IDF version.

## ESP-IDF Support for Each Chip Revision

The sections below describe ESP-IDF support for each chip revision. Each chip revision has a corresponding ESP-IDF `Recommended` version and `Required` version:

- `Recommended` version: the ESP-IDF version from which the software can make use of the performance improvements of the chip revision. If binaries are compiled with an ESP-IDF version lower than the `Recommended` version, the software may not benefit from the bugfixes or new features of that chip revision, and the chip will behave almost the same as its previous revision.

- `Required` version: the minimum ESP-IDF version needed for the chip revision to run normally. If binaries are compiled with an ESP-IDF version lower than the `Required` version, the chip may behave unpredictably.

Even if the software version is already higher than the `Recommended` version for the chip revision and can make use of all its features, we still suggest upgrading to the latest bugfix version on the release branch. A new bugfix version fixes a number of issues and helps improve product stability.

For example, for a given chip revision whose `Required` and `Recommended` versions on the `release/v5.1` branch are `v5.1.2` and `v5.1.4` respectively, with the latest version on that branch being `v5.1.6`: with ESP-IDF `v5.1` - `v5.1.1` the chip will not boot or will behave unpredictably, and with ESP-IDF `v5.1.2` or `v5.1.3` the application may not be able to use all of the chip's capabilities. Although `v5.1.4` already supports this chip revision, upgrading ESP-IDF to `v5.1.6` is still recommended.

### ESP32

#### v0.0, v1.0, and v3.0

Supported since the initial version of ESP-IDF.

#### v1.1

To be added.

#### v2.0

To be added.

#### v3.1

To be added.

### ESP32-S2

#### v0.0

Supported since ESP-IDF v4.2.

#### v1.0

| Release branch          | Recommended | Required |
|-------------------------|-------------|----------|
| release/v4.2            | v4.2.3      | v4.2.3   |
| release/v4.3            | v4.3.3      | v4.3.3   |
| release/v4.4            | v4.4.6      | v4.4.1   |
| release/v5.0            | v5.0.4      | v5.0     |
| release/v5.1            | v5.1.2      | v5.1     |
| release/v5.2 and above  | v5.2        | v5.2     |

### ESP32-C3

#### v0.2 and v0.3

Supported since ESP-IDF v4.3.

#### v0.4

To be added.

### ESP32-S3

#### v0.1

Supported since ESP-IDF v4.4.

#### v0.2

To be added.

### ESP32-C2 & ESP8684

#### v1.0

Supported since ESP-IDF v5.0.

#### v1.1

To be added.

#### v1.2

To be added.


## What Happens If the ESP-IDF Version Is Lower than the `Required` Version?

With the latest ESP-IDF versions, the software blocks downloading binaries to unsupported chip revisions and can even prevent binaries from executing on unsupported chip revisions. ESP-IDF v4.4.5+, v5.0.1+, and v5.1 and above all support an esptool download check and a bootloader loading check against the chip revision, while ESP-IDF v4.3.5 supports only the esptool download check.

Earlier ESP-IDF versions have no such checks; if the software is incompatible with the chip revision, the chip may behave unpredictably when running it.
CONTRIBUTING.md
@@ -1,6 +0,0 @@
# Contributing to ESP-IDF

Contributions to ESP-IDF - fixing bugs, adding features, adding documentation - are welcome! We accept contributions via Github Pull Requests.

Please see the [Contributions Guide](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/contribute/index.html) for more information.
CONTRIBUTING.rst (new file)
@@ -0,0 +1,55 @@
Contributions Guide
===================

We welcome contributions to the esp-idf project!

How to Contribute
-----------------

Contributions to esp-idf - fixing bugs, adding features, adding documentation - are welcome. We accept contributions via `Github Pull Requests <https://help.github.com/articles/about-pull-requests/>`_.

Before Contributing
-------------------

Before sending us a Pull Request, please consider this list of points:

* Is the contribution entirely your own work, or already licensed under an Apache License 2.0 compatible Open Source License? If not then we unfortunately cannot accept it.

* Does any new code conform to the esp-idf :doc:`Style Guide <style-guide>`?

* Does the code documentation follow requirements in :doc:`documenting-code`?

* Is the code adequately commented for people to understand how it is structured?

* Is there documentation or examples that go with code contributions? There are additional suggestions for writing good examples in :idf:`examples` readme.

* Are comments and documentation written in clear English, with no spelling or grammar errors?

* If the contribution contains multiple commits, are they grouped together into logical changes (one major change per pull request)? Are any commits with names like "fixed typo" `squashed into previous commits <http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit/>`_?

* If you're unsure about any of these points, please open the Pull Request anyhow and then ask us for feedback.

Pull Request Process
--------------------

After you open the Pull Request, there will probably be some discussion in the comments field of the request itself.

Once the Pull Request is ready to merge, it will first be merged into our internal git system for in-house automated testing.

If this process passes, it will be merged onto the public github repository.

Legal Part
----------

Before a contribution can be accepted, you will need to sign our :doc:`contributor-agreement`. You will be prompted for this automatically as part of the Pull Request process.

Related Documents
-----------------

.. toctree::
   :maxdepth: 1

   style-guide
   documenting-code
   ../api-reference/template
   contributor-agreement
738	Kconfig

@@ -1,593 +1,155 @@
#
# Please run the following command for opening a page with more information about this configuration file:
# idf.py docs -sp api-reference/kconfig.html
# For a description of the syntax of this configuration file,
# see kconfig/kconfig-language.txt.
#
mainmenu "Espressif IoT Development Framework Configuration"

orsource "./components/soc/$IDF_TARGET/include/soc/Kconfig.soc_caps.in"

config IDF_CMAKE
    bool
    default "y"

config IDF_ENV_FPGA
    bool
    option env="IDF_ENV_FPGA"
    help
        This option is for internal use only.
        Enabling this option will help enable all FPGA support so as to
        run ESP-IDF on an FPGA. This can help reproduce some issues that
        only happen on an FPGA, or when you have to burn some
        efuses multiple times.

config IDF_ENV_BRINGUP
    bool
    default "y" if IDF_TARGET_ESP32P4
    help
        This option is ONLY used when doing new chip bringup.
        This option will only enable necessary hw / sw settings for running
        a hello_world application.


config IDF_CI_BUILD
    bool
    default y if "$(IDF_CI_BUILD)" = "y" || "$(IDF_CI_BUILD)" = 1

config IDF_DOC_BUILD
    bool
    default y if "$(IDF_DOC_BUILD)" = "y" || "$(IDF_DOC_BUILD)" = 1

config IDF_TOOLCHAIN
    # This option records the IDF toolchain when sdkconfig is generated the first time.
    # It is not updated if environment variable $IDF_TOOLCHAIN changes later, and
    # the build system is responsible for detecting the mismatch between
    # CONFIG_IDF_TOOLCHAIN and $IDF_TOOLCHAIN.
    string
    default "$IDF_TOOLCHAIN"

config IDF_TOOLCHAIN_CLANG
    bool
    default "y" if IDF_TOOLCHAIN="clang"

config IDF_TARGET_ARCH_RISCV
    bool
    default "n"

config IDF_TARGET_ARCH_XTENSA
    bool
    default "n"

config IDF_TARGET_ARCH
    string
    default "riscv" if IDF_TARGET_ARCH_RISCV
    default "xtensa" if IDF_TARGET_ARCH_XTENSA

config IDF_TARGET
    # This option records the IDF target when sdkconfig is generated the first time.
    # It is not updated if environment variable $IDF_TARGET changes later, and
    # the build system is responsible for detecting the mismatch between
    # CONFIG_IDF_TARGET and $IDF_TARGET.
    string
    default "$IDF_TARGET"

config IDF_INIT_VERSION
    # This option records the IDF version when sdkconfig is generated the first time.
    # It is not updated if environment variable $IDF_VERSION changes later
    string
    default "$IDF_INIT_VERSION"

config IDF_TARGET_LINUX
    bool
    default "y" if IDF_TARGET="linux"

config IDF_TARGET_ESP32
    bool
    default "y" if IDF_TARGET="esp32"
    select IDF_TARGET_ARCH_XTENSA

config IDF_TARGET_ESP32S2
    bool
    default "y" if IDF_TARGET="esp32s2"
    select FREERTOS_UNICORE
    select IDF_TARGET_ARCH_XTENSA

config IDF_TARGET_ESP32S3
    bool
    default "y" if IDF_TARGET="esp32s3"
    select IDF_TARGET_ARCH_XTENSA

config IDF_TARGET_ESP32C3
    bool
    default "y" if IDF_TARGET="esp32c3"
    select FREERTOS_UNICORE
    select IDF_TARGET_ARCH_RISCV

config IDF_TARGET_ESP32C2
    bool
    default "y" if IDF_TARGET="esp32c2"
    select FREERTOS_UNICORE
    select IDF_TARGET_ARCH_RISCV

config IDF_TARGET_ESP32C6
    bool
    default "y" if IDF_TARGET="esp32c6"
    select FREERTOS_UNICORE
    select IDF_TARGET_ARCH_RISCV

config IDF_TARGET_ESP32P4
    bool
    default "y" if IDF_TARGET="esp32p4"
    select IDF_TARGET_ARCH_RISCV

config IDF_TARGET_ESP32H2
    bool
    default "y" if IDF_TARGET="esp32h2"
    select FREERTOS_UNICORE
    select IDF_TARGET_ARCH_RISCV

config IDF_TARGET_LINUX
    bool
    default "y" if IDF_TARGET="linux"

config IDF_FIRMWARE_CHIP_ID
    hex
    default 0x0000 if IDF_TARGET_ESP32
    default 0x0002 if IDF_TARGET_ESP32S2
    default 0x0005 if IDF_TARGET_ESP32C3
    default 0x0009 if IDF_TARGET_ESP32S3
    default 0x000C if IDF_TARGET_ESP32C2
    default 0x000D if IDF_TARGET_ESP32C6
    default 0x0010 if IDF_TARGET_ESP32H2
    default 0x0012 if IDF_TARGET_ESP32P4
    default 0xFFFF


menu "Build type"

choice APP_BUILD_TYPE
    prompt "Application build type"
    default APP_BUILD_TYPE_APP_2NDBOOT
    help
        Select the way the application is built.

        By default, the application is built as a binary file in a format compatible with
        the ESP-IDF bootloader. In addition to this application, a 2nd stage bootloader is
        also built. Application and bootloader binaries can be written into flash and
        loaded/executed from there.

        Another option, useful for only very small and limited applications, is to only link
        the .elf file of the application, such that it can be loaded directly into RAM over
        JTAG or UART. Note that since IRAM and DRAM sizes are very limited, it is not possible
        to build any complex application this way. However, for some kinds of testing and debugging,
        this option may provide faster iterations, since the application does not need to be
        written into flash.

        Note: when APP_BUILD_TYPE_RAM is selected and loaded with JTAG, ESP-IDF does not contain
        all the startup code required to initialize the CPUs and ROM memory (data/bss).
        Therefore it is necessary to execute a bit of ROM code prior to executing the application.
        A gdbinit file may look as follows (for ESP32):

            # Connect to a running instance of OpenOCD
            target remote :3333
            # Reset and halt the target
            mon reset halt
            # Run to a specific point in ROM code,
            # where most of initialization is complete.
            thb *0x40007d54
            c
            # Load the application into RAM
            load
            # Run till app_main
            tb app_main
            c

        Execute this gdbinit file as follows:

            xtensa-esp32-elf-gdb build/app-name.elf -x gdbinit

        Example gdbinit files for other targets can be found in tools/test_apps/system/gdb_loadable_elf/

        When loading the BIN with UART, the ROM will jump to RAM and run the app after finishing the ROM
        startup code, so there's no additional startup initialization required. You can use the
        `load_ram` command in esptool.py to load the generated .bin file into RAM and execute it.

        Example:

            esptool.py --chip {chip} -p {port} -b {baud} --no-stub load_ram {app.bin}

        Recommended sdkconfig.defaults for building loadable ELF files is as follows.
        CONFIG_APP_BUILD_TYPE_RAM is required, other options help reduce application
        memory footprint.

            CONFIG_APP_BUILD_TYPE_RAM=y
            CONFIG_VFS_SUPPORT_TERMIOS=
            CONFIG_NEWLIB_NANO_FORMAT=y
            CONFIG_ESP_SYSTEM_PANIC_PRINT_HALT=y
            CONFIG_ESP_DEBUG_STUBS_ENABLE=
            CONFIG_ESP_ERR_TO_NAME_LOOKUP=


config APP_BUILD_TYPE_APP_2NDBOOT
    bool
    prompt "Default (binary application + 2nd stage bootloader)"
    depends on !IDF_TARGET_LINUX
    select APP_BUILD_GENERATE_BINARIES
    select APP_BUILD_BOOTLOADER
    select APP_BUILD_USE_FLASH_SECTIONS

config APP_BUILD_TYPE_RAM
    bool
    prompt "Build app runs entirely in RAM (EXPERIMENTAL)"
    select APP_BUILD_GENERATE_BINARIES

endchoice # APP_BUILD_TYPE

# Hidden options, set according to the choice above
config APP_BUILD_GENERATE_BINARIES
    bool # Whether to generate .bin files or not

config APP_BUILD_BOOTLOADER
    bool # Whether to build the bootloader

config APP_BUILD_TYPE_PURE_RAM_APP
    bool
    prompt "Build app without SPI_FLASH/PSRAM support (saves ram)"
    depends on APP_BUILD_TYPE_RAM
    help
        If this option is enabled, external memory and related peripherals, such as Cache, MMU,
        Flash and PSRAM, won't be initialized. Corresponding drivers won't be introduced either.
        Components that depend on the spi_flash component will also be unavailable, such as
        app_update, etc. When this option is enabled, about 26KB of RAM space can be saved.

config APP_BUILD_USE_FLASH_SECTIONS
    bool # Whether to place code/data into memory-mapped flash sections

config APP_REPRODUCIBLE_BUILD
    bool "Enable reproducible build"
    default n
    select COMPILER_HIDE_PATHS_MACROS
    help
        If enabled, all date, time, and path information will be eliminated. A .gdbinit file will be
        created automatically (or appended to, if you already have one).

config APP_NO_BLOBS
    bool "No Binary Blobs"
    default n
    help
        If enabled, this disables the linking of binary libraries in the application build. Note
        that, after enabling this, Wi-Fi/Bluetooth will not work.

config APP_COMPATIBLE_PRE_V2_1_BOOTLOADERS
    bool "App compatible with bootloaders before ESP-IDF v2.1"
    select APP_COMPATIBLE_PRE_V3_1_BOOTLOADERS
    depends on IDF_TARGET_ESP32
    default n
    help
        Bootloaders before ESP-IDF v2.1 did less initialisation of the
        system clock. This setting needs to be enabled to build an app
        which can be booted by these older bootloaders.

        If this setting is enabled, the app can be booted by any bootloader
        from IDF v1.0 up to the current version.

        If this setting is disabled, the app can only be booted by bootloaders
        from IDF v2.1 or newer.

        Enabling this setting adds approximately 1KB to the app's IRAM usage.

config APP_COMPATIBLE_PRE_V3_1_BOOTLOADERS
    bool "App compatible with bootloader and partition table before ESP-IDF v3.1"
    depends on IDF_TARGET_ESP32
    default n
    help
        Partition tables before ESP-IDF V3.1 do not contain an MD5 checksum
        field, and the bootloader before ESP-IDF v3.1 cannot read a partition
        table that contains an MD5 checksum field.

        Enable this option only if your app needs to boot on a bootloader and/or
        partition table that was generated from a version *before* ESP-IDF v3.1.

        If this option and Flash Encryption are enabled at the same time, and any
        data partitions in the partition table are marked Encrypted, then the
        partition encrypted flag should be manually verified in the app before accessing
        the partition (see CVE-2021-27926).
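To make the manual verification mentioned in the help text above concrete, here is a hedged sketch of checking a data partition's `encrypted` flag before trusting its contents; the partition label `"storage"` used in the usage note is a hypothetical example:

```c
#include <stdbool.h>
#include "esp_partition.h"  // esp_partition_find_first(), esp_partition_t

// Returns true only if the named data partition exists and is marked
// encrypted. Verify this before accessing the partition when the
// pre-v3.1 compatibility option and Flash Encryption are both enabled
// (see the CVE-2021-27926 note above).
bool data_partition_is_encrypted(const char *label)
{
    const esp_partition_t *part = esp_partition_find_first(
        ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, label);
    return part != NULL && part->encrypted;
}
```

Typical usage would be a guard such as `if (!data_partition_is_encrypted("storage")) { /* refuse access */ }` before mounting or reading the partition.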
config APP_INIT_CLK
    bool
    depends on IDF_TARGET_ESP32
    default y if APP_COMPATIBLE_PRE_V2_1_BOOTLOADERS
    default y if APP_BUILD_TYPE_RAM


endmenu # Build type

source "$COMPONENT_KCONFIGS_PROJBUILD_SOURCE_FILE"

menu "Compiler options"

choice COMPILER_OPTIMIZATION
    prompt "Optimization Level"
    default COMPILER_OPTIMIZATION_DEBUG
    help
        This option sets the compiler optimization level (gcc -O argument) for the app.

        - The "Debug" setting will add the -Og flag to CFLAGS.
        - The "Size" setting will add the -Os flag to CFLAGS.
        - The "Performance" setting will add the -O2 flag to CFLAGS.
        - The "None" setting will add the -O0 flag to CFLAGS.

        The "Size" setting causes the compiled code to be smaller and faster, but
        may make it harder to correlate code addresses to source file
        lines when debugging.

        The "Performance" setting causes the compiled code to be larger and faster,
        but it will be easier to correlate code addresses to source file lines.

        "None" with -O0 produces compiled code without optimization.

        Note that custom optimization levels may be unsupported.

        Compiler optimization for the IDF bootloader is set separately,
        see the BOOTLOADER_COMPILER_OPTIMIZATION setting.

config COMPILER_OPTIMIZATION_DEBUG
    bool "Debug (-Og)"
config COMPILER_OPTIMIZATION_SIZE
    bool "Optimize for size (-Os)"
config COMPILER_OPTIMIZATION_PERF
    bool "Optimize for performance (-O2)"
config COMPILER_OPTIMIZATION_NONE
    bool "Debug without optimization (-O0)"

endchoice

choice COMPILER_OPTIMIZATION_ASSERTION_LEVEL
    prompt "Assertion level"
    default COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE
    help
        Assertions can be:

        - Enabled. Failure will print verbose assertion details. This is the default.

        - Set to "silent" to save code size (failed assertions will abort() but the user
          needs to use the aborting address to find the line number with the failed assertion.)

        - Disabled entirely (not recommended for most configurations.) -DNDEBUG is added
          to CPPFLAGS in this case.
config COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE
    prompt "Enabled"
    bool
    help
        Enable assertions. Assertion content and line number will be printed on failure.

config COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
    prompt "Silent (saves code size)"
    bool
    help
        Enable silent assertions. Failed assertions will abort(); the user needs to
        use the aborting address to find the line number with the failed assertion.

config COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE
    prompt "Disabled (sets -DNDEBUG)"
    bool
    help
        If assertions are disabled, -DNDEBUG is added to CPPFLAGS.

endchoice # assertions
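To make the three assertion levels concrete, here is what each means at a single `assert()` call site (an illustrative sketch, not code from this repository):

```c
#include <assert.h>

int divide(int a, int b)
{
    // Enabled:  a failure prints the expression, file and line, then aborts.
    // Silent:   a failure abort()s without the message; the aborting address
    //           must be mapped back to this line manually.
    // Disabled: -DNDEBUG compiles the check out entirely; b == 0 proceeds
    //           into undefined behavior (division by zero).
    assert(b != 0);
    return a / b;
}
```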
choice COMPILER_FLOAT_LIB_FROM
    prompt "Compiler float lib source"
    default COMPILER_FLOAT_LIB_FROM_RVFPLIB if ESP_ROM_HAS_RVFPLIB
    default COMPILER_FLOAT_LIB_FROM_GCCLIB
    help
        In the soft-fp part of libgcc, the RISC-V version is written in C
        and handles all edge cases in IEEE 754, which makes it larger
        and slower.

        RVfplib is an optimized RISC-V library for FP arithmetic on 32-bit
        integer processors, for single and double-precision FP.
        RVfplib is "fast", but it has a few exceptions from IEEE 754 compliance.

config COMPILER_FLOAT_LIB_FROM_GCCLIB
    bool "libgcc"
config COMPILER_FLOAT_LIB_FROM_RVFPLIB
    depends on ESP_ROM_HAS_RVFPLIB
    bool "librvfp"
endchoice # COMPILER_FLOAT_LIB_FROM

config COMPILER_OPTIMIZATION_ASSERTION_LEVEL
    int
    default 0 if COMPILER_OPTIMIZATION_ASSERTIONS_DISABLE
    default 1 if COMPILER_OPTIMIZATION_ASSERTIONS_SILENT
    default 2 if COMPILER_OPTIMIZATION_ASSERTIONS_ENABLE

config COMPILER_OPTIMIZATION_CHECKS_SILENT
    bool "Disable messages in ESP_RETURN_ON_* and ESP_EXIT_ON_* macros"
    default n
    help
        If enabled, the error messages will be discarded in the following check macros:
        - ESP_RETURN_ON_ERROR
        - ESP_EXIT_ON_ERROR
        - ESP_RETURN_ON_FALSE
        - ESP_EXIT_ON_FALSE
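These macros come from `esp_check.h`; each combines an error check, an error log, and an early return. A minimal sketch of typical usage (the I2C call is only an example of a function returning `esp_err_t`):

```c
#include "esp_check.h"   // ESP_RETURN_ON_ERROR and related macros
#include "driver/i2c.h"  // example esp_err_t-returning API

static const char *TAG = "sensor";

esp_err_t sensor_bus_init(i2c_port_t port)
{
    // On error: logs "failed to install I2C driver" under TAG and returns the
    // error code to the caller. With COMPILER_OPTIMIZATION_CHECKS_SILENT=y the
    // message is discarded, but the early return on error still happens.
    ESP_RETURN_ON_ERROR(i2c_driver_install(port, I2C_MODE_MASTER, 0, 0, 0),
                        TAG, "failed to install I2C driver");
    return ESP_OK;
}
```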
menuconfig COMPILER_HIDE_PATHS_MACROS
    bool "Replace ESP-IDF and project paths in binaries"
    default y
    help
        When expanding the __FILE__ and __BASE_FILE__ macros, replace paths inside ESP-IDF
        with paths relative to the placeholder string "IDF", and convert paths inside the
        project directory to relative paths.

        This allows building the project with assertions or other code that embeds file paths,
        without the binary containing the exact path to the IDF or project directories.

        This option passes -fmacro-prefix-map options to the GCC command line. To replace additional
        paths in your binaries, modify the project CMakeLists.txt file to pass custom -fmacro-prefix-map or
        -ffile-prefix-map arguments.
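The effect is easiest to see on a `__FILE__` expansion; a small illustrative sketch (the exact replacement string depends on where the source file lives):

```c
#include <stdio.h>

void report_source_path(void)
{
    // With COMPILER_HIDE_PATHS_MACROS=y this prints something like
    // "IDF/components/foo/foo.c" or a project-relative path, instead of an
    // absolute host path such as "/home/user/esp/project/main/foo.c".
    printf("built from: %s\n", __FILE__);
}
```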
menuconfig COMPILER_CXX_EXCEPTIONS
    bool "Enable C++ exceptions"
    default n
    help
        Enabling this option compiles all IDF C++ files with exception support enabled.

        Disabling this option disables C++ exception support in all compiled files, and any libstdc++ code
        which throws an exception will abort instead.

        Enabling this option currently adds an additional ~500 bytes of heap overhead
        when an exception is thrown in user code for the first time.

config COMPILER_CXX_EXCEPTIONS_EMG_POOL_SIZE
    int "Emergency Pool Size"
    default 0
    depends on COMPILER_CXX_EXCEPTIONS
    help
        Size (in bytes) of the emergency memory pool for C++ exceptions. This pool will be used to allocate
        memory for thrown exceptions when there is not enough memory on the heap.

config COMPILER_CXX_RTTI
    bool "Enable C++ run-time type info (RTTI)"
    default n
    help
        Enabling this option compiles all C++ files with RTTI support enabled.
        This increases binary size (typically by tens of kB) but allows using
        dynamic_cast conversion and the typeid operator.

choice COMPILER_STACK_CHECK_MODE
    prompt "Stack smashing protection mode"
    default COMPILER_STACK_CHECK_MODE_NONE
    help
        Stack smashing protection mode. Emit extra code to check for buffer overflows, such as stack
        smashing attacks. This is done by adding a guard variable to functions with vulnerable objects.
        The guards are initialized when a function is entered and then checked when the function exits.
        If a guard check fails, the program is halted. Protection has the following modes:

        - In NORMAL mode (GCC flag: -fstack-protector) only functions that call alloca, and functions with
          buffers larger than 8 bytes are protected.

        - STRONG mode (GCC flag: -fstack-protector-strong) is like NORMAL, but includes additional functions
          to be protected -- those that have local array definitions, or have references to local frame
          addresses.

        - In OVERALL mode (GCC flag: -fstack-protector-all) all functions are protected.

        Modes have the following impact on code performance and coverage:

        - performance: NORMAL > STRONG > OVERALL

        - coverage: NORMAL < STRONG < OVERALL

        The performance impact includes increasing the amount of stack memory required for each task.

config COMPILER_STACK_CHECK_MODE_NONE
    bool "None"
config COMPILER_STACK_CHECK_MODE_NORM
    bool "Normal"
config COMPILER_STACK_CHECK_MODE_STRONG
    bool "Strong"
config COMPILER_STACK_CHECK_MODE_ALL
    bool "Overall"
endchoice
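For orientation, this is the kind of function that NORMAL mode instruments; the 16-byte local buffer is what makes it eligible (an illustrative sketch):

```c
#include <string.h>

void store_name(const char *src)
{
    char buf[16];  // local buffer larger than 8 bytes: protected in NORMAL mode
    strncpy(buf, src, sizeof(buf));
    buf[sizeof(buf) - 1] = '\0';
    // The compiler places a guard value next to buf and verifies it here, on
    // return; if an overflow of buf smashed the guard, the program is halted.
}
```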
config COMPILER_STACK_CHECK
    bool
    default !COMPILER_STACK_CHECK_MODE_NONE
    help
        Stack smashing protection.

config COMPILER_WARN_WRITE_STRINGS
    bool "Enable -Wwrite-strings warning flag"
    default "n"
    help
        Adds the -Wwrite-strings flag for the C/C++ compilers.

        For C, this gives string constants the type ``const char[]`` so that
        copying the address of one into a non-const ``char *`` pointer
        produces a warning. This warning helps to find at compile time code
        that tries to write into a string constant.

        For C++, this warns about the deprecated conversion from string
        literals to ``char *``.
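A two-line illustration of what the flag catches:

```c
const char *ok = "hello";  // fine: literal bound to a const-qualified pointer
char *bad = "hello";       // -Wwrite-strings warns here: the literal is const char[],
                           // and writing through bad would be undefined behavior
```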
config COMPILER_SAVE_RESTORE_LIBCALLS
    bool "Enable -msave-restore flag to reduce code size"
    depends on IDF_TARGET_ARCH_RISCV
    help
        Adds -msave-restore to C/C++ compilation flags.

        When this flag is enabled, the compiler will call library functions to
        save/restore registers in function prologues/epilogues. This results
        in lower overall code size, at the expense of slightly reduced performance.

        This option can be enabled for RISC-V targets only.

config COMPILER_DISABLE_GCC12_WARNINGS
    bool "Disable new warnings introduced in GCC 12"
    default "n"
    help
        Enable this option if you use GCC 12 or newer and want to disable warnings
        which don't appear with GCC 11.

config COMPILER_DISABLE_GCC13_WARNINGS
    bool "Disable new warnings introduced in GCC 13"
    default "n"
    help
        Enable this option if you use GCC 13 or newer and want to disable warnings
        which don't appear with GCC 12.

config COMPILER_DUMP_RTL_FILES
    bool "Dump RTL files during compilation"
    help
        If enabled, RTL files will be produced during compilation. These files
        can be used by other tools, for example to calculate call graphs.

choice COMPILER_RT_LIB
    prompt "Compiler runtime library"
    default COMPILER_RT_LIB_CLANGRT if IDF_TOOLCHAIN_CLANG
    default COMPILER_RT_LIB_HOST if IDF_TARGET_LINUX
    default COMPILER_RT_LIB_GCCLIB
    help
        Select the runtime library to be used by the compiler.
        - The GCC toolchain supports libgcc only.
        - Clang allows choosing between libgcc and libclang_rt.
        - For host builds ("linux" target), the host's default library is used.

config COMPILER_RT_LIB_GCCLIB
    depends on !IDF_TARGET_LINUX
    bool "libgcc"
config COMPILER_RT_LIB_CLANGRT
    depends on IDF_TOOLCHAIN_CLANG && !IDF_TARGET_LINUX
    bool "libclang_rt"
config COMPILER_RT_LIB_HOST
    depends on IDF_TARGET_LINUX
    bool "Host"
endchoice

config COMPILER_RT_LIB_NAME
    string
    default "clang_rt.builtins" if COMPILER_RT_LIB_CLANGRT
    default "gcc" if COMPILER_RT_LIB_GCCLIB
    default "" if COMPILER_RT_LIB_HOST

endmenu # Compiler Options

menu "Component config"
source "$COMPONENT_KCONFIGS_SOURCE_FILE"
endmenu

config IDF_EXPERIMENTAL_FEATURES
    bool "Make experimental features visible"
    default "n"
    help
        By enabling this option, ESP-IDF experimental feature options will be visible.

        Note you should still enable a certain experimental feature option to use it, and you
        should read the corresponding risk warning and known issue list carefully.

        Current experimental feature list:

        - CONFIG_ESPTOOLPY_FLASHFREQ_120M && CONFIG_ESPTOOLPY_FLASH_SAMPLE_MODE_DTR
        - CONFIG_SPIRAM_SPEED_120M && CONFIG_SPIRAM_MODE_OCT
        - CONFIG_BOOTLOADER_CACHE_32BIT_ADDR_QUAD_FLASH
        - CONFIG_MBEDTLS_USE_CRYPTO_ROM_IMPL
menu "SDK tool configuration"
config TOOLPREFIX
    string "Compiler toolchain path/prefix"
    default "xtensa-esp32-elf-"
    help
        The prefix/path that is used to call the toolchain. The default setting assumes
        a crosstool-ng gcc setup that is in your PATH.

config PYTHON
    string "Python 2 interpreter"
    default "python"
    help
        The executable name/path that is used to run python. On some systems Python 2.x
        may need to be invoked as python2.

config MAKE_WARN_UNDEFINED_VARIABLES
    bool "'make' warns on undefined variables"
    default "y"
    help
        Adds --warn-undefined-variables to MAKEFLAGS. This causes make to
        print a warning any time an undefined variable is referenced.

        This option helps find places where a variable reference is misspelled
        or otherwise missing, but it can be unwanted if you have Makefiles which
        depend on undefined variables expanding to an empty string.

endmenu # SDK tool configuration

source "$COMPONENT_KCONFIGS_PROJBUILD"

menu "Compiler options"

choice OPTIMIZATION_COMPILER
    prompt "Optimization Level"
    default OPTIMIZATION_LEVEL_DEBUG
    help
        This option sets the compiler optimization level (gcc -O argument).

        - for the "Release" setting, the -Os flag is added to CFLAGS.
        - for the "Debug" setting, the -Og flag is added to CFLAGS.

        "Release" with -Os produces smaller & faster compiled code but it
        may be harder to correlate code addresses to source files when debugging.

        To add custom optimization settings, set CFLAGS and/or CPPFLAGS
        in the project makefile, before including $(IDF_PATH)/make/project.mk. Note that
        custom optimization levels may be unsupported.

config OPTIMIZATION_LEVEL_DEBUG
    bool "Debug (-Og)"
config OPTIMIZATION_LEVEL_RELEASE
    bool "Release (-Os)"
endchoice

choice OPTIMIZATION_ASSERTION_LEVEL
    prompt "Assertion level"
    default OPTIMIZATION_ASSERTIONS_ENABLED
    help
        Assertions can be:
        - Enabled. Failure will print verbose assertion details. This is the default.

        - Set to "silent" to save code size (failed assertions will abort() but the user
          needs to use the aborting address to find the line number with the failed assertion.)

        - Disabled entirely (not recommended for most configurations.) -DNDEBUG is added
          to CPPFLAGS in this case.

config OPTIMIZATION_ASSERTIONS_ENABLED
    prompt "Enabled"
    bool
    help
        Enable assertions. Assertion content and line number will be printed on failure.

config OPTIMIZATION_ASSERTIONS_SILENT
    prompt "Silent (saves code size)"
    bool
    help
        Enable silent assertions. Failed assertions will abort(); the user needs to
        use the aborting address to find the line number with the failed assertion.

config OPTIMIZATION_ASSERTIONS_DISABLED
    prompt "Disabled (sets -DNDEBUG)"
    bool
    help
        If assertions are disabled, -DNDEBUG is added to CPPFLAGS.

endchoice # assertions

menuconfig CXX_EXCEPTIONS
    bool "Enable C++ exceptions"
    default n
    help
        Enabling this option compiles all IDF C++ files with exception support enabled.

        Disabling this option disables C++ exception support in all compiled files, and any libstdc++ code which throws
        an exception will abort instead.

        Enabling this option currently adds an additional ~500 bytes of heap overhead
        when an exception is thrown in user code for the first time.

config CXX_EXCEPTIONS_EMG_POOL_SIZE
    int "Emergency Pool Size"
    default 0
    depends on CXX_EXCEPTIONS
    help
        Size (in bytes) of the emergency memory pool for C++ exceptions. This pool will be used to allocate
        memory for thrown exceptions when there is not enough memory on the heap.

choice STACK_CHECK_MODE
    prompt "Stack smashing protection mode"
    default STACK_CHECK_NONE
    help
        Stack smashing protection mode. Emit extra code to check for buffer overflows, such as stack
        smashing attacks. This is done by adding a guard variable to functions with vulnerable objects.
        The guards are initialized when a function is entered and then checked when the function exits.
        If a guard check fails, the program is halted. Protection has the following modes:
        - In NORMAL mode (GCC flag: -fstack-protector) only functions that call alloca, and functions with buffers larger than
          8 bytes are protected.
        - STRONG mode (GCC flag: -fstack-protector-strong) is like NORMAL, but includes additional functions to be protected -- those that
          have local array definitions, or have references to local frame addresses.
        - In OVERALL mode (GCC flag: -fstack-protector-all) all functions are protected.

        Modes have the following impact on code performance and coverage:
        - performance: NORMAL > STRONG > OVERALL
        - coverage: NORMAL < STRONG < OVERALL


config STACK_CHECK_NONE
    bool "None"
config STACK_CHECK_NORM
    bool "Normal"
config STACK_CHECK_STRONG
    bool "Strong"
config STACK_CHECK_ALL
    bool "Overall"
endchoice

config STACK_CHECK
    bool
    default !STACK_CHECK_NONE
    help
        Stack smashing protection.

endmenu # Compiler Options

menu "Component config"
source "$COMPONENT_KCONFIGS"
endmenu
141	README.md

@@ -1,130 +1,117 @@
# Espressif IoT Development Framework

* [中文版](./README_CN.md)
[](https://esp-idf.readthedocs.io/en/latest/?badge=latest)

ESP-IDF is the development framework for Espressif SoCs supported on Windows, Linux and macOS.
ESP-IDF is the official development framework for the [ESP32](https://espressif.com/en/products/hardware/esp32/overview) chip.

# ESP-IDF Release Support Schedule

- Please read [the support policy](SUPPORT_POLICY.md) and [the documentation](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/versions.html) for more information about ESP-IDF versions.
- Please see the [End-of-Life Advisories](https://www.espressif.com/en/support/documents/advisories?keys=&field_type_of_advisory_tid%5B%5D=817) for information about ESP-IDF releases with discontinued support.

# ESP-IDF Release and SoC Compatibility

The following table shows ESP-IDF support of Espressif SoCs where ![alt text][preview] and ![alt text][supported] denote preview status and support, respectively. The preview support is usually limited in time and intended for beta versions of chips. Please use an ESP-IDF release where the desired SoC is already supported.

|Chip | v4.3 | v4.4 | v5.0 | v5.1 | v5.2 | |
|:----------- | :---------------------:| :---------------------:| :---------------------:| :--------------------: | :--------------------: | :----------------------------------------------------------|
|ESP32 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-S2 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-C3 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-S3 | | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/en/news/ESP32_S3) |
|ESP32-C2 | | | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/en/news/ESP32-C2) |
|ESP32-C6 | | | | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/en/news/ESP32_C6) |
|ESP32-H2 | | | | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/en/news/ESP32_H2) |
|ESP32-P4 | | | | | ![alt text][preview] | [Announcement](https://www.espressif.com/en/news/ESP32-P4) |

[supported]: https://img.shields.io/badge/-supported-green "supported"
[preview]: https://img.shields.io/badge/-preview-orange "preview"

There are variants of revisions for a series of chips. See [Compatibility Between ESP-IDF Releases and Revisions of Espressif SoCs](https://github.com/espressif/esp-idf/blob/master/COMPATIBILITY.md) for the details of the compatibility between ESP-IDF and chip revisions.

Espressif SoCs released before 2016 (ESP8266 and ESP8285) are supported by [RTOS SDK](https://github.com/espressif/ESP8266_RTOS_SDK) instead.

# Developing With ESP-IDF
# Developing With the ESP-IDF

## Setting Up ESP-IDF

See https://idf.espressif.com/ for links to detailed instructions on how to set up the ESP-IDF depending on the chip you use.
See setup guides for detailed instructions to set up the ESP-IDF:

**Note:** Each SoC series and each ESP-IDF release has its own documentation. Please see Section [Versions](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/versions.html) on how to find documentation and how to check out a specific release of ESP-IDF.

### Non-GitHub forks

ESP-IDF uses relative locations as its submodule URLs ([.gitmodules](.gitmodules)), so they link to GitHub. If ESP-IDF is forked to a Git repository which is not on GitHub, you will need to run the script [tools/set-submodules-to-github.sh](tools/set-submodules-to-github.sh) after git clone.

The script sets absolute URLs for all submodules, allowing `git submodule update --init --recursive` to complete. If cloning ESP-IDF from GitHub, this step is not needed.
* [Windows Setup Guide](https://esp-idf.readthedocs.io/en/latest/get-started/windows-setup.html)
* [Mac OS Setup Guide](https://esp-idf.readthedocs.io/en/latest/get-started/macos-setup.html)
* [Linux Setup Guide](https://esp-idf.readthedocs.io/en/latest/get-started/linux-setup.html)

## Finding a Project

As well as the [esp-idf-template](https://github.com/espressif/esp-idf-template) project mentioned in Getting Started, ESP-IDF comes with some example projects in the [examples](examples) directory.
As well as the [esp-idf-template](https://github.com/espressif/esp-idf-template) project mentioned in the setup guide, ESP-IDF comes with some example projects in the [examples](examples) directory.

Once you've found the project you want to work with, change to its directory and you can configure and build it.

To start your own project based on an example, copy the example project directory outside of the ESP-IDF directory.

# Quick Reference

See the Getting Started guide links above for a detailed setup guide. This is a quick reference for common commands when working with ESP-IDF projects:

## Setup Build Environment

(See the Getting Started guide listed above for a full list of required steps with more details.)

* Install host build dependencies mentioned in the Getting Started guide.
* Run the install script to set up the build environment. The options include `install.bat` or `install.ps1` for Windows, and `install.sh` or `install.fish` for Unix shells.
* Run the export script on Windows (`export.bat`) or source it on Unix (`source export.sh`) in every shell environment before using ESP-IDF.

## Configuring the Project

* `idf.py set-target <chip_name>` sets the target of the project to `<chip_name>`. Run `idf.py set-target` without any arguments to see a list of supported targets.
* `idf.py menuconfig` opens a text-based configuration menu where you can configure the project.
`make menuconfig`

* Opens a text-based configuration menu for the project.
* Use up & down arrow keys to navigate the menu.
* Use the Enter key to go into a submenu, the Escape key to go out or to exit.
* Type `?` to see a help screen. The Enter key exits the help screen.
* Use the Space key, or `Y` and `N` keys to enable (Yes) and disable (No) configuration items with checkboxes "`[*]`"
* Pressing `?` while highlighting a configuration item displays help about that item.
* Type `/` to search the configuration items.

Once done configuring, press Escape multiple times to exit and say "Yes" to save the new configuration when prompted.

## Compiling the Project

`idf.py build`
`make all`

... will compile the app, the bootloader, and generate a partition table based on the config.

## Flashing the Project

When the build finishes, it will print a command line to use esptool.py to flash the chip. However you can also do this automatically by running:
When `make all` finishes, it will print a command line to use esptool.py to flash the chip. However you can also do this from make by running:

`idf.py -p PORT flash`
`make flash`

Replace PORT with the name of your serial port (like `COM3` on Windows, `/dev/ttyUSB0` on Linux, or `/dev/cu.usbserial-X` on MacOS). If the `-p` option is left out, `idf.py flash` will try to flash the first available serial port.
This will flash the entire project (app, bootloader and partition table) to a new chip. The settings for serial port flashing can be configured with `make menuconfig`.

This will flash the entire project (app, bootloader and partition table) to a new chip. The settings for serial port flashing can be configured with `idf.py menuconfig`.

You don't need to run `idf.py build` before running `idf.py flash`; `idf.py flash` will automatically rebuild anything which needs it.
You don't need to run `make all` before running `make flash`; `make flash` will automatically rebuild anything which needs it.

## Viewing Serial Output

The `idf.py monitor` target uses the [esp-idf-monitor tool](https://github.com/espressif/esp-idf-monitor) to display serial output from Espressif SoCs. esp-idf-monitor also has a range of features to decode crash output and interact with the device. [Check the documentation page for details](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/idf-monitor.html).
The `make monitor` target uses the [idf_monitor tool](https://esp-idf.readthedocs.io/en/latest/get-started/idf-monitor.html) to display serial output from the ESP32. idf_monitor also has a range of features to decode crash output and interact with the device. [Check the documentation page for details](https://esp-idf.readthedocs.io/en/latest/get-started/idf-monitor.html).

Exit the monitor by typing Ctrl-].

To build, flash and monitor output in one pass, you can run:
To flash and monitor output in one pass, you can run:

`idf.py flash monitor`
`make flash monitor`

## Compiling & Flashing Only the App
## Compiling & Flashing Just the App

After the initial flash, you may want to build and flash just your app, not the bootloader and partition table:

* `idf.py app` - build just the app.
* `idf.py app-flash` - flash just the app.
* `make app` - build just the app.
* `make app-flash` - flash just the app.

`idf.py app-flash` will automatically rebuild the app if any source files have changed.
`make app-flash` will automatically rebuild the app if it needs it.

(In normal development there's no downside to reflashing the bootloader and partition table each time, if they haven't changed.)

## Parallel Builds

ESP-IDF supports compiling multiple files in parallel, so all of the above commands can be run as `make -jN` where `N` is the number of parallel make processes to run (generally N should be equal to or one more than the number of CPU cores in your system.)

Multiple make functions can be combined into one. For example: to build the app & bootloader using 5 jobs in parallel, then flash everything, and then display serial output from the ESP32, run:

```
make -j5 flash monitor
```

## The Partition Table

Once you've compiled your project, the "build" directory will contain a binary file with a name like "my_app.bin". This is an ESP32 image binary that can be loaded by the bootloader.

A single ESP32's flash can contain multiple apps, as well as many different kinds of data (calibration data, filesystems, parameter storage, etc). For this reason a partition table is flashed to offset 0x8000 in the flash.

Each entry in the partition table has a name (label), type (app, data, or something else), subtype and the offset in flash where the partition is loaded.

The simplest way to use the partition table is to `make menuconfig` and choose one of the simple predefined partition tables:

* "Single factory app, no OTA"
* "Factory app, two OTA definitions"

In both cases the factory app is flashed at offset 0x10000. If you run `make partition_table`, it will print a summary of the partition table.

For more details about partition tables and how to create custom variations, view the [`docs/api-guides/partition-tables.rst`](docs/api-guides/partition-tables.rst) file.
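Applications can also inspect the partition table at runtime through the `esp_partition` API; a hedged sketch that prints the first app partition:

```c
#include <inttypes.h>
#include <stdio.h>
#include "esp_partition.h"

void print_first_app_partition(void)
{
    // Find the first partition of type "app" (any subtype).
    const esp_partition_t *part = esp_partition_find_first(
        ESP_PARTITION_TYPE_APP, ESP_PARTITION_SUBTYPE_ANY, NULL);
    if (part != NULL) {
        printf("label=%s offset=0x%08" PRIx32 " size=0x%" PRIx32 "\n",
               part->label, part->address, part->size);
    }
}
```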
## Erasing Flash

The `idf.py flash` target does not erase the entire flash contents. However it is sometimes useful to set the device back to a totally erased state, particularly when making partition table changes or OTA app updates. To erase the entire flash, run `idf.py erase-flash`.
The `make flash` target does not erase the entire flash contents. However it is sometimes useful to set the device back to a totally erased state, particularly when making partition table changes or OTA app updates. To erase the entire flash, run `make erase_flash`.

This can be combined with other targets, i.e. `idf.py -p PORT erase-flash flash` will erase everything and then re-flash the new app, bootloader and partition table.
This can be combined with other targets, i.e. `make erase_flash flash` will erase everything and then re-flash the new app, bootloader and partition table.

# Resources

* Documentation for the latest version: https://docs.espressif.com/projects/esp-idf/. This documentation is built from the [docs directory](docs) of this repository.

* [Beginner's Guide to Key Concepts and Resources of ESP-IDF](https://youtu.be/J8zc8mMNKtc?feature=shared)
* Documentation for the latest version: https://esp-idf.readthedocs.io/. This documentation is built from the [docs directory](docs) of this repository.

* The [esp32.com forum](https://esp32.com/) is a place to ask questions and find community resources.

* [Check the Issues section on github](https://github.com/espressif/esp-idf/issues) if you find a bug or have a feature request. Please check existing Issues before opening a new one.

* If you're interested in contributing to ESP-IDF, please check the [Contributions Guide](https://docs.espressif.com/projects/esp-idf/en/latest/contribute/index.html).
* If you're interested in contributing to ESP-IDF, please check the [Contributions Guide](https://esp-idf.readthedocs.io/en/latest/contribute/index.html).
130	README_CN.md

@@ -1,130 +0,0 @@
# Espressif IoT Development Framework

* [English Version](./README.md)

ESP-IDF is Espressif's official IoT development framework, supporting the Windows, Linux, and macOS operating systems.

# ESP-IDF Release Support Schedule

- Please read [the ESP-IDF support policy](SUPPORT_POLICY_CN.md) and [the documentation](https://docs.espressif.com/projects/esp-idf/zh_CN/latest/esp32/versions.html) for more information about ESP-IDF versions.
- Please see the [ESP-IDF End-of-Life (EOL) Advisories](https://www.espressif.com/zh-hans/support/documents/advisories?keys=&field_type_of_advisory_tid%5B%5D=817).

# ESP-IDF and Espressif Chips

The table below summarizes the support status of Espressif chips across ESP-IDF releases, where ![alt text][supported] means supported and ![alt text][preview] means preview support. Preview support is usually time-limited and only applies to beta chips. Please make sure to use an ESP-IDF release that matches the chip.

|Chip | v4.3 | v4.4 | v5.0 | v5.1 | v5.2 | |
|:----------- | :---------------------:| :---------------------:| :---------------------:| :--------------------: | :--------------------: | :-------------------------------------------------------------- |
|ESP32 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-S2 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-C3 | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | |
|ESP32-S3 | | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/zh-hans/news/ESP32_S3) |
|ESP32-C2 | | | ![alt text][supported] | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/zh-hans/news/ESP32-C2) |
|ESP32-C6 | | | | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/zh-hans/news/ESP32_C6) |
|ESP32-H2 | | | | ![alt text][supported] | ![alt text][supported] | [Announcement](https://www.espressif.com/zh-hans/news/ESP32_H2) |
|ESP32-P4 | | | | | ![alt text][preview] | [Announcement](https://www.espressif.com/en/news/ESP32-P4) |

[supported]: https://img.shields.io/badge/-%E6%94%AF%E6%8C%81-green "supported"
[preview]: https://img.shields.io/badge/-%E9%A2%84%E8%A7%88-orange "preview"

Each Espressif chip may have multiple revisions. See [Compatibility Between ESP-IDF Releases and Revisions of Espressif SoCs](https://github.com/espressif/esp-idf/blob/master/COMPATIBILITY_CN.md) for the compatibility between ESP-IDF releases and chip revisions.

For Espressif chips released before 2016 (including ESP8266 and ESP8285), please refer to the [RTOS SDK](https://github.com/espressif/ESP8266_RTOS_SDK).

# Developing With ESP-IDF

## Setting Up ESP-IDF

See https://idf.espressif.com/ for instructions on setting up the ESP-IDF development environment for each chip.

**Note:** Each chip series and each ESP-IDF release has its own documentation. See the [Versions](https://docs.espressif.com/projects/esp-idf/zh_CN/latest/esp32/versions.html) section for details on how to find documentation and how to check out a specific ESP-IDF release.

### Non-GitHub forks

Submodules in ESP-IDF use relative paths ([see the .gitmodules file](.gitmodules)), so they point to GitHub. If ESP-IDF is forked to a repository that is not on GitHub, run the [tools/set-submodules-to-github.sh](tools/set-submodules-to-github.sh) script after cloning.

The script sets absolute URLs for all submodules, after which the submodules can be updated with `git submodule update --init --recursive`. This step is not needed if ESP-IDF was cloned from GitHub.

## Finding a Project

Besides the [esp-idf-template](https://github.com/espressif/esp-idf-template) project mentioned in the getting started guide, the [examples](examples) directory of ESP-IDF contains many other example projects.

Once you have found the project you need, you can enter its directory and perform the configure and build steps.

To start your own project based on an example project, copy the example project outside of the ESP-IDF directory.

# Quick Reference

For detailed usage, see the getting started guide links above. Below are some commands commonly used during ESP-IDF project development:

## Setting Up the Build Environment

Please follow the detailed steps listed in the getting started guide.

* Install the build dependencies mentioned in the getting started guide on the host.
* Run the install script to set up the build environment: `install.bat` or `install.ps1` for Windows shells, `install.sh` or `install.fish` for Unix shells.
* Before using ESP-IDF, run the export script in the shell: `export.bat` on Windows, `source export.sh` on Unix.

## Configuring the Project

* `idf.py set-target <chip_name>` sets the project's target chip to `<chip_name>`. Run `idf.py set-target` without any arguments to see the list of all supported target chips.
* `idf.py menuconfig` opens a text-based configuration menu for configuring the project.

## Compiling the Project

`idf.py build`

This compiles the application and the bootloader, and generates a partition table based on the configuration.

## Flashing the Project

When the build finishes, the terminal prints a command line showing how to flash the project to the chip with the esptool.py tool. You can also flash automatically by running:

`idf.py -p PORT flash`

Replace PORT with the actual serial port name on your system (such as `COM3` on Windows, `/dev/ttyUSB0` on Linux, or `/dev/cu.usbserial-X` on macOS). If the `-p` option is omitted, `idf.py flash` tries the first available serial port.

This flashes the entire project (application, bootloader, and partition table) to the chip. Serial-port flashing settings can also be adjusted with `idf.py menuconfig`.

You do not need to run `idf.py build` before `idf.py flash`; `idf.py flash` automatically rebuilds whatever is needed.

## Viewing Serial Output

`idf.py monitor` invokes the [esp-idf-monitor tool](https://github.com/espressif/esp-idf-monitor) to display serial output from Espressif chips. esp-idf-monitor also has a range of features to decode crash output and interact with the device. See the [documentation](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/idf-monitor.html) for details.

Type `Ctrl-]` to exit the monitor.

To build, flash, and monitor in one pass, run:

`idf.py flash monitor`

## Compiling and Flashing Only the App

After the initial flash, you may only want to build and flash your app, without the bootloader and partition table:

* `idf.py app` - build just the app.
* `idf.py app-flash` - flash just the app.

`idf.py app-flash` automatically rebuilds the app if any source files have changed.

(In normal development, reflashing the bootloader and partition table each time does no harm, even if they have not changed.)

## Erasing Flash

`idf.py flash` does not erase all flash contents, but sometimes the device needs to be restored to a fully erased state, especially when the partition table changes or during OTA app upgrades. To erase the whole flash, run `idf.py erase-flash`.

This can be combined with other commands: `idf.py -p PORT erase-flash flash` erases everything and then re-flashes the new app, bootloader, and partition table.

# Resources

* Documentation for the latest version: https://docs.espressif.com/projects/esp-idf/. This documentation is built from the [docs directory](docs) of this repository.

* [Beginner's Guide to Key Concepts and Resources](https://www.bilibili.com/video/BV1114y1r7du/)

* The [esp32.com forum](https://esp32.com/) is a place to ask questions and find community resources.

* If you find a bug or need a new feature, please [check GitHub Issues](https://github.com/espressif/esp-idf/issues) first to make sure it has not already been reported.

* If you are interested in contributing to ESP-IDF, please read the [Contributions Guide](https://docs.espressif.com/projects/esp-idf/en/latest/contribute/index.html) first.
@@ -1,9 +0,0 @@

# Security Policy

## Supported Versions

Please refer to https://docs.espressif.com/projects/esp-idf/en/latest/esp32/versions.html#support-periods for more details on ESP-IDF supported versions and the support period policy.

## Reporting a Vulnerability

Please refer to the [Espressif Security Incident Response Process](https://www.espressif.com/sites/default/files/Espressif%20Security%20Incident%20Response%20Process%20v1.0_EN.pdf) for guidelines on reporting a security vulnerability. Please do **NOT** create a public GitHub issue.
@@ -1,58 +0,0 @@

The latest support policy for ESP-IDF can be found at [https://github.com/espressif/esp-idf/blob/master/SUPPORT_POLICY.md](https://github.com/espressif/esp-idf/blob/master/SUPPORT_POLICY.md)

Support Period Policy
=====================

* [中文版](./SUPPORT_POLICY_CN.md)

Each ESP-IDF major and minor release (V4.1, V4.2, etc) is supported for 30 months after the initial stable release date.

Supported means that the ESP-IDF team will continue to apply bug fixes, security fixes, etc to the release branch on GitHub, and periodically make new bugfix releases as needed.

The support period is divided into "Service" and "Maintenance" periods:

| Period | Duration | Recommended for new projects? |
| ------- | ------------ | ------------------------------------- |
| Service | 12 months | Yes |
| Maintenance | 18 months | No |

During the Service period, bugfix releases are more frequent. In some cases, support for new features may be added during the Service period (this is reserved for features which are needed to meet particular regulatory requirements or standards for new products, and which carry a very low risk of introducing regressions.)

During the Maintenance period, the version is still supported but only bugfixes for high severity issues or security issues will be applied.

Using an "In Service" version is recommended when starting a new project.

Users are encouraged to upgrade all projects to a newer ESP-IDF release before the support period finishes and the release becomes End of Life (EOL). It is our policy to not continue fixing bugs in End of Life releases.

Pre-release versions (betas, previews, `-rc` and `-dev` versions, etc) are not covered by any support period. Sometimes a particular feature is marked as "Preview" in a release, which means it is also not covered by the support period.

ESP-IDF should be used in an up-to-date software environment. The operating system and other third-party tools should be supported by their maintainers. ESP-IDF cannot keep compatibility with unsupported third-party tools.

The ESP-IDF Programming Guide has information about the [different versions of ESP-IDF](https://docs.espressif.com/projects/esp-idf/en/latest/versions.html) (major, minor, bugfix, etc).

Example
-------

ESP-IDF V3.3 was released in September 2019. It was supported for 30 months until February 2022.

- The first V3.3 release was `v3.3` in September 2019.
- The ESP-IDF team continues to backport bug fixes, security fixes, etc to the release branch `release/v3.3`.
- Periodically stable bugfix releases are created from the release branch. For example `v3.3.1`, `v3.3.2`, etc. Users are encouraged to always update to the latest bugfix release.
- V3.3 bugfix releases continue until February 2022, when all V3.3.x releases become End of Life.

Existing Releases
-----------------

ESP-IDF release V4.1 and all newer releases will follow this support period policy. The support period for each release will be announced when the release is made.

For releases made before the current support period policy was announced, the original support periods apply:

* ESP-IDF V4.0.x will be supported until October 2021
* ESP-IDF V3.3.x will be supported until February 2022
* ESP-IDF versions before V3.3 are already End of Life.

Policy History
--------------

* September 2019. This policy splits ESP-IDF releases into Standard and Long Term Support.
* July 2020. All releases from now will have the same support period, which is equal to the previous Long Term Support period. Added an "In Service" period, during which versions will receive more updates.
@@ -1,58 +0,0 @@

The latest support policy for ESP-IDF can be found at [Support Period Policy](https://github.com/espressif/esp-idf/blob/master/SUPPORT_POLICY_CN.md)

Support Period Policy
=====================

* [English Version](./SUPPORT_POLICY.md)

Each ESP-IDF major and minor release (e.g. V4.1, V4.2) is supported for 30 months after its first stable release date.

Supported means that the ESP-IDF team will continue to apply bug fixes, security fixes, etc. to the release branch on GitHub, and will periodically publish new bugfix releases as needed.

The support period is divided into a "Service" period and a "Maintenance" period:

| Period | Duration | Recommended for new projects? |
| ------- | ------------ | ------------------------------------- |
| Service | 12 months | Yes |
| Maintenance | 18 months | No |

During a version's Service period, bugfix releases are published more frequently. In some cases, support for new features may be added during the Service period (this only applies to features required to meet specific regulatory standards or requirements for new products, and which carry a very low risk of introducing regressions).

During a version's Maintenance period, the version is still supported, but only high-severity issues or security issues receive bug fixes.

Using a version in its Service period is recommended when starting a new project.

Before a version's support period ends and it reaches End of Life (EOL), users are advised to upgrade all projects to a newer ESP-IDF version. Under this Support Period Policy, we stop fixing bugs in EOL releases.

The Support Period Policy does not apply to pre-release versions (including beta, preview, -rc, and -dev versions). Sometimes a specific feature in a release is marked "Preview", in which case that feature is also not covered by the support period.

Please use ESP-IDF in an up-to-date software environment. The operating system and other third-party tools should be supported by their maintainers; ESP-IDF cannot keep compatibility with unsupported third-party tools.

For information about the [different versions of ESP-IDF](https://docs.espressif.com/projects/esp-idf/zh_CN/latest/esp32/versions.html) (major, minor, bugfix, etc.), see the ESP-IDF Programming Guide.

Example
-------

ESP-IDF V3.3 was released in September 2019 and was supported for 30 months, until February 2022.

- The first V3.3 release was v3.3, published in September 2019.
- The ESP-IDF team keeps applying bug fixes, security fixes, etc. and backporting them to the release/v3.3 branch.
- Stable bugfix releases, such as v3.3.1 and v3.3.2, are created from the release branch periodically. Users are encouraged to always update to the latest bugfix release.
- V3.3 bugfix releases continue until February 2022, when all V3.3.x releases reach End of Life.

Existing Releases
-----------------

ESP-IDF V4.1 and all later releases follow this Support Period Policy. The support period of each release is announced when the release is published.

For releases made before this policy was announced, the following support periods apply:

* ESP-IDF V4.0.x is supported until October 2021.
* ESP-IDF V3.3.x is supported until February 2022.
* ESP-IDF versions before v3.3 are already End of Life.

Policy History
--------------

* September 2019. This policy split ESP-IDF releases into Standard and Long Term Support versions.
* July 2020. From now on, all releases have the same support period, equal to the previous Long Term Support period. Added a "Service" period, during which versions receive more frequent updates.
@@ -9,10 +9,9 @@
if [ -z ${IDF_PATH} ]; then
    echo "IDF_PATH must be set before including this script."
else
    IDF_ADD_PATHS_EXTRAS="${IDF_PATH}/components/esptool_py/esptool"
    IDF_ADD_PATHS_EXTRAS="${IDF_ADD_PATHS_EXTRAS}:${IDF_PATH}/components/espcoredump"
    IDF_ADD_PATHS_EXTRAS="${IDF_ADD_PATHS_EXTRAS}:${IDF_PATH}/components/partition_table/"
    IDF_ADD_PATHS_EXTRAS="${IDF_ADD_PATHS_EXTRAS}:${IDF_PATH}/tools/"
    export PATH="${IDF_ADD_PATHS_EXTRAS}:${PATH}"
    IDF_ADD_PATHS_EXTRAS="${IDF_PATH}/components/esptool_py/esptool:${IDF_PATH}/components/espcoredump:${IDF_PATH}/components/partition_table/"
    export PATH="${PATH}:${IDF_ADD_PATHS_EXTRAS}"
    echo "Added to PATH: ${IDF_ADD_PATHS_EXTRAS}"
fi

@@ -1,164 +0,0 @@
# Core Components

## Overview

This document contains details about what the core components are, what they contain, and how they are organized.

## Organization

The core components are organized into two groups.

The first group (referred to as `G0`) includes `hal`, `arch` (where `arch` is either `riscv` or `xtensa` depending on the chip), `esp_rom`, `esp_common`, and `soc`. This group contains information about and provides low-level access to the underlying hardware. In the case of `esp_common`, it contains hardware-agnostic code and utilities. These components may have dependencies on each other within the group, but outside dependencies should be minimized. The reason for this approach is that these components are fundamental, and many other components may require them. Ideally, the dependency relationship only goes one way, making it easier for this group to be usable in other projects.

The second group (referred to as `G1`) operates at a higher level than the first group. `G1` includes the components `esp_hw_support`, `esp_system`, `newlib`, `spi_flash`, `freertos`, `log`, and `heap`. Like the first group, circular dependencies within this group are allowed, and these components can have dependencies on the first group. G1 components represent essential software mechanisms for building other components.

## Descriptions

The following is a short description of the components mentioned above.

### `G0` Components

#### `hal`

Contains the hardware abstraction layer and low-level operation implementations for the various peripherals. The low-level functions assign meaningful names to register-level manipulations; the hardware abstraction provides operations one level above this, grouping these low-level functions into routines that achieve a meaningful action or state of the peripheral.

Example:

- `spi_flash_ll_set_address` is a low-level function that is part of the hardware abstraction `spi_flash_hal_read_block`

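To make the layering concrete, here is a minimal sketch of the LL/HAL split. The register layout and the companion LL calls are illustrative only, not the actual ESP-IDF symbols:

```c
#include <stdint.h>

// Illustrative register block; real layouts live in the `soc` component.
typedef struct {
    volatile uint32_t addr;  // read/write address register
    volatile uint32_t cmd;   // command register
} spi_mem_dev_t;

// Low-level (LL) function: a meaningful name for one register manipulation.
static inline void spi_flash_ll_set_address(spi_mem_dev_t *dev, uint32_t addr)
{
    dev->addr = addr;
}

// Another illustrative LL function: set a "start read" command bit.
static inline void spi_flash_ll_read_start(spi_mem_dev_t *dev)
{
    dev->cmd |= (1u << 18);
}

// HAL routine: groups LL calls into one meaningful peripheral action.
void spi_flash_hal_start_read(spi_mem_dev_t *dev, uint32_t addr)
{
    spi_flash_ll_set_address(dev, addr);
    spi_flash_ll_read_start(dev);
}
```
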
#### `arch`

Contains low-level architecture operations and definitions, including those for customizations (these can be thought of as being on the same level as the low-level functions of `hal`). This can also contain files provided by the architecture vendor.

Example:

- `xt_set_exception_handler`
- `rv_utils_intr_enable`
- `ERI_PERFMON_MAX`

#### `esp_common`

Contains hardware-agnostic definitions, constants, macros, utilities, and 'pure' and/or algorithmic functions that are usable by all other components (that is, barring there being a more appropriate component to put them in).

Example:

- `BIT(nr)` and, in the future, other bit manipulation utilities
- `IDF_DEPRECATED(REASON)`
- `ESP_IDF_VERSION_MAJOR`

#### `soc`

Contains descriptions of the underlying hardware: register structure, addresses, pins, capabilities, etc.

Example:

- `DR_REG_DPORT_BASE`
- `SOC_MCPWM_SUPPORTED`
- `uart_dev_s`

#### `esp_rom`

Contains headers, linker scripts, an abstraction layer, patches, and other files related to ROM functions.

Example:

- `esp32.rom.eco3.ld`
- `rom/aes.h`

### `G1` Components

#### `spi_flash`

SPI flash device access implementation.

#### `freertos`

FreeRTOS port to the targets supported by ESP-IDF.

#### `log`

Logging library.

#### `heap`

Heap implementation.

#### `newlib`

Some functions in the standard library are implemented here, especially those needing other `G1` components.

Example:

- `malloc` is implemented in terms of the component `heap`'s functions
- `gettimeofday` is implemented in terms of system time in `esp_system`

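As a concrete illustration of the first bullet, `malloc` in ESP-IDF is essentially a thin wrapper over the `heap` component. A simplified sketch of the idea (not the exact source):

```c
#include <stddef.h>
#include "esp_heap_caps.h"

// Sketch: newlib's malloc delegates to the heap component, which knows
// about the chip's different memory regions and their capabilities.
void *malloc(size_t size)
{
    return heap_caps_malloc(size, MALLOC_CAP_DEFAULT);
}
```
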
#### `esp_mm`

Memory management. Currently, this encompasses:

- Memory mapping for MMU-supported memories
- Memory synchronisation via Cache
- Utilities such as APIs to convert between virtual addresses and physical addresses

#### `esp_psram`

Contains the implementation of PSRAM services.

#### `esp_system`

Contains the implementation of system services and controls system behavior. The implementations here may take hardware resources and/or decide on a hardware state needed to support a system service/feature/mechanism. Currently, this encompasses the following, but is not limited to:

- Startup and initialization
- Panic and debug
- Reset and reset reason
- Task and interrupt watchdogs

#### `esp_hw_support`

Contains implementations that provide hardware operations, arbitration, or resource sharing, especially those used in the system. Unlike `esp_system`, implementations here do not decide on a hardware state or take hardware resources; they act merely as facilitators of hardware access. Currently, this encompasses the following, but is not limited to:

- Interrupt allocation
- Sleep functions
- Memory functions (external SPIRAM, async memory, etc.)
- Clock and clock control
- Random generation
- CPU utilities
- MAC settings

### `esp_hw_support` vs `esp_system`

This section lists some implementations and the reasons for placing them in either `esp_hw_support` or `esp_system`.

#### `task_wdt.c` (`esp_system`) vs `intr_alloc.c` (`esp_hw_support`)

The task watchdog fits the definition of taking and configuring hardware resources (wdt, interrupt) for the implementation of a system service/mechanism.

This is in contrast with interrupt allocation, which merely facilitates access to the underlying hardware for other implementations - drivers, user code, and even the task watchdog mentioned previously!

#### `crosscore_int.c` (`esp_system`)

The current implementation of crosscore interrupts is tightly coupled with a number of interrupt reasons associated with system services/mechanisms: REASON_YIELD (scheduler), REASON_FREQ_SWITCH (power management), REASON_PRINT_BACKTRACE (panic and debug).

However, if an implementation existed that made it possible to register an arbitrary interrupt reason - a lower-level inter-processor call, if you will - then that implementation would be a good candidate for `esp_hw_support`. The current implementation in `esp_system` could then just register the interrupt reasons mentioned above.

#### `esp_mac.h`, `esp_chip_info.h`, `esp_random.h` (`esp_hw_support`)

The functions in these headers used to be in `esp_system.h`, but have been split off.

The remaining functions in `esp_system.h` are those that deal with system behavior, such as `esp_register_shutdown_handler`, or are proxies for other system components' APIs, such as `esp_get_free_heap_size`.

The functions split off from `esp_system.h` are much more hardware-manipulation oriented, such as `esp_read_mac`, `esp_random`, and `esp_chip_info`.

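For reference, a small sketch exercising the split-off, hardware-oriented APIs named above (standard ESP-IDF calls; error handling omitted for brevity):

```c
#include <stdint.h>
#include "esp_mac.h"
#include "esp_chip_info.h"
#include "esp_random.h"

// Sketch: using the hardware-oriented functions that moved out of esp_system.h.
void show_hw_identity(void)
{
    uint8_t mac[6];
    esp_read_mac(mac, ESP_MAC_WIFI_STA);  // base MAC for the Wi-Fi station interface

    esp_chip_info_t info;
    esp_chip_info(&info);                 // chip model, features, revision

    uint32_t nonce = esp_random();        // hardware random number
    (void)mac; (void)info; (void)nonce;   // placeholders instead of real use
}
```
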
@@ -1,132 +0,0 @@
idf_build_get_property(target IDF_TARGET)

if(${target} STREQUAL "linux")
    return() # This component is not supported by the POSIX/Linux simulator
endif()

set(srcs
    "app_trace.c"
    "app_trace_util.c"
    "host_file_io.c")

if(CONFIG_APPTRACE_GCOV_ENABLE)
    if("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
        list(APPEND srcs
            "gcov/gcov_rtio.c")
    else()
        fail_at_build_time(app_trace "Only GNU compiler can link with Gcov library")
    endif()
endif()

set(include_dirs "include")

set(priv_include_dirs "private_include" "port/include")

if(CONFIG_APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE)
    list(APPEND srcs
        "app_trace_membufs_proto.c")

    if(CONFIG_IDF_TARGET_ARCH_XTENSA)
        list(APPEND srcs
            "port/xtensa/port.c")
    endif()
    if(CONFIG_IDF_TARGET_ARCH_RISCV)
        list(APPEND srcs
            "port/riscv/port.c")
    endif()
endif()
list(APPEND srcs
    "port/port_uart.c")

if(CONFIG_APPTRACE_SV_ENABLE)
    list(APPEND include_dirs
        sys_view/Config
        sys_view/SEGGER
        sys_view/Sample/FreeRTOSV10.4)

    list(APPEND srcs
        "sys_view/SEGGER/SEGGER_SYSVIEW.c"
        "sys_view/Sample/FreeRTOSV10.4/Config/esp/SEGGER_SYSVIEW_Config_FreeRTOS.c"
        "sys_view/Sample/FreeRTOSV10.4/SEGGER_SYSVIEW_FreeRTOS.c"
        "sys_view/esp/SEGGER_RTT_esp.c"
        "sys_view/ext/heap_trace_module.c"
        "sys_view/ext/logging.c")
endif()

if(CONFIG_HEAP_TRACING_TOHOST)
    list(APPEND srcs "heap_trace_tohost.c")
    set_source_files_properties(heap_trace_tohost.c
        PROPERTIES COMPILE_FLAGS
        -Wno-frame-address)
endif()

idf_component_register(SRCS "${srcs}"
                       INCLUDE_DIRS "${include_dirs}"
                       PRIV_INCLUDE_DIRS "${priv_include_dirs}"
                       # Requires "driver" for GPTimer in "SEGGER_SYSVIEW_Config_FreeRTOS.c"
                       PRIV_REQUIRES soc driver
                       REQUIRES esp_timer
                       LDFRAGMENTS linker.lf)

# Force app_trace to also appear later than gcov in the link line
idf_component_get_property(app_trace app_trace COMPONENT_LIB)

if(CONFIG_APPTRACE_GCOV_ENABLE)
    if(CMAKE_C_COMPILER_ID MATCHES "Clang")
        # Coverage info is not supported when clang is used
        # TODO: LLVM-214
        message(FATAL_ERROR "Coverage info is not supported when building with Clang!")
    endif()

    # The original Gcov library from the toolchain is processed with objcopy to redefine symbols (see gcov/io_sym.map).
    # This is needed because the ESP has no on-board file system; the redefined functions solve this problem
    # and transmit the output file to the host PC.

    # Set a name for the Gcov library
    set(GCOV_LIB libgcov_rtio)

    # Set the include directory of Gcov internal headers
    execute_process(COMMAND ${CMAKE_C_COMPILER} -print-file-name=plugin
        OUTPUT_VARIABLE gcc_plugin_dir
        OUTPUT_STRIP_TRAILING_WHITESPACE
        ERROR_QUIET)
    set_source_files_properties(gcov/gcov_rtio.c
        PROPERTIES COMPILE_FLAGS "-I${gcc_plugin_dir}/include")

    # Copy libgcov.a with symbol redefinition
    find_library(GCOV_LIBRARY_PATH gcov ${CMAKE_C_IMPLICIT_LINK_DIRECTORIES})
    add_custom_command(OUTPUT ${GCOV_LIB}.a
        COMMAND ${_CMAKE_TOOLCHAIN_PREFIX}objcopy
            --redefine-syms ${CMAKE_CURRENT_LIST_DIR}/gcov/io_sym.map
            ${GCOV_LIBRARY_PATH} ${GCOV_LIB}.a
        MAIN_DEPENDENCY ${GCOV_LIBRARY_PATH}
        VERBATIM)
    add_custom_target(${GCOV_LIB}_target DEPENDS ${GCOV_LIB}.a)
    add_library(${GCOV_LIB} STATIC IMPORTED)
    set_target_properties(${GCOV_LIB}
        PROPERTIES
        IMPORTED_LOCATION ${CMAKE_CURRENT_BINARY_DIR}/${GCOV_LIB}.a)
    add_dependencies(${GCOV_LIB} ${GCOV_LIB}_target)
    add_dependencies(${COMPONENT_LIB} ${GCOV_LIB})

    # disable --coverage for this component, as it is used as transport for gcov
    target_compile_options(${COMPONENT_LIB} PRIVATE "-fno-profile-arcs" "-fno-test-coverage")
    target_link_options(${COMPONENT_LIB} INTERFACE "-Wl,--wrap=__gcov_init")

    target_link_libraries(${COMPONENT_LIB} INTERFACE ${GCOV_LIB} $<TARGET_FILE:${app_trace}> c)
else()
    target_link_libraries(${COMPONENT_LIB} INTERFACE $<TARGET_FILE:${app_trace}> c)
endif()

# This function adds a dependency on the given component if the component is included in the build.
function(maybe_add_component component_name)
    idf_build_get_property(components BUILD_COMPONENTS)
    if(${component_name} IN_LIST components)
        idf_component_get_property(lib_name ${component_name} COMPONENT_LIB)
        target_link_libraries(${COMPONENT_LIB} PUBLIC ${lib_name})
    endif()
endfunction()

if(CONFIG_APPTRACE_DEST_UART0 OR CONFIG_APPTRACE_DEST_UART1 OR CONFIG_APPTRACE_DEST_UART2)
    maybe_add_component(driver)
endif()

@@ -1,399 +1,193 @@
menu "Application Level Tracing"

    choice APPTRACE_DESTINATION1
        prompt "Data Destination 1"
        default APPTRACE_DEST_NONE
        help
            Select destination for application trace: JTAG or none (to disable).
    choice ESP32_APPTRACE_DESTINATION
        prompt "Data Destination"
        default ESP32_APPTRACE_DEST_NONE
        help
            Select destination for application trace: trace memory or none (to disable).

        config APPTRACE_DEST_JTAG
            bool "JTAG"
            select APPTRACE_DEST_TRAX if IDF_TARGET_ARCH_XTENSA
            select APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
            select APPTRACE_ENABLE
        config ESP32_APPTRACE_DEST_TRAX
            bool "Trace memory"
            select ESP32_APPTRACE_ENABLE
        config ESP32_APPTRACE_DEST_NONE
            bool "None"
    endchoice

        config APPTRACE_DEST_NONE
            bool "None"
    endchoice
    config ESP32_APPTRACE_ENABLE
        bool
        depends on !ESP32_TRAX
        select MEMMAP_TRACEMEM
        select MEMMAP_TRACEMEM_TWOBANKS
        default n
        help
            Enables/disables the application tracing module.

    config APPTRACE_DEST_UART
        bool
    config ESP32_APPTRACE_LOCK_ENABLE
        bool
        default !SYSVIEW_ENABLE
        help
            Enables/disables the application tracing module's internal sync lock.

    config APPTRACE_DEST_UART_NOUSB
        bool
    config ESP32_APPTRACE_ONPANIC_HOST_FLUSH_TMO
        int "Timeout for flushing last trace data to host on panic"
        depends on ESP32_APPTRACE_ENABLE
        range -1 5000
        default -1
        help
            Timeout for flushing last trace data to host in case of panic. In ms.
            Use -1 to disable the timeout and wait forever.

    choice APPTRACE_DESTINATION2
        prompt "Data Destination 2"
        default APPTRACE_DEST_UART_NONE
        help
            Select destination for application trace: UART(XX) or none (to disable).
    config ESP32_APPTRACE_POSTMORTEM_FLUSH_TRAX_THRESH
        int "Threshold for flushing last trace data to host on panic"
        depends on ESP32_APPTRACE_DEST_TRAX
        range 0 16384
        default 0
        help
            Threshold for flushing last trace data to host on panic in post-mortem mode.
            This is the minimal amount of data needed to perform the flush. In bytes.

        config APPTRACE_DEST_UART0
            bool "UART0"
            select APPTRACE_ENABLE
            select APPTRACE_DEST_UART
            select APPTRACE_DEST_UART_NOUSB
            depends on (ESP_CONSOLE_UART_NUM != 0)
    config ESP32_APPTRACE_PENDING_DATA_SIZE_MAX
        int "Size of the pending data buffer"
        depends on ESP32_APPTRACE_DEST_TRAX
        default 0
        help
            Size of the buffer for events in bytes. It is useful for buffering events from
            time-critical code (scheduler, ISRs, etc.). If this parameter is 0 then
            events will be discarded when the main HW buffer is full.

        config APPTRACE_DEST_UART1
            bool "UART1"
            select APPTRACE_ENABLE
            select APPTRACE_DEST_UART
            select APPTRACE_DEST_UART_NOUSB
            depends on (ESP_CONSOLE_UART_NUM != 1)
    menu "FreeRTOS SystemView Tracing"
        config SYSVIEW_ENABLE
            bool "SystemView Tracing Enable"
            depends on ESP32_APPTRACE_ENABLE
            default n
            help
                Enables support for SEGGER SystemView tracing functionality.

        config APPTRACE_DEST_UART2
            bool "UART2"
            select APPTRACE_ENABLE
            select APPTRACE_DEST_UART
            select APPTRACE_DEST_UART_NOUSB
            depends on (ESP_CONSOLE_UART_NUM != 2) && (SOC_UART_NUM > 2)
        choice SYSVIEW_TS_SOURCE
            prompt "Timer to use as timestamp source"
            depends on SYSVIEW_ENABLE
            default SYSVIEW_TS_SOURCE_CCOUNT if FREERTOS_UNICORE && !PM_ENABLE
            default SYSVIEW_TS_SOURCE_TIMER_00 if !FREERTOS_UNICORE && !PM_ENABLE
            default SYSVIEW_TS_SOURCE_ESP_TIMER if PM_ENABLE
            help
                SystemView needs to use a hardware timer as the source of timestamps
                when tracing. This option selects the timer for it.

        config APPTRACE_DEST_USB_CDC
            bool "USB_CDC"
            select APPTRACE_ENABLE
            select APPTRACE_DEST_UART
            depends on !ESP_CONSOLE_USB_CDC && (IDF_TARGET_ESP32C3 || IDF_TARGET_ESP32S3) && !USB_ENABLED
            config SYSVIEW_TS_SOURCE_CCOUNT
                bool "CPU cycle counter (CCOUNT)"
                depends on FREERTOS_UNICORE && !PM_ENABLE

        config APPTRACE_DEST_UART_NONE
            bool "None"
    endchoice
            config SYSVIEW_TS_SOURCE_TIMER_00
                bool "Timer 0, Group 0"
                depends on !PM_ENABLE

    config APPTRACE_UART_TX_GPIO
        int "UART TX on GPIO#"
        depends on APPTRACE_DEST_UART_NOUSB
        range 0 46
        default 12 if IDF_TARGET_ESP32
        default 12 if IDF_TARGET_ESP32C3
        default 12
        help
            This GPIO is used for the UART TX pin.
            config SYSVIEW_TS_SOURCE_TIMER_01
                bool "Timer 1, Group 0"
                depends on !PM_ENABLE

    config APPTRACE_UART_RX_GPIO
        int "UART RX on GPIO#"
        depends on APPTRACE_DEST_UART_NOUSB
        range 0 46
        default 13 if IDF_TARGET_ESP32
        default 13 if IDF_TARGET_ESP32C3
        default 13
        help
            This GPIO is used for the UART RX pin.
            config SYSVIEW_TS_SOURCE_TIMER_10
                bool "Timer 0, Group 1"
                depends on !PM_ENABLE

    config APPTRACE_UART_BAUDRATE
        int
        prompt "UART baud rate" if APPTRACE_DEST_UART
        depends on APPTRACE_DEST_UART
        default 1000000
        range 1200 8000000
        range 1200 1000000
        help
            This baud rate is used for the UART.
            config SYSVIEW_TS_SOURCE_TIMER_11
                bool "Timer 1, Group 1"
                depends on !PM_ENABLE

            The app's maximum baud rate depends on the UART clock source. If Power Management is disabled,
            the UART clock source is the APB clock and all baud rates in the available range will be sufficiently
            accurate. If Power Management is enabled, the REF_TICK clock source is used, so the baud rate is divided
            from 1 MHz. Baud rates above 1 Mbps are not possible, and values between 500 Kbps and 1 Mbps may not be
            accurate.
            config SYSVIEW_TS_SOURCE_ESP_TIMER
                bool "esp_timer high resolution timer"

    config APPTRACE_UART_RX_BUFF_SIZE
        int
        prompt "UART RX ring buffer size" if APPTRACE_DEST_UART
        depends on APPTRACE_DEST_UART
        default 128
        range 64 32768
        help
            Size of the UART input ring buffer.
            This size is related to the baud rate, the system tick frequency, and the amount of data to transfer.
            Data is placed into this buffer before being sent out to the interface.
        endchoice

    config APPTRACE_UART_TX_BUFF_SIZE
        int
        prompt "UART TX ring buffer size" if APPTRACE_DEST_UART
        depends on APPTRACE_DEST_UART
        default 4096
        range 2048 32768
        help
            Size of the UART output ring buffer.
            This size is related to the baud rate, the system tick frequency, and the amount of data to transfer.
        config SYSVIEW_EVT_OVERFLOW_ENABLE
            bool "Trace Buffer Overflow Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Trace Buffer Overflow" event.

    config APPTRACE_UART_TX_MSG_SIZE
        int
        prompt "UART TX message size" if APPTRACE_DEST_UART
        depends on APPTRACE_DEST_UART
        default 128
        range 64 32768
        help
            Maximum size of a single message to transfer.
        config SYSVIEW_EVT_ISR_ENTER_ENABLE
            bool "ISR Enter Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "ISR Enter" event.

    config APPTRACE_UART_TASK_PRIO
        int
        prompt "UART Task Priority" if APPTRACE_DEST_UART
        default 1
        range 1 32
        help
            UART task priority. In case of a high event rate,
            this parameter could be changed up to (configMAX_PRIORITIES-1).
        config SYSVIEW_EVT_ISR_EXIT_ENABLE
            bool "ISR Exit Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "ISR Exit" event.

    config APPTRACE_DEST_TRAX
        bool
        depends on IDF_TARGET_ARCH_XTENSA && !ESP32_TRAX && !ESP32S2_TRAX && !ESP32S3_TRAX
        select ESP32_MEMMAP_TRACEMEM
        select ESP32S2_MEMMAP_TRACEMEM
        select ESP32S3_MEMMAP_TRACEMEM
        select ESP32_MEMMAP_TRACEMEM_TWOBANKS
        select ESP32S2_MEMMAP_TRACEMEM_TWOBANKS
        select ESP32S3_MEMMAP_TRACEMEM_TWOBANKS
        default n
        help
            Enables/disables the TRAX tracing HW.
        config SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE
            bool "ISR Exit to Scheduler Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "ISR to Scheduler" event.

    config APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
        bool
        default n
        help
            Enables/disables the swapping memory buffers tracing protocol.
        config SYSVIEW_EVT_TASK_START_EXEC_ENABLE
            bool "Task Start Execution Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Start Execution" event.

    config APPTRACE_ENABLE
        bool
        default n
        help
            Enables/disables the application tracing module.
        config SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE
            bool "Task Stop Execution Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Stop Execution" event.

    config APPTRACE_LOCK_ENABLE
        bool
        default !APPTRACE_SV_ENABLE
        help
            Enables/disables the application tracing module's internal sync lock.
        config SYSVIEW_EVT_TASK_START_READY_ENABLE
            bool "Task Start Ready State Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Start Ready State" event.

    config APPTRACE_ONPANIC_HOST_FLUSH_TMO
        int "Timeout for flushing last trace data to host on panic"
        depends on APPTRACE_ENABLE
        range -1 5000
        default -1
        help
            Timeout for flushing last trace data to host in case of panic. In ms.
            Use -1 to disable the timeout and wait forever.
        config SYSVIEW_EVT_TASK_STOP_READY_ENABLE
            bool "Task Stop Ready State Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Stop Ready State" event.

    config APPTRACE_POSTMORTEM_FLUSH_THRESH
        int "Threshold for flushing last trace data to host on panic"
        depends on APPTRACE_ENABLE
        range 0 16384
        default 0
        help
            Threshold for flushing last trace data to host on panic in post-mortem mode.
            This is the minimal amount of data needed to perform the flush. In bytes.
        config SYSVIEW_EVT_TASK_CREATE_ENABLE
            bool "Task Create Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Create" event.

    config APPTRACE_BUF_SIZE
        int "Size of the apptrace buffer"
        depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE && !APPTRACE_DEST_TRAX
        default 16384
        help
            Size of the memory buffer for trace data in bytes.
        config SYSVIEW_EVT_TASK_TERMINATE_ENABLE
            bool "Task Terminate Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Task Terminate" event.

    config APPTRACE_PENDING_DATA_SIZE_MAX
        int "Size of the pending data buffer"
        depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
        default 0
        help
            Size of the buffer for events in bytes. It is useful for buffering events from
            time-critical code (scheduler, ISRs, etc.). If this parameter is 0 then
            events will be discarded when the main HW buffer is full.
        config SYSVIEW_EVT_IDLE_ENABLE
            bool "System Idle Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "System Idle" event.

    menu "FreeRTOS SystemView Tracing"
        depends on APPTRACE_ENABLE
        config APPTRACE_SV_ENABLE
            bool "SystemView Tracing Enable"
            depends on APPTRACE_ENABLE
            default n
            help
                Enables support for SEGGER SystemView tracing functionality.
        config SYSVIEW_EVT_TIMER_ENTER_ENABLE
            bool "Timer Enter Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Timer Enter" event.

        choice APPTRACE_SV_DEST
            prompt "SystemView destination"
            depends on APPTRACE_SV_ENABLE
            default APPTRACE_SV_DEST_JTAG
            help
                SystemView will transfer data through the defined interface.

            config APPTRACE_SV_DEST_JTAG
                bool "Data destination JTAG"
                depends on !PM_ENABLE && !APPTRACE_DEST_NONE
                help
                    Send SEGGER SystemView events through the JTAG interface.

            config APPTRACE_SV_DEST_UART
                bool "Data destination UART"
                depends on APPTRACE_DEST_UART
                help
                    Send SEGGER SystemView events through the UART interface.

        endchoice

        choice APPTRACE_SV_CPU
            prompt "CPU to trace"
            depends on APPTRACE_SV_DEST_UART && !FREERTOS_UNICORE
            default APPTRACE_SV_DEST_CPU_0
            help
                Defines the CPU traced by SystemView.

            config APPTRACE_SV_DEST_CPU_0
                bool "CPU0"
                help
                    Send SEGGER SystemView events for the Pro CPU.

            config APPTRACE_SV_DEST_CPU_1
                bool "CPU1"
                help
                    Send SEGGER SystemView events for the App CPU.

        endchoice

        choice APPTRACE_SV_TS_SOURCE
            prompt "Timer to use as timestamp source"
            depends on APPTRACE_SV_ENABLE
            default APPTRACE_SV_TS_SOURCE_CCOUNT if FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3
            default APPTRACE_SV_TS_SOURCE_GPTIMER if !FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3
            default APPTRACE_SV_TS_SOURCE_ESP_TIMER if PM_ENABLE || IDF_TARGET_ESP32C3
            help
                SystemView needs to use a hardware timer as the source of timestamps
                when tracing. This option selects the timer for it.

            config APPTRACE_SV_TS_SOURCE_CCOUNT
                bool "CPU cycle counter (CCOUNT)"
                depends on FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3

            config APPTRACE_SV_TS_SOURCE_GPTIMER
                bool "General Purpose Timer (Timer Group)"
                depends on !PM_ENABLE && !IDF_TARGET_ESP32C3

            config APPTRACE_SV_TS_SOURCE_ESP_TIMER
                bool "esp_timer high resolution timer"

        endchoice

        config APPTRACE_SV_MAX_TASKS
            int "Maximum supported tasks"
            depends on APPTRACE_SV_ENABLE
            range 1 64
            default 16
            help
                Configures the maximum number of supported tasks in SystemView debug.

        config APPTRACE_SV_BUF_WAIT_TMO
            int "Trace buffer wait timeout"
            depends on APPTRACE_SV_ENABLE
            default 500
            help
                Configures the timeout (in us) to wait for free space in the trace buffer.
                Set to -1 to wait forever and avoid lost events.

        config APPTRACE_SV_EVT_OVERFLOW_ENABLE
            bool "Trace Buffer Overflow Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Trace Buffer Overflow" event.

        config APPTRACE_SV_EVT_ISR_ENTER_ENABLE
            bool "ISR Enter Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "ISR Enter" event.

        config APPTRACE_SV_EVT_ISR_EXIT_ENABLE
            bool "ISR Exit Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "ISR Exit" event.

        config APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE
            bool "ISR Exit to Scheduler Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "ISR to Scheduler" event.

        config APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE
            bool "Task Start Execution Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Start Execution" event.

        config APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE
            bool "Task Stop Execution Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Stop Execution" event.

        config APPTRACE_SV_EVT_TASK_START_READY_ENABLE
            bool "Task Start Ready State Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Start Ready State" event.

        config APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE
            bool "Task Stop Ready State Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Stop Ready State" event.

        config APPTRACE_SV_EVT_TASK_CREATE_ENABLE
            bool "Task Create Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Create" event.

        config APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE
            bool "Task Terminate Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Task Terminate" event.

        config APPTRACE_SV_EVT_IDLE_ENABLE
            bool "System Idle Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "System Idle" event.

        config APPTRACE_SV_EVT_TIMER_ENTER_ENABLE
            bool "Timer Enter Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Timer Enter" event.

        config APPTRACE_SV_EVT_TIMER_EXIT_ENABLE
            bool "Timer Exit Event"
            depends on APPTRACE_SV_ENABLE
            default y
            help
                Enables "Timer Exit" event.

    endmenu

    config APPTRACE_GCOV_ENABLE
        bool "GCOV to Host Enable"
        depends on APPTRACE_ENABLE && !APPTRACE_SV_ENABLE
        select ESP_DEBUG_STUBS_ENABLE
        default n
        help
            Enables support for GCOV data transfer to host.

    config APPTRACE_GCOV_DUMP_TASK_STACK_SIZE
        int "Gcov dump task stack size"
        depends on APPTRACE_GCOV_ENABLE
        default 2048
        help
            Configures the stack size of the Gcov dump task.
        config SYSVIEW_EVT_TIMER_EXIT_ENABLE
            bool "Timer Exit Event"
            depends on SYSVIEW_ENABLE
            default y
            help
                Enables "Timer Exit" event.

    endmenu
endmenu

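A minimal usage sketch of the tracing module these options configure, using the public `esp_app_trace` API. `ESP_APPTRACE_DEST_TRAX` and `ESP_APPTRACE_TMO_INFINITE` appear elsewhere in this changeset; the snippet itself is illustrative, not taken from this diff:

```c
#include "esp_app_trace.h"

// Sketch: push one message to the host over the configured apptrace
// destination (TRAX/JTAG here). Assumes the JTAG / trace-memory
// destination is enabled in menuconfig.
void send_trace_sample(void)
{
    const char msg[] = "hello from target";
    esp_err_t err = esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, msg, sizeof(msg),
                                       ESP_APPTRACE_TMO_INFINITE);
    if (err != ESP_OK) {
        // trace buffer full or host not attached; handle as appropriate
    }
}
```
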
File diff suppressed because it is too large
@@ -1,372 +0,0 @@
/*
 * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <sys/param.h>
#include <string.h>
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_app_trace_membufs_proto.h"

/** Trace data header. Every user data chunk is prepended with this header.
 * The user allocates a block with esp_apptrace_buffer_get and then fills it with data.
 * In a multithreading environment it can happen that a task gets a buffer and is then interrupted,
 * so it is possible that the user data is incomplete when the memory block is exposed to the host.
 * In this case the host SW will see that wr_sz < block_sz and will report an error.
 */
typedef struct {
#if CONFIG_APPTRACE_SV_ENABLE
    uint8_t block_sz; // size of allocated block for user data
    uint8_t wr_sz;    // size of actually written data
#else
    uint16_t block_sz; // size of allocated block for user data
    uint16_t wr_sz;    // size of actually written data
#endif
} esp_tracedata_hdr_t;

/** TODO: docs
 */
typedef struct {
    uint16_t block_sz; // size of allocated block for user data
} esp_hostdata_hdr_t;

#if CONFIG_APPTRACE_SV_ENABLE
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_)        (0)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_)           (_v_)
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_)  255UL
#else
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_)        ((_cid_) << 15)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_)           (~(1 << 15) & (_v_))
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_)  (ESP_APPTRACE_INBLOCK(_hw_data_)->sz - sizeof(esp_tracedata_hdr_t))
#endif
#define ESP_APPTRACE_USR_BLOCK_RAW_SZ(_s_)  ((_s_) + sizeof(esp_tracedata_hdr_t))

#define ESP_APPTRACE_INBLOCK_MARKER(_hw_data_)          ((_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2])
#define ESP_APPTRACE_INBLOCK_MARKER_UPD(_hw_data_, _v_) do {(_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2] += (_v_);}while(0)
#define ESP_APPTRACE_INBLOCK(_hw_data_)                 (&(_hw_data_)->blocks[(_hw_data_)->state.in_block % 2])

const static char *TAG = "esp_apptrace";

static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size);


esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, const esp_apptrace_mem_block_t blocks_cfg[2])
{
    // disabled by default
    esp_apptrace_rb_init(&proto->rb_down, NULL, 0);
    // membufs proto init
    for (unsigned i = 0; i < 2; i++) {
        proto->blocks[i].start = blocks_cfg[i].start;
        proto->blocks[i].sz = blocks_cfg[i].sz;
        proto->state.markers[i] = 0;
    }
    proto->state.in_block = 0;
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    esp_apptrace_rb_init(&proto->rb_pend, proto->pending_data,
                         sizeof(proto->pending_data));
#endif
    return ESP_OK;
}

void esp_apptrace_membufs_down_buffer_config(esp_apptrace_membufs_proto_data_t *data, uint8_t *buf, uint32_t size)
{
    esp_apptrace_rb_init(&data->rb_down, buf, size);
}

// assumed to be protected by caller from multi-core/thread access
static esp_err_t esp_apptrace_membufs_swap(esp_apptrace_membufs_proto_data_t *proto)
{
    int prev_block_num = proto->state.in_block % 2;
    int new_block_num = prev_block_num ? (0) : (1);
    esp_err_t res = ESP_OK;

    res = proto->hw->swap_start(proto->state.in_block);
    if (res != ESP_OK) {
        return res;
    }

    proto->state.markers[new_block_num] = 0;
    // switch to new block
    proto->state.in_block++;

    proto->hw->swap(new_block_num);

    // handle data from host
    esp_hostdata_hdr_t *hdr = (esp_hostdata_hdr_t *)proto->blocks[new_block_num].start;
    // ESP_APPTRACE_LOGV("Host data %d, sz %d @ %p", proto->hw->host_data_pending(), hdr->block_sz, hdr);
    if (proto->hw->host_data_pending() && hdr->block_sz > 0) {
        // TODO: add support for multiple blocks from host, currently there is no need for that
        uint8_t *p = proto->blocks[new_block_num].start + proto->blocks[new_block_num].sz;
        ESP_APPTRACE_LOGD("Recvd %d bytes from host (@ 0x%x) [%x %x %x %x %x %x %x %x .. %x %x %x %x %x %x %x %x]",
                          hdr->block_sz, proto->blocks[new_block_num].start,
                          *(proto->blocks[new_block_num].start+0), *(proto->blocks[new_block_num].start+1),
                          *(proto->blocks[new_block_num].start+2), *(proto->blocks[new_block_num].start+3),
                          *(proto->blocks[new_block_num].start+4), *(proto->blocks[new_block_num].start+5),
                          *(proto->blocks[new_block_num].start+6), *(proto->blocks[new_block_num].start+7),
                          *(p-8), *(p-7), *(p-6), *(p-5), *(p-4), *(p-3), *(p-2), *(p-1));
        uint32_t sz = esp_apptrace_membufs_down_buffer_write_nolock(proto, (uint8_t *)(hdr+1), hdr->block_sz);
        if (sz != hdr->block_sz) {
            ESP_APPTRACE_LOGE("Failed to write %d bytes to down buffer (%d %d)!", hdr->block_sz - sz, hdr->block_sz, sz);
        }
        hdr->block_sz = 0;
    }
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    // copy pending data to the block, if any
    while (proto->state.markers[new_block_num] < proto->blocks[new_block_num].sz) {
        uint32_t read_sz = esp_apptrace_rb_read_size_get(&proto->rb_pend);
        if (read_sz == 0) {
            break; // no more data in pending buffer
        }
        if (read_sz > proto->blocks[new_block_num].sz - proto->state.markers[new_block_num]) {
            read_sz = proto->blocks[new_block_num].sz - proto->state.markers[new_block_num];
        }
        uint8_t *ptr = esp_apptrace_rb_consume(&proto->rb_pend, read_sz);
        if (!ptr) {
            assert(false && "Failed to consume pended bytes!!");
            break;
        }
        ESP_APPTRACE_LOGD("Pump %d pend bytes [%x %x %x %x : %x %x %x %x : %x %x %x %x : %x %x...%x %x]",
                          read_sz, *(ptr+0), *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
                          *(ptr+5), *(ptr+6), *(ptr+7), *(ptr+8), *(ptr+9), *(ptr+10), *(ptr+11), *(ptr+12), *(ptr+13), *(ptr+read_sz-2), *(ptr+read_sz-1));
        memcpy(proto->blocks[new_block_num].start + proto->state.markers[new_block_num], ptr, read_sz);
        proto->state.markers[new_block_num] += read_sz;
    }
#endif
    proto->hw->swap_end(proto->state.in_block, proto->state.markers[prev_block_num]);
    return res;
}

static esp_err_t esp_apptrace_membufs_swap_waitus(esp_apptrace_membufs_proto_data_t *proto, esp_apptrace_tmo_t *tmo)
{
    int res;

    while ((res = esp_apptrace_membufs_swap(proto)) != ESP_OK) {
        res = esp_apptrace_tmo_check(tmo);
        if (res != ESP_OK) {
            break;
        }
    }
    return res;
}

uint8_t *esp_apptrace_membufs_down_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t *size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *ptr = NULL;

    while (1) {
        uint32_t sz = esp_apptrace_rb_read_size_get(&proto->rb_down);
        if (sz != 0) {
            *size = MIN(*size, sz);
            ptr = esp_apptrace_rb_consume(&proto->rb_down, *size);
            if (!ptr) {
                assert(false && "Failed to consume bytes from down buffer!");
            }
            break;
        }
        // may need to flush
        if (proto->hw->host_data_pending()) {
            ESP_APPTRACE_LOGD("force flush");
            int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
            if (res != ESP_OK) {
                ESP_APPTRACE_LOGE("Failed to switch to another block to recv data from host!");
                /* do not return an error because data can already be in the down buffer */
            }
        } else {
            // check tmo only if there is no data from host
            int res = esp_apptrace_tmo_check(tmo);
            if (res != ESP_OK) {
                return NULL;
            }
        }
    }
    return ptr;
}

esp_err_t esp_apptrace_membufs_down_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    /* nothing to do */
    return ESP_OK;
}

static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size)
{
    uint32_t total_sz = 0;

    while (total_sz < size) {
        ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock WRS %d-%d-%d %d", proto->rb_down.wr, proto->rb_down.rd,
                          proto->rb_down.cur_size, size);
        uint32_t wr_sz = esp_apptrace_rb_write_size_get(&proto->rb_down);
        if (wr_sz == 0) {
            break;
        }

        if (wr_sz > size - total_sz) {
            wr_sz = size - total_sz;
        }
        ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d", wr_sz);
        uint8_t *ptr = esp_apptrace_rb_produce(&proto->rb_down, wr_sz);
        if (!ptr) {
            assert(false && "Failed to produce bytes to down buffer!");
        }
        ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d to 0x%x from 0x%x", wr_sz, ptr, data + total_sz + wr_sz);
        memcpy(ptr, data + total_sz, wr_sz);
        total_sz += wr_sz;
        ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d/%d", wr_sz, total_sz);
    }
    return total_sz;
}

static inline uint8_t *esp_apptrace_membufs_wait4buf(esp_apptrace_membufs_proto_data_t *proto, uint16_t size, esp_apptrace_tmo_t *tmo, int *pended)
{
    uint8_t *ptr = NULL;

    int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
    if (res != ESP_OK) {
        return NULL;
    }
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    // check if we still have pending data
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if after the block switch we still have pending data (not all pending data has been pumped to the block),
        // alloc a new pending buffer
        *pended = 1;
        ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
        if (!ptr) {
            ESP_APPTRACE_LOGE("Failed to alloc pend buf 1: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
        }
    } else
#endif
    {
        // update block pointers
        if (ESP_APPTRACE_INBLOCK_MARKER(proto) + size > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
            *pended = 1;
            ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
            if (ptr == NULL) {
                ESP_APPTRACE_LOGE("Failed to alloc pend buf 2: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
            }
#endif
        } else {
            *pended = 0;
            ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
        }
    }

    return ptr;
}

static inline uint8_t *esp_apptrace_membufs_pkt_start(uint8_t *ptr, uint16_t size)
{
    // it is safe to use esp_cpu_get_core_id() in the macro call because the arg is used only once inside it
    ((esp_tracedata_hdr_t *)ptr)->block_sz = ESP_APPTRACE_USR_BLOCK_CORE(esp_cpu_get_core_id()) | size;
    ((esp_tracedata_hdr_t *)ptr)->wr_sz = 0;
    return ptr + sizeof(esp_tracedata_hdr_t);
}

static inline void esp_apptrace_membufs_pkt_end(uint8_t *ptr)
{
    esp_tracedata_hdr_t *hdr = (esp_tracedata_hdr_t *)(ptr - sizeof(esp_tracedata_hdr_t));
    // update written size
    hdr->wr_sz = hdr->block_sz;
}

uint8_t *esp_apptrace_membufs_up_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *buf_ptr = NULL;

    if (size > ESP_APPTRACE_USR_DATA_LEN_MAX(proto)) {
        ESP_APPTRACE_LOGE("Too large user data size %d!", size);
        return NULL;
    }

    // check for data in the pending buffer
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if we have buffered data, try to switch the block
        esp_apptrace_membufs_swap(proto);
        // if the switch was successful, part or all of the pended data has been copied to the block
    }
    if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
        // if we have buffered data, alloc a new pending buffer
        ESP_APPTRACE_LOGD("Get %d bytes from PEND buffer", size);
        buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
        if (buf_ptr == NULL) {
            int pended_buf;
            buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
            if (buf_ptr && !pended_buf) {
                ESP_APPTRACE_LOGD("Get %d bytes from block", size);
                // update cur block marker
                ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
            }
        }
    } else {
#else
    if (1) {
#endif
        if (ESP_APPTRACE_INBLOCK_MARKER(proto) + ESP_APPTRACE_USR_BLOCK_RAW_SZ(size) > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
            ESP_APPTRACE_LOGD("Block full. Get %d bytes from PEND buffer", size);
            buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
#endif
            if (buf_ptr == NULL) {
                int pended_buf;
                ESP_APPTRACE_LOGD(" full. Get %d bytes from pend buffer", size);
                buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
                if (buf_ptr && !pended_buf) {
                    ESP_APPTRACE_LOGD("Got %d bytes from block", size);
                    // update cur block marker
                    ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
                }
            }
        } else {
            ESP_APPTRACE_LOGD("Get %d bytes from buffer", size);
            // fits into the current block
            buf_ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
            // update cur block marker
            ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
        }
    }
    if (buf_ptr) {
        buf_ptr = esp_apptrace_membufs_pkt_start(buf_ptr, size);
    }

    return buf_ptr;
}

esp_err_t esp_apptrace_membufs_up_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    esp_apptrace_membufs_pkt_end(ptr);
    // TODO: mark the block as busy in order not to re-use it for other tracing calls until it is completely written
    // TODO: avoid the potential situation when all memory is consumed by low-prio tasks which can not complete writing due to
    // higher-prio tasks, and the latter can not allocate buffers at all;
    // this abnormal situation can be detected on the host, which will receive only uncompleted buffers
    // workaround: use our own memcpy which will kick off dead tracing calls
    return ESP_OK;
}

esp_err_t esp_apptrace_membufs_flush_nolock(esp_apptrace_membufs_proto_data_t *proto, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
{
    int res = ESP_OK;

    if (ESP_APPTRACE_INBLOCK_MARKER(proto) < min_sz) {
        ESP_APPTRACE_LOGI("Ignore flush request for min %d bytes. Bytes in block: %d.", min_sz, ESP_APPTRACE_INBLOCK_MARKER(proto));
        return ESP_OK;
    }
    // switch the block while the size of the data (including that in the pending buffer) is more than the min size
    while (ESP_APPTRACE_INBLOCK_MARKER(proto) > min_sz) {
        ESP_APPTRACE_LOGD("Try to flush %d bytes. Wait until block switch for %lld us", ESP_APPTRACE_INBLOCK_MARKER(proto), tmo->tmo);
        res = esp_apptrace_membufs_swap_waitus(proto, tmo);
        if (res != ESP_OK) {
            if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE)
                ESP_APPTRACE_LOGW("Failed to switch to another block in %lld us!", tmo->tmo);
            else
                ESP_APPTRACE_LOGE("Failed to switch to another block in %lld us!", tmo->tmo);
            return res;
        }
    }

    return res;
}

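For orientation, a hedged sketch of the get/fill/put call pattern the up-buffer API above is designed for. It assumes `proto` has already been set up via `esp_apptrace_membufs_init()` by the port layer:

```c
#include <string.h>
#include "esp_app_trace_membufs_proto.h"

// Sketch: reserve a trace block, fill it, and publish it to the host.
esp_err_t send_user_chunk(esp_apptrace_membufs_proto_data_t *proto,
                          const uint8_t *data, uint32_t size)
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, ESP_APPTRACE_TMO_INFINITE);

    // Reserve a user data block; the returned pointer points just past
    // the esp_tracedata_hdr_t header filled in by pkt_start().
    uint8_t *buf = esp_apptrace_membufs_up_buffer_get(proto, size, &tmo);
    if (buf == NULL) {
        return ESP_ERR_TIMEOUT;
    }
    memcpy(buf, data, size);
    // Mark the block fully written (wr_sz = block_sz) so the host accepts it.
    return esp_apptrace_membufs_up_buffer_put(proto, buf, &tmo);
}
```
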
@@ -1,49 +1,39 @@
/*
 * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_app_trace_util.h"
#include "sdkconfig.h"

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Locks /////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

#if ESP_APPTRACE_PRINT_LOCK
static esp_apptrace_lock_t s_log_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
#endif

int esp_apptrace_log_lock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, ESP_APPTRACE_TMO_INFINITE);
    int ret = esp_apptrace_lock_take(&s_log_lock, &tmo);
    return ret;
#else
    return 0;
#endif
}

void esp_apptrace_log_unlock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
    esp_apptrace_lock_give(&s_log_lock);
#endif
}
#include "esp_clk.h"

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// TIMEOUT /////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

#define ESP_APPTRACE_CPUTICKS2US(_t_, _cpu_freq_) ((_t_)/(_cpu_freq_/1000000))
#define ESP_APPTRACE_US2CPUTICKS(_t_, _cpu_freq_) ((_t_)*(_cpu_freq_/1000000))

esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
{
    if (tmo->tmo != (int64_t)-1) {
        tmo->elapsed = esp_timer_get_time() - tmo->start;
    int cpu_freq = esp_clk_cpu_freq();
    if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE) {
        unsigned cur = portGET_RUN_TIME_COUNTER_VALUE();
        if (tmo->start <= cur) {
            tmo->elapsed = ESP_APPTRACE_CPUTICKS2US(cur - tmo->start, cpu_freq);
        } else {
            tmo->elapsed = ESP_APPTRACE_CPUTICKS2US(0xFFFFFFFF - tmo->start + cur, cpu_freq);
        }
        if (tmo->elapsed >= tmo->tmo) {
            return ESP_ERR_TIMEOUT;
        }
@@ -57,26 +47,45 @@ esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)

esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
{
    esp_err_t ret;
    int res;

    while (1) {
        // Try to enter a critical section (i.e., take the spinlock) with 0 timeout
        if (portTRY_ENTER_CRITICAL(&(lock->mux), 0) == pdTRUE) {
        // do not overwrite lock->int_state before we have actually acquired the mux
        unsigned int_state = portENTER_CRITICAL_NESTED();
        // FIXME: if the mux is busy it is not a good idea to loop during the whole tmo with disabled IRQs.
        // So we check the mux state using a zero tmo, restore IRQs and let other tasks/IRQs run on this CPU
        // while we are doing our own tmo check.
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
        bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0, __FUNCTION__, __LINE__);
#else
        bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
#endif
        if (success) {
            lock->int_state = int_state;
            return ESP_OK;
        }
        // Failed to enter the critical section, so interrupts are still enabled. Check if we have timed out.
        ret = esp_apptrace_tmo_check(tmo);
        if (ret != ESP_OK) {
            break; // Timed out, exit now
        portEXIT_CRITICAL_NESTED(int_state);
        // we can be preempted from this place till the next call (above) to portENTER_CRITICAL_NESTED()
        res = esp_apptrace_tmo_check(tmo);
        if (res != ESP_OK) {
            break;
        }
        // Haven't timed out, try again
    }
    return ret;
    return res;
}

esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
{
    portEXIT_CRITICAL(&(lock->mux));
    // save the lock's irq state value for this CPU
    unsigned int_state = lock->int_state;
    // after the call to the following func we can not be sure that lock->int_state
    // is not overwritten by another CPU that acquired the mux just after we released it. See esp_apptrace_lock_take().
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    vPortCPUReleaseMutex(&lock->mux, __FUNCTION__, __LINE__);
#else
    vPortCPUReleaseMutex(&lock->mux);
#endif
    portEXIT_CRITICAL_NESTED(int_state);
    return ESP_OK;
}

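For completeness, a small sketch of how these helpers compose in caller code, modeled on `esp_apptrace_log_lock()` above. The initializer uses the field names from the `s_log_lock` declaration in this file (the revised code refers to `mux`/`int_state` instead, so treat the fields as version-dependent), and the timeout argument is assumed to be in microseconds:

```c
#include "esp_app_trace_util.h"

// Illustrative lock, initialized the same way as s_log_lock above.
static esp_apptrace_lock_t s_my_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};

void guarded_operation(void)
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, 1000);  // wait up to ~1000 us instead of forever
    if (esp_apptrace_lock_take(&s_my_lock, &tmo) != ESP_OK) {
        return;  // could not take the lock before the timeout
    }
    // ... short critical section ...
    esp_apptrace_lock_give(&s_my_lock);
}
```
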
components/app_trace/component.mk (new file, 29 lines)
@@ -0,0 +1,29 @@
#
# Component Makefile
#

COMPONENT_SRCDIRS := .

COMPONENT_ADD_INCLUDEDIRS = include

COMPONENT_ADD_LDFLAGS = -lapp_trace

# do not produce gcov info for this module, it is used as transport for gcov
CFLAGS := $(subst --coverage,,$(CFLAGS))

ifdef CONFIG_SYSVIEW_ENABLE

COMPONENT_ADD_INCLUDEDIRS += \
    sys_view/Config \
    sys_view/SEGGER \
    sys_view/Sample/OS

COMPONENT_SRCDIRS += \
    gcov \
    sys_view/SEGGER \
    sys_view/Sample/OS \
    sys_view/Sample/Config \
    sys_view/esp32
else
COMPONENT_SRCDIRS += gcov
endif

@@ -1,203 +1,93 @@

/*
 * SPDX-FileCopyrightText: 2017-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// This module implements runtime file I/O API for GCOV.

#include <string.h>
#include "esp_task_wdt.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "soc/timer_periph.h"
#include "soc/cpu.h"
#include "soc/timer_group_struct.h"
#include "soc/timer_group_reg.h"
#include "esp_app_trace.h"
#include "esp_freertos_hooks.h"
#include "esp_private/dbg_stubs.h"
#include "esp_ipc.h"
#include "esp_attr.h"
#include "hal/wdt_hal.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/libc_stubs.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/libc_stubs.h"
#endif

#if CONFIG_ESP32_APPTRACE_ENABLE

#define ESP_GCOV_DOWN_BUF_SIZE  4200

#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "esp_gcov_rtio";

static volatile bool s_create_gcov_task = false;
static volatile bool s_gcov_task_running = false;

extern void __gcov_dump(void);
extern void __gcov_reset(void);
static void (*s_gcov_exit)(void);
static uint8_t s_gcov_down_buf[256];

void gcov_dump_task(void *pvParameter)
{
    int dump_result = 0;
    bool *running = (bool *)pvParameter;

    ESP_EARLY_LOGV(TAG, "%s stack use in %d", __FUNCTION__, uxTaskGetStackHighWaterMark(NULL));

    ESP_EARLY_LOGV(TAG, "Alloc apptrace down buf %d bytes", ESP_GCOV_DOWN_BUF_SIZE);
    void *down_buf = malloc(ESP_GCOV_DOWN_BUF_SIZE);
    if (down_buf == NULL) {
        ESP_EARLY_LOGE(TAG, "Could not allocate memory for the buffer");
        dump_result = ESP_ERR_NO_MEM;
        goto gcov_exit;
    }
    ESP_EARLY_LOGV(TAG, "Config apptrace down buf");
    esp_apptrace_down_buffer_config(down_buf, ESP_GCOV_DOWN_BUF_SIZE);
    ESP_EARLY_LOGV(TAG, "Dump data...");
    __gcov_dump();
    // reset dump status to allow incremental data accumulation
    __gcov_reset();
    free(down_buf);
    ESP_EARLY_LOGV(TAG, "Finish file transfer session");
    dump_result = esp_apptrace_fstop(ESP_APPTRACE_DEST_TRAX);
    if (dump_result != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "Failed to send files transfer stop cmd (%d)!", dump_result);
    }

gcov_exit:
    ESP_EARLY_LOGV(TAG, "dump_result %d", dump_result);
    if (running) {
        *running = false;
    }

    ESP_EARLY_LOGV(TAG, "%s stack use out %d", __FUNCTION__, uxTaskGetStackHighWaterMark(NULL));

    vTaskDelete(NULL);
}

void gcov_create_task(void *arg)
{
    ESP_EARLY_LOGV(TAG, "%s", __FUNCTION__);
    xTaskCreatePinnedToCore(&gcov_dump_task, "gcov_dump_task", CONFIG_APPTRACE_GCOV_DUMP_TASK_STACK_SIZE,
                            (void *)&s_gcov_task_running, configMAX_PRIORITIES - 1, NULL, 0);
}

static IRAM_ATTR void gcov_create_task_tick_hook(void)
{
    extern esp_err_t esp_ipc_start_gcov_from_isr(uint32_t cpu_id, esp_ipc_func_t func, void* arg);
    if (s_create_gcov_task) {
        if (esp_ipc_start_gcov_from_isr(xPortGetCoreID(), &gcov_create_task, NULL) == ESP_OK) {
            s_create_gcov_task = false;
        }
    }
}

/**
 * @brief Triggers gcov info dump task.
 *        This function is to be called by OpenOCD, not by normal user code.
 *        TODO: what about interrupted flash access (when cache is disabled)?
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
static int esp_dbg_stub_gcov_entry(void)
{
    /* we are in ISR context here */
    s_create_gcov_task = true;
    return ESP_OK;
}

void gcov_rtio_init(void)
{
    uint32_t stub_entry = 0;
    ESP_EARLY_LOGV(TAG, "%s", __FUNCTION__);
    assert(esp_dbg_stub_entry_get(ESP_DBG_STUB_ENTRY_GCOV, &stub_entry) == ESP_OK);
    if (stub_entry != 0) {
        /* "__gcov_init()" can be called several times. We must avoid multiple tick hook registrations. */
        return;
    }
    esp_dbg_stub_entry_set(ESP_DBG_STUB_ENTRY_GCOV, (uint32_t)&esp_dbg_stub_gcov_entry);
    assert(esp_dbg_stub_entry_get(ESP_DBG_STUB_ENTRY_CAPABILITIES, &stub_entry) == ESP_OK);
    esp_dbg_stub_entry_set(ESP_DBG_STUB_ENTRY_CAPABILITIES, stub_entry | ESP_DBG_STUB_CAP_GCOV_TASK);
    esp_register_freertos_tick_hook(gcov_create_task_tick_hook);
}

void esp_gcov_dump(void)
{
    ESP_EARLY_LOGV(TAG, "%s", __FUNCTION__);
#if CONFIG_FREERTOS_UNICORE == 0
    esp_cpu_stall(!xPortGetCoreID());
#endif

    while (!esp_apptrace_host_is_connected(ESP_APPTRACE_DEST_TRAX)) {
        vTaskDelay(pdMS_TO_TICKS(10));
        // feed the task watchdog to avoid complaints that it was triggered for other tasks
        TIMERG0.wdt_wprotect = TIMG_WDT_WKEY_VALUE;
        TIMERG0.wdt_feed = 1;
        TIMERG0.wdt_wprotect = 0;
        // feed the other watchdog to avoid a reboot on INT_WDT
        TIMERG1.wdt_wprotect = TIMG_WDT_WKEY_VALUE;
        TIMERG1.wdt_feed = 1;
        TIMERG1.wdt_wprotect = 0;
    }

    /* We are not in ISR context here. Waiting for the completion is safe. */
    s_gcov_task_running = true;
    s_create_gcov_task = true;
    while (s_gcov_task_running) {
        vTaskDelay(pdMS_TO_TICKS(10));
    }

    if (s_gcov_exit) {
        esp_apptrace_down_buffer_config(s_gcov_down_buf, sizeof(s_gcov_down_buf));
        s_gcov_exit();
    }

    int ret = esp_apptrace_fstop(ESP_APPTRACE_DEST_TRAX);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send files transfer stop cmd (%d)!", ret);
    }
}

int gcov_rtio_atexit(void (*function)(void))
{
    s_gcov_exit = function;
    return 0;
}

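For reference, a hedged sketch (not from the diff) of how an application typically triggers the dump implemented above; run_code_under_test() is a made-up name:

#include "esp_app_trace.h"

void app_main(void)
{
    run_code_under_test(); // hypothetical code paths being profiled
    // Blocks until the host (OpenOCD) is connected and the .gcda data is transferred.
    esp_gcov_dump();
}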
void *gcov_rtio_fopen(const char *path, const char *mode)
{
    ESP_EARLY_LOGV(TAG, "%s '%s' '%s'", __FUNCTION__, path, mode);
    void *f = esp_apptrace_fopen(ESP_APPTRACE_DEST_TRAX, path, mode);
    ESP_EARLY_LOGV(TAG, "%s ret %p", __FUNCTION__, f);
    return f;
}

int gcov_rtio_fclose(void *stream)
{
    ESP_EARLY_LOGV(TAG, "%s", __FUNCTION__);
    return esp_apptrace_fclose(ESP_APPTRACE_DEST_TRAX, stream);
}

size_t gcov_rtio_fread(void *ptr, size_t size, size_t nmemb, void *stream)
{
    ESP_EARLY_LOGV(TAG, "%s read %u", __FUNCTION__, size * nmemb);
    size_t sz = esp_apptrace_fread(ESP_APPTRACE_DEST_TRAX, ptr, size, nmemb, stream);
    ESP_EARLY_LOGV(TAG, "%s actually read %u", __FUNCTION__, sz);
    return sz;
}

size_t gcov_rtio_fwrite(const void *ptr, size_t size, size_t nmemb, void *stream)
{
    ESP_EARLY_LOGV(TAG, "%s", __FUNCTION__);
    return esp_apptrace_fwrite(ESP_APPTRACE_DEST_TRAX, ptr, size, nmemb, stream);
}

int gcov_rtio_fseek(void *stream, long offset, int whence)
{
    int ret = esp_apptrace_fseek(ESP_APPTRACE_DEST_TRAX, stream, offset, whence);
    ESP_EARLY_LOGV(TAG, "%s(%p %ld %d) = %d", __FUNCTION__, stream, offset, whence, ret);
    return ret;
}

long gcov_rtio_ftell(void *stream)
{
    long ret = esp_apptrace_ftell(ESP_APPTRACE_DEST_TRAX, stream);
    ESP_EARLY_LOGV(TAG, "%s(%p) = %ld", __FUNCTION__, stream, ret);
    return ret;
}

int gcov_rtio_feof(void *stream)
{
    int ret = esp_apptrace_feof(ESP_APPTRACE_DEST_TRAX, stream);
    ESP_EARLY_LOGV(TAG, "%s(%p) = %d", __FUNCTION__, stream, ret);
    return ret;
}

void gcov_rtio_setbuf(void *arg1 __attribute__ ((unused)), void *arg2 __attribute__ ((unused)))
{
    return;
}

/* Wrappers for gcov functions */

extern void __real___gcov_init(void *info);
void __wrap___gcov_init(void *info)
{
    __real___gcov_init(info);
    gcov_rtio_init();
}

#endif

@@ -1,8 +0,0 @@
fopen gcov_rtio_fopen
fclose gcov_rtio_fclose
fwrite gcov_rtio_fwrite
fread gcov_rtio_fread
fseek gcov_rtio_fseek
ftell gcov_rtio_ftell
setbuf gcov_rtio_setbuf
feof gcov_rtio_feof
@@ -1,115 +0,0 @@
/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sdkconfig.h>

#define HEAP_TRACE_SRCFILE /* don't warn on inclusion here */
#include "esp_heap_trace.h"
#undef HEAP_TRACE_SRCFILE
#include "esp_heap_caps.h"
#if CONFIG_APPTRACE_SV_ENABLE
#include "esp_app_trace.h"
#include "esp_sysview_trace.h"
#endif

#define STACK_DEPTH CONFIG_HEAP_TRACING_STACK_DEPTH

#ifdef CONFIG_HEAP_TRACING_TOHOST

#if !CONFIG_APPTRACE_SV_ENABLE
#error None of the heap tracing backends is enabled! You must enable SystemView-compatible tracing to use this feature.
#endif

static bool s_tracing;

esp_err_t heap_trace_init_tohost(void)
{
    if (s_tracing) {
        return ESP_ERR_INVALID_STATE;
    }
    return ESP_OK;
}

esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
{
#if CONFIG_APPTRACE_SV_ENABLE
    esp_err_t ret = esp_sysview_heap_trace_start((uint32_t)-1);
    if (ret != ESP_OK) {
        return ret;
    }
#endif
    s_tracing = true;
    return ESP_OK;
}

esp_err_t heap_trace_stop(void)
{
    esp_err_t ret = ESP_ERR_NOT_SUPPORTED;
#if CONFIG_APPTRACE_SV_ENABLE
    ret = esp_sysview_heap_trace_stop();
#endif
    s_tracing = false;
    return ret;
}

esp_err_t heap_trace_resume(void)
{
    return heap_trace_start(HEAP_TRACE_ALL);
}

size_t heap_trace_get_count(void)
{
    return 0;
}

esp_err_t heap_trace_get(size_t index, heap_trace_record_t *record)
{
    return ESP_ERR_NOT_SUPPORTED;
}

esp_err_t heap_trace_summary(heap_trace_summary_t *summary)
{
    return ESP_ERR_NOT_SUPPORTED;
}

void heap_trace_dump(void)
{
    return;
}

void heap_trace_dump_caps(__attribute__((unused)) const uint32_t caps)
{
    return;
}

/* Add a new allocation to the heap trace records */
static HEAP_IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
{
    if (!s_tracing) {
        return;
    }
#if CONFIG_APPTRACE_SV_ENABLE
    esp_sysview_heap_trace_alloc(record->address, record->size, record->alloced_by);
#endif
}

/* Record a free event in the heap trace log.

   For HEAP_TRACE_ALL, this means filling in the freed_by pointer.
   For HEAP_TRACE_LEAKS, this means removing the record from the log.
*/
static HEAP_IRAM_ATTR void record_free(void *p, void **callers)
{
    if (!s_tracing) {
        return;
    }
#if CONFIG_APPTRACE_SV_ENABLE
    esp_sysview_heap_trace_free(p, callers);
#endif
}

#include "heap_trace.inc"

#endif /*CONFIG_HEAP_TRACING_TOHOST*/
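A short usage sketch for the host-based heap tracing backend above; leaky_function() is hypothetical and error checks are trimmed:

#include "esp_heap_trace.h"

void heap_trace_tohost_example(void)
{
    heap_trace_init_tohost();           // records go to the host, no local buffer
    heap_trace_start(HEAP_TRACE_LEAKS); // stream alloc/free events from now on
    leaky_function();                   // hypothetical code under inspection
    heap_trace_stop();                  // events were sent via SystemView
}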
@@ -1,8 +1,16 @@
/*
 * SPDX-FileCopyrightText: 2017-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
//
// How It Works
// ************
@@ -17,8 +25,9 @@
#include <string.h>
#include "esp_app_trace.h"

#if CONFIG_ESP32_APPTRACE_ENABLE

#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "esp_host_file_io";

@@ -29,7 +38,6 @@ const static char *TAG = "esp_host_file_io";
#define ESP_APPTRACE_FILE_CMD_FSEEK  0x4
#define ESP_APPTRACE_FILE_CMD_FTELL  0x5
#define ESP_APPTRACE_FILE_CMD_STOP   0x6 // indicates that there are no more files to transfer
#define ESP_APPTRACE_FILE_CMD_FEOF   0x7

/** File operation header */
typedef struct {
@@ -69,11 +77,6 @@ typedef struct {
    void *file;
} esp_apptrace_fseek_args_t;

/** Helper structure for feof */
typedef struct {
    void *file;
} esp_apptrace_feof_args_t;

/** Helper structure for ftell */
typedef struct {
    void *file;
@@ -84,7 +87,6 @@ static esp_err_t esp_apptrace_file_cmd_send(esp_apptrace_dest_t dest, uint8_t cm
    esp_err_t ret;
    esp_apptrace_fcmd_hdr_t *hdr;

    ESP_EARLY_LOGV(TAG, "%s %d", __func__, cmd);
    uint8_t *ptr = esp_apptrace_buffer_get(dest, sizeof(*hdr) + args_len, ESP_APPTRACE_TMO_INFINITE); //TODO: finite tmo
    if (ptr == NULL) {
        return ESP_ERR_NO_MEM;
@@ -99,13 +101,13 @@ static esp_err_t esp_apptrace_file_cmd_send(esp_apptrace_dest_t dest, uint8_t cm
    // now indicate that this buffer is ready to be sent off to host
    ret = esp_apptrace_buffer_put(dest, ptr, ESP_APPTRACE_TMO_INFINITE); //TODO: finite tmo
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to put apptrace buffer (%d)!", ret);
        return ret;
    }

    ret = esp_apptrace_flush(dest, ESP_APPTRACE_TMO_INFINITE); //TODO: finite tmo
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to flush apptrace buffer (%d)!", ret);
        return ret;
    }

@@ -117,12 +119,11 @@ static esp_err_t esp_apptrace_file_rsp_recv(esp_apptrace_dest_t dest, uint8_t *b
    uint32_t tot_rd = 0;
    while (tot_rd < buf_len) {
        uint32_t rd_size = buf_len - tot_rd;
        esp_err_t ret = esp_apptrace_read(dest, buf + tot_rd, &rd_size, ESP_APPTRACE_TMO_INFINITE); //TODO: finite tmo
        if (ret != ESP_OK) {
            ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
            return ret;
        }
        ESP_EARLY_LOGV(TAG, "%s read %d bytes", __FUNCTION__, rd_size);
        tot_rd += rd_size;
    }

@@ -141,11 +142,6 @@ void *esp_apptrace_fopen(esp_apptrace_dest_t dest, const char *path, const char
{
    esp_apptrace_fopen_args_t cmd_args;

    ESP_EARLY_LOGV(TAG, "esp_apptrace_fopen '%s' '%s'", path, mode);
    if (path == NULL || mode == NULL) {
        return 0;
    }

    cmd_args.path = path;
    cmd_args.path_len = strlen(path) + 1;
    cmd_args.mode = mode;
@@ -154,7 +150,7 @@ void *esp_apptrace_fopen(esp_apptrace_dest_t dest, const char *path, const char
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FOPEN, esp_apptrace_fopen_args_prepare,
                                               &cmd_args, cmd_args.path_len + cmd_args.mode_len);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return NULL;
    }

@@ -162,7 +158,7 @@ void *esp_apptrace_fopen(esp_apptrace_dest_t dest, const char *path, const char
    void *resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return NULL;
    }

@@ -184,7 +180,7 @@ int esp_apptrace_fclose(esp_apptrace_dest_t dest, void *stream)
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FCLOSE, esp_apptrace_fclose_args_prepare,
                                               &cmd_args, sizeof(cmd_args));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return EOF;
    }

@@ -192,7 +188,7 @@ int esp_apptrace_fclose(esp_apptrace_dest_t dest, void *stream)
    int resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return EOF;
    }

@@ -211,19 +207,13 @@ size_t esp_apptrace_fwrite(esp_apptrace_dest_t dest, const void *ptr, size_t siz
{
    esp_apptrace_fwrite_args_t cmd_args;

    ESP_EARLY_LOGV(TAG, "esp_apptrace_fwrite f %p l %d", stream, size * nmemb);

    if (ptr == NULL) {
        return 0;
    }

    cmd_args.buf = (void *)ptr;
    cmd_args.size = size * nmemb;
    cmd_args.file = stream;
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FWRITE, esp_apptrace_fwrite_args_prepare,
                                               &cmd_args, sizeof(cmd_args.file) + cmd_args.size);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return 0;
    }

@@ -231,14 +221,11 @@ size_t esp_apptrace_fwrite(esp_apptrace_dest_t dest, const void *ptr, size_t siz
    size_t resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return 0;
    }
    /* OpenOCD writes it like that:
     * fwrite(buf, size, 1, file);
     * So, if 1 was returned, fwrite succeeded.
     */
    return resp == 1 ? nmemb : 0;
}

static void esp_apptrace_fread_args_prepare(uint8_t *buf, void *priv)
@@ -253,18 +240,12 @@ size_t esp_apptrace_fread(esp_apptrace_dest_t dest, void *ptr, size_t size, size
{
    esp_apptrace_fread_args_t cmd_args;

    ESP_EARLY_LOGV(TAG, "esp_apptrace_fread f %p l %d", stream, size * nmemb);

    if (ptr == NULL) {
        return 0;
    }

    cmd_args.size = size * nmemb;
    cmd_args.file = stream;
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FREAD, esp_apptrace_fread_args_prepare,
                                               &cmd_args, sizeof(cmd_args));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return 0;
    }

@@ -272,23 +253,17 @@ size_t esp_apptrace_fread(esp_apptrace_dest_t dest, void *ptr, size_t size, size
    size_t resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return 0;
    }
    if (resp > 0) {
        ret = esp_apptrace_file_rsp_recv(dest, ptr, resp);
        if (ret != ESP_OK) {
            ESP_LOGE(TAG, "Failed to read file data (%d)!", ret);
            return 0;
        }
    }
    /* OpenOCD reads it like that:
     * fread(buf, 1, size, file);
     * So, the total count of read bytes is returned.
     */
    return resp / size; // return the number of items read
}

static void esp_apptrace_fseek_args_prepare(uint8_t *buf, void *priv)
@@ -296,23 +271,19 @@ static void esp_apptrace_fseek_args_prepare(uint8_t *buf, void *priv)
    esp_apptrace_fseek_args_t *args = priv;

    memcpy(buf, &args->file, sizeof(args->file));
    memcpy(buf + sizeof(args->file), &args->offset, sizeof(args->offset));
    memcpy(buf + sizeof(args->file) + sizeof(args->offset), &args->whence, sizeof(args->whence));
}

int esp_apptrace_fseek(esp_apptrace_dest_t dest, void *stream, long offset, int whence)
{
    esp_apptrace_fseek_args_t cmd_args;

    ESP_EARLY_LOGV(TAG, "esp_apptrace_fseek f %p o 0x%lx w %d", stream, offset, whence);

    cmd_args.file = stream;
    cmd_args.offset = offset;
    cmd_args.whence = whence;
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FSEEK, esp_apptrace_fseek_args_prepare,
                                               &cmd_args, sizeof(cmd_args));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return -1;
    }

@@ -320,7 +291,7 @@ int esp_apptrace_fseek(esp_apptrace_dest_t dest, void *stream, long offset, int
    int resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return -1;
    }

@@ -342,7 +313,7 @@ int esp_apptrace_ftell(esp_apptrace_dest_t dest, void *stream)
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FTELL, esp_apptrace_ftell_args_prepare,
                                               &cmd_args, sizeof(cmd_args));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return -1;
    }

@@ -350,7 +321,7 @@ int esp_apptrace_ftell(esp_apptrace_dest_t dest, void *stream)
    int resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to read response (%d)!", ret);
        return -1;
    }

@@ -359,42 +330,11 @@ int esp_apptrace_ftell(esp_apptrace_dest_t dest, void *stream)

int esp_apptrace_fstop(esp_apptrace_dest_t dest)
{
    ESP_EARLY_LOGV(TAG, "%s", __func__);
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_STOP, NULL, NULL, 0);
    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "Failed to send files transfer stop cmd (%d)!", ret);
    }
    return ret;
}

static void esp_apptrace_feof_args_prepare(uint8_t *buf, void *priv)
{
    esp_apptrace_feof_args_t *args = priv;

    memcpy(buf, &args->file, sizeof(args->file));
}

int esp_apptrace_feof(esp_apptrace_dest_t dest, void *stream)
{
    esp_apptrace_feof_args_t cmd_args;

    cmd_args.file = stream;
    esp_err_t ret = esp_apptrace_file_cmd_send(dest, ESP_APPTRACE_FILE_CMD_FEOF, esp_apptrace_feof_args_prepare,
                                               &cmd_args, sizeof(cmd_args));
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "Failed to send file cmd (%d)!", ret);
        return EOF;
    }

    // now read the answer
    int resp;
    ret = esp_apptrace_file_rsp_recv(dest, (uint8_t *)&resp, sizeof(resp));
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "Failed to read response (%d)!", ret);
        return EOF;
    }

    return resp;
}

#endif

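An illustrative use of the file I/O commands above: streaming a small file to the host running OpenOCD. The host-side path "/tmp/target.log" is made up, and return values are only partially checked:

#include "esp_app_trace.h"

void host_file_example(void)
{
    void *f = esp_apptrace_fopen(ESP_APPTRACE_DEST_TRAX, "/tmp/target.log", "w+");
    if (f != NULL) {
        const char msg[] = "hello from target\n";
        esp_apptrace_fwrite(ESP_APPTRACE_DEST_TRAX, msg, sizeof(msg) - 1, 1, f);
        esp_apptrace_fclose(ESP_APPTRACE_DEST_TRAX, f);
    }
    esp_apptrace_fstop(ESP_APPTRACE_DEST_TRAX); // tell the host we are done
}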
@@ -1,8 +1,16 @@
/*
 * SPDX-FileCopyrightText: 2017-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ESP_APP_TRACE_H_
#define ESP_APP_TRACE_H_

@@ -10,19 +18,12 @@
#include "esp_err.h"
#include "esp_app_trace_util.h" // ESP_APPTRACE_TMO_INFINITE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Application trace data destination bits.
 */
typedef enum {
    ESP_APPTRACE_DEST_TRAX  = 0x1, ///< JTAG destination
    ESP_APPTRACE_DEST_UART0 = 0x2, ///< UART destination
} esp_apptrace_dest_t;

/**
@@ -32,11 +33,11 @@ typedef enum {
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
esp_err_t esp_apptrace_init(void);

/**
 * @brief Configures down buffer.
 * @note Needs to be called before attempting to receive any data using esp_apptrace_down_buffer_get and esp_apptrace_read.
 *       This function does not protect internal data with a lock.
 *
 * @param buf Address of buffer to use for down channel (host to target) data.
@@ -46,23 +47,23 @@ void esp_apptrace_down_buffer_config(uint8_t *buf, uint32_t size);

/**
 * @brief Allocates buffer for trace data.
 *        Once the data in the buffer is ready to be sent, esp_apptrace_buffer_put must be called to indicate it.
 *
 * @param dest Indicates HW interface to send data.
 * @param size Size of data to write to trace buffer.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return non-NULL on success, otherwise NULL.
 */
uint8_t *esp_apptrace_buffer_get(esp_apptrace_dest_t dest, uint32_t size, uint32_t tmo);

/**
 * @brief Indicates that the data in the buffer is ready to be sent.
 *        This function is a counterpart of and must be preceded by esp_apptrace_buffer_get.
 *
 * @param dest Indicates HW interface to send data. Should be identical to the same parameter in the call to esp_apptrace_buffer_get.
 * @param ptr  Address of trace buffer to release. Should be the value returned by the call to esp_apptrace_buffer_get.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
@@ -74,17 +75,17 @@ esp_err_t esp_apptrace_buffer_put(esp_apptrace_dest_t dest, uint8_t *ptr, uint32
 * @param dest Indicates HW interface to send data.
 * @param data Address of data to write to trace buffer.
 * @param size Size of data to write to trace buffer.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
esp_err_t esp_apptrace_write(esp_apptrace_dest_t dest, const void *data, uint32_t size, uint32_t tmo);

/**
 * @brief vprintf-like function to send log messages to host via specified HW interface.
 *
 * @param dest Indicates HW interface to send data.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 * @param fmt  Address of format string.
 * @param ap   List of arguments.
 *
@@ -93,7 +94,7 @@ esp_err_t esp_apptrace_write(esp_apptrace_dest_t dest, const void *data, uint32_
int esp_apptrace_vprintf_to(esp_apptrace_dest_t dest, uint32_t tmo, const char *fmt, va_list ap);

/**
 * @brief vprintf-like function to send log messages to host.
 *
 * @param fmt Address of format string.
 * @param ap  List of arguments.
@@ -106,7 +107,7 @@ int esp_apptrace_vprintf(const char *fmt, va_list ap);
 * @brief Flushes remaining data in trace buffer to host.
 *
 * @param dest Indicates HW interface to flush data on.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
@@ -114,11 +115,11 @@ esp_err_t esp_apptrace_flush(esp_apptrace_dest_t dest, uint32_t tmo);

/**
 * @brief Flushes remaining data in trace buffer to host without locking internal data.
 *        This is a special version of esp_apptrace_flush which should be called from the panic handler.
 *
 * @param dest   Indicates HW interface to flush data on.
 * @param min_sz Threshold for flushing data. If the current fill level is above this value, data will be flushed. TRAX destinations only.
 * @param tmo    Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
@@ -130,31 +131,31 @@ esp_err_t esp_apptrace_flush_nolock(esp_apptrace_dest_t dest, uint32_t min_sz, u
 * @param dest Indicates HW interface to read the data on.
 * @param data Address of buffer to put data from trace buffer.
 * @param size Pointer to store size of read data. Before the call to this function the pointed memory must hold the requested size of data.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
esp_err_t esp_apptrace_read(esp_apptrace_dest_t dest, void *data, uint32_t *size, uint32_t tmo);

/**
 * @brief Retrieves incoming data buffer, if any.
 *        Once the data in the buffer is processed, esp_apptrace_down_buffer_put must be called to indicate it.
 *
 * @param dest Indicates HW interface to receive data.
 * @param size Address to store size of available data in down buffer. Must be initialized with the requested value.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return non-NULL on success, otherwise NULL.
 */
uint8_t *esp_apptrace_down_buffer_get(esp_apptrace_dest_t dest, uint32_t *size, uint32_t tmo);

/**
 * @brief Indicates that the data in the down buffer is processed.
 *        This function is a counterpart of and must be preceded by esp_apptrace_down_buffer_get.
 *
 * @param dest Indicates HW interface to receive data. Should be identical to the same parameter in the call to esp_apptrace_down_buffer_get.
 * @param ptr  Address of trace buffer to release. Should be the value returned by the call to esp_apptrace_down_buffer_get.
 * @param tmo  Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK on success, otherwise see esp_err_t
 */
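A minimal sketch of the get/put pairing documented above (assumptions: JTAG/TRAX destination; the 100000 us timeout is arbitrary):

#include <string.h>
#include "esp_app_trace.h"

esp_err_t send_record(const uint8_t *rec, uint32_t len)
{
    uint8_t *buf = esp_apptrace_buffer_get(ESP_APPTRACE_DEST_TRAX, len, 100000);
    if (buf == NULL) {
        return ESP_ERR_TIMEOUT; // no room in trace memory within the timeout
    }
    memcpy(buf, rec, len);
    // counterpart call: mark the buffer ready to be sent to the host
    return esp_apptrace_buffer_put(ESP_APPTRACE_DEST_TRAX, buf, 100000);
}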
@@ -245,8 +246,8 @@ int esp_apptrace_fseek(esp_apptrace_dest_t dest, void *stream, long offset, int
int esp_apptrace_ftell(esp_apptrace_dest_t dest, void *stream);

/**
 * @brief Indicates to the host that all file operations are complete.
 *        This function should be called after all file operations are finished; it
 *        indicates to the host that it can perform cleanup operations (close open files, etc.).
 *
 * @param dest Indicates HW interface to use.
@@ -255,25 +256,10 @@ int esp_apptrace_ftell(esp_apptrace_dest_t dest, void *stream);
 */
int esp_apptrace_fstop(esp_apptrace_dest_t dest);

/**
 * @brief Tests the end-of-file indicator on a stream.
 *        This function has the same semantics as 'feof' except for the first argument.
 *
 * @param dest   Indicates HW interface to use.
 * @param stream File handle returned by esp_apptrace_fopen.
 *
 * @return Non-zero if the end-of-file indicator is set for the stream. See feof for details.
 */
int esp_apptrace_feof(esp_apptrace_dest_t dest, void *stream);

/**
 * @brief Triggers gcov info dump.
 *        This function waits for the host to connect to the target before dumping data.
 */
void esp_gcov_dump(void);

#ifdef __cplusplus
}
#endif

#endif

@@ -1,31 +1,34 @@
/*
 * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ESP_APP_TRACE_UTIL_H_
#define ESP_APP_TRACE_UTIL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "freertos/FreeRTOS.h"
|
||||
#include "esp_err.h"
|
||||
#include "esp_timer.h"
|
||||
|
||||
/** Infinite waiting timeout */
|
||||
#define ESP_APPTRACE_TMO_INFINITE ((uint32_t)-1)
|
||||
|
||||
/** Structure which holds data necessary for measuring time intervals.
|
||||
/** Structure which holds data necessary for measuring time intervals.
|
||||
*
|
||||
* After initialization via esp_apptrace_tmo_init() user needs to call esp_apptrace_tmo_check()
|
||||
* periodically to check timeout for expiration.
|
||||
*/
|
||||
typedef struct {
|
||||
int64_t start; ///< time interval start (in us)
|
||||
int64_t tmo; ///< timeout value (in us)
|
||||
int64_t elapsed; ///< elapsed time (in us)
|
||||
uint32_t start; ///< time interval start (in CPU ticks)
|
||||
uint32_t tmo; ///< timeout value (in us)
|
||||
uint32_t elapsed; ///< elapsed time (in us)
|
||||
} esp_apptrace_tmo_t;
|
||||
|
||||
/**
|
||||
@@ -36,28 +39,27 @@ typedef struct {
|
||||
*/
|
||||
static inline void esp_apptrace_tmo_init(esp_apptrace_tmo_t *tmo, uint32_t user_tmo)
|
||||
{
|
||||
tmo->start = esp_timer_get_time();
|
||||
tmo->tmo = user_tmo == ESP_APPTRACE_TMO_INFINITE ? (int64_t)-1 : (int64_t)user_tmo;
|
||||
tmo->elapsed = 0;
|
||||
tmo->start = portGET_RUN_TIME_COUNTER_VALUE();
|
||||
tmo->tmo = user_tmo;
|
||||
}
|
||||
|
||||
/**
 * @brief Checks timeout for expiration.
 *
 * @param tmo Pointer to timeout structure.
 *
 * @return ESP_OK on success, otherwise \see esp_err_t
 */
esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo);

static inline uint32_t esp_apptrace_tmo_remaining_us(esp_apptrace_tmo_t *tmo)
{
    return tmo->tmo != ESP_APPTRACE_TMO_INFINITE ? (tmo->elapsed - tmo->tmo) : ESP_APPTRACE_TMO_INFINITE;
}

/** Tracing module synchronization lock */
typedef struct {
    portMUX_TYPE mux;
    unsigned int_state;
} esp_apptrace_lock_t;

@@ -68,7 +70,7 @@ typedef struct {
 */
static inline void esp_apptrace_lock_init(esp_apptrace_lock_t *lock)
{
    vPortCPUInitializeMutex(&lock->mux);
    lock->int_state = 0;
}

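The intended polling pattern for esp_apptrace_tmo_t, as a sketch (the 500000 us value and the wrapper function are arbitrary):

#include <stdbool.h>
#include "esp_app_trace_util.h"

bool wait_for_flag(volatile bool *flag)
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, 500000 /*us*/);
    while (!*flag) {
        if (esp_apptrace_tmo_check(&tmo) != ESP_OK) {
            return false; // timeout expired
        }
    }
    return true;
}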
@@ -93,7 +95,7 @@ esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock);

/** Ring buffer control structure.
 *
 * @note For the purposes of the application tracing module, if there is not enough space for user data and the write pointer can be wrapped,
 *       the current ring buffer size can be temporarily shrunk in order to provide a buffer of the requested size.
 */
typedef struct {
@@ -161,32 +163,4 @@ uint32_t esp_apptrace_rb_read_size_get(esp_apptrace_rb_t *rb);
 */
uint32_t esp_apptrace_rb_write_size_get(esp_apptrace_rb_t *rb);

int esp_apptrace_log_lock(void);
void esp_apptrace_log_unlock(void);

#define ESP_APPTRACE_LOG( format, ... )   \
    do { \
        esp_apptrace_log_lock(); \
        esp_rom_printf(format, ##__VA_ARGS__); \
        esp_apptrace_log_unlock(); \
    } while (0)

#define ESP_APPTRACE_LOG_LEV( _L_, level, format, ... )   \
    do { \
        if (LOG_LOCAL_LEVEL >= level) { \
            ESP_APPTRACE_LOG(LOG_FORMAT(_L_, format), esp_log_early_timestamp(), TAG, ##__VA_ARGS__); \
        } \
    } while (0)

#define ESP_APPTRACE_LOGE( format, ... )  ESP_APPTRACE_LOG_LEV(E, ESP_LOG_ERROR, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGW( format, ... )  ESP_APPTRACE_LOG_LEV(W, ESP_LOG_WARN, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGI( format, ... )  ESP_APPTRACE_LOG_LEV(I, ESP_LOG_INFO, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGD( format, ... )  ESP_APPTRACE_LOG_LEV(D, ESP_LOG_DEBUG, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGV( format, ... )  ESP_APPTRACE_LOG_LEV(V, ESP_LOG_VERBOSE, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGO( format, ... )  ESP_APPTRACE_LOG_LEV(E, ESP_LOG_NONE, format, ##__VA_ARGS__)

#ifdef __cplusplus
}
#endif

#endif //ESP_APP_TRACE_UTIL_H_

@@ -1,80 +0,0 @@
/*
 * SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ESP_SYSVIEW_TRACE_H_
#define ESP_SYSVIEW_TRACE_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdarg.h>
#include "esp_err.h"
#include "SEGGER_RTT.h" // SEGGER_RTT_ESP_Flush
#include "esp_app_trace_util.h" // ESP_APPTRACE_TMO_INFINITE

/**
 * @brief Flushes remaining data in the SystemView trace buffer to host.
 *
 * @param tmo Timeout for operation (in us). Use ESP_APPTRACE_TMO_INFINITE to wait indefinitely.
 *
 * @return ESP_OK.
 */
static inline esp_err_t esp_sysview_flush(uint32_t tmo)
{
    SEGGER_RTT_ESP_Flush(0, tmo);
    return ESP_OK;
}

/**
 * @brief vprintf-like function to send log messages to the host.
 *
 * @param format Address of format string.
 * @param args   List of arguments.
 *
 * @return Number of bytes written.
 */
int esp_sysview_vprintf(const char * format, va_list args);

/**
 * @brief Starts SystemView heap tracing.
 *
 * @param tmo Timeout (in us) to wait for the host to be connected. Use -1 to wait forever.
 *
 * @return ESP_OK on success, ESP_ERR_TIMEOUT if the operation timed out.
 */
esp_err_t esp_sysview_heap_trace_start(uint32_t tmo);

/**
 * @brief Stops SystemView heap tracing.
 *
 * @return ESP_OK.
 */
esp_err_t esp_sysview_heap_trace_stop(void);

/**
 * @brief Sends a heap allocation event to the host.
 *
 * @param addr    Address of allocated block.
 * @param size    Size of allocated block.
 * @param callers Pointer to array of callstack addresses.
 *                Array size must be CONFIG_HEAP_TRACING_STACK_DEPTH.
 */
void esp_sysview_heap_trace_alloc(void *addr, uint32_t size, const void *callers);

/**
 * @brief Sends a heap de-allocation event to the host.
 *
 * @param addr    Address of de-allocated block.
 * @param callers Pointer to array of callstack addresses.
 *                Array size must be CONFIG_HEAP_TRACING_STACK_DEPTH.
 */
void esp_sysview_heap_trace_free(void *addr, const void *callers);

#ifdef __cplusplus
}
#endif

#endif //ESP_SYSVIEW_TRACE_H_
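A hedged wiring example (not part of the header above): redirecting esp_log output to SystemView via the standard esp_log vprintf hook:

#include "esp_log.h"
#include "esp_sysview_trace.h"

void route_logs_to_sysview(void)
{
    esp_log_set_vprintf(&esp_sysview_vprintf); // subsequent ESP_LOGx output goes to the host
    esp_sysview_flush(ESP_APPTRACE_TMO_INFINITE);
}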
@@ -1,23 +0,0 @@
[mapping:app_trace]
archive: libapp_trace.a
entries:
    app_trace (noflash)
    port_uart (noflash)
    app_trace_util (noflash)
    if APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE:
        app_trace_membufs_proto (noflash)
    if APPTRACE_DEST_JTAG = y:
        port (noflash)
    if APPTRACE_SV_ENABLE = y:
        SEGGER_SYSVIEW (noflash)
        SEGGER_RTT_esp (noflash)
        SEGGER_SYSVIEW_Config_FreeRTOS (noflash)
        SEGGER_SYSVIEW_FreeRTOS (noflash)

[mapping:app_trace_driver]
archive: libdriver.a
entries:
    if APPTRACE_SV_TS_SOURCE_GPTIMER = y:
        gptimer (noflash)
    else:
        * (default)
@@ -1,36 +0,0 @@
/*
 * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ESP_APP_TRACE_PORT_H_
#define ESP_APP_TRACE_PORT_H_

#include "esp_app_trace_util.h"

#ifdef __cplusplus
extern "C" {
#endif

/** Apptrace HW interface. */
typedef struct {
    esp_err_t (*init)(void *hw_data);
    uint8_t *(*get_up_buffer)(void *hw_data, uint32_t, esp_apptrace_tmo_t *);
    esp_err_t (*put_up_buffer)(void *hw_data, uint8_t *, esp_apptrace_tmo_t *);
    esp_err_t (*flush_up_buffer_nolock)(void *hw_data, uint32_t, esp_apptrace_tmo_t *);
    esp_err_t (*flush_up_buffer)(void *hw_data, esp_apptrace_tmo_t *);
    void (*down_buffer_config)(void *hw_data, uint8_t *buf, uint32_t size);
    uint8_t *(*get_down_buffer)(void *hw_data, uint32_t *, esp_apptrace_tmo_t *);
    esp_err_t (*put_down_buffer)(void *hw_data, uint8_t *, esp_apptrace_tmo_t *);
    bool (*host_is_connected)(void *hw_data);
} esp_apptrace_hw_t;

esp_apptrace_hw_t *esp_apptrace_jtag_hw_get(void **data);
esp_apptrace_hw_t *esp_apptrace_uart_hw_get(int num, void **data);

#ifdef __cplusplus
}
#endif

#endif
@@ -1,348 +0,0 @@
/*
 * SPDX-FileCopyrightText: 2017-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "soc/soc.h"
#include "esp_log.h"
#include "esp_cpu.h"
#include "esp_app_trace_port.h"

#include "driver/uart.h"
#include "hal/uart_ll.h"
#include "string.h"
#include "driver/gpio.h"

#define APPTRACE_DEST_UART (CONFIG_APPTRACE_DEST_UART0 | CONFIG_APPTRACE_DEST_UART1 | CONFIG_APPTRACE_DEST_UART2)

#define APP_TRACE_MAX_TX_BUFF_UART  CONFIG_APPTRACE_UART_TX_BUFF_SIZE
#define APP_TRACE_MAX_TX_MSG_UART   CONFIG_APPTRACE_UART_TX_MSG_SIZE

/** UART HW transport data */
typedef struct {
    uint8_t inited;
#if CONFIG_APPTRACE_LOCK_ENABLE
    esp_apptrace_lock_t lock; // sync lock
#endif
    uart_port_t port_num;
    // TX data ring buffer
    uint8_t *tx_data_buff;
    int32_t tx_data_buff_in;
    int32_t tx_data_buff_out;
    // TX message buffer
    uint8_t *tx_msg_buff;
    uint32_t tx_msg_buff_size;

    // RX message buffer
    uint8_t *down_buffer;
    uint32_t down_buffer_size;
    // Buffer overflow flags
    bool message_buff_overflow;
    bool circular_buff_overflow;
} esp_apptrace_uart_data_t;

#if APPTRACE_DEST_UART
static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data);
static esp_err_t esp_apptrace_uart_flush(esp_apptrace_uart_data_t *hw_data, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_uart_flush_nolock(esp_apptrace_uart_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo);
static uint8_t *esp_apptrace_uart_up_buffer_get(esp_apptrace_uart_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_uart_up_buffer_put(esp_apptrace_uart_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
static void esp_apptrace_uart_down_buffer_config(esp_apptrace_uart_data_t *hw_data, uint8_t *buf, uint32_t size);
static uint8_t *esp_apptrace_uart_down_buffer_get(esp_apptrace_uart_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_uart_down_buffer_put(esp_apptrace_uart_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
static bool esp_apptrace_uart_host_is_connected(esp_apptrace_uart_data_t *hw_data);
#endif // APPTRACE_DEST_UART

const static char *TAG = "esp_apptrace_uart";

esp_apptrace_hw_t *esp_apptrace_uart_hw_get(int num, void **data)
{
    ESP_LOGD(TAG, "esp_apptrace_uart_hw_get - %i", num);
#if APPTRACE_DEST_UART
    static esp_apptrace_uart_data_t s_uart_hw_data = {
    };
    static esp_apptrace_hw_t s_uart_hw = {
        .init = (esp_err_t (*)(void *))esp_apptrace_uart_init,
        .get_up_buffer = (uint8_t *(*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_uart_up_buffer_get,
        .put_up_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_uart_up_buffer_put,
        .flush_up_buffer_nolock = (esp_err_t (*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_uart_flush_nolock,
        .flush_up_buffer = (esp_err_t (*)(void *, esp_apptrace_tmo_t *))esp_apptrace_uart_flush,
        .down_buffer_config = (void (*)(void *, uint8_t *, uint32_t))esp_apptrace_uart_down_buffer_config,
        .get_down_buffer = (uint8_t *(*)(void *, uint32_t *, esp_apptrace_tmo_t *))esp_apptrace_uart_down_buffer_get,
        .put_down_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_uart_down_buffer_put,
        .host_is_connected = (bool (*)(void *))esp_apptrace_uart_host_is_connected,
    };
    s_uart_hw_data.port_num = num;
    *data = &s_uart_hw_data;
    return &s_uart_hw;
#else
    return NULL;
#endif
}

#if APPTRACE_DEST_UART

static esp_err_t esp_apptrace_uart_lock(esp_apptrace_uart_data_t *hw_data, esp_apptrace_tmo_t *tmo)
{
#if CONFIG_APPTRACE_LOCK_ENABLE
    esp_err_t ret = esp_apptrace_lock_take(&hw_data->lock, tmo);
    if (ret != ESP_OK) {
        return ESP_FAIL;
    }
#endif
    return ESP_OK;
}

static esp_err_t esp_apptrace_uart_unlock(esp_apptrace_uart_data_t *hw_data)
{
    esp_err_t ret = ESP_OK;
#if CONFIG_APPTRACE_LOCK_ENABLE
    ret = esp_apptrace_lock_give(&hw_data->lock);
#endif
    return ret;
}

static inline void esp_apptrace_uart_hw_init(void)
{
    ESP_APPTRACE_LOGI("Initialized UART on CPU%d", esp_cpu_get_core_id());
}

/*****************************************************************************************/
/***************************** Apptrace HW iface *****************************************/
/*****************************************************************************************/

static esp_err_t esp_apptrace_send_uart_data(esp_apptrace_uart_data_t *hw_data, const char *data, uint32_t size, esp_apptrace_tmo_t *tmo)
{
    esp_err_t res = esp_apptrace_uart_lock(hw_data, tmo);
    if (res != ESP_OK) {
        return res;
    }
    // We store current out position to handle it without lock
    volatile int32_t out_position = hw_data->tx_data_buff_out;

    int len_free = APP_TRACE_MAX_TX_BUFF_UART - (hw_data->tx_data_buff_in - out_position);
    if (out_position > hw_data->tx_data_buff_in) {
        len_free = out_position - hw_data->tx_data_buff_in;
    }
    int check_len = APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in;
    if (size <= len_free) {
        if (check_len >= size) {
            memcpy(&hw_data->tx_data_buff[hw_data->tx_data_buff_in], data, size);
            hw_data->tx_data_buff_in += size;
        } else {
            memcpy(&hw_data->tx_data_buff[hw_data->tx_data_buff_in], data, APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in);
            memcpy(&hw_data->tx_data_buff[0], &data[APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in], size - (APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in));
            hw_data->tx_data_buff_in = size - (APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_in);
        }
        if (hw_data->tx_data_buff_in >= APP_TRACE_MAX_TX_BUFF_UART) {
            hw_data->tx_data_buff_in = 0;
        }
    } else {
        hw_data->circular_buff_overflow = true;
    }

    if (esp_apptrace_uart_unlock(hw_data) != ESP_OK) {
        assert(false && "Failed to unlock apptrace data!");
    }

    return ESP_OK;
}

static void send_buff_data(esp_apptrace_uart_data_t *hw_data, esp_apptrace_tmo_t *tmo)
{
    if (hw_data->tx_data_buff_in == hw_data->tx_data_buff_out) {
        return;
    }
    // We store current in position to handle it without lock
    volatile int32_t in_position = hw_data->tx_data_buff_in;
    if (in_position > hw_data->tx_data_buff_out) {
        int bytes_sent = uart_write_bytes(hw_data->port_num, &hw_data->tx_data_buff[hw_data->tx_data_buff_out], in_position - hw_data->tx_data_buff_out);
        hw_data->tx_data_buff_out += bytes_sent;
    } else {
        int bytes_sent = uart_write_bytes(hw_data->port_num, &hw_data->tx_data_buff[hw_data->tx_data_buff_out], APP_TRACE_MAX_TX_BUFF_UART - hw_data->tx_data_buff_out);
        hw_data->tx_data_buff_out += bytes_sent;
        if (hw_data->tx_data_buff_out >= APP_TRACE_MAX_TX_BUFF_UART) {
            hw_data->tx_data_buff_out = 0;
        }
    }
}

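The wrap-around copy in esp_apptrace_send_uart_data() above is the classic split-memcpy ring-buffer write; a self-contained toy illustration (hypothetical 8-byte buffer, plain host-side C):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char ring[8];
    memset(ring, '.', sizeof(ring));
    int in = 6;                   // write index near the end of the buffer
    const char data[] = "ABCD";   // 4 bytes to store, only 2 fit before the end
    int tail = (int)sizeof(ring) - in;
    memcpy(&ring[in], data, tail);                          // "AB" -> ring[6..7]
    memcpy(&ring[0], data + tail, sizeof(data) - 1 - tail); // "CD" -> ring[0..1]
    in = (int)(sizeof(data) - 1) - tail;                    // new write index = 2
    printf("in=%d ring=%.8s\n", in, ring);                  // in=2 ring=CD....AB
    return 0;
}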
#define APP_TRACE_UART_STOP_WAIT_TMO 1000000 //us

static void esp_apptrace_send_uart_tx_task(void *arg)
{
    esp_apptrace_uart_data_t *hw_data = (esp_apptrace_uart_data_t *)arg;
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, APP_TRACE_UART_STOP_WAIT_TMO);

    vTaskDelay(10);
    while (1) {
        send_buff_data(hw_data, &tmo);
        vTaskDelay(10);
        if (hw_data->circular_buff_overflow == true) {
            hw_data->circular_buff_overflow = false;
            ESP_LOGE(TAG, "Buffer overflow. Please increase the UART baud rate, or increase the UART TX ring buffer size in menuconfig.");
        }
        if (hw_data->message_buff_overflow == true) {
            hw_data->message_buff_overflow = false;
            ESP_LOGE(TAG, "Message size is larger than the message buffer!");
        }
    }
}

static const int APP_TRACE_UART_RX_BUF_SIZE = 4024;
|
||||
|
||||
static esp_err_t esp_apptrace_uart_init(esp_apptrace_uart_data_t *hw_data)
|
||||
{
|
||||
int core_id = esp_cpu_get_core_id();
|
||||
if (core_id == 0) {
|
||||
hw_data->tx_data_buff = (uint8_t *)heap_caps_malloc(APP_TRACE_MAX_TX_BUFF_UART, MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
|
||||
if (hw_data->tx_data_buff == NULL){
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
hw_data->tx_data_buff_in = 0;
|
||||
hw_data->tx_data_buff_out = 0;
|
||||
hw_data->tx_msg_buff = (uint8_t *)heap_caps_malloc(APP_TRACE_MAX_TX_MSG_UART, MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
|
||||
if (hw_data->tx_msg_buff == NULL)
|
||||
{
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
hw_data->tx_msg_buff_size = 0;
|
||||
hw_data->down_buffer_size = 0;
|
||||
hw_data->message_buff_overflow = false;
|
||||
hw_data->circular_buff_overflow = false;
|
||||
|
||||
|
||||
const uart_config_t uart_config = {
|
||||
.baud_rate = CONFIG_APPTRACE_UART_BAUDRATE,
|
||||
.data_bits = UART_DATA_8_BITS,
|
||||
.parity = UART_PARITY_DISABLE,
|
||||
.stop_bits = UART_STOP_BITS_1,
|
||||
.flow_ctrl = UART_HW_FLOWCTRL_DISABLE,
|
||||
.source_clk = UART_SCLK_DEFAULT,
|
||||
};
|
||||
ESP_LOGI(TAG, "UART baud rate: %i", CONFIG_APPTRACE_UART_BAUDRATE);
|
||||
// We won't use a buffer for sending data.
|
||||
esp_err_t err = uart_driver_install(hw_data->port_num, APP_TRACE_UART_RX_BUF_SIZE, APP_TRACE_UART_RX_BUF_SIZE, 0, NULL, 0);
|
||||
assert((err == ESP_OK) && "Not possible to install UART. Please check and change menuconfig parameters!");
|
||||
err = uart_param_config(hw_data->port_num, &uart_config);
|
||||
assert((err == ESP_OK) && "Not possible to configure UART. Please check and change menuconfig parameters!");
|
||||
err = uart_set_pin(hw_data->port_num, CONFIG_APPTRACE_UART_TX_GPIO, CONFIG_APPTRACE_UART_RX_GPIO, UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE);
|
||||
assert((err == ESP_OK) && "Not possible to configure UART RX/TX pins. Please check and change menuconfig parameters!");
|
||||
|
||||
int uart_prio = CONFIG_APPTRACE_UART_TASK_PRIO;
|
||||
if (uart_prio >= (configMAX_PRIORITIES-1)) uart_prio = configMAX_PRIORITIES - 1;
|
||||
err = xTaskCreate(esp_apptrace_send_uart_tx_task, "app_trace_uart_tx_task", 2500, hw_data, uart_prio, NULL);
|
||||
assert((err == pdPASS) && "Not possible to configure UART. Not possible to create task!");
|
||||
|
||||
#if CONFIG_APPTRACE_LOCK_ENABLE
|
||||
esp_apptrace_lock_init(&hw_data->lock);
|
||||
#endif
|
||||
}
|
||||
// init UART on this CPU
|
||||
esp_apptrace_uart_hw_init();
|
||||
hw_data->inited |= 1 << core_id;
|
||||
|
||||
return ESP_OK;
|
||||
}
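
/*
 * Side note (illustrative sketch, not part of this patch): the asserts above
 * abort on failure. If bring-up failures should be recoverable instead, the
 * same driver setup can propagate errors; "uart_dest_install" is a
 * hypothetical helper name.
 */
static esp_err_t uart_dest_install(uart_port_t port, const uart_config_t *cfg)
{
    esp_err_t err = uart_driver_install(port, APP_TRACE_UART_RX_BUF_SIZE, APP_TRACE_UART_RX_BUF_SIZE, 0, NULL, 0);
    if (err != ESP_OK) {
        return err;
    }
    err = uart_param_config(port, cfg);
    if (err != ESP_OK) {
        uart_driver_delete(port); // roll back the install on failure
        return err;
    }
    err = uart_set_pin(port, CONFIG_APPTRACE_UART_TX_GPIO, CONFIG_APPTRACE_UART_RX_GPIO, UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE);
    if (err != ESP_OK) {
        uart_driver_delete(port);
    }
    return err;
}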

static uint8_t *esp_apptrace_uart_up_buffer_get(esp_apptrace_uart_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *ptr;
    if (size > APP_TRACE_MAX_TX_MSG_UART) {
        hw_data->message_buff_overflow = true;
        return NULL;
    }
    if (hw_data->tx_msg_buff_size != 0) {
        // The previous message has not been sent yet.
        return NULL;
    }
    esp_err_t res = esp_apptrace_uart_lock(hw_data, tmo);
    if (res != ESP_OK) {
        return NULL;
    }
    ptr = hw_data->tx_msg_buff;
    hw_data->tx_msg_buff_size = size;

    // Now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data.
    if (esp_apptrace_uart_unlock(hw_data) != ESP_OK) {
        assert(false && "Failed to unlock apptrace data!");
    }
    return ptr;
}

static esp_err_t esp_apptrace_uart_up_buffer_put(esp_apptrace_uart_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    esp_err_t res = esp_apptrace_send_uart_data(hw_data, (const char *)ptr, hw_data->tx_msg_buff_size, tmo);
    // Clear the size to mark the message buffer as sent and free for reuse.
    hw_data->tx_msg_buff_size = 0;
    return res;
}
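
/*
 * Illustrative usage sketch (not part of this patch): how a writer pairs the
 * two calls above to push one message. "example_send_msg" and the 100 ms
 * timeout are assumptions for the example; in the real driver the apptrace
 * core invokes these callbacks itself. Assumes <string.h> for memcpy().
 */
static esp_err_t example_send_msg(esp_apptrace_uart_data_t *hw_data, const uint8_t *payload, uint32_t len)
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, 100000 /*us*/);
    uint8_t *buf = esp_apptrace_uart_up_buffer_get(hw_data, len, &tmo);
    if (buf == NULL) {
        // Message too big, previous message still pending, or lock timeout.
        return ESP_ERR_TIMEOUT;
    }
    memcpy(buf, payload, len);
    return esp_apptrace_uart_up_buffer_put(hw_data, buf, &tmo);
}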

static void esp_apptrace_uart_down_buffer_config(esp_apptrace_uart_data_t *hw_data, uint8_t *buf, uint32_t size)
{
    // Note: the caller-provided `buf` is not used; a dedicated heap buffer of
    // the requested size is allocated for host-to-target data instead.
    hw_data->down_buffer = (uint8_t *)malloc(size);
    assert(hw_data->down_buffer != NULL && "Failed to allocate apptrace uart down buffer!");
    hw_data->down_buffer_size = size;
}

static uint8_t *esp_apptrace_uart_down_buffer_get(esp_apptrace_uart_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo)
{
    uint8_t *ptr = NULL;

    if (*size > hw_data->down_buffer_size) {
        return NULL;
    }

    esp_err_t res = esp_apptrace_uart_lock(hw_data, tmo);
    if (res != ESP_OK) {
        return NULL;
    }

    size_t uart_fifolen = 0;
    uart_get_buffered_data_len(hw_data->port_num, &uart_fifolen);
    if (uart_fifolen > 0) {
        // Read no more than the caller asked for.
        if (*size < uart_fifolen) {
            uart_fifolen = *size;
        }
        ptr = hw_data->down_buffer;
        *size = uart_read_bytes(hw_data->port_num, ptr, uart_fifolen, 0);
    }

    if (esp_apptrace_uart_unlock(hw_data) != ESP_OK) {
        assert(false && "Failed to unlock apptrace data!");
    }
    return ptr;
}

static esp_err_t esp_apptrace_uart_down_buffer_put(esp_apptrace_uart_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
    // Nothing to do: the data was already read out of the driver's RX buffer
    // in esp_apptrace_uart_down_buffer_get().
    return ESP_OK;
}
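
/*
 * Illustrative usage sketch (not part of this patch): polling the
 * host-to-target channel. "example_poll_host_data" and the 100 ms timeout are
 * hypothetical; in the real driver the apptrace core drives these callbacks.
 */
static void example_poll_host_data(esp_apptrace_uart_data_t *hw_data)
{
    esp_apptrace_tmo_t tmo;
    esp_apptrace_tmo_init(&tmo, 100000 /*us*/);
    uint32_t size = hw_data->down_buffer_size; // accept up to a full down buffer
    uint8_t *data = esp_apptrace_uart_down_buffer_get(hw_data, &size, &tmo);
    if (data != NULL && size > 0) {
        // ... handle `size` bytes of data received from the host here ...
        esp_apptrace_uart_down_buffer_put(hw_data, data, &tmo); // currently a no-op
    }
}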

static bool esp_apptrace_uart_host_is_connected(esp_apptrace_uart_data_t *hw_data)
{
    // The UART destination has no handshake with the host: report "connected"
    // as soon as the driver has been initialized on core 0.
    return hw_data->inited & 1;
}

static esp_err_t esp_apptrace_uart_flush_nolock(esp_apptrace_uart_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
{
    // Nothing to flush: the TX task continuously drains the circular buffer.
    return ESP_OK;
}

static esp_err_t esp_apptrace_uart_flush(esp_apptrace_uart_data_t *hw_data, esp_apptrace_tmo_t *tmo)
{
    // Same as above: sending is handled asynchronously by the TX task.
    return ESP_OK;
}

#endif // APPTRACE_DEST_UART