From 8770f562e057cbdf7c8035ee3fd39603ff416d39 Mon Sep 17 00:00:00 2001 From: Luo Xu Date: Thu, 14 Aug 2025 21:43:30 +0800 Subject: [PATCH 1/2] feat(ble_mesh): Add lock to avoid tx being modified at the same time (cherry picked from commit 334e2bf8ab40a6facf6ed8926c42307443e72ad9) Co-authored-by: luoxu --- components/bt/esp_ble_mesh/core/transport.c | 53 +++++++++++++-------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/components/bt/esp_ble_mesh/core/transport.c b/components/bt/esp_ble_mesh/core/transport.c index fd3fd60aa1..33054e847c 100644 --- a/components/bt/esp_ble_mesh/core/transport.c +++ b/components/bt/esp_ble_mesh/core/transport.c @@ -94,6 +94,7 @@ static struct seg_tx { const struct bt_mesh_send_cb *cb; void *cb_data; struct k_delayed_work rtx_timer; /* Segment Retransmission timer */ + bt_mesh_mutex_t lock; } seg_tx[CONFIG_BLE_MESH_TX_SEG_MSG_COUNT]; static struct seg_rx { @@ -120,17 +121,16 @@ static struct seg_rx { static uint8_t seg_rx_buf_data[(CONFIG_BLE_MESH_RX_SEG_MSG_COUNT * CONFIG_BLE_MESH_RX_SDU_MAX)]; -static bt_mesh_mutex_t seg_tx_lock; static bt_mesh_mutex_t seg_rx_lock; -static inline void bt_mesh_seg_tx_lock(void) +static inline void bt_mesh_seg_tx_lock(struct seg_tx *tx) { - bt_mesh_mutex_lock(&seg_tx_lock); + bt_mesh_r_mutex_lock(&tx->lock); } -static inline void bt_mesh_seg_tx_unlock(void) +static inline void bt_mesh_seg_tx_unlock(struct seg_tx *tx) { - bt_mesh_mutex_unlock(&seg_tx_lock); + bt_mesh_r_mutex_unlock(&tx->lock); } static inline void bt_mesh_seg_rx_lock(void) @@ -331,7 +331,7 @@ static void seg_tx_reset(struct seg_tx *tx) { int i; - bt_mesh_seg_tx_lock(); + bt_mesh_seg_tx_lock(tx); k_delayed_work_cancel(&tx->rtx_timer); @@ -351,7 +351,7 @@ static void seg_tx_reset(struct seg_tx *tx) tx->nack_count = 0U; - bt_mesh_seg_tx_unlock(); + bt_mesh_seg_tx_unlock(tx); if (bt_mesh_atomic_test_and_clear_bit(bt_mesh.flags, BLE_MESH_IVU_PENDING)) { BT_DBG("Proceeding with pending IV Update"); @@ -380,6 +380,7 @@ static 
inline void seg_tx_complete(struct seg_tx *tx, int err) static void schedule_retransmit(struct seg_tx *tx) { + bt_mesh_seg_tx_lock(tx); /* It's possible that a segment broadcast hasn't finished, * but the tx are already released. Only the seg_pending * of this segment remains unprocessed. So, here, we @@ -391,20 +392,24 @@ static void schedule_retransmit(struct seg_tx *tx) if (tx->seg_pending) { tx->seg_pending--; } + bt_mesh_seg_tx_unlock(tx); return; } if (--tx->seg_pending) { + bt_mesh_seg_tx_unlock(tx); return; } if (!BLE_MESH_ADDR_IS_UNICAST(tx->dst) && !tx->attempts) { BT_INFO("Complete tx sdu to group"); seg_tx_complete(tx, 0); + bt_mesh_seg_tx_unlock(tx); return; } k_delayed_work_submit(&tx->rtx_timer, SEG_RETRANSMIT_TIMEOUT(tx)); + bt_mesh_seg_tx_unlock(tx); } static void seg_first_send_start(uint16_t duration, int err, void *user_data) @@ -450,11 +455,11 @@ static void seg_tx_send_unacked(struct seg_tx *tx) { int i, err = 0; - bt_mesh_seg_tx_lock(); + bt_mesh_seg_tx_lock(tx); if (!(tx->attempts--)) { BT_WARN("Ran out of retransmit attempts"); - bt_mesh_seg_tx_unlock(); + bt_mesh_seg_tx_unlock(tx); seg_tx_complete(tx, -ETIMEDOUT); return; } @@ -487,13 +492,13 @@ static void seg_tx_send_unacked(struct seg_tx *tx) &seg_sent_cb, tx); if (err) { BT_ERR("Sending segment failed"); - bt_mesh_seg_tx_unlock(); + bt_mesh_seg_tx_unlock(tx); seg_tx_complete(tx, -EIO); return; } } - bt_mesh_seg_tx_unlock(); + bt_mesh_seg_tx_unlock(tx); } static void seg_retransmit(struct k_work *work) @@ -511,6 +516,7 @@ static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu, uint16_t seq_zero = 0U; uint8_t seg_hdr = 0U; uint8_t seg_o = 0U; + int err = 0; int i; BT_DBG("src 0x%04x dst 0x%04x app_idx 0x%04x aszmic %u sdu_len %u", @@ -583,16 +589,17 @@ static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu, return -ENOBUFS; } + bt_mesh_seg_tx_lock(tx); + for (seg_o = 0U; sdu->len; seg_o++) { struct net_buf *seg = NULL; uint16_t len = 0U; - 
int err = 0; seg = bt_mesh_adv_create(BLE_MESH_ADV_DATA, BUF_TIMEOUT); if (!seg) { BT_ERR("Out of segment buffers"); - seg_tx_reset(tx); - return -ENOBUFS; + err = -ENOBUFS; + break; } net_buf_reserve(seg, BLE_MESH_NET_HDR_LEN); @@ -637,8 +644,7 @@ static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu, tx); if (err) { BT_ERR("Sending segment failed (err %d)", err); - seg_tx_reset(tx); - return err; + break; } /* If security credentials is updated in the network layer, @@ -650,6 +656,13 @@ static int send_seg(struct bt_mesh_net_tx *net_tx, struct net_buf_simple *sdu, } } + bt_mesh_seg_tx_unlock(tx); + + if (err) { + seg_tx_reset(tx); + return err; + } + /* This can happen if segments only went into the Friend Queue */ if (IS_ENABLED(CONFIG_BLE_MESH_FRIEND) && !tx->seg[0]) { seg_tx_reset(tx); @@ -984,9 +997,9 @@ static int trans_ack(struct bt_mesh_net_rx *rx, uint8_t hdr, while ((bit = find_lsb_set(ack))) { if (tx->seg[bit - 1]) { BT_INFO("Seg %u/%u acked", bit - 1, tx->seg_n); - bt_mesh_seg_tx_lock(); + bt_mesh_seg_tx_lock(tx); seg_tx_done(tx, bit - 1); - bt_mesh_seg_tx_unlock(); + bt_mesh_seg_tx_unlock(tx); } ack &= ~BIT(bit - 1); @@ -1786,6 +1799,7 @@ void bt_mesh_trans_init(void) for (i = 0; i < ARRAY_SIZE(seg_tx); i++) { k_delayed_work_init(&seg_tx[i].rtx_timer, seg_retransmit); + bt_mesh_r_mutex_create(&seg_tx[i].lock); } for (i = 0; i < ARRAY_SIZE(seg_rx); i++) { @@ -1795,7 +1809,6 @@ void bt_mesh_trans_init(void) seg_rx[i].buf.data = seg_rx[i].buf.__buf; } - bt_mesh_mutex_create(&seg_tx_lock); bt_mesh_mutex_create(&seg_rx_lock); } @@ -1810,13 +1823,13 @@ void bt_mesh_trans_deinit(bool erase) for (i = 0; i < ARRAY_SIZE(seg_tx); i++) { k_delayed_work_free(&seg_tx[i].rtx_timer); + bt_mesh_r_mutex_free(&seg_tx[i].lock); } for (i = 0; i < ARRAY_SIZE(seg_rx); i++) { k_delayed_work_free(&seg_rx[i].ack_timer); } - bt_mesh_mutex_free(&seg_tx_lock); bt_mesh_mutex_free(&seg_rx_lock); } #endif /* CONFIG_BLE_MESH_DEINIT */ From 
9acb50df44c20d6660977632c0cce7d4b654f40b Mon Sep 17 00:00:00 2001 From: luoxu Date: Thu, 11 Sep 2025 16:16:29 +0800 Subject: [PATCH 2/2] feat(ble_mesh): Add adv lock to protect adv buffer allocation from concurrent access (cherry picked from commit 4905b6eae12134ecd291156b2ce1e98aba4e1fed) --- components/bt/esp_ble_mesh/core/adv.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/components/bt/esp_ble_mesh/core/adv.c b/components/bt/esp_ble_mesh/core/adv.c index f2bd9c60d8..22323ca340 100644 --- a/components/bt/esp_ble_mesh/core/adv.c +++ b/components/bt/esp_ble_mesh/core/adv.c @@ -59,6 +59,8 @@ struct bt_mesh_queue { }; static struct bt_mesh_queue adv_queue; +static bt_mesh_mutex_t adv_buf_alloc_lock; + /* We reserve one queue item for bt_mesh_adv_update() */ #if CONFIG_BLE_MESH_SUPPORT_BLE_ADV #define BLE_MESH_ADV_QUEUE_SIZE (CONFIG_BLE_MESH_ADV_BUF_COUNT + CONFIG_BLE_MESH_BLE_ADV_BUF_COUNT + 1) @@ -375,8 +377,10 @@ struct net_buf *bt_mesh_adv_create_from_pool(struct net_buf_pool *pool, return NULL; } + bt_mesh_r_mutex_lock(&adv_buf_alloc_lock); buf = net_buf_alloc(pool, timeout); if (!buf) { + bt_mesh_r_mutex_unlock(&adv_buf_alloc_lock); return NULL; } @@ -390,6 +394,7 @@ struct net_buf *bt_mesh_adv_create_from_pool(struct net_buf_pool *pool, adv->type = type; + bt_mesh_r_mutex_unlock(&adv_buf_alloc_lock); return buf; } @@ -669,6 +674,7 @@ void bt_mesh_adv_init(void) __ASSERT(ret == pdTRUE, "Failed to create adv thread"); (void)ret; #endif /* CONFIG_BLE_MESH_FREERTOS_STATIC_ALLOC_EXTERNAL && (CONFIG_SPIRAM_CACHE_WORKAROUND || !CONFIG_IDF_TARGET_ESP32) && CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY */ + bt_mesh_r_mutex_create(&adv_buf_alloc_lock); } #if CONFIG_BLE_MESH_DEINIT @@ -724,6 +730,7 @@ void bt_mesh_adv_deinit(void) #if CONFIG_BLE_MESH_SUPPORT_BLE_ADV bt_mesh_ble_adv_deinit(); #endif /* CONFIG_BLE_MESH_SUPPORT_BLE_ADV */ + bt_mesh_r_mutex_free(&adv_buf_alloc_lock); } #endif /* CONFIG_BLE_MESH_DEINIT */