From afa162b73ad9a53b8186d3a32ac589cb83edaa5d Mon Sep 17 00:00:00 2001 From: Emil Muratov Date: Fri, 13 Dec 2024 16:32:15 +0900 Subject: [PATCH 1/6] Coalesce poll events on queue eviction Refer: https://github.com/mathieucarbou/ESPAsyncWebServer/discussions/165 Let's try to coalesce two (or more) consecutive poll events into one this usually happens with poor implemented user-callbacks that are runs too long and makes poll events to stack in the queue if consecutive user callback for a same connection runs longer that poll time then it will fill the queue with events until it deadlocks. This is a workaround to mitigate such poor designs and won't let other events/connections to starve the task time. It won't be effective if user would run multiple simultaneous long running callbacks due to message interleaving. todo: implement some kind of fair dequeing or (better) simply punish user for a bad designed callbacks by resetting hog connections --- src/AsyncTCP.cpp | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 18cd24a..ae79d4c 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -61,6 +61,12 @@ extern "C" { #define INVALID_CLOSED_SLOT -1 +/* + TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a second + https://github.com/espressif/esp-lwip/blob/2acf959a2bb559313cd2bf9306c24612ba3d0e19/src/core/tcp.c#L1895 +*/ +#define CONFIG_ASYNC_TCP_POLL_TIMER 1 + /* * TCP/IP Event Task * */ @@ -148,7 +154,34 @@ static inline bool _prepend_async_event(lwip_event_packet_t** e) { } static inline bool _get_async_event(lwip_event_packet_t** e) { - return _async_queue && xQueueReceive(_async_queue, e, portMAX_DELAY) == pdPASS; + if (!_async_queue) { + return false; + } + + if (xQueueReceive(_async_queue, e, portMAX_DELAY) != pdPASS) + return false; + + /* + Let's try to coalesce two (or more) consecutive poll events into one + this usually 
happens with poor implemented user-callbacks that are runs too long and makes poll events to stack in the queue + if consecutive user callback for a same connection runs longer that poll time then it will fill the queue with events until it deadlocks. + This is a workaround to mitigate such poor designs and won't let other events/connections to starve the task time. + It won't be effective if user would run multiple simultaneous long running callbacks due to message interleaving. + todo: implement some kind of fair dequeing or (better) simply punish user for a bad designed callbacks by resetting hog connections + */ + lwip_event_packet_t* next_pkt = NULL; + while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS){ + if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL && (*e)->event == LWIP_TCP_POLL){ + if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){ + free(next_pkt); + next_pkt = NULL; + log_w("coalescing polls, async callback might be too slow!"); + } else + return true; + } else + return true; + } + return true; } static bool _remove_events_with_arg(void* arg) { @@ -311,6 +344,7 @@ static int8_t _tcp_connected(void* arg, tcp_pcb* pcb, int8_t err) { static int8_t _tcp_poll(void* arg, struct tcp_pcb* pcb) { // throttle polling events queing when event queue is getting filled up, let it handle _onack's + //log_d("qs:%u", uxQueueMessagesWaiting(_async_queue)); if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) { log_d("throttling"); return ERR_OK; @@ -612,7 +646,7 @@ AsyncClient::AsyncClient(tcp_pcb* pcb) tcp_recv(_pcb, &_tcp_recv); tcp_sent(_pcb, &_tcp_sent); tcp_err(_pcb, &_tcp_error); - tcp_poll(_pcb, &_tcp_poll, 1); + tcp_poll(_pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); if (!_allocate_closed_slot()) { _close(); } From 80b2733de54c06e367389b930ad0d257bfbb4680 Mon Sep 17 00:00:00 2001 From: Emil Muratov Date: Sat, 14 Dec 2024 23:32:26 +0900 Subject: [PATCH 2/6] make 
_async_service_task's watchdog constantly watching with periodic feed --- src/AsyncTCP.cpp | 29 ++++++++++++++++++----------- src/AsyncTCP.h | 7 +++++-- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index ae79d4c..00ac960 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -158,8 +158,14 @@ static inline bool _get_async_event(lwip_event_packet_t** e) { return false; } +#if CONFIG_ASYNC_TCP_USE_WDT + // need to return periodically to feed the dog + if (xQueueReceive(_async_queue, e, pdMS_TO_TICKS(1000)) != pdPASS) + return false; +#else if (xQueueReceive(_async_queue, e, portMAX_DELAY) != pdPASS) return false; +#endif /* Let's try to coalesce two (or more) consecutive poll events into one @@ -175,7 +181,7 @@ static inline bool _get_async_event(lwip_event_packet_t** e) { if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){ free(next_pkt); next_pkt = NULL; - log_w("coalescing polls, async callback might be too slow!"); + log_d("coalescing polls, async callback might be too slow!"); } else return true; } else @@ -255,22 +261,23 @@ static void _handle_async_event(lwip_event_packet_t* e) { } static void _async_service_task(void* pvParameters) { +#if CONFIG_ASYNC_TCP_USE_WDT + if (esp_task_wdt_add(NULL) != ESP_OK) { + log_w("Failed to add async task to WDT"); + } +#endif lwip_event_packet_t* packet = NULL; for (;;) { if (_get_async_event(&packet)) { -#if CONFIG_ASYNC_TCP_USE_WDT - if (esp_task_wdt_add(NULL) != ESP_OK) { - log_e("Failed to add async task to WDT"); - } -#endif _handle_async_event(packet); -#if CONFIG_ASYNC_TCP_USE_WDT - if (esp_task_wdt_delete(NULL) != ESP_OK) { - log_e("Failed to remove loop task from WDT"); - } -#endif } +#if CONFIG_ASYNC_TCP_USE_WDT + esp_task_wdt_reset(); +#endif } +#if CONFIG_ASYNC_TCP_USE_WDT + esp_task_wdt_delete(NULL); +#endif vTaskDelete(NULL); _async_service_task_handle = NULL; } diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index ab2609f..2df01c7 100644 --- 
a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -48,13 +48,16 @@ extern "C" { #include } #define CONFIG_ASYNC_TCP_RUNNING_CORE -1 // any available core - #define CONFIG_ASYNC_TCP_USE_WDT 0 #endif // If core is not defined, then we are running in Arduino or PIO #ifndef CONFIG_ASYNC_TCP_RUNNING_CORE #define CONFIG_ASYNC_TCP_RUNNING_CORE -1 // any available core - #define CONFIG_ASYNC_TCP_USE_WDT 1 // if enabled, adds between 33us and 200us per event +#endif + +// guard AsyncTCP task with watchdog +#ifndef CONFIG_ASYNC_TCP_USE_WDT + #define CONFIG_ASYNC_TCP_USE_WDT 1 #endif #ifndef CONFIG_ASYNC_TCP_STACK_SIZE From 7bfbb947d0a412559e213fdf321184b483e98597 Mon Sep 17 00:00:00 2001 From: Emil Muratov Date: Sun, 15 Dec 2024 12:04:52 +0900 Subject: [PATCH 3/6] queue deadlock and congestion avoidance - do not let `_remove_events_with_arg()` call to block on queue perturbation, otherwise it could deadlock - in any case do not block on adding LWIP_TCP_POLL event to the queue, it does not make much sense anyway since poll events are repetitive - `_get_async_event()` will discard LWIP_TCP_POLL events when q gets filled up this will work in combination with throttling poll events when adding to the queue Poorly designed apps and multiple parallel connections could flood the queue with interleaved poll events that can't be properly throttled or coalesced. So we can discard them in the eviction task after coalescing. It will work in this way: - queue is up to 1/4 full - all events are entering the queue and serviced - queue is from 1/4 and up to 3/4 full - new poll events are throttled on enqueue with linear probability - queue is from 3/4 up to full top - all new poll events are ignored on enqueue and existing poll events already in the queue are discarded on eviction with linear probability giving away priority for all other events to be serviced. 
It is expected that on a new poll timer connection polls could reenter the queue --- src/AsyncTCP.cpp | 67 +++++++++++++++++------------ src/AsyncTCP.h | 109 +++++++++++++++++++++++++++++++++++++---------- 2 files changed, 126 insertions(+), 50 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 00ac960..722da63 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -145,12 +145,12 @@ static inline bool _init_async_event_queue() { return true; } -static inline bool _send_async_event(lwip_event_packet_t** e) { - return _async_queue && xQueueSend(_async_queue, e, portMAX_DELAY) == pdPASS; +static inline bool _send_async_event(lwip_event_packet_t** e, TickType_t wait = portMAX_DELAY) { + return _async_queue && xQueueSend(_async_queue, e, wait) == pdPASS; } -static inline bool _prepend_async_event(lwip_event_packet_t** e) { - return _async_queue && xQueueSendToFront(_async_queue, e, portMAX_DELAY) == pdPASS; +static inline bool _prepend_async_event(lwip_event_packet_t** e, TickType_t wait = portMAX_DELAY) { + return _async_queue && xQueueSendToFront(_async_queue, e, wait) == pdPASS; } static inline bool _get_async_event(lwip_event_packet_t** e) { @@ -167,6 +167,9 @@ static inline bool _get_async_event(lwip_event_packet_t** e) { return false; #endif + if ((*e)->event != LWIP_TCP_POLL) + return true; + /* Let's try to coalesce two (or more) consecutive poll events into one this usually happens with poor implemented user-callbacks that are runs too long and makes poll events to stack in the queue @@ -177,16 +180,31 @@ static inline bool _get_async_event(lwip_event_packet_t** e) { */ lwip_event_packet_t* next_pkt = NULL; while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS){ - if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL && (*e)->event == LWIP_TCP_POLL){ + if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL){ if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){ free(next_pkt); next_pkt = NULL; - 
log_d("coalescing polls, async callback might be too slow!"); - } else - return true; - } else - return true; + log_d("coalescing polls, network congestion or async callbacks might be too slow!"); + continue; + } + } else { + /* + poor designed apps using asynctcp without proper dataflow control could flood the queue with interleaved pool/ack events. + We can try to mitigate it by discarding poll events when queue grows too much. + Let's discard poll events using linear probability curve starting from 3/4 of queue length + Poll events are periodic and connection could get another chance next time + */ + if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 4 + CONFIG_ASYNC_TCP_QUEUE_SIZE * 3 / 4)) { + free(next_pkt); + next_pkt = NULL; + log_d("discarding poll due to queue congestion"); + // evict next event from a queue + return _get_async_event(e); + } + } + return true; } + // last resort return return true; } @@ -206,8 +224,11 @@ static bool _remove_events_with_arg(void* arg) { if ((int)first_packet->arg == (int)arg) { free(first_packet); first_packet = NULL; - // return first packet to the back of the queue - } else if (xQueueSend(_async_queue, &first_packet, portMAX_DELAY) != pdPASS) { + + // try to return first packet to the back of the queue + } else if (xQueueSend(_async_queue, &first_packet, 0) != pdPASS) { + // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue + // otherwise it would deadlock, we have to discard the event return false; } } @@ -219,7 +240,9 @@ static bool _remove_events_with_arg(void* arg) { if ((int)packet->arg == (int)arg) { free(packet); packet = NULL; - } else if (xQueueSend(_async_queue, &packet, portMAX_DELAY) != pdPASS) { + } else if (xQueueSend(_async_queue, &packet, 0) != pdPASS) { + // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue + // otherwise it would deadlock, we have to 
discard the event return false; } } @@ -362,7 +385,8 @@ static int8_t _tcp_poll(void* arg, struct tcp_pcb* pcb) { e->event = LWIP_TCP_POLL; e->arg = arg; e->poll.pcb = pcb; - if (!_send_async_event(&e)) { + // poll events are not critical 'cause those are repetitive, so we may not wait the queue in any case + if (!_send_async_event(&e, 0)) { free((void*)(e)); } return ERR_OK; @@ -684,7 +708,7 @@ AsyncClient& AsyncClient::operator=(const AsyncClient& other) { tcp_recv(_pcb, &_tcp_recv); tcp_sent(_pcb, &_tcp_sent); tcp_err(_pcb, &_tcp_error); - tcp_poll(_pcb, &_tcp_poll, 1); + tcp_poll(_pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); } return *this; } @@ -782,7 +806,7 @@ bool AsyncClient::_connect(ip_addr_t addr, uint16_t port) { tcp_err(pcb, &_tcp_error); tcp_recv(pcb, &_tcp_recv); tcp_sent(pcb, &_tcp_sent); - tcp_poll(pcb, &_tcp_poll, 1); + tcp_poll(pcb, &_tcp_poll, CONFIG_ASYNC_TCP_POLL_TIMER); TCP_MUTEX_UNLOCK(); esp_err_t err = _tcp_connect(pcb, _closed_slot, &addr, port, (tcp_connected_fn)&_tcp_connected); @@ -1131,10 +1155,6 @@ void AsyncClient::_dns_found(struct ip_addr* ipaddr) { * Public Helper Methods * */ -void AsyncClient::stop() { - close(false); -} - bool AsyncClient::free() { if (!_pcb) { return true; @@ -1145,13 +1165,6 @@ bool AsyncClient::free() { return false; } -size_t AsyncClient::write(const char* data) { - if (data == NULL) { - return 0; - } - return write(data, strlen(data)); -} - size_t AsyncClient::write(const char* data, size_t size, uint8_t apiflags) { size_t will_send = add(data, size, apiflags); if (!will_send || !send()) { diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 2df01c7..17e3db9 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -109,34 +109,86 @@ class AsyncClient { bool connect(const IPv6Address& ip, uint16_t port); #endif bool connect(const char* host, uint16_t port); + /** + * @brief close connection + * + * @param now - ignored + */ void close(bool now = false); - void stop(); + // same as close() + void stop(){ 
close(false); }; int8_t abort(); bool free(); - bool canSend(); // ack is not pending - size_t space(); // space available in the TCP window - size_t add(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); // add for sending - bool send(); // send all data added with the method above + // ack is not pending + bool canSend(); + // TCP buffer space available + size_t space(); - // write equals add()+send() - size_t write(const char* data); - size_t write(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); // only when canSend() == true + /** + * @brief add data to be send (but do not send yet) + * @note add() would call lwip's tcp_write() + By default apiflags=ASYNC_WRITE_FLAG_COPY + You could try to use apiflags with this flag unset to pass data by reference and avoid copy to socket buffer, + but looks like it does not work for Arduino's lwip in ESP32/IDF at least + it is enforced in https://github.com/espressif/esp-lwip/blob/0606eed9d8b98a797514fdf6eabb4daf1c8c8cd9/src/core/tcp_out.c#L422C5-L422C30 + if LWIP_NETIF_TX_SINGLE_PBUF is set, and it is set indeed in IDF + https://github.com/espressif/esp-idf/blob/a0f798cfc4bbd624aab52b2c194d219e242d80c1/components/lwip/port/include/lwipopts.h#L744 + * + * @param data + * @param size + * @param apiflags + * @return size_t amount of data that has been copied + */ + size_t add(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); + + /** + * @brief send data previously add()'ed + * + * @return true on success + * @return false on error + */ + bool send(); + + /** + * @brief add and enqueue data for sending + * @note it is same as add() + send() + * @note only make sense when canSend() == true + * + * @param data + * @param size + * @param apiflags + * @return size_t + */ + size_t write(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); + + /** + * @brief add and enque data for sending + * @note treats data as null-terminated string + * + 
* @param data + * @return size_t + */ + size_t write(const char* data){ return data == NULL ? 0 : write(data, strlen(data)); }; uint8_t state(); bool connecting(); bool connected(); bool disconnecting(); bool disconnected(); - bool freeable(); // disconnected or disconnecting + + // disconnected or disconnecting + bool freeable(); uint16_t getMss(); uint32_t getRxTimeout(); - void setRxTimeout(uint32_t timeout); // no RX data timeout for the connection in seconds + // no RX data timeout for the connection in seconds + void setRxTimeout(uint32_t timeout); uint32_t getAckTimeout(); - void setAckTimeout(uint32_t timeout); // no ACK timeout for the last sent packet in milliseconds + // no ACK timeout for the last sent packet in milliseconds + void setAckTimeout(uint32_t timeout); void setNoDelay(bool nodelay); bool getNoDelay(); @@ -165,23 +217,34 @@ class AsyncClient { IPAddress localIP(); uint16_t localPort(); - void onConnect(AcConnectHandler cb, void* arg = 0); // on successful connect - void onDisconnect(AcConnectHandler cb, void* arg = 0); // disconnected - void onAck(AcAckHandler cb, void* arg = 0); // ack received - void onError(AcErrorHandler cb, void* arg = 0); // unsuccessful connect or error - void onData(AcDataHandler cb, void* arg = 0); // data received (called if onPacket is not used) - void onPacket(AcPacketHandler cb, void* arg = 0); // data received - void onTimeout(AcTimeoutHandler cb, void* arg = 0); // ack timeout - void onPoll(AcConnectHandler cb, void* arg = 0); // every 125ms when connected + // set callback - on successful connect + void onConnect(AcConnectHandler cb, void* arg = 0); + // set callback - disconnected + void onDisconnect(AcConnectHandler cb, void* arg = 0); + // set callback - ack received + void onAck(AcAckHandler cb, void* arg = 0); + // set callback - unsuccessful connect or error + void onError(AcErrorHandler cb, void* arg = 0); + // set callback - data received (called if onPacket is not used) + void onData(AcDataHandler cb, 
void* arg = 0); + // set callback - data received + void onPacket(AcPacketHandler cb, void* arg = 0); + // set callback - ack timeout + void onTimeout(AcTimeoutHandler cb, void* arg = 0); + // set callback - every 125ms when connected + void onPoll(AcConnectHandler cb, void* arg = 0); - void ackPacket(struct pbuf* pb); // ack pbuf from onPacket - size_t ack(size_t len); // ack data that you have not acked using the method below - void ackLater() { _ack_pcb = false; } // will not ack the current packet. Call from onData + // ack pbuf from onPacket + void ackPacket(struct pbuf* pb); + // ack data that you have not acked using the method below + size_t ack(size_t len); + // will not ack the current packet. Call from onData + void ackLater() { _ack_pcb = false; } const char* errorToString(int8_t error); const char* stateToString(); - // Do not use any of the functions below! + // internal callbacks - Do NOT call any of the functions below in user code! static int8_t _s_poll(void* arg, struct tcp_pcb* tpcb); static int8_t _s_recv(void* arg, struct tcp_pcb* tpcb, struct pbuf* pb, int8_t err); static int8_t _s_fin(void* arg, struct tcp_pcb* tpcb, int8_t err); From 269124956a06f5c7ae121d092d70db6d136b5fa0 Mon Sep 17 00:00:00 2001 From: Mathieu Carbou Date: Sun, 15 Dec 2024 16:51:34 +0100 Subject: [PATCH 4/6] Applying clang formatting --- src/AsyncTCP.cpp | 12 ++++++------ src/AsyncTCP.h | 34 +++++++++++++++++----------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/AsyncTCP.cpp b/src/AsyncTCP.cpp index 722da63..12486a0 100644 --- a/src/AsyncTCP.cpp +++ b/src/AsyncTCP.cpp @@ -65,7 +65,7 @@ extern "C" { TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a second https://github.com/espressif/esp-lwip/blob/2acf959a2bb559313cd2bf9306c24612ba3d0e19/src/core/tcp.c#L1895 */ -#define CONFIG_ASYNC_TCP_POLL_TIMER 1 +#define CONFIG_ASYNC_TCP_POLL_TIMER 1 /* * TCP/IP Event Task @@ -179,9 +179,9 @@ static inline 
bool _get_async_event(lwip_event_packet_t** e) { todo: implement some kind of fair dequeing or (better) simply punish user for a bad designed callbacks by resetting hog connections */ lwip_event_packet_t* next_pkt = NULL; - while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS){ - if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL){ - if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){ + while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS) { + if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL) { + if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS) { free(next_pkt); next_pkt = NULL; log_d("coalescing polls, network congestion or async callbacks might be too slow!"); @@ -225,7 +225,7 @@ static bool _remove_events_with_arg(void* arg) { free(first_packet); first_packet = NULL; - // try to return first packet to the back of the queue + // try to return first packet to the back of the queue } else if (xQueueSend(_async_queue, &first_packet, 0) != pdPASS) { // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue // otherwise it would deadlock, we have to discard the event @@ -374,7 +374,7 @@ static int8_t _tcp_connected(void* arg, tcp_pcb* pcb, int8_t err) { static int8_t _tcp_poll(void* arg, struct tcp_pcb* pcb) { // throttle polling events queing when event queue is getting filled up, let it handle _onack's - //log_d("qs:%u", uxQueueMessagesWaiting(_async_queue)); + // log_d("qs:%u", uxQueueMessagesWaiting(_async_queue)); if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) { log_d("throttling"); return ERR_OK; diff --git a/src/AsyncTCP.h b/src/AsyncTCP.h index 17e3db9..ee81ff5 100644 --- a/src/AsyncTCP.h +++ b/src/AsyncTCP.h @@ -57,7 +57,7 @@ extern "C" { // guard AsyncTCP task with watchdog #ifndef CONFIG_ASYNC_TCP_USE_WDT - #define CONFIG_ASYNC_TCP_USE_WDT 1 + #define CONFIG_ASYNC_TCP_USE_WDT 
1 #endif #ifndef CONFIG_ASYNC_TCP_STACK_SIZE @@ -111,12 +111,12 @@ class AsyncClient { bool connect(const char* host, uint16_t port); /** * @brief close connection - * + * * @param now - ignored */ void close(bool now = false); // same as close() - void stop(){ close(false); }; + void stop() { close(false); }; int8_t abort(); bool free(); @@ -134,17 +134,17 @@ class AsyncClient { it is enforced in https://github.com/espressif/esp-lwip/blob/0606eed9d8b98a797514fdf6eabb4daf1c8c8cd9/src/core/tcp_out.c#L422C5-L422C30 if LWIP_NETIF_TX_SINGLE_PBUF is set, and it is set indeed in IDF https://github.com/espressif/esp-idf/blob/a0f798cfc4bbd624aab52b2c194d219e242d80c1/components/lwip/port/include/lwipopts.h#L744 - * - * @param data - * @param size - * @param apiflags + * + * @param data + * @param size + * @param apiflags * @return size_t amount of data that has been copied */ size_t add(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); /** * @brief send data previously add()'ed - * + * * @return true on success * @return false on error */ @@ -154,22 +154,22 @@ class AsyncClient { * @brief add and enqueue data for sending * @note it is same as add() + send() * @note only make sense when canSend() == true - * - * @param data - * @param size - * @param apiflags - * @return size_t + * + * @param data + * @param size + * @param apiflags + * @return size_t */ size_t write(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY); /** * @brief add and enque data for sending * @note treats data as null-terminated string - * - * @param data - * @return size_t + * + * @param data + * @return size_t */ - size_t write(const char* data){ return data == NULL ? 0 : write(data, strlen(data)); }; + size_t write(const char* data) { return data == NULL ? 
0 : write(data, strlen(data)); }; uint8_t state(); bool connecting(); From a7c2468f542a61823469f0f5b64d61b0654d9f54 Mon Sep 17 00:00:00 2001 From: Mathieu Carbou Date: Sun, 15 Dec 2024 17:00:32 +0100 Subject: [PATCH 5/6] update example --- examples/Client/Client.ino | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/Client/Client.ino b/examples/Client/Client.ino index 20d1bcf..202edb3 100644 --- a/examples/Client/Client.ino +++ b/examples/Client/Client.ino @@ -2,14 +2,14 @@ #include #include -// #define HOST "homeassistant.local" -// #define PORT 8123 - -// #define HOST "www.google.com" -// #define PORT 80 - +// Run a server at the root of the project with: +// > python3 -m http.server 3333 +// Now you can open a browser and test it works by visiting http://192.168.125.122:3333/ or http://192.168.125.122:3333/README.md #define HOST "192.168.125.122" -#define PORT 4000 +#define PORT 3333 + +// WiFi SSID to connect to +#define WIFI_SSID "IoT" // 16 slots on esp32 (CONFIG_LWIP_MAX_ACTIVE_TCP) #define MAX_CLIENTS CONFIG_LWIP_MAX_ACTIVE_TCP @@ -48,7 +48,7 @@ void makeRequest() { // Serial.printf("** data received by client: %" PRIu16 ": len=%u\n", client->localPort(), len); }); - client->write("GET / HTTP/1.1\r\nHost: " HOST "\r\nUser-Agent: ESP\r\nConnection: close\r\n\r\n"); + client->write("GET /README.md HTTP/1.1\r\nHost: " HOST "\r\nUser-Agent: ESP\r\nConnection: close\r\n\r\n"); }); if (client->connect(HOST, PORT)) { @@ -63,7 +63,7 @@ void setup() { continue; WiFi.mode(WIFI_STA); - WiFi.begin("IoT"); + WiFi.begin(WIFI_SSID); while (WiFi.status() != WL_CONNECTED) { delay(500); Serial.print("."); From c10ba6b1c7815650a76cf198e8705b1b49b5b499 Mon Sep 17 00:00:00 2001 From: Mathieu Carbou Date: Sun, 15 Dec 2024 17:07:41 +0100 Subject: [PATCH 6/6] update example --- examples/Client/Client.ino | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/Client/Client.ino b/examples/Client/Client.ino index 
202edb3..609af6d 100644 --- a/examples/Client/Client.ino +++ b/examples/Client/Client.ino @@ -45,7 +45,7 @@ void makeRequest() { }); client->onData([](void* arg, AsyncClient* client, void* data, size_t len) { - // Serial.printf("** data received by client: %" PRIu16 ": len=%u\n", client->localPort(), len); + Serial.printf("** data received by client: %" PRIu16 ": len=%u\n", client->localPort(), len); }); client->write("GET /README.md HTTP/1.1\r\nHost: " HOST "\r\nUser-Agent: ESP\r\nConnection: close\r\n\r\n");