Applying clang formatting
@@ -65,7 +65,7 @@ extern "C" {
 TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a second
 https://github.com/espressif/esp-lwip/blob/2acf959a2bb559313cd2bf9306c24612ba3d0e19/src/core/tcp.c#L1895
 */
 #define CONFIG_ASYNC_TCP_POLL_TIMER 1
 
 /*
  * TCP/IP Event Task
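The comment above refers to lwIP's coarse TCP timer, which fires roughly every 500 ms; the interval argument of lwIP's tcp_poll() is counted in those half-second ticks. A minimal sketch of the underlying raw lwIP call (not code from this commit; my_poll_cb and attach_poll are illustrative names):

    #include "lwip/tcp.h"

    // Poll callback invoked from the lwIP coarse timer for this connection.
    static err_t my_poll_cb(void *arg, struct tcp_pcb *pcb) {
      // periodic per-connection housekeeping would go here
      return ERR_OK;
    }

    // interval is measured in coarse-timer ticks of ~500 ms each,
    // so 2 ticks asks for the callback roughly once per second.
    void attach_poll(struct tcp_pcb *pcb) {
      tcp_poll(pcb, my_poll_cb, 2);
    }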
@@ -179,9 +179,9 @@ static inline bool _get_async_event(lwip_event_packet_t** e) {
 todo: implement some kind of fair dequeing or (better) simply punish user for a bad designed callbacks by resetting hog connections
 */
 lwip_event_packet_t* next_pkt = NULL;
-while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS){
-if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL){
-if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){
+while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS) {
+if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL) {
+if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS) {
 free(next_pkt);
 next_pkt = NULL;
 log_d("coalescing polls, network congestion or async callbacks might be too slow!");
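The loop above only collapses LWIP_TCP_POLL events sitting at the head of the queue that target the same connection as the event just dequeued, so a slow consumer does not pile up redundant polls. A host-side model of that rule, using a std::deque in place of the FreeRTOS queue (event_t, its fields and coalesce_polls are illustrative, not library types):

    #include <deque>

    struct event_t {
      void *arg;  // connection the event belongs to
      int kind;   // event type, e.g. POLL below
    };
    constexpr int POLL = 1;

    // Drop poll events queued directly behind the one being handled,
    // as long as they refer to the same connection.
    void coalesce_polls(std::deque<event_t *> &queue, const event_t *current) {
      while (!queue.empty() && queue.front()->arg == current->arg && queue.front()->kind == POLL) {
        delete queue.front();  // the library free()s the packet at this point
        queue.pop_front();
      }
    }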
@@ -225,7 +225,7 @@ static bool _remove_events_with_arg(void* arg) {
 free(first_packet);
 first_packet = NULL;
 
 // try to return first packet to the back of the queue
 } else if (xQueueSend(_async_queue, &first_packet, 0) != pdPASS) {
 // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue
 // otherwise it would deadlock, we have to discard the event
@@ -374,7 +374,7 @@ static int8_t _tcp_connected(void* arg, tcp_pcb* pcb, int8_t err) {
 
 static int8_t _tcp_poll(void* arg, struct tcp_pcb* pcb) {
 // throttle polling events queing when event queue is getting filled up, let it handle _onack's
-//log_d("qs:%u", uxQueueMessagesWaiting(_async_queue));
+// log_d("qs:%u", uxQueueMessagesWaiting(_async_queue));
 if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) {
 log_d("throttling");
 return ERR_OK;
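The randomized threshold above sheds poll events progressively instead of at a hard cutoff. Since % and / share precedence and associate left to right, the expression parses as (rand() % N) / 2 + N / 4 for a queue size N; taking N = 64 purely for the arithmetic, the threshold is a value between 16 and 47, so polls start being dropped somewhere between one quarter and roughly three quarters of queue capacity:

    #include <cstdlib>

    constexpr unsigned N = 64;  // stand-in for CONFIG_ASYNC_TCP_QUEUE_SIZE, value assumed

    unsigned poll_threshold() {
      // (rand() % 64) / 2 gives 0..31, 64 / 4 gives 16, so the result is 16..47
      return rand() % N / 2 + N / 4;
    }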
@@ -57,7 +57,7 @@ extern "C" {
 
 // guard AsyncTCP task with watchdog
 #ifndef CONFIG_ASYNC_TCP_USE_WDT
 #define CONFIG_ASYNC_TCP_USE_WDT 1
 #endif
 
 #ifndef CONFIG_ASYNC_TCP_STACK_SIZE
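Because the defaults are wrapped in #ifndef guards, a definition supplied earlier (for example through a -D build flag) wins over the library fallback. A small illustration of the mechanism, with a hypothetical override value:

    #define CONFIG_ASYNC_TCP_USE_WDT 0   // hypothetical project-level override
    #ifndef CONFIG_ASYNC_TCP_USE_WDT
    #define CONFIG_ASYNC_TCP_USE_WDT 1   // library default, skipped because the guard sees the override
    #endif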
@@ -111,12 +111,12 @@ class AsyncClient {
 bool connect(const char* host, uint16_t port);
 /**
  * @brief close connection
  *
  * @param now - ignored
  */
 void close(bool now = false);
 // same as close()
-void stop(){ close(false); };
+void stop() { close(false); };
 int8_t abort();
 bool free();
 
@@ -134,17 +134,17 @@ class AsyncClient {
 it is enforced in https://github.com/espressif/esp-lwip/blob/0606eed9d8b98a797514fdf6eabb4daf1c8c8cd9/src/core/tcp_out.c#L422C5-L422C30
 if LWIP_NETIF_TX_SINGLE_PBUF is set, and it is set indeed in IDF
 https://github.com/espressif/esp-idf/blob/a0f798cfc4bbd624aab52b2c194d219e242d80c1/components/lwip/port/include/lwipopts.h#L744
 *
 * @param data
 * @param size
 * @param apiflags
 * @return size_t amount of data that has been copied
 */
 size_t add(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY);
 
 /**
 * @brief send data previously add()'ed
 *
 * @return true on success
 * @return false on error
 */
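As the comments above spell out, add() copies at most a TCP_SND_BUF's worth of data into lwIP's send buffer and reports how much it actually took, and send() then transmits whatever was added. A hedged usage sketch (it assumes AsyncClient's canSend() and space() helpers; sendSome is an illustrative name and error handling is omitted):

    #include <algorithm>
    #include <AsyncTCP.h>

    // Queue as much of buf as the TCP send buffer accepts right now, then flush it.
    // Returns the number of bytes actually taken.
    size_t sendSome(AsyncClient &client, const char *buf, size_t len) {
      if (!client.canSend()) {
        return 0;  // previous data not yet acknowledged, try again later
      }
      size_t chunk = std::min(len, client.space());  // never offer more than fits
      size_t added = client.add(buf, chunk);         // copies into lwIP's send buffer
      if (added) {
        client.send();                               // transmit what was just added
      }
      return added;
    }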
@@ -154,22 +154,22 @@ class AsyncClient {
 * @brief add and enqueue data for sending
 * @note it is same as add() + send()
 * @note only make sense when canSend() == true
 *
 * @param data
 * @param size
 * @param apiflags
 * @return size_t
 */
 size_t write(const char* data, size_t size, uint8_t apiflags = ASYNC_WRITE_FLAG_COPY);
 
 /**
 * @brief add and enque data for sending
 * @note treats data as null-terminated string
 *
 * @param data
 * @return size_t
 */
-size_t write(const char* data){ return data == NULL ? 0 : write(data, strlen(data)); };
+size_t write(const char* data) { return data == NULL ? 0 : write(data, strlen(data)); };
 
 uint8_t state();
 bool connecting();
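Taken together, the declarations in this header cover the usual client lifecycle: connect(), write() a null-terminated string with the overload above, and close() or stop() when done. A minimal, hedged sketch (it assumes the library's onConnect/onData callback setters; the host and port are placeholders):

    #include <AsyncTCP.h>

    AsyncClient client;

    void startClient() {
      client.onConnect([](void *, AsyncClient *c) {
        c->write("GET / HTTP/1.0\r\n\r\n");  // enqueue and send a C string
      });
      client.onData([](void *, AsyncClient *c, void *data, size_t len) {
        // consume len bytes at data, then hang up
        c->close();                           // equivalent to c->stop()
      });
      client.connect("example.com", 80);      // placeholder host and port
    }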