Mirror of https://github.com/me-no-dev/AsyncTCP.git
Applying clang formatting
@@ -65,7 +65,7 @@ extern "C" {
   TCP poll interval is specified in terms of the TCP coarse timer interval, which is called twice a second
   https://github.com/espressif/esp-lwip/blob/2acf959a2bb559313cd2bf9306c24612ba3d0e19/src/core/tcp.c#L1895
 */
 #define CONFIG_ASYNC_TCP_POLL_TIMER 1

 /*
  * TCP/IP Event Task
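The comment in this hunk refers to how lwIP expresses the poll interval: tcp_poll() takes its interval argument in coarse-timer ticks, and the coarse timer fires roughly twice per second, so one tick is about 500 ms. A minimal sketch of that relationship (the callback and the interval value below are illustrative, not taken from the diff):

#include "lwip/tcp.h"

// Illustrative poll callback; AsyncTCP installs its own handler internally.
static err_t example_poll_cb(void* arg, struct tcp_pcb* pcb) {
  // Runs every `interval` coarse-timer ticks while the connection sits idle.
  return ERR_OK;
}

static void arm_poll(struct tcp_pcb* pcb) {
  // interval = 2 ticks, i.e. roughly 2 * 500 ms = about once per second.
  tcp_poll(pcb, example_poll_cb, 2);
}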
@@ -179,9 +179,9 @@ static inline bool _get_async_event(lwip_event_packet_t** e) {
     todo: implement some kind of fair dequeing or (better) simply punish user for a bad designed callbacks by resetting hog connections
   */
   lwip_event_packet_t* next_pkt = NULL;
-  while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS){
-    if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL){
-      if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS){
+  while (xQueuePeek(_async_queue, &next_pkt, 0) == pdPASS) {
+    if (next_pkt->arg == (*e)->arg && next_pkt->event == LWIP_TCP_POLL) {
+      if (xQueueReceive(_async_queue, &next_pkt, 0) == pdPASS) {
         free(next_pkt);
         next_pkt = NULL;
         log_d("coalescing polls, network congestion or async callbacks might be too slow!");
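The hunk above shows only fragments of the poll-coalescing loop in _get_async_event(). A self-contained sketch of the same idea, assuming _async_queue is a FreeRTOS queue of heap-allocated lwip_event_packet_t pointers as in the surrounding code (the helper itself is hypothetical):

#include <stdlib.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

// Hypothetical helper: after dequeuing `current`, keep inspecting the head of the
// queue and drop any further poll events aimed at the same connection, so a slow
// consumer is not flooded with redundant polls.
static void coalesce_polls(QueueHandle_t queue, lwip_event_packet_t* current) {
  lwip_event_packet_t* next = NULL;
  while (xQueuePeek(queue, &next, 0) == pdPASS    // look at the head without removing it
         && next->arg == current->arg             // same connection (same callback arg)
         && next->event == LWIP_TCP_POLL) {       // and it is just another poll event
    if (xQueueReceive(queue, &next, 0) != pdPASS) {
      break;                                      // queue was drained elsewhere; stop
    }
    free(next);                                   // packets are heap-allocated, so release them
  }
}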
@@ -225,7 +225,7 @@ static bool _remove_events_with_arg(void* arg) {
       free(first_packet);
       first_packet = NULL;

       // try to return first packet to the back of the queue
     } else if (xQueueSend(_async_queue, &first_packet, 0) != pdPASS) {
       // we can't wait here if queue is full, because this call has been done from the only consumer task of this queue
       // otherwise it would deadlock, we have to discard the event
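The two comments in this hunk carry the key constraint: _remove_events_with_arg() runs in the same task that drains _async_queue, so it can never block on that queue, because the consumer it would be waiting for is the caller itself. A rough sketch of the resulting rotate-and-filter pattern, simplified from the hunk (the counting loop and helper name are assumptions, not the library's actual implementation):

// Cycle the queue once with 0-tick timeouts, discarding packets that belong to
// `arg` and pushing everything else back to the tail.
static void remove_events_for(QueueHandle_t queue, void* arg) {
  UBaseType_t pending = uxQueueMessagesWaiting(queue);
  lwip_event_packet_t* pkt = NULL;
  while (pending-- && xQueueReceive(queue, &pkt, 0) == pdPASS) {
    if (pkt->arg == arg) {
      free(pkt);                                   // event targets the closed connection: drop it
    } else if (xQueueSend(queue, &pkt, 0) != pdPASS) {
      free(pkt);                                   // cannot block here, so the event has to be discarded
    }
  }
}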
@@ -374,7 +374,7 @@ static int8_t _tcp_connected(void* arg, tcp_pcb* pcb, int8_t err) {

 static int8_t _tcp_poll(void* arg, struct tcp_pcb* pcb) {
   // throttle polling events queing when event queue is getting filled up, let it handle _onack's
-  //log_d("qs:%u", uxQueueMessagesWaiting(_async_queue));
+  // log_d("qs:%u", uxQueueMessagesWaiting(_async_queue));
   if (uxQueueMessagesWaiting(_async_queue) > (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4)) {
     log_d("throttling");
     return ERR_OK;
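The throttle condition in _tcp_poll() compares the queue backlog against a randomized threshold. Since % and / bind tighter than +, the expression reads as (rand() % CONFIG_ASYNC_TCP_QUEUE_SIZE) / 2 + CONFIG_ASYNC_TCP_QUEUE_SIZE / 4, i.e. a value between one quarter and roughly three quarters of the queue capacity, so poll events start being skipped at a random point once the queue is more than about 25% full. A worked example, assuming a queue size of 64 (the actual default of CONFIG_ASYNC_TCP_QUEUE_SIZE is an assumption here):

#include <cstdlib>

constexpr int kQueueSize = 64;  // stand-in for CONFIG_ASYNC_TCP_QUEUE_SIZE

static int random_throttle_threshold() {
  // (rand() % 64) / 2 is 0..31, plus 64 / 4 = 16, so the threshold is 16..47:
  // polls get dropped once the backlog exceeds between 25% and ~73% of capacity.
  return std::rand() % kQueueSize / 2 + kQueueSize / 4;
}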
@@ -57,7 +57,7 @@ extern "C" {

 // guard AsyncTCP task with watchdog
 #ifndef CONFIG_ASYNC_TCP_USE_WDT
 #define CONFIG_ASYNC_TCP_USE_WDT 1
 #endif

 #ifndef CONFIG_ASYNC_TCP_STACK_SIZE
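This hunk and the ones that follow appear to come from the library header (the later hunks carry class AsyncClient context). The #ifndef guards shown here are what make the configuration overridable: a project can set the macros from its build flags instead of editing the header, for example (the values are illustrative, not defaults taken from this diff):

  -DCONFIG_ASYNC_TCP_USE_WDT=0
  -DCONFIG_ASYNC_TCP_STACK_SIZE=8192

With those guards in place, a definition coming from the compiler command line takes precedence and the defaults in the header are skipped.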
@@ -116,7 +116,7 @@ class AsyncClient {
   */
   void close(bool now = false);
   // same as close()
-  void stop(){ close(false); };
+  void stop() { close(false); };
   int8_t abort();
   bool free();

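A brief usage sketch for the teardown methods declared above (the client instance is hypothetical, and describing abort() as a TCP reset is the usual TCP semantics rather than something stated in this diff):

AsyncClient client;
// ... after connecting and finishing with the connection:
client.close();         // graceful close; stop() is declared above as the same call
// client.close(true);  // pass now = true to close without waiting for pending work
// client.abort();      // drop the connection immediately with a TCP reset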
@@ -169,7 +169,7 @@ class AsyncClient {
   * @param data
   * @return size_t
   */
-  size_t write(const char* data){ return data == NULL ? 0 : write(data, strlen(data)); };
+  size_t write(const char* data) { return data == NULL ? 0 : write(data, strlen(data)); };

   uint8_t state();
   bool connecting();
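A usage note for the convenience overload above: it null-checks the pointer and forwards to write(data, strlen(data)), so plain C strings can be queued directly (the client instance is hypothetical):

const char* request = "GET / HTTP/1.0\r\n\r\n";
size_t queued = client.write(request);          // same as client.write(request, strlen(request))
size_t none = client.write((const char*)NULL);  // NULL is handled and simply returns 0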