Mirror of https://github.com/me-no-dev/ESPAsyncWebServer.git (synced 2025-09-27 06:40:56 +02:00)
Fix self-deadlock in onConnect introduced by the ul-gh/fix_#884 branch

Commit 344062ae5b intended to protect the AsyncEventSource client list
from concurrent access by wrapping accesses in a lock. However, it
introduced a self-deadlock: the onConnect callback is now invoked with
the client-list lock held, so if application code in that callback
broadcasts a message to the existing connections of the same
AsyncEventSource (for example, to announce the new incoming
connection), the broadcast blocks trying to take a lock that the same
task already holds. Fixed by using the AsyncWebLockGuard class, which
is tailor-made to address this scenario.
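
For context, a minimal sketch of the application pattern that triggers
the deadlock; the endpoint path, the callback body, and the surrounding
server setup are illustrative, not taken from the commit:

    #include <ESPAsyncWebServer.h>

    AsyncWebServer server(80);            // illustrative server on port 80
    AsyncEventSource events("/events");   // SSE source; path is hypothetical

    void setup() {
      // (WiFi setup omitted)
      events.onConnect([](AsyncEventSourceClient *client) {
        // _addClient() runs this callback while holding _client_queue_lock.
        // send() takes the same lock to iterate _clients, so with a plain,
        // non-owner-tracking lock the task blocks on itself right here.
        events.send("client connected", "announce");
      });
      server.addHandler(&events);
      server.begin();
    }

    void loop() {}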
src/AsyncEventSource.cpp
@@ -298,38 +298,34 @@ void AsyncEventSource::_addClient(AsyncEventSourceClient * client){
     client->write((const char *)temp, 2053);
     free(temp);
   }*/
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   _clients.add(client);
   if(_connectcb)
     _connectcb(client);
-  _client_queue_lock.unlock();
 }
 
 void AsyncEventSource::_handleDisconnect(AsyncEventSourceClient * client){
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   _clients.remove(client);
-  _client_queue_lock.unlock();
 }
 
 void AsyncEventSource::close(){
   // While the whole loop is not done, the linked list is locked and so the
   // iterator should remain valid even when AsyncEventSource::_handleDisconnect()
   // is called very early
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   for(const auto &c: _clients){
     if(c->connected())
       c->close();
   }
-  _client_queue_lock.unlock();
 }
 
 // pmb fix
 size_t AsyncEventSource::avgPacketsWaiting() const {
   size_t aql = 0;
   uint32_t nConnectedClients = 0;
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   if (_clients.isEmpty()) {
-    _client_queue_lock.unlock();
     return 0;
   }
   for(const auto &c: _clients){
@@ -338,29 +334,26 @@ size_t AsyncEventSource::avgPacketsWaiting() const {
       ++nConnectedClients;
     }
   }
-  _client_queue_lock.unlock();
   return ((aql) + (nConnectedClients/2)) / (nConnectedClients); // round up
 }
 
 void AsyncEventSource::send(
     const char *message, const char *event, uint32_t id, uint32_t reconnect){
   String ev = generateEventMessage(message, event, id, reconnect);
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   for(const auto &c: _clients){
     if(c->connected()) {
       c->_write(ev.c_str(), ev.length());
     }
   }
-  _client_queue_lock.unlock();
 }
 
 size_t AsyncEventSource::count() const {
   size_t n_clients;
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   n_clients = _clients.count_if([](AsyncEventSourceClient *c){
     return c->connected();
   });
-  _client_queue_lock.unlock();
   return n_clients;
 }
 
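Note the early return in avgPacketsWaiting(): with explicit
lock()/unlock(), every exit path needed its own unlock() call, whereas
the guard's destructor releases the lock on any path out of scope,
which is why the unlock() before the `return 0;` simply disappears.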
src/AsyncEventSource.h
@@ -104,7 +104,7 @@ class AsyncEventSource: public AsyncWebHandler {
     LinkedList<AsyncEventSourceClient *> _clients;
     // Same as for individual messages, protect mutations of _clients list
     // since simultaneous access from different tasks is possible
-    AsyncPlainLock _client_queue_lock;
+    AsyncWebLock _client_queue_lock;
     ArEventHandlerFunction _connectcb;
     ArAuthorizeConnectHandler _authorizeConnectHandler;
   public:
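
The header change from AsyncPlainLock to AsyncWebLock is what makes the
guard safe against re-entry: on ESP32 builds the lock records the
FreeRTOS task handle of its holder, lock() returns false instead of
blocking when the holding task re-enters, and the guard releases only
what it actually acquired. A simplified sketch of that mechanism,
condensed from the library's AsyncWebSynchronization.h (details may
vary between versions):

    #include <freertos/FreeRTOS.h>
    #include <freertos/semphr.h>
    #include <freertos/task.h>

    // Owner-tracking lock: lock() reports whether the caller actually
    // acquired it, instead of blocking when the holding task re-enters.
    class AsyncWebLock {
      SemaphoreHandle_t _lock;
      mutable void *_lockedBy;           // task handle of current holder
    public:
      AsyncWebLock() : _lockedBy(NULL) {
        _lock = xSemaphoreCreateBinary();
        xSemaphoreGive(_lock);           // binary semaphore starts taken
      }
      ~AsyncWebLock() { vSemaphoreDelete(_lock); }
      bool lock() const {
        void *currentTask = xTaskGetCurrentTaskHandle();
        if (_lockedBy == currentTask)
          return false;                  // re-entry: don't block, don't own
        xSemaphoreTake(_lock, portMAX_DELAY);
        _lockedBy = currentTask;
        return true;                     // caller now owns the lock
      }
      void unlock() const {
        _lockedBy = NULL;
        xSemaphoreGive(_lock);
      }
    };

    // RAII guard: remembers whether it acquired, releases only then.
    class AsyncWebLockGuard {
      const AsyncWebLock *_lock;         // non-NULL only if acquired
    public:
      AsyncWebLockGuard(const AsyncWebLock &l) : _lock(l.lock() ? &l : NULL) {}
      ~AsyncWebLockGuard() {
        if (_lock)
          _lock->unlock();
      }
    };

With this in place, the guard in send() acquires nothing when invoked
from inside _connectcb under _addClient()'s guard, and only the outer
guard releases the lock on destruction.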