Mirror of https://github.com/me-no-dev/ESPAsyncWebServer.git, synced 2025-09-27 06:40:56 +02:00
Fix self-deadlock in onConnect introduced by the ul-gh/fix_#884 branch

Commit 344062ae5b intended to protect the AsyncEventSource client list from concurrent access by wrapping accesses in locking. However, this introduced a self-deadlock scenario: if application code in the onConnect callback (now called with the client list lock held) tries to broadcast a message to the existing connections of the same AsyncEventSource (for example, to announce a new incoming connection), the broadcast call tries to take the lock and blocks, because the lock is already held by the same task that invoked the callback. Fixed by using the AsyncWebLockGuard class, which is tailor-made to address this scenario.
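For context, the pattern that hits the deadlock is the one described in the commit message: broadcasting to the same AsyncEventSource from inside its onConnect callback. A minimal sketch of such application code (WiFi setup omitted; object names and the event payload are illustrative):

#include <ESPAsyncWebServer.h>

AsyncWebServer server(80);
AsyncEventSource events("/events");

void setup() {
  // WiFi/network setup omitted for brevity.
  events.onConnect([](AsyncEventSourceClient *client) {
    // This callback runs with the client list lock held. Before this fix,
    // the send() below tried to take the same lock again and the task
    // blocked forever.
    events.send("new client connected", "message");
  });
  server.addHandler(&events);
  server.begin();
}

void loop() {}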
AsyncEventSource.cpp:

@@ -298,38 +298,34 @@ void AsyncEventSource::_addClient(AsyncEventSourceClient * client){
     client->write((const char *)temp, 2053);
     free(temp);
   }*/
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   _clients.add(client);
   if(_connectcb)
     _connectcb(client);
-  _client_queue_lock.unlock();
 }
 
 void AsyncEventSource::_handleDisconnect(AsyncEventSourceClient * client){
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   _clients.remove(client);
-  _client_queue_lock.unlock();
 }
 
 void AsyncEventSource::close(){
   // While the whole loop is not done, the linked list is locked and so the
   // iterator should remain valid even when AsyncEventSource::_handleDisconnect()
   // is called very early
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   for(const auto &c: _clients){
     if(c->connected())
       c->close();
   }
-  _client_queue_lock.unlock();
 }
 
 // pmb fix
 size_t AsyncEventSource::avgPacketsWaiting() const {
   size_t aql = 0;
   uint32_t nConnectedClients = 0;
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   if (_clients.isEmpty()) {
-    _client_queue_lock.unlock();
     return 0;
   }
   for(const auto &c: _clients){
@@ -338,29 +334,26 @@ size_t AsyncEventSource::avgPacketsWaiting() const {
       ++nConnectedClients;
     }
   }
-  _client_queue_lock.unlock();
   return ((aql) + (nConnectedClients/2)) / (nConnectedClients); // round up
 }
 
 void AsyncEventSource::send(
     const char *message, const char *event, uint32_t id, uint32_t reconnect){
   String ev = generateEventMessage(message, event, id, reconnect);
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   for(const auto &c: _clients){
     if(c->connected()) {
       c->_write(ev.c_str(), ev.length());
     }
   }
-  _client_queue_lock.unlock();
 }
 
 size_t AsyncEventSource::count() const {
   size_t n_clients;
-  _client_queue_lock.lock();
+  AsyncWebLockGuard l(_client_queue_lock);
   n_clients = _clients.count_if([](AsyncEventSourceClient *c){
     return c->connected();
   });
-  _client_queue_lock.unlock();
   return n_clients;
 }
 
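Replacing the explicit lock()/unlock() pairs with a scoped guard also removes the need to unlock manually on every exit path, such as the early return 0; in avgPacketsWaiting() above. A rough sketch of that idea using a plain std::lock_guard instead of the library's own classes (the data here is made up for illustration):

#include <mutex>
#include <vector>

std::mutex queue_lock;              // stand-in for _client_queue_lock
std::vector<size_t> queue_lengths;  // stand-in for the per-client queues

size_t avgWaitingSketch() {
  std::lock_guard<std::mutex> guard(queue_lock);  // lock held for the whole scope
  if (queue_lengths.empty())
    return 0;                                     // guard unlocks here automatically
  size_t sum = 0;
  for (size_t q : queue_lengths)
    sum += q;
  // Rounded average, mirroring the formula above; the guard also unlocks
  // on this return path.
  return (sum + queue_lengths.size() / 2) / queue_lengths.size();
}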
AsyncEventSource.h:

@@ -104,7 +104,7 @@ class AsyncEventSource: public AsyncWebHandler {
     LinkedList<AsyncEventSourceClient *> _clients;
     // Same as for individual messages, protect mutations of _clients list
     // since simultaneous access from different tasks is possible
-    AsyncPlainLock _client_queue_lock;
+    AsyncWebLock _client_queue_lock;
     ArEventHandlerFunction _connectcb;
     ArAuthorizeConnectHandler _authorizeConnectHandler;
   public:
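The header change from AsyncPlainLock to AsyncWebLock matters because AsyncWebLockGuard relies on a lock that knows which task currently holds it: a nested acquisition from the same task is treated as already satisfied, and the guard then skips the matching unlock. The following is only a rough illustration of that mechanism in portable C++, not the library's actual implementation:

#include <atomic>
#include <mutex>
#include <thread>

// Holder-tracking lock: lock() returns false (and does not block) when the
// calling thread already owns it, so re-entrant use cannot self-deadlock.
class HolderTrackingLock {
  std::mutex _m;
  std::atomic<std::thread::id> _owner{std::thread::id{}};
public:
  bool lock() {
    if (_owner.load() == std::this_thread::get_id())
      return false;                        // nested call from the owner
    _m.lock();
    _owner.store(std::this_thread::get_id());
    return true;
  }
  void unlock() {
    _owner.store(std::thread::id{});
    _m.unlock();
  }
};

// Guard in the spirit of AsyncWebLockGuard: it releases the lock only if it
// was the one that actually acquired it.
class LockGuardSketch {
  HolderTrackingLock *_l = nullptr;
public:
  explicit LockGuardSketch(HolderTrackingLock &l) { if (l.lock()) _l = &l; }
  ~LockGuardSketch() { if (_l) _l->unlock(); }
};

With a lock like this, the broadcast issued from inside onConnect simply proceeds under the lock the task already holds, and only the outermost guard releases it when _addClient() returns.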