Provide better names to event loop-related IConnection methods

This commit is contained in:
sangelovic
2020-02-02 22:22:26 +01:00
parent dee6adce02
commit 3a4f343fb9
13 changed files with 128 additions and 66 deletions

View File

@@ -62,12 +62,12 @@ public:
static void SetUpTestCase()
{
s_connection->requestName(INTERFACE_NAME);
-        s_connection->enterProcessingLoopAsync();
+        s_connection->enterEventLoopAsync();
}
static void TearDownTestCase()
{
-        s_connection->leaveProcessingLoop();
+        s_connection->leaveEventLoop();
s_connection->releaseName(INTERFACE_NAME);
}

View File

@@ -78,13 +78,13 @@ TEST(Connection, CannotReleaseNonrequestedName)
ASSERT_THROW(connection->releaseName("some.random.nonrequested.name"), sdbus::Error);
}
-TEST(Connection, CanEnterAndLeaveProcessingLoop)
+TEST(Connection, CanEnterAndLeaveEventLoop)
{
auto connection = sdbus::createConnection();
connection->requestName(INTERFACE_NAME);
-    std::thread t([&](){ connection->enterProcessingLoop(); });
-    connection->leaveProcessingLoop();
+    std::thread t([&](){ connection->enterEventLoop(); });
+    connection->leaveEventLoop();
t.join();

View File

@@ -97,5 +97,5 @@ int main(int /*argc*/, char */*argv*/[])
const char* objectPath = "/org/sdbuscpp/perftests";
PerftestAdaptor server(*connection, objectPath);
-    connection->enterProcessingLoop();
+    connection->enterEventLoop();
}

View File

@@ -392,7 +392,7 @@ int main(int argc, char *argv[])
{
CelsiusThermometerAdaptor thermometer(con, CELSIUS_THERMOMETER_OBJECT_PATH);
service2ThreadReady = true;
-        con.enterProcessingLoop();
+        con.enterEventLoop();
});
auto service1Connection = sdbus::createSystemBusConnection(SERVICE_1_BUS_NAME);
@@ -402,7 +402,7 @@ int main(int argc, char *argv[])
ConcatenatorAdaptor concatenator(con, CONCATENATOR_OBJECT_PATH);
FahrenheitThermometerAdaptor thermometer(con, FAHRENHEIT_THERMOMETER_OBJECT_PATH, false);
service1ThreadReady = true;
-        con.enterProcessingLoop();
+        con.enterEventLoop();
});
// Wait for both services to export their D-Bus objects
@@ -480,8 +480,8 @@ int main(int argc, char *argv[])
// We could run the loop in a sync way, but we want it to run also when proxies are destroyed for better
// coverage of multi-threaded scenarios, so we run it async and use condition variable for exit notification
-        //con.enterProcessingLoop();
-        con.enterProcessingLoopAsync();
+        //con.enterEventLoop();
+        con.enterEventLoopAsync();
std::unique_lock<std::mutex> lock(clientThreadExitMutex);
clientThreadExitCond.wait(lock, [&]{return clientThreadExit;});
@@ -493,17 +493,17 @@ int main(int argc, char *argv[])
std::this_thread::sleep_for(std::chrono::milliseconds(loopDuration));
-        //clientConnection->leaveProcessingLoop();
+        //clientConnection->leaveEventLoop();
std::unique_lock<std::mutex> lock(clientThreadExitMutex);
clientThreadExit = true;
lock.unlock();
clientThreadExitCond.notify_one();
clientThread.join();
-    service1Connection->leaveProcessingLoop();
+    service1Connection->leaveEventLoop();
service1Thread.join();
-    service2Connection->leaveProcessingLoop();
+    service2Connection->leaveEventLoop();
service2Thread.join();
}