Websocket close will automatically drain:

fix #642

This changes websocket::stream's close and async_close to automatically
read and discard incoming message frames before shutting down the
connection. It is no longer necessary to drain the connection manually.
commit 5940002c96
parent fa087e19f1
Author: Vinnie Falco
Date:   2017-08-13 11:55:18 -07:00

3 changed files with 192 additions and 48 deletions
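The user-facing effect, as a rough sketch (the function name shut_down, the stream type, and the error handling are illustrative assumptions, not taken from the commit):

#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/ip/tcp.hpp>

namespace websocket = boost::beast::websocket;
using tcp = boost::asio::ip::tcp;

// Sketch only: "ws" is assumed to hold an open websocket session.
void shut_down(websocket::stream<tcp::socket>& ws)
{
    boost::system::error_code ec;

    // Previously, after initiating the close, a caller typically kept
    // reading and discarding messages until the peer's close frame
    // arrived, e.g.:
    //
    //     ws.close(websocket::close_code::normal, ec);
    //     while(! ec)
    //     {
    //         boost::beast::multi_buffer buffer;
    //         ws.read(buffer, ec);
    //     }
    //     if(ec != websocket::error::closed)
    //         { /* report ec */ }

    // With this change, close() (and async_close) read and discard
    // incoming frames internally, so one call completes the handshake:
    ws.close(websocket::close_code::normal, ec);
    if(ec)
    {
        // report ec
    }
}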


@@ -13,6 +13,7 @@ WebSocket:
 * Refactor read_op
 * Refactor close_op
 * Refactor read_op + fail_op
+* Websocket close will automatically drain
 --------------------------------------------------------------------------------


@@ -10,6 +10,7 @@
 #ifndef BOOST_BEAST_WEBSOCKET_IMPL_CLOSE_IPP
 #define BOOST_BEAST_WEBSOCKET_IMPL_CLOSE_IPP
+#include <boost/beast/websocket/teardown.hpp>
 #include <boost/beast/core/handler_ptr.hpp>
 #include <boost/beast/core/flat_static_buffer.hpp>
 #include <boost/beast/core/type_traits.hpp>
@@ -39,9 +40,26 @@ template<class Handler>
 class stream<NextLayer>::close_op
     : public boost::asio::coroutine
 {
-    Handler h_;
-    stream<NextLayer>& ws_;
-    token tok_;
+    struct state
+    {
+        stream<NextLayer>& ws;
+        token tok;
+        detail::frame_streambuf fb;
+
+        state(
+            Handler&,
+            stream<NextLayer>& ws_,
+            close_reason const& cr)
+            : ws(ws_)
+            , tok(ws.t_.unique())
+        {
+            // Serialize the close frame
+            ws.template write_close<
+                flat_static_buffer_base>(fb, cr);
+        }
+    };
+
+    handler_ptr<state, Handler> d_;
 
 public:
     close_op(close_op&&) = default;
@@ -52,20 +70,8 @@ public:
         DeducedHandler&& h,
         stream<NextLayer>& ws,
         close_reason const& cr)
-        : h_(std::forward<DeducedHandler>(h))
-        , ws_(ws)
+        : d_(std::forward<DeducedHandler>(h), ws, cr)
     {
-        // Serialize the close frame
-        ws_.wr_.fb.reset();
-        ws_.template write_close<
-            flat_static_buffer_base>(
-                ws_.wr_.fb, cr);
-    }
-
-    Handler&
-    handler()
-    {
-        return h_;
     }
 
     void
@@ -79,7 +85,7 @@ public:
     {
         using boost::asio::asio_handler_allocate;
         return asio_handler_allocate(
-            size, std::addressof(op->h_));
+            size, std::addressof(op->d_.handler()));
     }
 
     friend
@@ -88,7 +94,7 @@ public:
     {
         using boost::asio::asio_handler_deallocate;
         asio_handler_deallocate(
-            p, size, std::addressof(op->h_));
+            p, size, std::addressof(op->d_.handler()));
     }
 
     friend
@@ -96,7 +102,7 @@ public:
     {
         using boost::asio::asio_handler_is_continuation;
         return asio_handler_is_continuation(
-            std::addressof(op->h_));
+            std::addressof(op->d_.handler()));
     }
 
     template<class Function>
@@ -105,7 +111,7 @@ public:
     {
         using boost::asio::asio_handler_invoke;
         asio_handler_invoke(
-            f, std::addressof(op->h_));
+            f, std::addressof(op->d_.handler()));
     }
 };
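For context on the hunks above: the refactor moves the per-operation data out of close_op's own members into a state struct owned by a handler_ptr, so the state is allocated through the handler's allocator and released before the final handler runs. A minimal sketch of that pattern, assuming the handler_ptr interface used by this revision of Beast (example_op and its message member are made-up names for illustration):

#include <boost/beast/core/handler_ptr.hpp>
#include <cstddef>
#include <string>
#include <utility>

template<class Handler>
class example_op
{
    // State shared across intermediate completions of the operation.
    struct state
    {
        std::string message;

        // handler_ptr constructs the state with a reference to the
        // final handler first, then the arguments passed to d_ below.
        state(Handler&, std::string s)
            : message(std::move(s))
        {
        }
    };

    boost::beast::handler_ptr<state, Handler> d_;

public:
    template<class DeducedHandler>
    example_op(DeducedHandler&& h, std::string s)
        : d_(std::forward<DeducedHandler>(h), std::move(s))
    {
    }

    void
    operator()()
    {
        auto& d = *d_;
        std::size_t const n = d.message.size();
        // invoke() destroys the state before calling the final handler,
        // so the handler may safely start new operations.
        d_.invoke(n);
    }
};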
@@ -113,21 +119,24 @@ template<class NextLayer>
 template<class Handler>
 void
 stream<NextLayer>::close_op<Handler>::
-operator()(error_code ec, std::size_t)
+operator()(error_code ec, std::size_t bytes_transferred)
 {
+    using beast::detail::clamp;
+    auto& d = *d_;
+    close_code code{};
     BOOST_ASIO_CORO_REENTER(*this)
     {
         // Maybe suspend
-        if(! ws_.wr_block_)
+        if(! d.ws.wr_block_)
         {
             // Acquire the write block
-            ws_.wr_block_ = tok_;
+            d.ws.wr_block_ = d.tok;
 
             // Make sure the stream is open
-            if(ws_.failed_)
+            if(d.ws.failed_)
             {
                 BOOST_ASIO_CORO_YIELD
-                ws_.get_io_service().post(
+                d.ws.get_io_service().post(
                     bind_handler(std::move(*this),
                         boost::asio::error::operation_aborted));
                 goto upcall;
@@ -136,21 +145,21 @@ operator()(error_code ec, std::size_t)
         else
         {
             // Suspend
-            BOOST_ASSERT(ws_.wr_block_ != tok_);
+            BOOST_ASSERT(d.ws.wr_block_ != d.tok);
             BOOST_ASIO_CORO_YIELD
-            ws_.close_op_.save(std::move(*this));
+            d.ws.close_op_.emplace(std::move(*this));
 
             // Acquire the write block
-            BOOST_ASSERT(! ws_.wr_block_);
-            ws_.wr_block_ = tok_;
+            BOOST_ASSERT(! d.ws.wr_block_);
+            d.ws.wr_block_ = d.tok;
 
             // Resume
             BOOST_ASIO_CORO_YIELD
-            ws_.get_io_service().post(std::move(*this));
-            BOOST_ASSERT(ws_.wr_block_ == tok_);
+            d.ws.get_io_service().post(std::move(*this));
+            BOOST_ASSERT(d.ws.wr_block_ == d.tok);
 
             // Make sure the stream is open
-            if(ws_.failed_)
+            if(d.ws.failed_)
             {
                 ec = boost::asio::error::operation_aborted;
                 goto upcall;
@@ -158,19 +167,158 @@ operator()(error_code ec, std::size_t)
         }
 
         // Send close frame
+        BOOST_ASSERT(! d.ws.wr_close_);
+        d.ws.wr_close_ = true;
         BOOST_ASIO_CORO_YIELD
-        boost::asio::async_write(ws_.stream_,
-            ws_.wr_.fb.data(), std::move(*this));
+        boost::asio::async_write(d.ws.stream_,
+            d.fb.data(), std::move(*this));
         if(ec)
-            ws_.failed_ = true;
+        {
+            d.ws.failed_ = true;
+            goto upcall;
+        }
+
+        if(d.ws.rd_close_)
+        {
+            // This happens when the read_op gets a close frame
+            // at the same time we are sending the close frame. The
+            // read_op will be suspended on the write block.
+            goto teardown;
+        }
+
+        if(! d.ws.rd_block_)
+        {
+            // Acquire the read block
+            d.ws.rd_block_ = d.tok;
+        }
+        else
+        {
+            // The read_op is currently running so it will see
+            // the close frame and call teardown. We will suspend
+            // to cause async_read to return error::closed, before
+            // we return error::success.
+
+            // Suspend
+            BOOST_ASSERT(d.ws.rd_block_ != d.tok);
+            BOOST_ASIO_CORO_YIELD
+            d.ws.rd_op_.emplace(std::move(*this));
+
+            // Acquire the read block
+            BOOST_ASSERT(! d.ws.rd_block_);
+            d.ws.rd_block_ = d.tok;
+
+            // Resume
+            BOOST_ASIO_CORO_YIELD
+            d.ws.get_io_service().post(std::move(*this));
+            BOOST_ASSERT(d.ws.rd_block_ == d.tok);
+
+            // Handle the stream closing while suspended
+            if(d.ws.failed_)
+            {
+                ec = boost::asio::error::operation_aborted;
+                goto upcall;
+            }
+        }
+
+        // Drain
+        if(d.ws.rd_.remain > 0)
+            goto read_payload;
+        for(;;)
+        {
+            // Read frame header
+            while(! d.ws.parse_fh(
+                d.ws.rd_.fh, d.ws.rd_.buf, code))
+            {
+                if(code != close_code::none)
+                    break;
+                BOOST_ASIO_CORO_YIELD
+                d.ws.stream_.async_read_some(
+                    d.ws.rd_.buf.prepare(read_size(d.ws.rd_.buf,
+                        d.ws.rd_.buf.max_size())),
+                            std::move(*this));
+                if(ec)
+                {
+                    d.ws.failed_ = true;
+                    goto upcall;
+                }
+                d.ws.rd_.buf.commit(bytes_transferred);
+            }
+            if(detail::is_control(d.ws.rd_.fh.op))
+            {
+                // Process control frame
+                if(d.ws.rd_.fh.op == detail::opcode::close)
+                {
+                    BOOST_ASSERT(! d.ws.rd_close_);
+                    d.ws.rd_close_ = true;
+                    auto const mb = buffer_prefix(
+                        clamp(d.ws.rd_.fh.len),
+                            d.ws.rd_.buf.data());
+                    if(d.ws.rd_.fh.len > 0 && d.ws.rd_.fh.mask)
+                        detail::mask_inplace(mb, d.ws.rd_.key);
+                    detail::read_close(d.ws.cr_, mb, code);
+                    if(code != close_code::none)
+                    {
+                        // Protocol error
+                        goto upcall;
+                    }
+                    d.ws.rd_.buf.consume(clamp(d.ws.rd_.fh.len));
+                    break;
+                }
+                d.ws.rd_.buf.consume(clamp(d.ws.rd_.fh.len));
+            }
+            else
+            {
+            read_payload:
+                while(d.ws.rd_.buf.size() < d.ws.rd_.remain)
+                {
+                    d.ws.rd_.remain -= d.ws.rd_.buf.size();
+                    d.ws.rd_.buf.consume(d.ws.rd_.buf.size());
+                    BOOST_ASIO_CORO_YIELD
+                    d.ws.stream_.async_read_some(
+                        d.ws.rd_.buf.prepare(read_size(d.ws.rd_.buf,
+                            d.ws.rd_.buf.max_size())),
+                                std::move(*this));
+                    if(ec)
+                    {
+                        d.ws.failed_ = true;
+                        goto upcall;
+                    }
+                    d.ws.rd_.buf.commit(bytes_transferred);
+                }
+                BOOST_ASSERT(d.ws.rd_.buf.size() >= d.ws.rd_.remain);
+                d.ws.rd_.buf.consume(clamp(d.ws.rd_.remain));
+                d.ws.rd_.remain = 0;
+            }
+        }
+
+    teardown:
+        // Teardown
+        BOOST_ASSERT(d.ws.wr_block_ == d.tok);
+        using beast::websocket::async_teardown;
+        BOOST_ASIO_CORO_YIELD
+        async_teardown(d.ws.role_,
+            d.ws.stream_, std::move(*this));
+        BOOST_ASSERT(d.ws.wr_block_ == d.tok);
+        if(ec == boost::asio::error::eof)
+        {
+            // Rationale:
+            // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
+            ec.assign(0, ec.category());
+        }
+        d.ws.failed_ = true;
+
     upcall:
-        BOOST_ASSERT(ws_.wr_block_ == tok_);
-        ws_.wr_block_.reset();
-        ws_.rd_op_.maybe_invoke() ||
-            ws_.ping_op_.maybe_invoke() ||
-            ws_.wr_op_.maybe_invoke();
-        h_(ec);
+        BOOST_ASSERT(d.ws.wr_block_ == d.tok);
+        d.ws.wr_block_.reset();
+        if(d.ws.rd_block_)
+        {
+            BOOST_ASSERT(d.ws.rd_block_ = d.tok);
+            d.ws.r_rd_op_.maybe_invoke();
+        }
+        d.ws.rd_op_.maybe_invoke() ||
+            d.ws.ping_op_.maybe_invoke() ||
+            d.ws.wr_op_.maybe_invoke();
+        d_.invoke(ec);
     }
 }
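The suspend/resume sequence above exists so that, when the caller also has a read outstanding, the async_read completes with error::closed before async_close completes with success. A hedged sketch of that ordering from the caller's point of view (handler signatures follow the current public interface; the names ws and buffer are assumptions):

#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <boost/asio/ip/tcp.hpp>

namespace websocket = boost::beast::websocket;
using tcp = boost::asio::ip::tcp;

// Sketch only: ws holds an open session; buffer outlives both operations.
void close_with_pending_read(
    websocket::stream<tcp::socket>& ws,
    boost::beast::multi_buffer& buffer)
{
    ws.async_read(buffer,
        [](boost::system::error_code ec, std::size_t)
        {
            // Completes first: once the close handshake finishes, the
            // pending read is delivered with websocket::error::closed.
        });

    ws.async_close(websocket::close_code::normal,
        [](boost::system::error_code ec)
        {
            // Completes afterwards: ec reflects the result of the
            // handshake and the transport teardown.
        });
}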
@@ -205,13 +353,7 @@ close(close_reason const& cr, error_code& ec)
     }
 
     // If rd_close_ is set then we already sent a close
     BOOST_ASSERT(! rd_close_);
-    if(wr_close_)
-    {
-        // Can't call close twice, abort operation
-        BOOST_ASSERT(! wr_close_);
-        ec = boost::asio::error::operation_aborted;
-        return;
-    }
+    BOOST_ASSERT(! wr_close_);
     wr_close_ = true;
     {
         detail::frame_streambuf fb;


@@ -560,6 +560,7 @@ operator()(
     upcall:
         BOOST_ASSERT(ws_.rd_block_ == tok_);
         ws_.rd_block_.reset();
+        ws_.r_close_op_.maybe_invoke();
         ws_.close_op_.maybe_invoke() ||
            ws_.ping_op_.maybe_invoke() ||
            ws_.wr_op_.maybe_invoke();