Allow close, ping, and write to happen concurrently

Vinnie Falco
2017-06-29 11:36:14 -07:00
parent 4ff3b524c6
commit 6496aa89ee
6 changed files with 405 additions and 439 deletions

View File

@@ -7,6 +7,9 @@ WebSockets:
* Fine tune websocket op asserts
* Refactor websocket composed ops
* Allow close, ping, and write to happen concurrently
* Fix race in websocket read op
* Fix websocket write op
--------------------------------------------------------------------------------
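The heart of this change is a single write-block token plus one parked-op slot per kind of operation: whichever op owns wr_block_ may write to the stream, and any other op that needs to write parks itself in its own slot until the owner's upcall resumes it. A minimal sketch of the scheme, assuming simplified types: only the member names (wr_block_, rd_op_, wr_op_, ping_op_, close_op_) and the emplace/maybe_invoke interface mirror the diffs below, and this toy pausation stands in for Beast's real detail::pausation:

    #include <functional>
    #include <utility>

    struct pausation
    {
        std::function<void()> f_;

        template<class F>
        void emplace(F&& f)
        {
            f_ = std::forward<F>(f);    // park a suspended op
        }

        bool maybe_invoke()
        {
            if(! f_)
                return false;
            auto f = std::move(f_);
            f_ = nullptr;
            f();                        // resume exactly one parked op
            return true;
        }
    };

    struct stream_state
    {
        void* wr_block_ = nullptr;      // op currently writing
        pausation rd_op_;               // paused read op
        pausation wr_op_;               // paused write op
        pausation ping_op_;             // paused ping op
        pausation close_op_;            // paused close op, new in this commit
    };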

View File

@@ -125,7 +125,7 @@ operator()(error_code ec, std::size_t)
{
// suspend
d.state = 1;
d.ws.wr_op_.emplace(std::move(*this));
d.ws.close_op_.emplace(std::move(*this));
return;
}
d.ws.wr_block_ = &d;
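Before this change a suspending close op parked itself in wr_op_, the same slot a suspended write op uses, so the two could not wait at the same time; the new close_op_ slot removes that collision. A usage sketch, reusing the stream_state model from the sketch above (the lambda stands in for the real parked op):

    #include <cassert>

    int main()
    {
        stream_state ws;
        int writer;                 // stand-in for the active write op
        ws.wr_block_ = &writer;     // a write owns the block

        bool close_resumed = false;
        ws.close_op_.emplace([&]{ close_resumed = true; });

        // The writer's upcall: release the block, then resume at
        // most one parked op, trying the close op first.
        ws.wr_block_ = nullptr;
        ws.close_op_.maybe_invoke() ||
            ws.rd_op_.maybe_invoke() ||
            ws.ping_op_.maybe_invoke();
        assert(close_resumed);
    }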

View File

@@ -133,10 +133,9 @@ operator()(error_code ec, std::size_t)
if(d.ws.failed_ || d.ws.wr_close_)
{
// call handler
d.ws.get_io_service().post(
return d.ws.get_io_service().post(
bind_handler(std::move(*this),
boost::asio::error::operation_aborted));
return;
}
do_write:
@@ -176,6 +175,7 @@ operator()(error_code ec, std::size_t)
upcall:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
d.ws.close_op_.maybe_invoke() ||
d.ws.rd_op_.maybe_invoke() ||
d.ws.wr_op_.maybe_invoke();
d_.invoke(ec);
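When the parked ping op resumes only to find the stream failed or a close frame already sent, it must still deliver operation_aborted through the io_service rather than invoke the handler inline; folding the call into one `return post(...)` statement is legal because post() returns void here. A standalone sketch of the idiom, assuming the io_service API of the Boost.Asio versions this code targeted:

    #include <boost/asio/error.hpp>
    #include <boost/asio/io_service.hpp>
    #include <boost/system/error_code.hpp>
    #include <iostream>

    void resume_aborted(boost::asio::io_service& ios)
    {
        // Deliver the final result through the executor, never
        // inline from the resuming op's stack frame.
        return ios.post(
            []
            {
                boost::system::error_code ec =
                    boost::asio::error::operation_aborted;
                std::cout << ec.message() << "\n";
            });
    }

    int main()
    {
        boost::asio::io_service ios;
        resume_aborted(ios);
        ios.run();
    }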

View File

@@ -81,7 +81,6 @@ public:
: d_(std::forward<DeducedHandler>(h),
ws, std::forward<Args>(args)...)
{
(*this)(error_code{}, 0, false);
}
void operator()()
@@ -165,9 +164,8 @@ operator()(error_code ec,
do_control_payload = 8,
do_control = 9,
do_pong_resume = 10,
do_pong = 12,
do_ponged = 12,
do_close_resume = 14,
do_close = 16,
do_teardown = 17,
do_fail = 19,
@@ -221,7 +219,7 @@ operator()(error_code ec,
d.remain = d.fh.len;
if(d.fh.mask)
detail::prepare_key(d.key, d.fh.key);
// fall through
BEAST_FALLTHROUGH;
case do_read_payload + 1:
d.state = do_read_payload + 2;
@@ -452,15 +450,15 @@ operator()(error_code ec,
if(d.ws.wr_block_)
{
// suspend
d.state = do_pong_resume;
BOOST_ASSERT(d.ws.wr_block_ != &d);
d.state = do_pong_resume;
d.ws.rd_op_.emplace(std::move(*this));
return;
}
d.state = do_pong;
break;
d.ws.wr_block_ = &d;
goto go_pong;
}
else if(d.fh.op == detail::opcode::pong)
if(d.fh.op == detail::opcode::pong)
{
code = close_code::none;
ping_data payload;
@@ -496,12 +494,13 @@ operator()(error_code ec,
if(d.ws.wr_block_)
{
// suspend
BOOST_ASSERT(d.ws.wr_block_ != &d);
d.state = do_close_resume;
d.ws.rd_op_.emplace(std::move(*this));
return;
}
d.state = do_close;
break;
d.ws.wr_block_ = &d;
goto go_close;
}
d.state = do_teardown;
break;
@@ -513,48 +512,47 @@ operator()(error_code ec,
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
d.state = do_pong_resume + 1;
// The current context is safe but might not be
// the same as the one for this operation (since
// we are being called from a write operation).
// Call post to make sure we are invoked the same
// way as the final handler for this operation.
d.ws.get_io_service().post(bind_handler(
std::move(*this), ec, bytes_transferred));
std::move(*this), ec, 0));
return;
case do_pong_resume + 1:
BOOST_ASSERT(d.ws.wr_block_ == &d);
if(d.ws.failed_)
{
// call handler
ec = boost::asio::error::operation_aborted;
goto upcall;
}
BEAST_FALLTHROUGH;
//------------------------------------------------------------------
case do_pong:
if(d.ws.wr_close_)
{
// ignore ping when closing
if(d.ws.wr_block_)
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
}
d.fb.consume(d.fb.size());
d.state = do_read_fh;
break;
}
BEAST_FALLTHROUGH;
//------------------------------------------------------------------
go_pong:
// send pong
if(! d.ws.wr_block_)
d.ws.wr_block_ = &d;
else
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.state = do_pong + 1;
d.state = do_ponged;
boost::asio::async_write(d.ws.stream_,
d.fb.data(), std::move(*this));
return;
case do_pong + 1:
case do_ponged:
d.ws.wr_block_ = nullptr;
d.fb.consume(d.fb.size());
d.state = do_read_fh;
d.ws.wr_block_ = nullptr;
break;
//------------------------------------------------------------------
@@ -582,19 +580,15 @@ operator()(error_code ec,
}
if(d.ws.wr_close_)
{
// call handler
// already sent a close frame
ec = error::closed;
goto upcall;
}
d.state = do_close;
break;
BEAST_FALLTHROUGH;
//------------------------------------------------------------------
case do_close:
if(! d.ws.wr_block_)
d.ws.wr_block_ = &d;
else
go_close:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.state = do_teardown;
d.ws.wr_close_ = true;
@@ -629,34 +623,45 @@ operator()(error_code ec,
if(d.ws.wr_block_)
{
// suspend
BOOST_ASSERT(d.ws.wr_block_ != &d);
d.state = do_fail + 2;
d.ws.rd_op_.emplace(std::move(*this));
return;
}
// fall through
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_fail + 1:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.failed_ = true;
// send close frame
d.state = do_fail + 4;
d.ws.wr_close_ = true;
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
boost::asio::async_write(d.ws.stream_,
d.fb.data(), std::move(*this));
return;
case do_fail + 2:
// resume
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
d.state = do_fail + 3;
// The current context is safe but might not be
// the same as the one for this operation (since
// we are being called from a write operation).
// Call post to make sure we are invoked the same
// way as the final handler for this operation.
d.ws.get_io_service().post(bind_handler(
std::move(*this), ec, bytes_transferred));
return;
case do_fail + 3:
if(d.ws.failed_)
BOOST_ASSERT(d.ws.wr_block_ == &d);
if(d.ws.failed_ || d.ws.wr_close_)
{
d.state = do_fail + 5;
break;
// call handler
ec = error::failed;
goto upcall;
}
d.state = do_fail + 1;
break;
@@ -683,6 +688,7 @@ operator()(error_code ec,
upcall:
if(d.ws.wr_block_ == &d)
d.ws.wr_block_ = nullptr;
d.ws.close_op_.maybe_invoke() ||
d.ws.ping_op_.maybe_invoke() ||
d.ws.wr_op_.maybe_invoke();
bool const fin = (! ec) ? d.fh.fin : false;
@@ -704,7 +710,8 @@ async_read_frame(DynamicBuffer& buffer, ReadHandler&& handler)
void(error_code, bool)> init{handler};
read_frame_op<DynamicBuffer, handler_type<
ReadHandler, void(error_code, bool)>>{
init.completion_handler,*this, buffer};
init.completion_handler,*this, buffer}(
{}, 0, false);
return init.result.get();
}
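The do_pong_resume and do_fail + 2 cases above re-post the op before continuing, for the reason the diff's comment gives: a resumed op is running inside some other op's completion handler, which is a safe context but not necessarily the one its own final handler must be invoked from. A standalone sketch of that re-post step, with a lambda standing in for beast's bind_handler:

    #include <boost/asio/io_service.hpp>
    #include <boost/system/error_code.hpp>
    #include <cstddef>
    #include <iostream>

    struct read_op_sketch
    {
        boost::asio::io_service& ios;

        // Called directly by whichever op resumed us.
        void resume(boost::system::error_code ec)
        {
            // Hop back through the io_service so this op is always
            // invoked the same way as its final handler.
            ios.post([self = *this, ec]() mutable { self(ec, 0); });
        }

        void operator()(boost::system::error_code ec, std::size_t)
        {
            std::cout << "continuing, ec=" << ec.message() << "\n";
        }
    };

    int main()
    {
        boost::asio::io_service ios;
        read_op_sketch op{ios};
        op.resume({});
        ios.run();
    }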

View File

@@ -70,24 +70,16 @@ public:
: d_(std::forward<DeducedHandler>(h),
ws, std::forward<Args>(args)...)
{
(*this)(error_code{}, 0, false);
}
void operator()()
{
(*this)(error_code{}, 0, true);
}
void operator()(error_code const& ec)
{
(*this)(ec, 0, true);
(*this)({}, 0, true);
}
void operator()(error_code ec,
std::size_t bytes_transferred);
void operator()(error_code ec,
std::size_t bytes_transferred, bool again);
std::size_t bytes_transferred,
bool again = true);
friend
void* asio_handler_allocate(
@@ -123,19 +115,6 @@ public:
}
};
template<class NextLayer>
template<class Buffers, class Handler>
void
stream<NextLayer>::
write_frame_op<Buffers, Handler>::
operator()(error_code ec, std::size_t bytes_transferred)
{
auto& d = *d_;
if(ec)
d.ws.failed_ = true;
(*this)(ec, bytes_transferred, true);
}
template<class NextLayer>
template<class Buffers, class Handler>
void
@@ -162,9 +141,12 @@ operator()(error_code ec,
auto& d = *d_;
d.cont = d.cont || again;
if(ec)
goto upcall;
for(;;)
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.failed_ = true;
goto upcall;
}
loop:
switch(d.step)
{
case do_init:
@@ -225,17 +207,11 @@ operator()(error_code ec,
}
}
d.step = do_maybe_suspend;
break;
goto loop;
//----------------------------------------------------------------------
case do_nomask_nofrag:
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_nomask_nofrag + 1:
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.fh.fin = d.fin;
d.fh.len = buffer_size(d.cb);
@@ -244,20 +220,14 @@ operator()(error_code ec,
d.ws.wr_.cont = ! d.fin;
// Send frame
d.step = do_upcall;
boost::asio::async_write(d.ws.stream_,
return boost::asio::async_write(d.ws.stream_,
buffer_cat(d.fh_buf.data(), d.cb),
std::move(*this));
return;
}
//----------------------------------------------------------------------
go_nomask_frag:
case do_nomask_frag:
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_nomask_frag + 1:
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
auto const n = clamp(
@@ -270,42 +240,36 @@ operator()(error_code ec,
d.ws.wr_.cont = ! d.fin;
// Send frame
d.step = d.remain == 0 ?
do_upcall : do_nomask_frag + 2;
boost::asio::async_write(d.ws.stream_,
buffer_cat(d.fh_buf.data(),
buffer_prefix(n, d.cb)),
std::move(*this));
return;
do_upcall : do_nomask_frag + 1;
return boost::asio::async_write(
d.ws.stream_, buffer_cat(
d.fh_buf.data(), buffer_prefix(
n, d.cb)), std::move(*this));
}
case do_nomask_frag + 2:
case do_nomask_frag + 1:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
d.cb.consume(
bytes_transferred - d.fh_buf.size());
d.fh_buf.consume(d.fh_buf.size());
d.fh.op = detail::opcode::cont;
if(d.ws.wr_block_ == &d)
d.ws.wr_block_ = nullptr;
// Allow outgoing control frames to
// be sent in between message frames:
if(d.ws.rd_op_.maybe_invoke() ||
// be sent in between message frames
if( d.ws.close_op_.maybe_invoke() ||
d.ws.rd_op_.maybe_invoke() ||
d.ws.ping_op_.maybe_invoke())
{
d.step = do_maybe_suspend;
d.ws.get_io_service().post(
return d.ws.get_io_service().post(
std::move(*this));
return;
}
d.step = d.entry_state;
break;
d.ws.wr_block_ = &d;
goto go_nomask_frag;
//----------------------------------------------------------------------
case do_mask_nofrag:
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_mask_nofrag + 1:
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.remain = buffer_size(d.cb);
@@ -325,14 +289,13 @@ operator()(error_code ec,
d.ws.wr_.cont = ! d.fin;
// Send frame header and partial payload
d.step = d.remain == 0 ?
do_upcall : do_mask_nofrag + 2;
boost::asio::async_write(d.ws.stream_,
buffer_cat(d.fh_buf.data(), b),
std::move(*this));
return;
do_upcall : do_mask_nofrag + 1;
return boost::asio::async_write(
d.ws.stream_, buffer_cat(d.fh_buf.data(),
b), std::move(*this));
}
case do_mask_nofrag + 2:
case do_mask_nofrag + 1:
{
d.cb.consume(d.ws.wr_.buf_size);
auto const n =
@@ -342,22 +305,17 @@ operator()(error_code ec,
buffer_copy(b, d.cb);
detail::mask_inplace(b, d.key);
d.remain -= n;
// Send parial payload
// Send partial payload
if(d.remain == 0)
d.step = do_upcall;
boost::asio::async_write(
return boost::asio::async_write(
d.ws.stream_, b, std::move(*this));
return;
}
//----------------------------------------------------------------------
go_mask_frag:
case do_mask_frag:
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_mask_frag + 1:
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
auto const n = clamp(
@@ -376,23 +334,24 @@ operator()(error_code ec,
d.ws.wr_.cont = ! d.fin;
// Send frame
d.step = d.remain == 0 ?
do_upcall : do_mask_frag + 2;
boost::asio::async_write(d.ws.stream_,
buffer_cat(d.fh_buf.data(), b),
do_upcall : do_mask_frag + 1;
return boost::asio::async_write(
d.ws.stream_, buffer_cat(
d.fh_buf.data(), b),
std::move(*this));
return;
}
case do_mask_frag + 2:
case do_mask_frag + 1:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
d.cb.consume(
bytes_transferred - d.fh_buf.size());
d.fh_buf.consume(d.fh_buf.size());
d.fh.op = detail::opcode::cont;
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
// Allow outgoing control frames to
// be sent in between message frames:
if(d.ws.rd_op_.maybe_invoke() ||
if( d.ws.close_op_.maybe_invoke() ||
d.ws.rd_op_.maybe_invoke() ||
d.ws.ping_op_.maybe_invoke())
{
d.step = do_maybe_suspend;
@@ -400,17 +359,13 @@ operator()(error_code ec,
std::move(*this));
return;
}
d.step = d.entry_state;
break;
d.ws.wr_block_ = &d;
goto go_mask_frag;
//----------------------------------------------------------------------
go_deflate:
case do_deflate:
BOOST_ASSERT(! d.ws.wr_block_);
d.ws.wr_block_ = &d;
BEAST_FALLTHROUGH;
case do_deflate + 1:
{
BOOST_ASSERT(d.ws.wr_block_ == &d);
auto b = buffer(d.ws.wr_.buf.get(),
@@ -451,21 +406,22 @@ operator()(error_code ec,
d.ws.wr_.cont = ! d.fin;
// Send frame
d.step = more ?
do_deflate + 2 : do_deflate + 3;
do_deflate + 1 : do_deflate + 2;
boost::asio::async_write(d.ws.stream_,
buffer_cat(fh_buf.data(), b),
std::move(*this));
return;
}
case do_deflate + 2:
d.fh.op = detail::opcode::cont;
d.fh.rsv1 = false;
case do_deflate + 1:
BOOST_ASSERT(d.ws.wr_block_ == &d);
d.ws.wr_block_ = nullptr;
d.fh.op = detail::opcode::cont;
d.fh.rsv1 = false;
// Allow outgoing control frames to
// be sent in between message frames:
if(d.ws.rd_op_.maybe_invoke() ||
if( d.ws.close_op_.maybe_invoke() ||
d.ws.rd_op_.maybe_invoke() ||
d.ws.ping_op_.maybe_invoke())
{
d.step = do_maybe_suspend;
@@ -473,10 +429,11 @@ operator()(error_code ec,
std::move(*this));
return;
}
d.step = d.entry_state;
break;
d.ws.wr_block_ = &d;
goto go_deflate;
case do_deflate + 3:
case do_deflate + 2:
BOOST_ASSERT(d.ws.wr_block_ == &d);
if(d.fh.fin && (
(d.ws.role_ == role_type::client &&
d.ws.pmd_config_.client_no_context_takeover) ||
@@ -488,26 +445,24 @@ operator()(error_code ec,
//----------------------------------------------------------------------
case do_maybe_suspend:
{
if(d.ws.wr_block_)
{
// suspend
BOOST_ASSERT(d.ws.wr_block_ != &d);
d.step = do_maybe_suspend + 1;
d.ws.wr_op_.emplace(std::move(*this));
return;
}
d.ws.wr_block_ = &d;
if(d.ws.failed_ || d.ws.wr_close_)
{
// call handler
d.step = do_upcall;
d.ws.get_io_service().post(
return d.ws.get_io_service().post(
bind_handler(std::move(*this),
boost::asio::error::operation_aborted));
return;
boost::asio::error::operation_aborted, 0));
}
d.step = d.entry_state;
break;
}
goto loop;
case do_maybe_suspend + 1:
BOOST_ASSERT(! d.ws.wr_block_);
@@ -519,7 +474,7 @@ operator()(error_code ec,
// Call post to make sure we are invoked the same
// way as the final handler for this operation.
d.ws.get_io_service().post(bind_handler(
std::move(*this), ec));
std::move(*this), ec, 0));
return;
case do_maybe_suspend + 2:
@@ -530,18 +485,18 @@ operator()(error_code ec,
ec = boost::asio::error::operation_aborted;
goto upcall;
}
d.step = d.entry_state + 1;
break;
d.step = d.entry_state;
goto loop;
//----------------------------------------------------------------------
case do_upcall:
goto upcall;
}
}
upcall:
if(d.ws.wr_block_ == &d)
d.ws.wr_block_ = nullptr;
d.ws.close_op_.maybe_invoke() ||
d.ws.rd_op_.maybe_invoke() ||
d.ws.ping_op_.maybe_invoke();
d_.invoke(ec);
@@ -901,7 +856,7 @@ async_write_frame(bool fin,
void(error_code)> init{handler};
write_frame_op<ConstBufferSequence, handler_type<
WriteHandler, void(error_code)>>{init.completion_handler,
*this, fin, bs};
*this, fin, bs}({}, 0, false);
return init.result.get();
}
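The fragment cases above (do_nomask_frag + 1, do_mask_frag + 1, do_deflate + 1) all end the same way: between frames the write op gives the block away so a parked close, read, or ping op can run, and takes it straight back if nothing was waiting. A condensed sketch of that step, again reusing the stream_state model from the first sketch:

    #include <cassert>

    // Returns true if a parked op took over, in which case the write
    // op must go through do_maybe_suspend to re-acquire the block.
    bool yield_between_frames(stream_state& ws, void* self)
    {
        assert(ws.wr_block_ == self);
        ws.wr_block_ = nullptr;
        if( ws.close_op_.maybe_invoke() ||
            ws.rd_op_.maybe_invoke() ||
            ws.ping_op_.maybe_invoke())
            return true;        // someone ran; re-acquire later
        ws.wr_block_ = self;    // nothing parked: keep writing
        return false;
    }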

View File

@@ -150,9 +150,10 @@ class stream
op* wr_block_; // op currently writing
ping_data* ping_data_; // where to put the payload
detail::pausation rd_op_; // parked read op
detail::pausation wr_op_; // parked write op
detail::pausation ping_op_; // parked ping op
detail::pausation rd_op_; // paused read op
detail::pausation wr_op_; // paused write op
detail::pausation ping_op_; // paused ping op
detail::pausation close_op_; // paused close op
close_reason cr_; // set from received close frame
// State information for the message being received