Refactor stream operations and tests plus coverage

This commit is contained in:
Vinnie Falco
2017-08-31 17:52:09 -07:00
parent 9f089c2a33
commit 51a5a36118
22 changed files with 1290 additions and 354 deletions

View File

@ -3,6 +3,7 @@ Version 111:
WebSocket: WebSocket:
* Fix utf8 check split code point at buffer end * Fix utf8 check split code point at buffer end
* Refactor stream operations and tests plus coverage
-------------------------------------------------------------------------------- --------------------------------------------------------------------------------

View File

@ -187,6 +187,12 @@ public:
void void
save(F&& f); save(F&& f);
explicit
operator bool() const
{
return base_ != nullptr;
}
bool bool
maybe_invoke() maybe_invoke()
{ {

View File

@ -57,14 +57,17 @@ parse_bits(string_view s)
return -1; return -1;
if(s[0] < '1' || s[0] > '9') if(s[0] < '1' || s[0] > '9')
return -1; return -1;
int i = 0; unsigned i = 0;
for(auto c : s) for(auto c : s)
{ {
if(c < '0' || c > '9') if(c < '0' || c > '9')
return -1; return -1;
auto const i0 = i;
i = 10 * i + (c - '0'); i = 10 * i + (c - '0');
if(i < i0)
return -1;
} }
return i; return static_cast<int>(i);
} }
// Parse permessage-deflate request fields // Parse permessage-deflate request fields
@ -354,43 +357,6 @@ pmd_normalize(pmd_offer& offer)
//-------------------------------------------------------------------- //--------------------------------------------------------------------
// Decompress into a DynamicBuffer
//
template<class InflateStream, class DynamicBuffer>
void
inflate(
InflateStream& zi,
DynamicBuffer& buffer,
boost::asio::const_buffer const& in,
error_code& ec)
{
using boost::asio::buffer_cast;
using boost::asio::buffer_size;
zlib::z_params zs;
zs.avail_in = buffer_size(in);
zs.next_in = buffer_cast<void const*>(in);
for(;;)
{
// VFALCO we could be smarter about the size
auto const bs = buffer.prepare(
read_size_or_throw(buffer, 65536));
auto const out = *bs.begin();
zs.avail_out = buffer_size(out);
zs.next_out = buffer_cast<void*>(out);
zi.write(zs, zlib::Flush::sync, ec);
buffer.commit(zs.total_out);
zs.total_out = 0;
if( ec == zlib::error::need_buffers ||
ec == zlib::error::end_of_stream)
{
ec.assign(0, ec.category());
break;
}
if(ec)
return;
}
}
// Compress a buffer sequence // Compress a buffer sequence
// Returns: `true` if more calls are needed // Returns: `true` if more calls are needed
// //

View File

@ -22,8 +22,6 @@
#include <boost/throw_exception.hpp> #include <boost/throw_exception.hpp>
#include <memory> #include <memory>
#include <iostream>
namespace boost { namespace boost {
namespace beast { namespace beast {
namespace websocket { namespace websocket {
@ -46,7 +44,7 @@ class stream<NextLayer>::close_op
detail::frame_buffer fb; detail::frame_buffer fb;
error_code ev; error_code ev;
token tok; token tok;
bool cont; bool cont = false;
state( state(
Handler&, Handler&,
@ -121,7 +119,8 @@ public:
template<class NextLayer> template<class NextLayer>
template<class Handler> template<class Handler>
void void
stream<NextLayer>::close_op<Handler>:: stream<NextLayer>::
close_op<Handler>::
operator()( operator()(
error_code ec, error_code ec,
std::size_t bytes_transferred, std::size_t bytes_transferred,
@ -140,7 +139,7 @@ operator()(
d.ws.wr_block_ = d.tok; d.ws.wr_block_ = d.tok;
// Make sure the stream is open // Make sure the stream is open
if(d.ws.check_fail(ec)) if(! d.ws.check_open(ec))
goto upcall; goto upcall;
} }
else else
@ -160,29 +159,33 @@ operator()(
BOOST_ASSERT(d.ws.wr_block_ == d.tok); BOOST_ASSERT(d.ws.wr_block_ == d.tok);
// Make sure the stream is open // Make sure the stream is open
if(d.ws.check_fail(ec)) if(! d.ws.check_open(ec))
goto upcall; goto upcall;
} }
// Can't call close twice // Can't call close twice
BOOST_ASSERT(! d.ws.wr_close_); BOOST_ASSERT(! d.ws.wr_close_);
// Change status to closing
BOOST_ASSERT(d.ws.status_ == status::open);
d.ws.status_ = status::closing;
// Send close frame // Send close frame
d.ws.wr_close_ = true; d.ws.wr_close_ = true;
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(d.ws.stream_, boost::asio::async_write(d.ws.stream_,
d.fb.data(), std::move(*this)); d.fb.data(), std::move(*this));
if(d.ws.check_fail(ec)) if(! d.ws.check_ok(ec))
goto upcall; goto upcall;
if(d.ws.rd_close_) if(d.ws.rd_close_)
{ {
// This happens when the read_op gets a close frame // This happens when the read_op gets a close frame
// at the same time we are sending the close frame. The // at the same time close_op is sending the close frame.
// read_op will be suspended on the write block. // The read_op will be suspended on the write block.
goto teardown; goto teardown;
} }
// Maybe suspend // Maybe suspend
if(! d.ws.rd_block_) if(! d.ws.rd_block_)
{ {
@ -206,7 +209,9 @@ operator()(
BOOST_ASSERT(d.ws.rd_block_ == d.tok); BOOST_ASSERT(d.ws.rd_block_ == d.tok);
// Make sure the stream is open // Make sure the stream is open
if(d.ws.check_fail(ec)) BOOST_ASSERT(d.ws.status_ != status::open);
BOOST_ASSERT(d.ws.status_ != status::closed);
if( d.ws.status_ == status::failed)
goto upcall; goto upcall;
BOOST_ASSERT(! d.ws.rd_close_); BOOST_ASSERT(! d.ws.rd_close_);
@ -231,7 +236,7 @@ operator()(
d.ws.rd_buf_.prepare(read_size(d.ws.rd_buf_, d.ws.rd_buf_.prepare(read_size(d.ws.rd_buf_,
d.ws.rd_buf_.max_size())), d.ws.rd_buf_.max_size())),
std::move(*this)); std::move(*this));
if(d.ws.check_fail(ec)) if(! d.ws.check_ok(ec))
goto upcall; goto upcall;
d.ws.rd_buf_.commit(bytes_transferred); d.ws.rd_buf_.commit(bytes_transferred);
} }
@ -271,7 +276,7 @@ operator()(
d.ws.rd_buf_.prepare(read_size(d.ws.rd_buf_, d.ws.rd_buf_.prepare(read_size(d.ws.rd_buf_,
d.ws.rd_buf_.max_size())), d.ws.rd_buf_.max_size())),
std::move(*this)); std::move(*this));
if(d.ws.check_fail(ec)) if(! d.ws.check_ok(ec))
goto upcall; goto upcall;
d.ws.rd_buf_.commit(bytes_transferred); d.ws.rd_buf_.commit(bytes_transferred);
} }
@ -289,7 +294,6 @@ operator()(
async_teardown(d.ws.role_, async_teardown(d.ws.role_,
d.ws.stream_, std::move(*this)); d.ws.stream_, std::move(*this));
BOOST_ASSERT(d.ws.wr_block_ == d.tok); BOOST_ASSERT(d.ws.wr_block_ == d.tok);
BOOST_ASSERT(d.ws.open_);
if(ec == boost::asio::error::eof) if(ec == boost::asio::error::eof)
{ {
// Rationale: // Rationale:
@ -298,7 +302,11 @@ operator()(
} }
if(! ec) if(! ec)
ec = d.ev; ec = d.ev;
d.ws.open_ = false; if(ec)
d.ws.status_ = status::failed;
else
d.ws.status_ = status::closed;
d.ws.close();
upcall: upcall:
BOOST_ASSERT(d.ws.wr_block_ == d.tok); BOOST_ASSERT(d.ws.wr_block_ == d.tok);
@ -346,7 +354,7 @@ close(close_reason const& cr, error_code& ec)
using beast::detail::clamp; using beast::detail::clamp;
ec.assign(0, ec.category()); ec.assign(0, ec.category());
// Make sure the stream is open // Make sure the stream is open
if(check_fail(ec)) if(! check_open(ec))
return; return;
// If rd_close_ is set then we already sent a close // If rd_close_ is set then we already sent a close
BOOST_ASSERT(! rd_close_); BOOST_ASSERT(! rd_close_);
@ -357,8 +365,9 @@ close(close_reason const& cr, error_code& ec)
write_close<flat_static_buffer_base>(fb, cr); write_close<flat_static_buffer_base>(fb, cr);
boost::asio::write(stream_, fb.data(), ec); boost::asio::write(stream_, fb.data(), ec);
} }
if(check_fail(ec)) if(! check_ok(ec))
return; return;
status_ = status::closing;
// Drain the connection // Drain the connection
close_code code{}; close_code code{};
if(rd_remain_ > 0) if(rd_remain_ > 0)
@ -375,7 +384,7 @@ close(close_reason const& cr, error_code& ec)
stream_.read_some( stream_.read_some(
rd_buf_.prepare(read_size(rd_buf_, rd_buf_.prepare(read_size(rd_buf_,
rd_buf_.max_size())), ec); rd_buf_.max_size())), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
rd_buf_.commit(bytes_transferred); rd_buf_.commit(bytes_transferred);
} }
@ -414,8 +423,7 @@ close(close_reason const& cr, error_code& ec)
stream_.read_some( stream_.read_some(
rd_buf_.prepare(read_size(rd_buf_, rd_buf_.prepare(read_size(rd_buf_,
rd_buf_.max_size())), ec); rd_buf_.max_size())), ec);
open_ = ! ec; if(! check_ok(ec))
if(! open_)
return; return;
rd_buf_.commit(bytes_transferred); rd_buf_.commit(bytes_transferred);
} }

View File

@ -131,7 +131,7 @@ operator()(error_code ec, std::size_t)
d.ws.wr_block_ = d.tok; d.ws.wr_block_ = d.tok;
// Make sure the stream is open // Make sure the stream is open
if(d.ws.check_fail(ec)) if(! d.ws.check_open(ec))
{ {
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
d.ws.get_io_service().post( d.ws.get_io_service().post(
@ -156,7 +156,7 @@ operator()(error_code ec, std::size_t)
BOOST_ASSERT(d.ws.wr_block_ == d.tok); BOOST_ASSERT(d.ws.wr_block_ == d.tok);
// Make sure the stream is open // Make sure the stream is open
if(d.ws.check_fail(ec)) if(! d.ws.check_open(ec))
goto upcall; goto upcall;
} }
@ -164,7 +164,7 @@ operator()(error_code ec, std::size_t)
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(d.ws.stream_, boost::asio::async_write(d.ws.stream_,
d.fb.data(), std::move(*this)); d.fb.data(), std::move(*this));
if(d.ws.check_fail(ec)) if(! d.ws.check_ok(ec))
goto upcall; goto upcall;
upcall: upcall:
@ -195,15 +195,14 @@ void
stream<NextLayer>:: stream<NextLayer>::
ping(ping_data const& payload, error_code& ec) ping(ping_data const& payload, error_code& ec)
{ {
ec.assign(0, ec.category());
// Make sure the stream is open // Make sure the stream is open
if(check_fail(ec)) if(! check_open(ec))
return; return;
detail::frame_buffer fb; detail::frame_buffer fb;
write_ping<flat_static_buffer_base>( write_ping<flat_static_buffer_base>(
fb, detail::opcode::ping, payload); fb, detail::opcode::ping, payload);
boost::asio::write(stream_, fb.data(), ec); boost::asio::write(stream_, fb.data(), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
} }
@ -223,15 +222,14 @@ void
stream<NextLayer>:: stream<NextLayer>::
pong(ping_data const& payload, error_code& ec) pong(ping_data const& payload, error_code& ec)
{ {
ec.assign(0, ec.category());
// Make sure the stream is open // Make sure the stream is open
if(check_fail(ec)) if(! check_open(ec))
return; return;
detail::frame_buffer fb; detail::frame_buffer fb;
write_ping<flat_static_buffer_base>( write_ping<flat_static_buffer_base>(
fb, detail::opcode::pong, payload); fb, detail::opcode::pong, payload);
boost::asio::write(stream_, fb.data(), ec); boost::asio::write(stream_, fb.data(), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
} }

View File

@ -53,7 +53,7 @@ class stream<NextLayer>::read_some_op
token tok_; token tok_;
close_code code_; close_code code_;
bool did_read_ = false; bool did_read_ = false;
bool cont_; bool cont_ = false;
public: public:
read_some_op(read_some_op&&) = default; read_some_op(read_some_op&&) = default;
@ -138,14 +138,19 @@ operator()(
BOOST_ASIO_CORO_REENTER(*this) BOOST_ASIO_CORO_REENTER(*this)
{ {
// Maybe suspend // Maybe suspend
do_maybe_suspend:
if(! ws_.rd_block_) if(! ws_.rd_block_)
{ {
// Acquire the read block // Acquire the read block
ws_.rd_block_ = tok_; ws_.rd_block_ = tok_;
// Make sure the stream is open // Make sure the stream is not closed
if(ws_.check_fail(ec)) if( ws_.status_ == status::closed ||
ws_.status_ == status::failed)
{
ec = boost::asio::error::operation_aborted;
goto upcall; goto upcall;
}
} }
else else
{ {
@ -164,10 +169,18 @@ operator()(
ws_.get_io_service().post(std::move(*this)); ws_.get_io_service().post(std::move(*this));
BOOST_ASSERT(ws_.rd_block_ == tok_); BOOST_ASSERT(ws_.rd_block_ == tok_);
// Make sure the stream is open // The only way to get read blocked is if
if(ws_.check_fail(ec)) // a `close_op` wrote a close frame
goto upcall; BOOST_ASSERT(ws_.wr_close_);
BOOST_ASSERT(ws_.status_ != status::open);
ec = boost::asio::error::operation_aborted;
goto upcall;
} }
// if status_ == status::closing, we want to suspend
// the read operation until the close completes,
// then finish the read with operation_aborted.
loop: loop:
BOOST_ASSERT(ws_.rd_block_ == tok_); BOOST_ASSERT(ws_.rd_block_ == tok_);
// See if we need to read a frame header. This // See if we need to read a frame header. This
@ -195,19 +208,21 @@ operator()(
ws_.rd_buf_, ws_.rd_buf_.max_size())), ws_.rd_buf_, ws_.rd_buf_.max_size())),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.rd_block_ == tok_); BOOST_ASSERT(ws_.rd_block_ == tok_);
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
ws_.rd_buf_.commit(bytes_transferred); ws_.rd_buf_.commit(bytes_transferred);
// Allow a close operation to // Allow a close operation
// drain the connection if necessary. // to acquire the read block
BOOST_ASSERT(ws_.rd_block_ == tok_); BOOST_ASSERT(ws_.rd_block_ == tok_);
ws_.rd_block_.reset(); ws_.rd_block_.reset();
if( ws_.paused_r_close_.maybe_invoke()) if( ws_.paused_r_close_.maybe_invoke())
{ {
// Suspend
BOOST_ASSERT(ws_.rd_block_); BOOST_ASSERT(ws_.rd_block_);
goto do_suspend; goto do_suspend;
} }
// Acquire read block
ws_.rd_block_ = tok_; ws_.rd_block_ = tok_;
} }
// Immediately apply the mask to the portion // Immediately apply the mask to the portion
@ -236,7 +251,7 @@ operator()(
detail::read_ping(payload, b); detail::read_ping(payload, b);
ws_.rd_buf_.consume(len); ws_.rd_buf_.consume(len);
// Ignore ping when closing // Ignore ping when closing
if(ws_.wr_close_) if(ws_.status_ == status::closing)
goto loop; goto loop;
if(ws_.ctrl_cb_) if(ws_.ctrl_cb_)
ws_.ctrl_cb_(frame_type::ping, payload); ws_.ctrl_cb_(frame_type::ping, payload);
@ -245,6 +260,15 @@ operator()(
flat_static_buffer_base>(ws_.rd_fb_, flat_static_buffer_base>(ws_.rd_fb_,
detail::opcode::pong, payload); detail::opcode::pong, payload);
} }
//BOOST_ASSERT(! ws_.paused_r_close_);
// Allow a close operation
// to acquire the read block
BOOST_ASSERT(ws_.rd_block_ == tok_);
ws_.rd_block_.reset();
ws_.paused_r_close_.maybe_invoke();
// Maybe suspend // Maybe suspend
if(! ws_.wr_block_) if(! ws_.wr_block_)
{ {
@ -268,18 +292,8 @@ operator()(
BOOST_ASSERT(ws_.wr_block_ == tok_); BOOST_ASSERT(ws_.wr_block_ == tok_);
// Make sure the stream is open // Make sure the stream is open
if(ws_.check_fail(ec)) if(! ws_.check_open(ec))
goto upcall; goto upcall;
// Ignore ping when closing
if(ws_.wr_close_)
{
ws_.wr_block_.reset();
ws_.paused_close_.maybe_invoke() ||
ws_.paused_ping_.maybe_invoke() ||
ws_.paused_wr_.maybe_invoke();
goto loop;
}
} }
// Send pong // Send pong
@ -288,13 +302,13 @@ operator()(
boost::asio::async_write(ws_.stream_, boost::asio::async_write(ws_.stream_,
ws_.rd_fb_.data(), std::move(*this)); ws_.rd_fb_.data(), std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); BOOST_ASSERT(ws_.wr_block_ == tok_);
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
ws_.wr_block_.reset(); ws_.wr_block_.reset();
ws_.paused_close_.maybe_invoke() || ws_.paused_close_.maybe_invoke() ||
ws_.paused_ping_.maybe_invoke() || ws_.paused_ping_.maybe_invoke() ||
ws_.paused_wr_.maybe_invoke(); ws_.paused_wr_.maybe_invoke();
goto loop; goto do_maybe_suspend;
} }
// Handle pong frame // Handle pong frame
if(ws_.rd_fh_.op == detail::opcode::pong) if(ws_.rd_fh_.op == detail::opcode::pong)
@ -335,17 +349,19 @@ operator()(
if(ws_.ctrl_cb_) if(ws_.ctrl_cb_)
ws_.ctrl_cb_(frame_type::close, ws_.ctrl_cb_(frame_type::close,
ws_.cr_.reason); ws_.cr_.reason);
if(! ws_.wr_close_) // See if we are already closing
if(ws_.status_ == status::closing)
{ {
// _Start the WebSocket Closing Handshake_ // _Close the WebSocket Connection_
code_ = cr.code == close_code::none ? BOOST_ASSERT(ws_.wr_close_);
close_code::normal : code_ = close_code::none;
static_cast<close_code>(cr.code);
ev_ = error::closed; ev_ = error::closed;
goto close; goto close;
} }
// _Close the WebSocket Connection_ // _Start the WebSocket Closing Handshake_
code_ = close_code::none; code_ = cr.code == close_code::none ?
close_code::normal :
static_cast<close_code>(cr.code);
ev_ = error::closed; ev_ = error::closed;
goto close; goto close;
} }
@ -372,7 +388,7 @@ operator()(
ws_.rd_buf_.prepare(read_size( ws_.rd_buf_.prepare(read_size(
ws_.rd_buf_, ws_.rd_buf_.max_size())), ws_.rd_buf_, ws_.rd_buf_.max_size())),
std::move(*this)); std::move(*this));
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
ws_.rd_buf_.commit(bytes_transferred); ws_.rd_buf_.commit(bytes_transferred);
if(ws_.rd_fh_.mask) if(ws_.rd_fh_.mask)
@ -413,7 +429,7 @@ operator()(
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
ws_.stream_.async_read_some(buffer_prefix( ws_.stream_.async_read_some(buffer_prefix(
clamp(ws_.rd_remain_), cb_), std::move(*this)); clamp(ws_.rd_remain_), cb_), std::move(*this));
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
BOOST_ASSERT(bytes_transferred > 0); BOOST_ASSERT(bytes_transferred > 0);
auto const mb = buffer_prefix( auto const mb = buffer_prefix(
@ -456,7 +472,7 @@ operator()(
ws_.rd_buf_.prepare(read_size( ws_.rd_buf_.prepare(read_size(
ws_.rd_buf_, ws_.rd_buf_.max_size())), ws_.rd_buf_, ws_.rd_buf_.max_size())),
std::move(*this)); std::move(*this));
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
BOOST_ASSERT(bytes_transferred > 0); BOOST_ASSERT(bytes_transferred > 0);
ws_.rd_buf_.commit(bytes_transferred); ws_.rd_buf_.commit(bytes_transferred);
@ -498,8 +514,7 @@ operator()(
zs.next_in = empty_block; zs.next_in = empty_block;
zs.avail_in = sizeof(empty_block); zs.avail_in = sizeof(empty_block);
ws_.pmd_->zi.write(zs, zlib::Flush::sync, ec); ws_.pmd_->zi.write(zs, zlib::Flush::sync, ec);
if(ws_.check_fail(ec)) BOOST_ASSERT(! ec);
goto upcall;
// VFALCO See: // VFALCO See:
// https://github.com/madler/zlib/issues/280 // https://github.com/madler/zlib/issues/280
BOOST_ASSERT(zs.total_out == 0); BOOST_ASSERT(zs.total_out == 0);
@ -521,7 +536,7 @@ operator()(
} }
ws_.pmd_->zi.write(zs, zlib::Flush::sync, ec); ws_.pmd_->zi.write(zs, zlib::Flush::sync, ec);
BOOST_ASSERT(ec != zlib::error::end_of_stream); BOOST_ASSERT(ec != zlib::error::end_of_stream);
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
if(ws_.rd_msg_max_ && beast::detail::sum_exceeds( if(ws_.rd_msg_max_ && beast::detail::sum_exceeds(
ws_.rd_size_, zs.total_out, ws_.rd_msg_max_)) ws_.rd_size_, zs.total_out, ws_.rd_msg_max_))
@ -560,7 +575,7 @@ operator()(
ws_.wr_block_ = tok_; ws_.wr_block_ = tok_;
// Make sure the stream is open // Make sure the stream is open
BOOST_ASSERT(ws_.open_); BOOST_ASSERT(ws_.status_ == status::open);
} }
else else
{ {
@ -579,10 +594,13 @@ operator()(
BOOST_ASSERT(ws_.wr_block_ == tok_); BOOST_ASSERT(ws_.wr_block_ == tok_);
// Make sure the stream is open // Make sure the stream is open
if(ws_.check_fail(ec)) if(! ws_.check_open(ec))
goto upcall; goto upcall;
} }
// Set the status
ws_.status_ = status::closing;
if(! ws_.wr_close_) if(! ws_.wr_close_)
{ {
ws_.wr_close_ = true; ws_.wr_close_ = true;
@ -600,9 +618,7 @@ operator()(
ws_.stream_, ws_.rd_fb_.data(), ws_.stream_, ws_.rd_fb_.data(),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); BOOST_ASSERT(ws_.wr_block_ == tok_);
if(! ws_.check_ok(ec))
// Make sure the stream is open
if(ws_.check_fail(ec))
goto upcall; goto upcall;
} }
@ -621,11 +637,15 @@ operator()(
} }
if(! ec) if(! ec)
ec = ev_; ec = ev_;
ws_.open_ = false; if(ec && ec != error::closed)
ws_.status_ = status::failed;
else
ws_.status_ = status::closed;
ws_.close();
upcall: upcall:
BOOST_ASSERT(ws_.rd_block_ == tok_); if(ws_.rd_block_ == tok_)
ws_.rd_block_.reset(); ws_.rd_block_.reset();
ws_.paused_r_close_.maybe_invoke(); ws_.paused_r_close_.maybe_invoke();
if(ws_.wr_block_ == tok_) if(ws_.wr_block_ == tok_)
{ {
@ -755,7 +775,7 @@ operator()(
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
read_some_op<buffers_type, read_op>{ read_some_op<buffers_type, read_op>{
std::move(*this), ws_, *mb}( std::move(*this), ws_, *mb}(
{}, 0, true); {}, 0, false);
if(ec) if(ec)
break; break;
b_.commit(bytes_transferred); b_.commit(bytes_transferred);
@ -956,7 +976,7 @@ read_some(
std::size_t bytes_written = 0; std::size_t bytes_written = 0;
ec.assign(0, ec.category()); ec.assign(0, ec.category());
// Make sure the stream is open // Make sure the stream is open
if(check_fail(ec)) if(! check_open(ec))
return 0; return 0;
loop: loop:
// See if we need to read a frame header. This // See if we need to read a frame header. This
@ -979,7 +999,7 @@ loop:
rd_buf_.prepare(read_size( rd_buf_.prepare(read_size(
rd_buf_, rd_buf_.max_size())), rd_buf_, rd_buf_.max_size())),
ec); ec);
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
rd_buf_.commit(bytes_transferred); rd_buf_.commit(bytes_transferred);
} }
@ -1018,7 +1038,7 @@ loop:
write_ping<flat_static_buffer_base>(fb, write_ping<flat_static_buffer_base>(fb,
detail::opcode::pong, payload); detail::opcode::pong, payload);
boost::asio::write(stream_, fb.data(), ec); boost::asio::write(stream_, fb.data(), ec);
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
goto loop; goto loop;
} }
@ -1082,7 +1102,7 @@ loop:
rd_buf_.commit(stream_.read_some( rd_buf_.commit(stream_.read_some(
rd_buf_.prepare(read_size(rd_buf_, rd_buf_.prepare(read_size(rd_buf_,
rd_buf_.max_size())), ec)); rd_buf_.max_size())), ec));
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
if(rd_fh_.mask) if(rd_fh_.mask)
detail::mask_inplace( detail::mask_inplace(
@ -1125,7 +1145,7 @@ loop:
auto const bytes_transferred = auto const bytes_transferred =
stream_.read_some(buffer_prefix( stream_.read_some(buffer_prefix(
clamp(rd_remain_), buffers), ec); clamp(rd_remain_), buffers), ec);
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
BOOST_ASSERT(bytes_transferred > 0); BOOST_ASSERT(bytes_transferred > 0);
auto const mb = buffer_prefix( auto const mb = buffer_prefix(
@ -1187,7 +1207,7 @@ loop:
rd_buf_.prepare(read_size( rd_buf_.prepare(read_size(
rd_buf_, rd_buf_.max_size())), rd_buf_, rd_buf_.max_size())),
ec); ec);
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
BOOST_ASSERT(bytes_transferred > 0); BOOST_ASSERT(bytes_transferred > 0);
rd_buf_.commit(bytes_transferred); rd_buf_.commit(bytes_transferred);
@ -1217,8 +1237,6 @@ loop:
zs.avail_in = sizeof(empty_block); zs.avail_in = sizeof(empty_block);
pmd_->zi.write(zs, zlib::Flush::sync, ec); pmd_->zi.write(zs, zlib::Flush::sync, ec);
BOOST_ASSERT(! ec); BOOST_ASSERT(! ec);
if(check_fail(ec))
return bytes_written;
// VFALCO See: // VFALCO See:
// https://github.com/madler/zlib/issues/280 // https://github.com/madler/zlib/issues/280
BOOST_ASSERT(zs.total_out == 0); BOOST_ASSERT(zs.total_out == 0);
@ -1240,7 +1258,7 @@ loop:
} }
pmd_->zi.write(zs, zlib::Flush::sync, ec); pmd_->zi.write(zs, zlib::Flush::sync, ec);
BOOST_ASSERT(ec != zlib::error::end_of_stream); BOOST_ASSERT(ec != zlib::error::end_of_stream);
if(check_fail(ec)) if(! check_ok(ec))
return bytes_written; return bytes_written;
if(rd_msg_max_ && beast::detail::sum_exceeds( if(rd_msg_max_ && beast::detail::sum_exceeds(
rd_size_, zs.total_out, rd_msg_max_)) rd_size_, zs.total_out, rd_msg_max_))

View File

@ -30,8 +30,6 @@ is_upgrade(http::header<true,
return false; return false;
if(! http::token_list{req["Upgrade"]}.exists("websocket")) if(! http::token_list{req["Upgrade"]}.exists("websocket"))
return false; return false;
if(! req.count(http::field::sec_websocket_version))
return false;
return true; return true;
} }

View File

@ -134,7 +134,7 @@ open(role_type role)
{ {
// VFALCO TODO analyze and remove dupe code in reset() // VFALCO TODO analyze and remove dupe code in reset()
role_ = role; role_ = role;
open_ = true; status_ = status::open;
rd_remain_ = 0; rd_remain_ = 0;
rd_cont_ = false; rd_cont_ = false;
rd_done_ = true; rd_done_ = true;
@ -193,8 +193,7 @@ void
stream<NextLayer>:: stream<NextLayer>::
reset() reset()
{ {
BOOST_ASSERT(! open_); BOOST_ASSERT(status_ != status::open);
open_ = false; // VFALCO is this needed?
rd_remain_ = 0; rd_remain_ = 0;
rd_cont_ = false; rd_cont_ = false;
rd_done_ = true; rd_done_ = true;
@ -593,8 +592,6 @@ build_response(http::request<Body,
return err("Missing Host"); return err("Missing Host");
if(! req.count(http::field::sec_websocket_key)) if(! req.count(http::field::sec_websocket_key))
return err("Missing Sec-WebSocket-Key"); return err("Missing Sec-WebSocket-Key");
if(! http::token_list{req[http::field::upgrade]}.exists("websocket"))
return err("Missing websocket Upgrade token");
auto const key = req[http::field::sec_websocket_key]; auto const key = req[http::field::sec_websocket_key];
if(key.size() > detail::sec_ws_key_type::max_size_n) if(key.size() > detail::sec_ws_key_type::max_size_n)
return err("Invalid Sec-WebSocket-Key"); return err("Invalid Sec-WebSocket-Key");
@ -684,6 +681,7 @@ do_fail(
error_code& ec) // set to the error, else set to ev error_code& ec) // set to the error, else set to ev
{ {
BOOST_ASSERT(ev); BOOST_ASSERT(ev);
status_ = status::closing;
if(code != close_code::none && ! wr_close_) if(code != close_code::none && ! wr_close_)
{ {
wr_close_ = true; wr_close_ = true;
@ -691,8 +689,7 @@ do_fail(
write_close< write_close<
flat_static_buffer_base>(fb, code); flat_static_buffer_base>(fb, code);
boost::asio::write(stream_, fb.data(), ec); boost::asio::write(stream_, fb.data(), ec);
open_ = ! ec; if(! check_ok(ec))
if(! open_)
return; return;
} }
using beast::websocket::teardown; using beast::websocket::teardown;
@ -703,11 +700,13 @@ do_fail(
// http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
ec.assign(0, ec.category()); ec.assign(0, ec.category());
} }
open_ = ! ec; if(! ec)
if(! open_) ec = ev;
return; if(ec && ec != error::closed)
ec = ev; status_ = status::failed;
open_ = false; else
status_ = status::closed;
close();
} }
} // websocket } // websocket

View File

@ -49,7 +49,7 @@ class stream<NextLayer>::write_some_op
int how_; int how_;
bool fin_; bool fin_;
bool more_; bool more_;
bool cont_; bool cont_ = false;
public: public:
write_some_op(write_some_op&&) = default; write_some_op(write_some_op&&) = default;
@ -206,7 +206,7 @@ operator()(
ws_.wr_block_ = tok_; ws_.wr_block_ = tok_;
// Make sure the stream is open // Make sure the stream is open
if(ws_.check_fail(ec)) if(! ws_.check_open(ec))
goto upcall; goto upcall;
} }
else else
@ -227,7 +227,7 @@ operator()(
BOOST_ASSERT(ws_.wr_block_ == tok_); BOOST_ASSERT(ws_.wr_block_ == tok_);
// Make sure the stream is open // Make sure the stream is open
if(ws_.check_fail(ec)) if(! ws_.check_open(ec))
goto upcall; goto upcall;
} }
@ -242,13 +242,11 @@ operator()(
ws_.wr_fb_, fh_); ws_.wr_fb_, fh_);
ws_.wr_cont_ = ! fin_; ws_.wr_cont_ = ! fin_;
// Send frame // Send frame
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(ws_.stream_, boost::asio::async_write(ws_.stream_,
buffer_cat(ws_.wr_fb_.data(), cb_), buffer_cat(ws_.wr_fb_.data(), cb_),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
goto upcall; goto upcall;
} }
@ -267,15 +265,13 @@ operator()(
ws_.wr_fb_, fh_); ws_.wr_fb_, fh_);
ws_.wr_cont_ = ! fin_; ws_.wr_cont_ = ! fin_;
// Send frame // Send frame
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write( boost::asio::async_write(
ws_.stream_, buffer_cat( ws_.stream_, buffer_cat(
ws_.wr_fb_.data(), buffer_prefix( ws_.wr_fb_.data(), buffer_prefix(
clamp(fh_.len), cb_)), clamp(fh_.len), cb_)),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
if(remain_ == 0) if(remain_ == 0)
break; break;
@ -317,14 +313,12 @@ operator()(
remain_ -= n; remain_ -= n;
ws_.wr_cont_ = ! fin_; ws_.wr_cont_ = ! fin_;
// Send frame header and partial payload // Send frame header and partial payload
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write( boost::asio::async_write(
ws_.stream_, buffer_cat(ws_.wr_fb_.data(), ws_.stream_, buffer_cat(ws_.wr_fb_.data(),
buffer(ws_.wr_buf_.get(), n)), buffer(ws_.wr_buf_.get(), n)),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
while(remain_ > 0) while(remain_ > 0)
{ {
@ -336,13 +330,11 @@ operator()(
ws_.wr_buf_.get(), n), key_); ws_.wr_buf_.get(), n), key_);
remain_ -= n; remain_ -= n;
// Send partial payload // Send partial payload
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(ws_.stream_, boost::asio::async_write(ws_.stream_,
buffer(ws_.wr_buf_.get(), n), buffer(ws_.wr_buf_.get(), n),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
} }
goto upcall; goto upcall;
@ -369,14 +361,12 @@ operator()(
ws_.wr_fb_, fh_); ws_.wr_fb_, fh_);
ws_.wr_cont_ = ! fin_; ws_.wr_cont_ = ! fin_;
// Send frame // Send frame
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(ws_.stream_, boost::asio::async_write(ws_.stream_,
buffer_cat(ws_.wr_fb_.data(), buffer_cat(ws_.wr_fb_.data(),
buffer(ws_.wr_buf_.get(), n)), buffer(ws_.wr_buf_.get(), n)),
std::move(*this)); std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
if(remain_ == 0) if(remain_ == 0)
break; break;
@ -408,7 +398,7 @@ operator()(
ws_.wr_buf_size_); ws_.wr_buf_size_);
more_ = detail::deflate( more_ = detail::deflate(
ws_.pmd_->zo, b, cb_, fin_, ec); ws_.pmd_->zo, b, cb_, fin_, ec);
if(ws_.check_fail(ec)) if(! ws_.check_ok(ec))
goto upcall; goto upcall;
n = buffer_size(b); n = buffer_size(b);
if(n == 0) if(n == 0)
@ -434,13 +424,11 @@ operator()(
flat_static_buffer_base>(ws_.wr_fb_, fh_); flat_static_buffer_base>(ws_.wr_fb_, fh_);
ws_.wr_cont_ = ! fin_; ws_.wr_cont_ = ! fin_;
// Send frame // Send frame
BOOST_ASSERT(ws_.wr_block_ == tok_);
BOOST_ASIO_CORO_YIELD BOOST_ASIO_CORO_YIELD
boost::asio::async_write(ws_.stream_, boost::asio::async_write(ws_.stream_,
buffer_cat(ws_.wr_fb_.data(), buffer_cat(ws_.wr_fb_.data(),
mutable_buffers_1{b}), std::move(*this)); mutable_buffers_1{b}), std::move(*this));
BOOST_ASSERT(ws_.wr_block_ == tok_); if(! ws_.check_ok(ec))
if(ws_.check_fail(ec))
goto upcall; goto upcall;
if(more_) if(more_)
{ {
@ -460,7 +448,6 @@ operator()(
} }
else else
{ {
BOOST_ASSERT(ws_.wr_block_ == tok_);
if(fh_.fin && ( if(fh_.fin && (
(ws_.role_ == role_type::client && (ws_.role_ == role_type::client &&
ws_.pmd_config_.client_no_context_takeover) || ws_.pmd_config_.client_no_context_takeover) ||
@ -524,7 +511,7 @@ write_some(bool fin,
using boost::asio::buffer_size; using boost::asio::buffer_size;
ec.assign(0, ec.category()); ec.assign(0, ec.category());
// Make sure the stream is open // Make sure the stream is open
if(check_fail(ec)) if(! check_open(ec))
return; return;
detail::frame_header fh; detail::frame_header fh;
if(! wr_cont_) if(! wr_cont_)
@ -552,8 +539,7 @@ write_some(bool fin,
wr_buf_.get(), wr_buf_size_); wr_buf_.get(), wr_buf_size_);
auto const more = detail::deflate( auto const more = detail::deflate(
pmd_->zo, b, cb, fin, ec); pmd_->zo, b, cb, fin, ec);
open_ = ! ec; if(! check_ok(ec))
if(! open_)
return; return;
auto const n = buffer_size(b); auto const n = buffer_size(b);
if(n == 0) if(n == 0)
@ -581,7 +567,7 @@ write_some(bool fin,
wr_cont_ = ! fin; wr_cont_ = ! fin;
boost::asio::write(stream_, boost::asio::write(stream_,
buffer_cat(fh_buf.data(), b), ec); buffer_cat(fh_buf.data(), b), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
if(! more) if(! more)
break; break;
@ -608,7 +594,7 @@ write_some(bool fin,
wr_cont_ = ! fin; wr_cont_ = ! fin;
boost::asio::write(stream_, boost::asio::write(stream_,
buffer_cat(fh_buf.data(), buffers), ec); buffer_cat(fh_buf.data(), buffers), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
} }
else else
@ -630,7 +616,7 @@ write_some(bool fin,
boost::asio::write(stream_, boost::asio::write(stream_,
buffer_cat(fh_buf.data(), buffer_cat(fh_buf.data(),
buffer_prefix(n, cb)), ec); buffer_prefix(n, cb)), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
if(remain == 0) if(remain == 0)
break; break;
@ -662,7 +648,7 @@ write_some(bool fin,
wr_cont_ = ! fin; wr_cont_ = ! fin;
boost::asio::write(stream_, boost::asio::write(stream_,
buffer_cat(fh_buf.data(), b), ec); buffer_cat(fh_buf.data(), b), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
} }
while(remain > 0) while(remain > 0)
@ -674,7 +660,7 @@ write_some(bool fin,
remain -= n; remain -= n;
detail::mask_inplace(b, key); detail::mask_inplace(b, key);
boost::asio::write(stream_, b, ec); boost::asio::write(stream_, b, ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
} }
} }
@ -702,7 +688,7 @@ write_some(bool fin,
flat_static_buffer_base>(fh_buf, fh); flat_static_buffer_base>(fh_buf, fh);
boost::asio::write(stream_, boost::asio::write(stream_,
buffer_cat(fh_buf.data(), b), ec); buffer_cat(fh_buf.data(), b), ec);
if(check_fail(ec)) if(! check_ok(ec))
return; return;
if(remain == 0) if(remain == 0)
break; break;

View File

@ -132,6 +132,9 @@ class stream
struct op {}; struct op {};
using control_cb_type =
std::function<void(frame_type, string_view)>;
// tokens are used to order reads and writes // tokens are used to order reads and writes
class token class token
{ {
@ -157,8 +160,13 @@ class stream
zlib::inflate_stream zi; zlib::inflate_stream zi;
}; };
using control_cb_type = enum class status
std::function<void(frame_type, string_view)>; {
open,
closing,
closed,
failed
};
NextLayer stream_; // the wrapped stream NextLayer stream_; // the wrapped stream
close_reason cr_; // set from received close frame close_reason cr_; // set from received close frame
@ -190,8 +198,8 @@ class stream
token tok_; // used to order asynchronous ops token tok_; // used to order asynchronous ops
role_type role_ // server or client role_type role_ // server or client
= role_type::client; = role_type::client;
bool open_ // `true` if connected status status_
= false; = status::closed;
token wr_block_; // op currenly writing token wr_block_; // op currenly writing
bool wr_close_ // did we write a close frame? bool wr_close_ // did we write a close frame?
@ -366,7 +374,7 @@ public:
bool bool
is_open() const is_open() const
{ {
return open_; return status_ == status::open;
} }
/** Returns `true` if the latest message data indicates binary. /** Returns `true` if the latest message data indicates binary.
@ -3394,19 +3402,27 @@ private:
void begin_msg(); void begin_msg();
bool bool
check_fail(error_code& ec) check_open(error_code& ec)
{ {
if(! open_) if(status_ != status::open)
{ {
ec = boost::asio::error::operation_aborted; ec = boost::asio::error::operation_aborted;
return true; return false;
} }
ec.assign(0, ec.category());
return true;
}
bool
check_ok(error_code& ec)
{
if(ec) if(ec)
{ {
open_ = false; if(status_ != status::closed)
return true; status_ = status::failed;
return false;
} }
return false; return true;
} }
template<class DynamicBuffer> template<class DynamicBuffer>

View File

@ -560,6 +560,35 @@ public:
"Sec-WebSocket-Version: 13\r\n" "Sec-WebSocket-Version: 13\r\n"
"\r\n" "\r\n"
); );
// oversize key
check(error::handshake_failed,
"GET / HTTP/1.1\r\n"
"Host: localhost:80\r\n"
"Upgrade: WebSocket\r\n"
"Connection: upgrade\r\n"
"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQdGhlIHNhbXBsZSBub25jZQ==\r\n"
"Sec-WebSocket-Version: 13\r\n"
"\r\n"
);
// bad version
check(error::handshake_failed,
"GET / HTTP/1.1\r\n"
"Host: localhost:80\r\n"
"Upgrade: WebSocket\r\n"
"Connection: upgrade\r\n"
"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
"Sec-WebSocket-Version: 12\r\n"
"\r\n"
);
// missing version
check(error::handshake_failed,
"GET / HTTP/1.1\r\n"
"Host: localhost:80\r\n"
"Upgrade: WebSocket\r\n"
"Connection: upgrade\r\n"
"Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==\r\n"
"\r\n"
);
// valid request // valid request
check({}, check({},
"GET / HTTP/1.1\r\n" "GET / HTTP/1.1\r\n"

View File

@ -39,6 +39,14 @@ public:
w.close(ws, close_code::going_away); w.close(ws, close_code::going_away);
}); });
// close with code and reason
doTest(pmd, [&](ws_type& ws)
{
w.close(ws, {
close_code::going_away,
"going away"});
});
// already closed // already closed
{ {
echo_server es{log}; echo_server es{log};
@ -125,6 +133,17 @@ public:
} }
} }
// drain masked close frame
{
echo_server es{log, kind::async_client};
stream<test::stream> ws{ios_};
ws.next_layer().connect(es.stream());
ws.set_option(pmd);
es.async_handshake();
ws.accept();
w.close(ws, {});
}
// close with incomplete read message // close with incomplete read message
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
@ -147,8 +166,10 @@ public:
} }
void void
testCloseSuspend() testSuspend()
{ {
using boost::asio::buffer;
// suspend on ping // suspend on ping
doFailLoop([&](test::fail_counter& fc) doFailLoop([&](test::fail_counter& fc)
{ {
@ -270,10 +291,10 @@ public:
ws.async_read(b, ws.async_read(b,
[&](error_code ec, std::size_t) [&](error_code ec, std::size_t)
{ {
++count;
if(ec != error::failed) if(ec != error::failed)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
BEAST_EXPECT(++count == 1);
}); });
while(! ws.wr_block_) while(! ws.wr_block_)
{ {
@ -285,10 +306,10 @@ public:
ws.async_close({}, ws.async_close({},
[&](error_code ec) [&](error_code ec)
{ {
++count;
if(ec != boost::asio::error::operation_aborted) if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
BEAST_EXPECT(++count == 2);
}); });
BEAST_EXPECT(count == 0); BEAST_EXPECT(count == 0);
ios.run(); ios.run();
@ -311,10 +332,10 @@ public:
ws.async_read(b, ws.async_read(b,
[&](error_code ec, std::size_t) [&](error_code ec, std::size_t)
{ {
++count;
if(ec != error::closed) if(ec != error::closed)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
BEAST_EXPECT(++count == 1);
}); });
while(! ws.wr_block_) while(! ws.wr_block_)
{ {
@ -326,16 +347,256 @@ public:
ws.async_close({}, ws.async_close({},
[&](error_code ec) [&](error_code ec)
{ {
++count;
if(ec != boost::asio::error::operation_aborted) if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
BEAST_EXPECT(++count == 2);
}); });
BEAST_EXPECT(count == 0); BEAST_EXPECT(count == 0);
ios.run(); ios.run();
BEAST_EXPECT(count == 2); BEAST_EXPECT(count == 2);
}); });
// teardown on received close
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
// add a close frame to the input
ws.next_layer().append(string_view{
"\x88\x00", 2});
std::size_t count = 0;
std::string const s = "Hello, world!";
ws.async_write(buffer(s),
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 3);
});
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(count == 0);
ios.run();
BEAST_EXPECT(count == 3);
});
// check for deadlock
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
// add a ping frame to the input
ws.next_layer().append(string_view{
"\x89\x00", 2});
std::size_t count = 0;
multi_buffer b;
std::string const s = "Hello, world!";
ws.async_write(buffer(s),
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 3);
});
BEAST_EXPECT(ws.rd_block_);
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(ws.is_open());
BEAST_EXPECT(ws.wr_block_);
BEAST_EXPECT(count == 0);
ios.run();
BEAST_EXPECT(count == 3);
});
// Four-way: close, read, write, ping
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
std::string const s = "Hello, world!";
multi_buffer b;
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
++count;
});
ws.async_write(buffer(s),
[&](error_code ec)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
++count;
});
ws.async_ping({},
[&](error_code ec)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
++count;
});
BEAST_EXPECT(count == 0);
ios.run();
BEAST_EXPECT(count == 4);
});
// Four-way: read, write, ping, close
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
std::string const s = "Hello, world!";
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec && ec != boost::asio::error::operation_aborted)
{
BEAST_EXPECTS(ec, ec.message());
BOOST_THROW_EXCEPTION(
system_error{ec});
}
if(! ec)
BEAST_EXPECT(to_string(b.data()) == s);
++count;
});
ws.async_write(buffer(s),
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
ws.async_ping({},
[&](error_code ec)
{
if(ec != boost::asio::error::operation_aborted)
{
BEAST_EXPECTS(ec, ec.message());
BOOST_THROW_EXCEPTION(
system_error{ec});
}
++count;
});
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(count == 0);
ios.run();
BEAST_EXPECT(count == 4);
});
// Four-way: ping, read, write, close
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
std::string const s = "Hello, world!";
multi_buffer b;
ws.async_ping({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
++count;
});
ws.async_write(buffer(s),
[&](error_code ec)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
++count;
});
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(count == 0);
ios.run();
BEAST_EXPECT(count == 4);
});
} }
void void
@ -357,7 +618,7 @@ public:
run() override run() override
{ {
testClose(); testClose();
testCloseSuspend(); testSuspend();
testContHook(); testContHook();
} }
}; };

View File

@ -7,12 +7,8 @@
// Official repository: https://github.com/boostorg/beast // Official repository: https://github.com/boostorg/beast
// //
#include <boost/beast/websocket/stream.hpp>
#include <boost/beast/websocket/detail/frame.hpp> #include <boost/beast/websocket/detail/frame.hpp>
#include <boost/beast/unit_test/suite.hpp> #include <boost/beast/unit_test/suite.hpp>
#include <boost/beast/test/yield_to.hpp>
#include <initializer_list>
#include <climits>
namespace boost { namespace boost {
namespace beast { namespace beast {
@ -21,7 +17,6 @@ namespace detail {
class frame_test class frame_test
: public beast::unit_test::suite : public beast::unit_test::suite
, public test::enable_yield_to
{ {
public: public:
void testCloseCodes() void testCloseCodes()
@ -47,7 +42,7 @@ public:
test_fh() test_fh()
{ {
op = detail::opcode::text; op = detail::opcode::text;
fin = false; fin = true;
mask = false; mask = false;
rsv1 = false; rsv1 = false;
rsv2 = false; rsv2 = false;
@ -57,8 +52,20 @@ public:
} }
}; };
void
testWriteFrame()
{
test_fh fh;
fh.rsv2 = true;
fh.rsv3 = true;
fh.len = 65536;
frame_buffer fb;
write(fb, fh);
}
void run() override void run() override
{ {
testWriteFrame();
testCloseCodes(); testCloseCodes();
} }
}; };

View File

@ -214,10 +214,265 @@ public:
); );
} }
// Compression Extensions for WebSocket
//
// https://tools.ietf.org/html/rfc7692
//
void
testExtRead()
{
detail::pmd_offer po;
auto const accept =
[&](string_view s)
{
http::fields f;
f.set(http::field::sec_websocket_extensions, s);
po = detail::pmd_offer();
detail::pmd_read(po, f);
BEAST_EXPECT(po.accept);
};
auto const reject =
[&](string_view s)
{
http::fields f;
f.set(http::field::sec_websocket_extensions, s);
po = detail::pmd_offer();
detail::pmd_read(po, f);
BEAST_EXPECT(! po.accept);
};
// duplicate parameters
reject("permessage-deflate; server_max_window_bits=8; server_max_window_bits=8");
// missing value
reject("permessage-deflate; server_max_window_bits");
reject("permessage-deflate; server_max_window_bits=");
// invalid value
reject("permessage-deflate; server_max_window_bits=-1");
reject("permessage-deflate; server_max_window_bits=7");
reject("permessage-deflate; server_max_window_bits=16");
reject("permessage-deflate; server_max_window_bits=999999999999999999999999");
reject("permessage-deflate; server_max_window_bits=9a");
// duplicate parameters
reject("permessage-deflate; client_max_window_bits=8; client_max_window_bits=8");
// optional value excluded
accept("permessage-deflate; client_max_window_bits");
BEAST_EXPECT(po.client_max_window_bits == -1);
accept("permessage-deflate; client_max_window_bits=");
BEAST_EXPECT(po.client_max_window_bits == -1);
// invalid value
reject("permessage-deflate; client_max_window_bits=-1");
reject("permessage-deflate; client_max_window_bits=7");
reject("permessage-deflate; client_max_window_bits=16");
reject("permessage-deflate; client_max_window_bits=999999999999999999999999");
// duplicate parameters
reject("permessage-deflate; server_no_context_takeover; server_no_context_takeover");
// valueless parameter
accept("permessage-deflate; server_no_context_takeover");
BEAST_EXPECT(po.server_no_context_takeover);
accept("permessage-deflate; server_no_context_takeover=");
BEAST_EXPECT(po.server_no_context_takeover);
// disallowed value
reject("permessage-deflate; server_no_context_takeover=-1");
reject("permessage-deflate; server_no_context_takeover=x");
reject("permessage-deflate; server_no_context_takeover=\"yz\"");
reject("permessage-deflate; server_no_context_takeover=999999999999999999999999");
// duplicate parameters
reject("permessage-deflate; client_no_context_takeover; client_no_context_takeover");
// valueless parameter
accept("permessage-deflate; client_no_context_takeover");
BEAST_EXPECT(po.client_no_context_takeover);
accept("permessage-deflate; client_no_context_takeover=");
BEAST_EXPECT(po.client_no_context_takeover);
// disallowed value
reject("permessage-deflate; client_no_context_takeover=-1");
reject("permessage-deflate; client_no_context_takeover=x");
reject("permessage-deflate; client_no_context_takeover=\"yz\"");
reject("permessage-deflate; client_no_context_takeover=999999999999999999999999");
// unknown extension parameter
reject("permessage-deflate; unknown");
reject("permessage-deflate; unknown=");
reject("permessage-deflate; unknown=1");
reject("permessage-deflate; unknown=x");
reject("permessage-deflate; unknown=\"xy\"");
}
void
testExtWrite()
{
detail::pmd_offer po;
auto const check =
[&](string_view match)
{
http::fields f;
detail::pmd_write(f, po);
BEAST_EXPECT(
f[http::field::sec_websocket_extensions]
== match);
};
po.accept = true;
po.server_max_window_bits = 0;
po.client_max_window_bits = 0;
po.server_no_context_takeover = false;
po.client_no_context_takeover = false;
check("permessage-deflate");
po.server_max_window_bits = 10;
check("permessage-deflate; server_max_window_bits=10");
po.server_max_window_bits = -1;
check("permessage-deflate; server_max_window_bits");
po.server_max_window_bits = 0;
po.client_max_window_bits = 10;
check("permessage-deflate; client_max_window_bits=10");
po.client_max_window_bits = -1;
check("permessage-deflate; client_max_window_bits");
po.client_max_window_bits = 0;
po.server_no_context_takeover = true;
check("permessage-deflate; server_no_context_takeover");
po.server_no_context_takeover = false;
po.client_no_context_takeover = true;
check("permessage-deflate; client_no_context_takeover");
}
void
testExtNegotiate()
{
permessage_deflate pmd;
auto const reject =
[&](
string_view offer)
{
detail::pmd_offer po;
{
http::fields f;
f.set(http::field::sec_websocket_extensions, offer);
detail::pmd_read(po, f);
}
http::fields f;
detail::pmd_offer config;
detail::pmd_negotiate(f, config, po, pmd);
BEAST_EXPECT(! config.accept);
};
auto const accept =
[&](
string_view offer,
string_view result)
{
detail::pmd_offer po;
{
http::fields f;
f.set(http::field::sec_websocket_extensions, offer);
detail::pmd_read(po, f);
}
http::fields f;
detail::pmd_offer config;
detail::pmd_negotiate(f, config, po, pmd);
auto const got =
f[http::field::sec_websocket_extensions];
BEAST_EXPECTS(got == result, got);
{
detail::pmd_offer poc;
detail::pmd_read(poc, f);
detail::pmd_normalize(poc);
BEAST_EXPECT(poc.accept);
}
BEAST_EXPECT(config.server_max_window_bits != 0);
BEAST_EXPECT(config.client_max_window_bits != 0);
};
pmd.server_enable = true;
pmd.server_max_window_bits = 15;
pmd.client_max_window_bits = 15;
pmd.server_no_context_takeover = false;
pmd.client_no_context_takeover = false;
// default
accept(
"permessage-deflate",
"permessage-deflate");
// non-default server_max_window_bits
accept(
"permessage-deflate; server_max_window_bits=14",
"permessage-deflate; server_max_window_bits=14");
// explicit default server_max_window_bits
accept(
"permessage-deflate; server_max_window_bits=15",
"permessage-deflate");
// minimum window size of 8 bits (a zlib bug)
accept(
"permessage-deflate; server_max_window_bits=8",
"permessage-deflate; server_max_window_bits=9");
// non-default server_max_window_bits setting
pmd.server_max_window_bits = 10;
accept(
"permessage-deflate",
"permessage-deflate; server_max_window_bits=10");
// clamped server_max_window_bits setting #1
pmd.server_max_window_bits = 10;
accept(
"permessage-deflate; server_max_window_bits=14",
"permessage-deflate; server_max_window_bits=10");
// clamped server_max_window_bits setting #2
pmd.server_max_window_bits=8;
accept(
"permessage-deflate; server_max_window_bits=14",
"permessage-deflate; server_max_window_bits=9");
pmd.server_max_window_bits = 15;
// present with no value
accept(
"permessage-deflate; client_max_window_bits",
"permessage-deflate");
// present with no value, non-default setting
pmd.client_max_window_bits = 10;
accept(
"permessage-deflate; client_max_window_bits",
"permessage-deflate; client_max_window_bits=10");
// absent, non-default setting
pmd.client_max_window_bits = 10;
reject(
"permessage-deflate");
}
void void
run() override run() override
{ {
testHandshake(); testHandshake();
testExtRead();
testExtWrite();
testExtNegotiate();
} }
}; };

View File

@ -89,40 +89,10 @@ public:
{ {
doTestPing(AsyncClient{yield}); doTestPing(AsyncClient{yield});
}); });
// suspend on write
{
echo_server es{log};
error_code ec;
boost::asio::io_service ios;
stream<test::stream> ws{ios};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/", ec);
BEAST_EXPECTS(! ec, ec.message());
std::size_t count = 0;
ws.async_write(sbuf("*"),
[&](error_code ec)
{
++count;
BEAST_EXPECTS(! ec, ec.message());
});
BEAST_EXPECT(ws.wr_block_);
ws.async_ping("",
[&](error_code ec)
{
++count;
BEAST_EXPECTS(
ec == boost::asio::error::operation_aborted,
ec.message());
});
ws.async_close({}, [&](error_code){});
ios.run();
BEAST_EXPECT(count == 2);
}
} }
void void
testPingSuspend() testSuspend()
{ {
// suspend on write // suspend on write
doFailLoop([&](test::fail_counter& fc) doFailLoop([&](test::fail_counter& fc)
@ -352,6 +322,45 @@ public:
BEAST_EXPECT(count == 2); BEAST_EXPECT(count == 2);
}); });
// don't ping on close
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
error_code ec;
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
ws.async_write(sbuf("*"),
[&](error_code ec)
{
++count;
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
});
BEAST_EXPECT(ws.wr_block_);
ws.async_ping("",
[&](error_code ec)
{
++count;
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
});
ws.async_close({},
[&](error_code)
{
++count;
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
});
ios.run();
BEAST_EXPECT(count == 3);
});
{ {
echo_server es{log, kind::async}; echo_server es{log, kind::async};
boost::asio::io_service ios; boost::asio::io_service ios;
@ -437,7 +446,7 @@ public:
run() override run() override
{ {
testPing(); testPing();
testPingSuspend(); testSuspend();
testContHook(); testContHook();
} }
}; };

View File

@ -12,6 +12,8 @@
#include "test.hpp" #include "test.hpp"
#include <boost/asio/write.hpp>
namespace boost { namespace boost {
namespace beast { namespace beast {
namespace websocket { namespace websocket {
@ -104,7 +106,7 @@ public:
}); });
// two part message // two part message
// this triggers "fill the read buffer first" // triggers "fill the read buffer first"
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
w.write_raw(ws, sbuf( w.write_raw(ws, sbuf(
@ -202,6 +204,58 @@ public:
BEAST_EXPECT(to_string(b.data()) == "Hello, World!"); BEAST_EXPECT(to_string(b.data()) == "Hello, World!");
}); });
// masked message, big
doStreamLoop([&](test::stream& ts)
{
echo_server es{log, kind::async_client};
ws_type ws{ts};
ws.next_layer().connect(es.stream());
ws.set_option(pmd);
es.async_handshake();
try
{
w.accept(ws);
std::string const s(2000, '*');
ws.auto_fragment(false);
ws.binary(false);
w.write(ws, buffer(s));
multi_buffer b;
w.read(ws, b);
BEAST_EXPECT(ws.got_text());
BEAST_EXPECT(to_string(b.data()) == s);
ws.next_layer().close();
}
catch(...)
{
ts.close();
throw;
}
});
// close
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log, kind::async};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
// Cause close to be received
es.async_close();
std::size_t count = 0;
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
++count;
if(ec != error::closed)
BOOST_THROW_EXCEPTION(
system_error{ec});
});
ios.run();
BEAST_EXPECT(count == 1);
});
// already closed // already closed
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
@ -249,7 +303,7 @@ public:
doReadTest(w, ws, close_code::protocol_error); doReadTest(w, ws, close_code::protocol_error);
}); });
// receive bad close // bad close
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
put(ws.next_layer().buffer(), cbuf( put(ws.next_layer().buffer(), cbuf(
@ -257,15 +311,6 @@ public:
doFailTest(w, ws, error::failed); doFailTest(w, ws, error::failed);
}); });
// expected cont
doTest(pmd, [&](ws_type& ws)
{
w.write_some(ws, false, boost::asio::null_buffers{});
w.write_raw(ws, cbuf(
0x81, 0x80, 0xff, 0xff, 0xff, 0xff));
doReadTest(w, ws, close_code::protocol_error);
});
// message size above 2^64 // message size above 2^64
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
@ -284,14 +329,6 @@ public:
doFailTest(w, ws, error::failed); doFailTest(w, ws, error::failed);
}); });
// unexpected cont
doTest(pmd, [&](ws_type& ws)
{
w.write_raw(ws, cbuf(
0x80, 0x80, 0xff, 0xff, 0xff, 0xff));
doReadTest(w, ws, close_code::protocol_error);
});
// bad utf8 // bad utf8
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
@ -313,10 +350,23 @@ public:
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
std::string const s = std::string const s =
random_string() + "\x81\x7e\x0f\xa1" +
"Hello, world!" "\xc0"; std::string(4000, '*') + "\xc0";
w.write(ws, buffer(s)); ws.next_layer().append(s);
doReadTest(w, ws, close_code::bad_payload); multi_buffer b;
try
{
do
{
b.commit(w.read_some(ws, b.prepare(4000)));
}
while(! ws.is_message_done());
}
catch(system_error const& se)
{
if(se.code() != error::failed)
throw;
}
}); });
// close frames // close frames
@ -441,6 +491,34 @@ public:
BEAST_EXPECT(to_string(b.data()) == s); BEAST_EXPECT(to_string(b.data()) == s);
}); });
// masked message
doStreamLoop([&](test::stream& ts)
{
echo_server es{log, kind::async_client};
ws_type ws{ts};
ws.next_layer().connect(es.stream());
ws.set_option(pmd);
es.async_handshake();
try
{
w.accept(ws);
std::string const s = "Hello, world!";
ws.auto_fragment(false);
ws.binary(false);
w.write(ws, buffer(s));
multi_buffer b;
w.read(ws, b);
BEAST_EXPECT(ws.got_text());
BEAST_EXPECT(to_string(b.data()) == s);
ws.next_layer().close();
}
catch(...)
{
ts.close();
throw;
}
});
// empty message // empty message
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {
@ -571,11 +649,77 @@ public:
} }
void void
testReadSuspend() testSuspend()
{ {
using boost::asio::buffer; using boost::asio::buffer;
#if 1
// suspend on read block
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
while(! ws.rd_block_)
ios.run_one();
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
ios.run();
BEAST_EXPECT(count == 2);
});
#endif
// suspend on write // suspend on release read block
doFailLoop([&](test::fail_counter& fc)
{
//log << "fc.count()==" << fc.count() << std::endl;
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BOOST_ASSERT(ws.rd_block_);
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
ios.run();
BEAST_EXPECT(count == 2);
});
#if 1
// suspend on write pong
doFailLoop([&](test::fail_counter& fc) doFailLoop([&](test::fail_counter& fc)
{ {
echo_server es{log}; echo_server es{log};
@ -592,25 +736,238 @@ public:
ws.async_read(b, ws.async_read(b,
[&](error_code ec, std::size_t) [&](error_code ec, std::size_t)
{ {
++count;
if(ec) if(ec)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
BEAST_EXPECT(to_string(b.data()) == s); BEAST_EXPECT(to_string(b.data()) == s);
++count;
}); });
BEAST_EXPECT(ws.rd_block_); BEAST_EXPECT(ws.rd_block_);
ws.async_write(buffer(s), ws.async_write(buffer(s),
[&](error_code ec) [&](error_code ec)
{ {
++count;
if(ec) if(ec)
BOOST_THROW_EXCEPTION( BOOST_THROW_EXCEPTION(
system_error{ec}); system_error{ec});
++count;
}); });
BEAST_EXPECT(ws.wr_block_); BEAST_EXPECT(ws.wr_block_);
ios.run(); ios.run();
BEAST_EXPECT(count == 2); BEAST_EXPECT(count == 2);
}); });
// Ignore ping when closing
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
// insert fragmented message with
// a ping in between the frames.
ws.next_layer().append(string_view(
"\x01\x01*"
"\x89\x00"
"\x80\x01*", 8));
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(to_string(b.data()) == "**");
BEAST_EXPECT(++count == 1);
b.consume(b.size());
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 3);
});
});
BEAST_EXPECT(ws.rd_block_);
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(ws.wr_block_);
ios.run();
BEAST_EXPECT(count == 3);
});
// See if we are already closing
doFailLoop([&](test::fail_counter& fc)
{
echo_server es{log};
boost::asio::io_service ios;
stream<test::stream> ws{ios, fc};
ws.next_layer().connect(es.stream());
ws.handshake("localhost", "/");
std::size_t count = 0;
// insert fragmented message with
// a close in between the frames.
ws.next_layer().append(string_view(
"\x01\x01*"
"\x88\x00"
"\x80\x01*", 8));
multi_buffer b;
ws.async_read(b,
[&](error_code ec, std::size_t)
{
if(ec != boost::asio::error::operation_aborted)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 2);
});
BEAST_EXPECT(ws.rd_block_);
ws.async_close({},
[&](error_code ec)
{
if(ec)
BOOST_THROW_EXCEPTION(
system_error{ec});
BEAST_EXPECT(++count == 1);
});
BEAST_EXPECT(ws.wr_block_);
ios.run();
BEAST_EXPECT(count == 2);
});
#endif
}
    // Exercise the frame header parser against well-formed and
    // malformed frame sequences (frame layout per RFC 6455 §5.2).
    // Each `bad(...)` case feeds raw bytes that must cause the
    // subsequent read to fail with an error.
    void
    testParseFrame()
    {
        // Helper: append the raw frame bytes `s` to the stream's
        // input and expect ws.read() to set an error code.
        auto const bad =
            [&](string_view s)
            {
                echo_server es{log};
                boost::asio::io_service ios;
                stream<test::stream> ws{ios};
                ws.next_layer().connect(es.stream());
                ws.handshake("localhost", "/");
                ws.next_layer().append(s);
                error_code ec;
                multi_buffer b;
                ws.read(b, ec);
                // The read must fail on the malformed frame
                BEAST_EXPECT(ec);
            };
        // chopped frame header
        //
        // Only part of a header (FIN+text, 16-bit length prefix,
        // first length byte) arrives first; the read must suspend
        // and then complete once the remaining bytes are written.
        {
            echo_server es{log};
            boost::asio::io_service ios;
            stream<test::stream> ws{ios};
            ws.next_layer().connect(es.stream());
            ws.handshake("localhost", "/");
            ws.next_layer().append(
                "\x81\x7e\x01");
            std::size_t count = 0;
            std::string const s(257, '*');
            error_code ec;
            multi_buffer b;
            ws.async_read(b,
                [&](error_code ec, std::size_t)
                {
                    ++count;
                    BEAST_EXPECTS(! ec, ec.message());
                    BEAST_EXPECT(to_string(b.data()) == s);
                });
            // Run until the read suspends on the partial header
            ios.run_one();
            // Deliver the rest of the header and the 257-byte payload
            es.stream().write_some(
                boost::asio::buffer("\x01" + s));
            ios.run();
            // Handler must have run exactly once
            BEAST_EXPECT(count == 1);
        }
        // new data frame when continuation expected
        bad("\x01\x01*" "\x81\x01*");
        // reserved bits not cleared
        bad("\xb1\x01*");
        bad("\xc1\x01*");
        bad("\xd1\x01*");
        // continuation without an active message
        bad("\x80\x01*");
        // reserved bits not cleared (cont)
        bad("\x01\x01*" "\xb0\x01*");
        bad("\x01\x01*" "\xc0\x01*");
        bad("\x01\x01*" "\xd0\x01*");
        // reserved opcode
        bad("\x83\x01*");
        // fragmented control message
        bad("\x09\x01*");
        // invalid length for control message
        bad("\x89\x7e\x01\x01");
        // reserved bits not cleared (control)
        bad("\xb9\x01*");
        bad("\xc9\x01*");
        bad("\xd9\x01*");
        // unmasked frame from client
        //
        // The peer acts as a client here, so an unmasked frame
        // it receives in the server role must be rejected.
        {
            echo_server es{log, kind::async_client};
            boost::asio::io_service ios;
            stream<test::stream> ws{ios};
            ws.next_layer().connect(es.stream());
            es.async_handshake();
            ws.accept();
            ws.next_layer().append(
                "\x81\x01*");
            error_code ec;
            multi_buffer b;
            ws.read(b, ec);
            BEAST_EXPECT(ec);
        }
        // masked frame from server
        bad("\x81\x80\xff\xff\xff\xff");
        // chopped control frame payload
        //
        // A ping frame's 2-byte payload arrives split across two
        // writes; the read must resume and deliver the following
        // data frame intact.
        {
            echo_server es{log};
            boost::asio::io_service ios;
            stream<test::stream> ws{ios};
            ws.next_layer().connect(es.stream());
            ws.handshake("localhost", "/");
            ws.next_layer().append(
                "\x89\x02*");
            std::size_t count = 0;
            error_code ec;
            multi_buffer b;
            ws.async_read(b,
                [&](error_code ec, std::size_t)
                {
                    ++count;
                    BEAST_EXPECTS(! ec, ec.message());
                    BEAST_EXPECT(to_string(b.data()) == "**");
                });
            // Run until the read suspends on the partial payload
            ios.run_one();
            // Deliver the rest of the ping payload plus a data frame
            es.stream().write_some(
                boost::asio::buffer(
                    "*" "\x81\x02**"));
            ios.run();
            BEAST_EXPECT(count == 1);
        }
        // length not canonical
        //
        // Extended length forms used for values that fit in a
        // shorter encoding must be rejected. string_view with an
        // explicit size is required because the bytes contain NULs.
        bad(string_view("\x81\x7e\x00\x7d", 4));
        bad(string_view("\x81\x7f\x00\x00\x00\x00\x00\x00\xff\xff", 10));
    }
void void
@ -631,6 +988,7 @@ public:
buf, sizeof(buf)}}; buf, sizeof(buf)}};
using boost::asio::asio_handler_is_continuation; using boost::asio::asio_handler_is_continuation;
asio_handler_is_continuation(&op); asio_handler_is_continuation(&op);
pass();
} }
{ {
struct handler struct handler
@ -645,6 +1003,7 @@ public:
handler{}, ws, b, 32, true}; handler{}, ws, b, 32, true};
using boost::asio::asio_handler_is_continuation; using boost::asio::asio_handler_is_continuation;
asio_handler_is_continuation(&op); asio_handler_is_continuation(&op);
pass();
} }
} }
@ -652,7 +1011,8 @@ public:
run() override run() override
{ {
testRead(); testRead();
testReadSuspend(); testSuspend();
testParseFrame();
testContHook(); testContHook();
} }
}; };

View File

@ -34,8 +34,6 @@ public:
req.insert("Connection", "upgrade"); req.insert("Connection", "upgrade");
BEAST_EXPECT(! is_upgrade(req)); BEAST_EXPECT(! is_upgrade(req));
req.insert("Upgrade", "websocket"); req.insert("Upgrade", "websocket");
BEAST_EXPECT(! is_upgrade(req));
req.insert("Sec-WebSocket-Version", "13");
BEAST_EXPECT(is_upgrade(req)); BEAST_EXPECT(is_upgrade(req));
} }

View File

@ -22,7 +22,7 @@ public:
void void
testOptions() testOptions()
{ {
stream<test::stream> ws(ios_); stream<test::stream> ws{ios_};
ws.auto_fragment(true); ws.auto_fragment(true);
ws.write_buffer_size(2048); ws.write_buffer_size(2048);
ws.binary(false); ws.binary(false);
@ -36,48 +36,71 @@ public:
{ {
pass(); pass();
} }
auto const bad =
[&](permessage_deflate const& pmd)
{
stream<test::stream> ws{ios_};
try
{
ws.set_option(pmd);
fail("", __FILE__, __LINE__);
}
catch(std::exception const&)
{
pass();
}
};
{
permessage_deflate pmd;
pmd.server_max_window_bits = 16;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.server_max_window_bits = 8;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.client_max_window_bits = 16;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.client_max_window_bits = 8;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.compLevel = -1;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.compLevel = 10;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.memLevel = 0;
bad(pmd);
}
{
permessage_deflate pmd;
pmd.memLevel = 10;
bad(pmd);
}
} }
//--------------------------------------------------------------------------
template<class Wrap>
void
doTestStream(Wrap const& w,
permessage_deflate const& pmd)
{
using boost::asio::buffer;
// send pong
doTest(pmd, [&](ws_type& ws)
{
w.pong(ws, "");
});
// send auto fragmented message
doTest(pmd, [&](ws_type& ws)
{
ws.auto_fragment(true);
ws.write_buffer_size(8);
w.write(ws, sbuf("Now is the time for all good men"));
multi_buffer b;
w.read(ws, b);
BEAST_EXPECT(to_string(b.data()) == "Now is the time for all good men");
});
// send message with write buffer limit
doTest(pmd, [&](ws_type& ws)
{
std::string s(2000, '*');
ws.write_buffer_size(1200);
w.write(ws, buffer(s.data(), s.size()));
multi_buffer b;
w.read(ws, b);
BEAST_EXPECT(to_string(b.data()) == s);
});
}
//--------------------------------------------------------------------------
void void
run() override run() override
{ {
@ -103,42 +126,6 @@ public:
sizeof(websocket::stream<test::stream&>) << std::endl; sizeof(websocket::stream<test::stream&>) << std::endl;
testOptions(); testOptions();
#if 0
auto const testStream =
[this](permessage_deflate const& pmd)
{
doTestStream(SyncClient{}, pmd);
yield_to(
[&](yield_context yield)
{
doTestStream(AsyncClient{yield}, pmd);
});
};
permessage_deflate pmd;
pmd.client_enable = false;
pmd.server_enable = false;
testStream(pmd);
pmd.client_enable = true;
pmd.server_enable = true;
pmd.client_max_window_bits = 10;
pmd.client_no_context_takeover = false;
pmd.compLevel = 1;
pmd.memLevel = 1;
testStream(pmd);
pmd.client_enable = true;
pmd.server_enable = true;
pmd.client_max_window_bits = 10;
pmd.client_no_context_takeover = true;
pmd.compLevel = 1;
pmd.memLevel = 1;
testStream(pmd);
#endif
} }
}; };

View File

@ -264,7 +264,7 @@ public:
Test const& f, std::size_t limit = 200) Test const& f, std::size_t limit = 200)
{ {
std::size_t n; std::size_t n;
for(n = 0; n <= limit; ++n) for(n = 0; n < limit; ++n)
{ {
test::fail_counter fc{n}; test::fail_counter fc{n};
try try
@ -288,7 +288,7 @@ public:
{ {
// This number has to be high for the // This number has to be high for the
// test that writes the large buffer. // test that writes the large buffer.
static std::size_t constexpr limit = 1000; static std::size_t constexpr limit = 200;
doFailLoop( doFailLoop(
[&](test::fail_counter& fc) [&](test::fail_counter& fc)
@ -308,12 +308,12 @@ public:
{ {
// This number has to be high for the // This number has to be high for the
// test that writes the large buffer. // test that writes the large buffer.
static std::size_t constexpr limit = 1000; static std::size_t constexpr limit = 200;
for(int i = 0; i < 2; ++i) for(int i = 0; i < 2; ++i)
{ {
std::size_t n; std::size_t n;
for(n = 0; n <= limit; ++n) for(n = 0; n < limit; ++n)
{ {
test::fail_counter fc{n}; test::fail_counter fc{n};
test::stream ts{ios_, fc}; test::stream ts{ios_, fc};

View File

@ -73,6 +73,20 @@ public:
BEAST_EXPECT(b.size() == 0); BEAST_EXPECT(b.size() == 0);
}); });
// fragmented message
doTest(pmd, [&](ws_type& ws)
{
ws.auto_fragment(false);
ws.binary(false);
std::string const s = "Hello, world!";
w.write_some(ws, false, buffer(s.data(), 5));
w.write_some(ws, true, buffer(s.data() + 5, s.size() - 5));
multi_buffer b;
w.read(ws, b);
BEAST_EXPECT(ws.got_text());
BEAST_EXPECT(to_string(b.data()) == s);
});
// continuation // continuation
doTest(pmd, [&](ws_type& ws) doTest(pmd, [&](ws_type& ws)
{ {

View File

@ -114,6 +114,7 @@ struct fail_error_code : error_code
class fail_counter class fail_counter
{ {
std::size_t n_; std::size_t n_;
std::size_t i_ = 0;
error_code ec_; error_code ec_;
public: public:
@ -131,13 +132,20 @@ public:
{ {
} }
/// Returns the fail index
std::size_t
count() const
{
return n_;
}
/// Throw an exception on the Nth failure /// Throw an exception on the Nth failure
void void
fail() fail()
{ {
if(n_ > 0) if(i_ < n_)
--n_; ++i_;
if(! n_) if(i_ == n_)
BOOST_THROW_EXCEPTION(system_error{ec_}); BOOST_THROW_EXCEPTION(system_error{ec_});
} }
@ -145,9 +153,9 @@ public:
bool bool
fail(error_code& ec) fail(error_code& ec)
{ {
if(n_ > 0) if(i_ < n_)
--n_; ++i_;
if(! n_) if(i_ == n_)
{ {
ec = ec_; ec = ec_;
return true; return true;

View File

@ -78,6 +78,11 @@ class stream
std::size_t write_max = std::size_t write_max =
(std::numeric_limits<std::size_t>::max)(); (std::numeric_limits<std::size_t>::max)();
~state()
{
BOOST_ASSERT(! op);
}
explicit explicit
state( state(
boost::asio::io_service& ios_, boost::asio::io_service& ios_,
@ -87,11 +92,6 @@ class stream
{ {
} }
~state()
{
BOOST_ASSERT(! op);
}
void void
on_write() on_write()
{ {
@ -119,6 +119,10 @@ public:
/// Destructor /// Destructor
~stream() ~stream()
{ {
{
std::unique_lock<std::mutex> lock{in_->m};
in_->op.reset();
}
auto out = out_.lock(); auto out = out_.lock();
if(out) if(out)
{ {
@ -612,16 +616,17 @@ teardown(
stream& s, stream& s,
boost::system::error_code& ec) boost::system::error_code& ec)
{ {
if(s.in_->fc) if( s.in_->fc &&
{ s.in_->fc->fail(ec))
if(s.in_->fc->fail(ec)) return;
return;
} s.close();
if( s.in_->fc &&
s.in_->fc->fail(ec))
ec = boost::asio::error::eof;
else else
{
s.close();
ec.assign(0, ec.category()); ec.assign(0, ec.category());
}
} }
template<class TeardownHandler> template<class TeardownHandler>
@ -633,10 +638,17 @@ async_teardown(
TeardownHandler&& handler) TeardownHandler&& handler)
{ {
error_code ec; error_code ec;
if(s.in_->fc && s.in_->fc->fail(ec)) if( s.in_->fc &&
s.in_->fc->fail(ec))
return s.get_io_service().post( return s.get_io_service().post(
bind_handler(std::move(handler), ec)); bind_handler(std::move(handler), ec));
s.close(); s.close();
if( s.in_->fc &&
s.in_->fc->fail(ec))
ec = boost::asio::error::eof;
else
ec.assign(0, ec.category());
s.get_io_service().post( s.get_io_service().post(
bind_handler(std::move(handler), ec)); bind_handler(std::move(handler), ec));
} }