//
// Copyright (c) 2013-2016 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BEAST_WEBSOCKET_IMPL_WRITE_FRAME_OP_HPP
#define BEAST_WEBSOCKET_IMPL_WRITE_FRAME_OP_HPP

#include <beast/core/buffer_cat.hpp>
#include <beast/core/bind_handler.hpp>
#include <beast/core/consuming_buffers.hpp>
#include <beast/core/handler_alloc.hpp>
#include <beast/core/static_streambuf.hpp>
#include <beast/websocket/detail/frame.hpp>
#include <algorithm>
#include <cassert>
#include <memory>

namespace beast {
namespace websocket {

// write a frame
//
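// This is a composed asynchronous operation which writes one complete
// WebSocket frame to the next layer. In the client role the payload is
// masked in sections through a temporary buffer obtained from the
// handler's allocation hooks; in the server role the header and payload
// are written with a single call. If another write is already in
// progress, the operation suspends itself and resumes when the stream
// becomes available again.
//
// A minimal usage sketch, assuming the stream's public
// async_write_frame(fin, buffers, handler) initiating function is what
// constructs this operation (that interface is an assumption here, it
// is not defined in this file):
//
//     boost::asio::io_service ios;
//     stream<boost::asio::ip::tcp::socket> ws{ios};
//     // ... connect and handshake elided ...
//     std::string payload = "Hello, world!";
//     ws.async_write_frame(true, boost::asio::buffer(payload),
//         [](error_code ec)
//         {
//             // invoked exactly once with the result of the write
//         });
//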
template<class NextLayer>
template<class Buffers, class Handler>
class stream<NextLayer>::write_frame_op
{
    using alloc_type =
        handler_alloc<char, Handler>;

    struct data : op
    {
        stream<NextLayer>& ws;
        consuming_buffers<Buffers> cb;
        Handler h;
        detail::frame_header fh;
        detail::fh_streambuf fh_buf;
        detail::prepared_key_type key;
        void* tmp;
        std::size_t tmp_size;
        std::uint64_t remain;
        bool cont;
        int state = 0;
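
        // Values of `state`:
        //   0      start: suspend if another write is in progress,
        //          or abort if the stream has failed or sent a close
        //   1      send the header plus the payload (entire payload
        //          when unmasked, first masked section otherwise)
        //   2      send the next section of a masked payload
        //   3, 4   resuming after suspension behind another write
        //   99     finished; deliver the result to the handler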

        template<class DeducedHandler>
        data(DeducedHandler&& h_, stream<NextLayer>& ws_,
                bool fin, Buffers const& bs)
            : ws(ws_)
            , cb(bs)
            , h(std::forward<DeducedHandler>(h_))
            , cont(boost_asio_handler_cont_helpers::
                is_continuation(h))
        {
            // Continuation frames use opcode::cont; the first
            // frame of a message uses the stream's write opcode.
            fh.op = ws.wr_cont_ ?
                opcode::cont : ws.wr_opcode_;
            ws.wr_cont_ = ! fin;
            fh.fin = fin;
            fh.rsv1 = false;
            fh.rsv2 = false;
            fh.rsv3 = false;
            fh.len = boost::asio::buffer_size(cb);
            fh.mask = ws.role_ == detail::role_type::client;
            if(fh.mask)
            {
                // Client role: the payload must be masked. Allocate a
                // temporary buffer, sized to the smaller of the payload
                // length and the stream's mask buffer size, using the
                // handler's allocation hooks.
                fh.key = ws.maskgen_();
                detail::prepare_key(key, fh.key);
                tmp_size = detail::clamp(
                    fh.len, ws.mask_buf_size_);
                tmp = boost_asio_handler_alloc_helpers::
                    allocate(tmp_size, h);
                remain = fh.len;
            }
            else
            {
                tmp = nullptr;
            }
            // Serialize the frame header into the fixed-size buffer
            detail::write<static_streambuf>(fh_buf, fh);
        }

        ~data()
        {
            if(tmp)
                boost_asio_handler_alloc_helpers::
                    deallocate(tmp, tmp_size, h);
        }
    };
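
    // The bulk of the operation state lives behind a shared_ptr so the
    // operation object itself stays small and inexpensive to move and
    // copy as it is passed around as an intermediate completion handler.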
    std::shared_ptr<data> d_;

public:
    write_frame_op(write_frame_op&&) = default;
    write_frame_op(write_frame_op const&) = default;

    template<class DeducedHandler, class... Args>
    write_frame_op(DeducedHandler&& h,
            stream<NextLayer>& ws, Args&&... args)
        : d_(std::make_shared<data>(
            std::forward<DeducedHandler>(h), ws,
            std::forward<Args>(args)...))
    {
        (*this)(error_code{}, false);
    }

    void operator()()
    {
        (*this)(error_code{});
    }

    void operator()(error_code ec, std::size_t);

    void operator()(error_code ec, bool again = true);
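
    // These hooks forward Asio's allocation, deallocation, invocation,
    // and continuation queries to the final completion handler, so the
    // caller's customizations apply to the whole composed operation.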
    friend
    void* asio_handler_allocate(
        std::size_t size, write_frame_op* op)
    {
        return boost_asio_handler_alloc_helpers::
            allocate(size, op->d_->h);
    }

    friend
    void asio_handler_deallocate(
        void* p, std::size_t size, write_frame_op* op)
    {
        return boost_asio_handler_alloc_helpers::
            deallocate(p, size, op->d_->h);
    }

    friend
    bool asio_handler_is_continuation(write_frame_op* op)
    {
        return op->d_->cont;
    }

    template<class Function>
    friend
    void asio_handler_invoke(Function&& f, write_frame_op* op)
    {
        return boost_asio_handler_invoke_helpers::
            invoke(f, op->d_->h);
    }
};
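
// This overload is the completion handler invoked when an asynchronous
// write against the next layer finishes. It latches any transport error
// on the stream and forwards control to the state machine below.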
template<class NextLayer>
template<class Buffers, class Handler>
void
stream<NextLayer>::
write_frame_op<Buffers, Handler>::
operator()(error_code ec, std::size_t)
{
    auto& d = *d_;
    if(ec)
        d.ws.failed_ = true;
    (*this)(ec);
}
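
// Main loop of the operation, written as a resumable state machine.
// Each asynchronous step initiates I/O and returns; its completion
// handler re-enters here with d.state selecting the next step.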
template<class NextLayer>
template<class Buffers, class Handler>
void
stream<NextLayer>::
write_frame_op<Buffers, Handler>::
operator()(error_code ec, bool again)
{
    using boost::asio::buffer_copy;
    using boost::asio::mutable_buffers_1;
    auto& d = *d_;
    d.cont = d.cont || again;
    if(ec)
        goto upcall;
    for(;;)
    {
        switch(d.state)
        {
        case 0:
            if(d.ws.wr_block_)
            {
                // Another write is in progress; suspend this
                // operation until the stream becomes available.
                d.state = 3;
                d.ws.wr_op_.template emplace<
                    write_frame_op>(std::move(*this));
                return;
            }
            if(d.ws.failed_ || d.ws.wr_close_)
            {
                // The stream has failed or a close frame was sent;
                // deliver operation_aborted to the handler.
                d.state = 99;
                d.ws.get_io_service().post(
                    bind_handler(std::move(*this),
                        boost::asio::error::operation_aborted));
                return;
            }
            // fall through

        case 1:
        {
            if(! d.fh.mask)
            {
                // Server role: send the header and the entire
                // payload in one write, no masking required.
                d.state = 99;
                assert(! d.ws.wr_block_);
                d.ws.wr_block_ = &d;
                boost::asio::async_write(d.ws.stream_,
                    buffer_cat(d.fh_buf.data(), d.cb),
                    std::move(*this));
                return;
            }
            // Client role: mask the first section of the payload
            // into the temporary buffer.
            auto const n =
                detail::clamp(d.remain, d.tmp_size);
            mutable_buffers_1 mb{d.tmp, n};
            buffer_copy(mb, d.cb);
            d.cb.consume(n);
            d.remain -= n;
            detail::mask_inplace(mb, d.key);
            // send header and payload
            d.state = d.remain > 0 ? 2 : 99;
            assert(! d.ws.wr_block_);
            d.ws.wr_block_ = &d;
            boost::asio::async_write(d.ws.stream_,
                buffer_cat(d.fh_buf.data(),
                    mb), std::move(*this));
            return;
        }

        // sent masked payload
        case 2:
        {
            // Mask and send the next section of the payload.
            auto const n =
                detail::clamp(d.remain, d.tmp_size);
            mutable_buffers_1 mb{d.tmp,
                static_cast<std::size_t>(n)};
            buffer_copy(mb, d.cb);
            d.cb.consume(n);
            d.remain -= n;
            detail::mask_inplace(mb, d.key);
            // send payload
            if(d.remain == 0)
                d.state = 99;
            assert(d.ws.wr_block_ == &d);
            boost::asio::async_write(
                d.ws.stream_, mb, std::move(*this));
            return;
        }

        // resumed after being suspended behind another write
        case 3:
            d.state = 4;
            d.ws.get_io_service().post(bind_handler(
                std::move(*this), ec));
            return;

        case 4:
            if(d.ws.failed_ || d.ws.wr_close_)
            {
                // call handler
                ec = boost::asio::error::operation_aborted;
                goto upcall;
            }
            d.state = 1;
            break;

        case 99:
            goto upcall;
        }
    }
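
// Common exit: free the temporary masking buffer, release the write
// block if this operation owns it, let a suspended read operation
// resume, then deliver the result to the final handler.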
upcall:
    if(d.tmp)
    {
        boost_asio_handler_alloc_helpers::
            deallocate(d.tmp, d.tmp_size, d.h);
        d.tmp = nullptr;
    }
    if(d.ws.wr_block_ == &d)
        d.ws.wr_block_ = nullptr;
    d.ws.rd_op_.maybe_invoke();
    d.h(ec);
}

} // websocket
} // beast

#endif