// Files
// boost_beast/include/boost/beast/websocket/impl/write.hpp
//
// 743 lines
// 24 KiB
// C++
// Raw Normal View History

//
// 2017-07-24 09:42:36 -07:00
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// 2017-07-20 13:40:34 -07:00
// Official repository: https://github.com/boostorg/beast
//
#ifndef BOOST_BEAST_WEBSOCKET_IMPL_WRITE_HPP
#define BOOST_BEAST_WEBSOCKET_IMPL_WRITE_HPP
// 2017-07-20 13:40:34 -07:00
#include <boost/beast/websocket/detail/mask.hpp>
#include <boost/beast/core/async_op_base.hpp>
// 2017-07-20 13:40:34 -07:00
#include <boost/beast/core/bind_handler.hpp>
// 2019-02-02 20:44:04 -08:00
#include <boost/beast/core/buffer_size.hpp>
#include <boost/beast/core/buffers_cat.hpp>
#include <boost/beast/core/buffers_prefix.hpp>
// 2018-11-11 20:59:57 -08:00
#include <boost/beast/core/buffers_range.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
// 2017-07-20 13:40:34 -07:00
#include <boost/beast/core/flat_static_buffer.hpp>
// 2019-02-04 21:52:54 -08:00
#include <boost/beast/core/stream_traits.hpp>
// 2019-01-20 12:25:30 -08:00
#include <boost/beast/core/detail/bind_continuation.hpp>
// 2017-07-20 13:40:34 -07:00
#include <boost/beast/core/detail/clamp.hpp>
#include <boost/beast/core/detail/config.hpp>
#include <boost/beast/websocket/detail/frame.hpp>
// 2017-08-12 20:50:23 -07:00
#include <boost/asio/coroutine.hpp>
#include <boost/assert.hpp>
// 2017-06-08 05:54:47 -07:00
#include <boost/config.hpp>
// 2017-05-22 15:30:12 -07:00
#include <boost/throw_exception.hpp>
#include <algorithm>
#include <memory>
// 2017-07-20 13:40:34 -07:00
namespace boost {
namespace beast {
namespace websocket {
/** Composed operation implementing async_write_some.

    Builds the frame header at construction, selects one of five
    write strategies, then runs as a stackless coroutine in
    operator() (defined out-of-line below).
*/
template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
class stream<NextLayer, deflateSupported>::write_some_op
    : public beast::async_op_base<
        Handler, beast::executor_type<stream>>
    , public net::coroutine
{
    // Write strategy, chosen once at construction
    enum
    {
        do_nomask_nofrag,   // one unmasked frame
        do_nomask_frag,     // several unmasked frames
        do_mask_nofrag,     // one masked frame, multiple writes
        do_mask_frag,       // several masked frames
        do_deflate          // compressed frames
    };

    stream& ws_;
    buffers_suffix<Buffers> cb_;        // caller's buffers, consumed as frames go out
    detail::frame_header fh_;           // header of the frame being sent
    detail::prepared_key key_;          // prepared masking key (client role only)
    std::size_t bytes_transferred_ = 0; // payload bytes reported to the handler
    std::size_t remain_;                // payload bytes still unsent
    std::size_t in_;                    // input consumed by the last deflate call
    int how_;                           // one of the enumerators above
    bool fin_;                          // caller's fin flag for this message piece
    bool more_ = false; // for ubsan
    bool cont_ = false;

public:
    static constexpr int id = 2; // for soft_mutex

    template<class Handler_>
    write_some_op(
        Handler_&& h,
        stream<NextLayer, deflateSupported>& ws,
        bool fin,
        Buffers const& bs)
        : beast::async_op_base<Handler,
            beast::executor_type<stream>>(
                std::forward<Handler_>(h), ws.get_executor())
        , ws_(ws)
        , cb_(bs)
        , fin_(fin)
    {
        auto& im = *ws_.impl_;

        // Set up the outgoing frame header. A fresh message
        // starts only when no continuation is in progress.
        if(! im.wr_cont)
        {
            im.begin_msg();
            fh_.rsv1 = im.wr_compress;
        }
        else
        {
            fh_.rsv1 = false;
        }
        fh_.rsv2 = false;
        fh_.rsv3 = false;
        fh_.op = im.wr_cont ?
            detail::opcode::cont : im.wr_opcode;
        fh_.mask = im.role == role_type::client;

        // Choose a write algorithm
        if(im.wr_compress)
        {
            how_ = do_deflate;
        }
        else if(! fh_.mask)
        {
            if(! im.wr_frag)
            {
                how_ = do_nomask_nofrag;
            }
            else
            {
                // Auto-fragment only when the payload
                // exceeds one write buffer.
                BOOST_ASSERT(im.wr_buf_size != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > im.wr_buf_size)
                    how_ = do_nomask_frag;
                else
                    how_ = do_nomask_nofrag;
            }
        }
        else
        {
            if(! im.wr_frag)
            {
                how_ = do_mask_nofrag;
            }
            else
            {
                BOOST_ASSERT(im.wr_buf_size != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > im.wr_buf_size)
                    how_ = do_mask_frag;
                else
                    how_ = do_mask_nofrag;
            }
        }

        // Start the coroutine; cont == false marks that we may
        // still be on the initiating caller's stack.
        (*this)({}, 0, false);
    }

    void operator()(
        error_code ec = {},
        std::size_t bytes_transferred = 0,
        bool cont = true);
};
template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
void
stream<NextLayer, deflateSupported>::
write_some_op<Buffers, Handler>::
operator()(
error_code ec,
std::size_t bytes_transferred,
bool cont)
{
using beast::detail::clamp;
std::size_t n;
net::mutable_buffer b;
auto& impl = *ws_.impl_;
BOOST_ASIO_CORO_REENTER(*this)
{
// Acquire the write lock
if(! impl.wr_block.try_lock(this))
{
do_suspend:
2017-08-12 20:50:23 -07:00
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
impl.paused_wr.emplace(std::move(*this));
impl.wr_block.lock(this);
2017-08-12 20:50:23 -07:00
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::post(std::move(*this));
BOOST_ASSERT(impl.wr_block.is_locked(this));
}
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
//------------------------------------------------------------------
2017-08-12 20:50:23 -07:00
if(how_ == do_nomask_nofrag)
{
2019-01-20 12:25:30 -08:00
// send a single frame
2017-08-12 20:50:23 -07:00
fh_.fin = fin_;
fh_.len = buffer_size(cb_);
2019-01-20 12:25:30 -08:00
impl.wr_fb.clear();
2017-08-12 20:50:23 -07:00
detail::write<flat_static_buffer_base>(
2019-01-20 12:25:30 -08:00
impl.wr_fb, fh_);
impl.wr_cont = ! fin_;
2017-08-12 20:50:23 -07:00
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream,
buffers_cat(impl.wr_fb.data(), cb_),
beast::detail::bind_continuation(std::move(*this)));
bytes_transferred_ += clamp(fh_.len);
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
goto upcall;
}
2017-08-12 20:50:23 -07:00
//------------------------------------------------------------------
2019-01-20 12:25:30 -08:00
if(how_ == do_nomask_frag)
{
2019-01-20 12:25:30 -08:00
// send multiple frames
2017-08-12 20:50:23 -07:00
for(;;)
{
2019-01-20 12:25:30 -08:00
n = clamp(remain_, impl.wr_buf_size);
fh_.len = n;
remain_ -= n;
2017-08-12 20:50:23 -07:00
fh_.fin = fin_ ? remain_ == 0 : false;
2019-01-20 12:25:30 -08:00
impl.wr_fb.clear();
2017-08-12 20:50:23 -07:00
detail::write<flat_static_buffer_base>(
2019-01-20 12:25:30 -08:00
impl.wr_fb, fh_);
impl.wr_cont = ! fin_;
2017-08-12 20:50:23 -07:00
// Send frame
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream, buffers_cat(
impl.wr_fb.data(),
buffers_prefix(clamp(fh_.len), cb_)),
beast::detail::bind_continuation(std::move(*this)));
n = clamp(fh_.len); // restore `n` on yield
bytes_transferred_ += n;
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
if(remain_ == 0)
break;
cb_.consume(n);
2017-08-12 20:50:23 -07:00
fh_.op = detail::opcode::cont;
2019-01-20 12:25:30 -08:00
// Give up the write lock in between each frame
// so that outgoing control frames might be sent.
impl.wr_block.unlock(this);
if( impl.paused_close.maybe_invoke() ||
impl.paused_rd.maybe_invoke() ||
impl.paused_ping.maybe_invoke())
2017-08-12 20:50:23 -07:00
{
2019-01-20 12:25:30 -08:00
BOOST_ASSERT(impl.wr_block.is_locked());
goto do_suspend;
2017-08-12 20:50:23 -07:00
}
2019-01-20 12:25:30 -08:00
impl.wr_block.lock(this);
2017-08-12 20:50:23 -07:00
}
goto upcall;
}
2017-08-12 20:50:23 -07:00
//------------------------------------------------------------------
2019-01-20 12:25:30 -08:00
if(how_ == do_mask_nofrag)
{
2019-01-20 12:25:30 -08:00
// send a single frame using multiple writes
remain_ = beast::buffer_size(cb_);
2017-08-12 20:50:23 -07:00
fh_.fin = fin_;
fh_.len = remain_;
fh_.key = ws_.create_mask();
2017-08-12 20:50:23 -07:00
detail::prepare_key(key_, fh_.key);
2019-01-20 12:25:30 -08:00
impl.wr_fb.clear();
2017-08-12 20:50:23 -07:00
detail::write<flat_static_buffer_base>(
2019-01-20 12:25:30 -08:00
impl.wr_fb, fh_);
n = clamp(remain_, impl.wr_buf_size);
net::buffer_copy(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), cb_);
detail::mask_inplace(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), key_);
2017-08-12 20:50:23 -07:00
remain_ -= n;
2019-01-20 12:25:30 -08:00
impl.wr_cont = ! fin_;
// write frame header and some payload
2017-08-12 20:50:23 -07:00
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream, buffers_cat(
impl.wr_fb.data(),
net::buffer(impl.wr_buf.get(), n)),
beast::detail::bind_continuation(std::move(*this)));
// VFALCO What about consuming the buffer on error?
bytes_transferred_ +=
2019-01-20 12:25:30 -08:00
bytes_transferred - impl.wr_fb.size();
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
while(remain_ > 0)
{
2019-01-20 12:25:30 -08:00
cb_.consume(impl.wr_buf_size);
n = clamp(remain_, impl.wr_buf_size);
net::buffer_copy(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), cb_);
detail::mask_inplace(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), key_);
2017-08-12 20:50:23 -07:00
remain_ -= n;
2019-01-20 12:25:30 -08:00
// write more payload
2017-08-12 20:50:23 -07:00
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream,
net::buffer(impl.wr_buf.get(), n),
beast::detail::bind_continuation(std::move(*this)));
bytes_transferred_ += bytes_transferred;
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
}
goto upcall;
}
2017-08-12 20:50:23 -07:00
//------------------------------------------------------------------
2019-01-20 12:25:30 -08:00
if(how_ == do_mask_frag)
{
2019-01-20 12:25:30 -08:00
// send multiple frames
2017-08-12 20:50:23 -07:00
for(;;)
{
2019-01-20 12:25:30 -08:00
n = clamp(remain_, impl.wr_buf_size);
2017-08-12 20:50:23 -07:00
remain_ -= n;
fh_.len = n;
fh_.key = ws_.create_mask();
2017-08-12 20:50:23 -07:00
fh_.fin = fin_ ? remain_ == 0 : false;
detail::prepare_key(key_, fh_.key);
net::buffer_copy(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), cb_);
detail::mask_inplace(net::buffer(
2019-01-20 12:25:30 -08:00
impl.wr_buf.get(), n), key_);
impl.wr_fb.clear();
2017-08-12 20:50:23 -07:00
detail::write<flat_static_buffer_base>(
2019-01-20 12:25:30 -08:00
impl.wr_fb, fh_);
impl.wr_cont = ! fin_;
2017-08-12 20:50:23 -07:00
// Send frame
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream, buffers_cat(
impl.wr_fb.data(),
net::buffer(impl.wr_buf.get(), n)),
beast::detail::bind_continuation(std::move(*this)));
n = bytes_transferred - impl.wr_fb.size();
bytes_transferred_ += n;
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
if(remain_ == 0)
break;
cb_.consume(n);
2017-08-12 20:50:23 -07:00
fh_.op = detail::opcode::cont;
2019-01-20 12:25:30 -08:00
// Give up the write lock in between each frame
// so that outgoing control frames might be sent.
impl.wr_block.unlock(this);
if( impl.paused_close.maybe_invoke() ||
impl.paused_rd.maybe_invoke() ||
impl.paused_ping.maybe_invoke())
2017-08-12 20:50:23 -07:00
{
2019-01-20 12:25:30 -08:00
BOOST_ASSERT(impl.wr_block.is_locked());
goto do_suspend;
2017-08-12 20:50:23 -07:00
}
2019-01-20 12:25:30 -08:00
impl.wr_block.lock(this);
2017-08-12 20:50:23 -07:00
}
goto upcall;
}
2017-08-12 20:50:23 -07:00
//------------------------------------------------------------------
2019-01-20 12:25:30 -08:00
if(how_ == do_deflate)
{
2019-01-20 12:25:30 -08:00
// send compressed frames
2017-08-12 20:50:23 -07:00
for(;;)
{
2019-01-20 12:25:30 -08:00
b = net::buffer(impl.wr_buf.get(),
impl.wr_buf_size);
more_ = impl.deflate(b, cb_, fin_, in_, ec);
if(impl.check_stop_now(ec))
2017-08-12 20:50:23 -07:00
goto upcall;
n = buffer_size(b);
if(n == 0)
{
2019-01-20 12:25:30 -08:00
// The input was consumed, but there is
// no output due to compression latency.
2017-08-12 20:50:23 -07:00
BOOST_ASSERT(! fin_);
BOOST_ASSERT(buffer_size(cb_) == 0);
goto upcall;
}
if(fh_.mask)
{
fh_.key = ws_.create_mask();
2017-08-12 20:50:23 -07:00
detail::prepared_key key;
detail::prepare_key(key, fh_.key);
detail::mask_inplace(b, key);
}
fh_.fin = ! more_;
fh_.len = n;
2019-01-20 12:25:30 -08:00
impl.wr_fb.clear();
2017-08-12 20:50:23 -07:00
detail::write<
2019-01-20 12:25:30 -08:00
flat_static_buffer_base>(impl.wr_fb, fh_);
impl.wr_cont = ! fin_;
2017-08-12 20:50:23 -07:00
// Send frame
BOOST_ASIO_CORO_YIELD
2019-01-20 12:25:30 -08:00
net::async_write(impl.stream, buffers_cat(
impl.wr_fb.data(), b),
beast::detail::bind_continuation(std::move(*this)));
bytes_transferred_ += in_;
2019-01-20 12:25:30 -08:00
if(impl.check_stop_now(ec))
goto upcall;
2017-08-12 20:50:23 -07:00
if(more_)
{
fh_.op = detail::opcode::cont;
fh_.rsv1 = false;
2019-01-20 12:25:30 -08:00
// Give up the write lock in between each frame
// so that outgoing control frames might be sent.
impl.wr_block.unlock(this);
if( impl.paused_close.maybe_invoke() ||
impl.paused_rd.maybe_invoke() ||
impl.paused_ping.maybe_invoke())
2017-08-12 20:50:23 -07:00
{
2019-01-20 12:25:30 -08:00
BOOST_ASSERT(impl.wr_block.is_locked());
goto do_suspend;
2017-08-12 20:50:23 -07:00
}
2019-01-20 12:25:30 -08:00
impl.wr_block.lock(this);
2017-08-12 20:50:23 -07:00
}
else
{
if(fh_.fin)
2019-01-20 12:25:30 -08:00
impl.do_context_takeover_write(impl.role);
2017-08-12 20:50:23 -07:00
goto upcall;
}
}
}
2017-08-12 20:50:23 -07:00
//--------------------------------------------------------------------------
2017-08-12 20:50:23 -07:00
upcall:
2019-01-20 12:25:30 -08:00
impl.wr_block.unlock(this);
impl.paused_close.maybe_invoke()
|| impl.paused_rd.maybe_invoke()
|| impl.paused_ping.maybe_invoke();
if(! cont)
{
BOOST_ASIO_CORO_YIELD
net::post(bind_front_handler(
std::move(*this), ec, bytes_transferred_));
}
this->invoke(cont, ec, bytes_transferred_);
}
}
// 2017-06-20 21:28:17 -07:00
//------------------------------------------------------------------------------
/** Write part of a message (throwing overload).

    Delegates to the error_code overload and throws
    system_error on failure.

    @return The number of payload bytes written.
*/
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin, ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const n = write_some(fin, buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return n;
}
/** Write part of a message (synchronous, error_code overload).

    Sends @c buffers as one frame, several auto-fragmented
    frames, or compressed frames, depending on the stream's
    role (client frames are masked) and its per-message
    compression / auto-fragment settings.

    @return The number of payload bytes written.
*/
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin,
    ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    using beast::detail::clamp;
    auto& impl = *impl_;
    std::size_t total = 0;
    ec = {};
    if(impl.check_stop_now(ec))
        return total;

    // Build the frame header; a new message starts only
    // when no continuation is in progress.
    detail::frame_header fh;
    if(! impl.wr_cont)
    {
        impl.begin_msg();
        fh.rsv1 = impl.wr_compress;
    }
    else
    {
        fh.rsv1 = false;
    }
    fh.rsv2 = false;
    fh.rsv3 = false;
    fh.op = impl.wr_cont ?
        detail::opcode::cont : impl.wr_opcode;
    fh.mask = impl.role == role_type::client;
    auto remain = buffer_size(buffers);
    if(impl.wr_compress)
    {
        // compressed frames
        buffers_suffix<
            ConstBufferSequence> cb(buffers);
        for(;;)
        {
            auto b = net::buffer(
                impl.wr_buf.get(), impl.wr_buf_size);
            auto const more = impl.deflate(
                b, cb, fin, total, ec);
            if(impl.check_stop_now(ec))
                return total;
            auto const n = buffer_size(b);
            if(n == 0)
            {
                // The input was consumed, but there
                // is no output due to compression
                // latency.
                BOOST_ASSERT(! fin);
                BOOST_ASSERT(buffer_size(cb) == 0);
                fh.fin = false;
                break;
            }
            if(fh.mask)
            {
                fh.key = this->create_mask();
                detail::prepared_key key;
                detail::prepare_key(key, fh.key);
                detail::mask_inplace(b, key);
            }
            fh.fin = ! more;
            fh.len = n;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            impl.wr_cont = ! fin;
            net::write(impl.stream,
                buffers_cat(fh_buf.data(), b), ec);
            if(impl.check_stop_now(ec))
                return total;
            if(! more)
                break;
            fh.op = detail::opcode::cont;
            fh.rsv1 = false;
        }
        if(fh.fin)
            impl.do_context_takeover_write(impl.role);
    }
    else if(! fh.mask)
    {
        if(! impl.wr_frag)
        {
            // no mask, no autofrag
            fh.fin = fin;
            fh.len = remain;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            impl.wr_cont = ! fin;
            net::write(impl.stream,
                buffers_cat(fh_buf.data(), buffers), ec);
            if(impl.check_stop_now(ec))
                return total;
            total += remain;
        }
        else
        {
            // no mask, autofrag
            BOOST_ASSERT(impl.wr_buf_size != 0);
            buffers_suffix<
                ConstBufferSequence> cb{buffers};
            for(;;)
            {
                auto const n = clamp(remain, impl.wr_buf_size);
                remain -= n;
                fh.len = n;
                fh.fin = fin ? remain == 0 : false;
                detail::fh_buffer fh_buf;
                detail::write<
                    flat_static_buffer_base>(fh_buf, fh);
                impl.wr_cont = ! fin;
                net::write(impl.stream,
                    beast::buffers_cat(fh_buf.data(),
                        beast::buffers_prefix(n, cb)), ec);
                total += n;
                if(impl.check_stop_now(ec))
                    return total;
                if(remain == 0)
                    break;
                fh.op = detail::opcode::cont;
                cb.consume(n);
            }
        }
    }
    else if(! impl.wr_frag)
    {
        // mask, no autofrag: one frame, masked through the
        // write buffer in chunks
        fh.fin = fin;
        fh.len = remain;
        fh.key = this->create_mask();
        detail::prepared_key key;
        detail::prepare_key(key, fh.key);
        detail::fh_buffer fh_buf;
        detail::write<
            flat_static_buffer_base>(fh_buf, fh);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        {
            // first chunk also carries the frame header
            auto const n =
                clamp(remain, impl.wr_buf_size);
            auto const b =
                net::buffer(impl.wr_buf.get(), n);
            net::buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            impl.wr_cont = ! fin;
            net::write(impl.stream,
                buffers_cat(fh_buf.data(), b), ec);
            total += n;
            if(impl.check_stop_now(ec))
                return total;
        }
        while(remain > 0)
        {
            auto const n =
                clamp(remain, impl.wr_buf_size);
            auto const b =
                net::buffer(impl.wr_buf.get(), n);
            net::buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            net::write(impl.stream, b, ec);
            total += n;
            if(impl.check_stop_now(ec))
                return total;
        }
    }
    else
    {
        // mask, autofrag: a fresh key per frame
        BOOST_ASSERT(impl.wr_buf_size != 0);
        buffers_suffix<
            ConstBufferSequence> cb(buffers);
        for(;;)
        {
            fh.key = this->create_mask();
            detail::prepared_key key;
            detail::prepare_key(key, fh.key);
            auto const n =
                clamp(remain, impl.wr_buf_size);
            auto const b =
                net::buffer(impl.wr_buf.get(), n);
            net::buffer_copy(b, cb);
            detail::mask_inplace(b, key);
            fh.len = n;
            remain -= n;
            fh.fin = fin ? remain == 0 : false;
            impl.wr_cont = ! fh.fin;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            net::write(impl.stream,
                buffers_cat(fh_buf.data(), b), ec);
            total += n;
            if(impl.check_stop_now(ec))
                return total;
            if(remain == 0)
                break;
            fh.op = detail::opcode::cont;
            cb.consume(n);
        }
    }
    return total;
}
/** Start an asynchronous partial-message write.

    Initiates a write_some_op composed operation; the handler
    is invoked with (error_code, bytes_transferred).
*/
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write_some(bool fin,
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    // The operation starts itself from its constructor
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>(
            std::move(init.completion_handler), *this, fin, bs);
    return init.result.get();
}
// 2017-06-20 21:28:17 -07:00
//------------------------------------------------------------------------------
/** Write a complete message (throwing overload).

    Delegates to the error_code overload and throws
    system_error on failure.

    @return The number of payload bytes written.
*/
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const n = write(buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return n;
}
/** Write a complete message (error_code overload).

    A complete message is simply a partial write with
    the fin flag set.

    @return The number of payload bytes written.
*/
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    return write_some(true, buffers, ec);
}
template<class NextLayer, bool deflateSupported>
2017-06-20 21:28:17 -07:00
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
2017-06-20 21:28:17 -07:00
async_write(
ConstBufferSequence const& bs, WriteHandler&& handler)
{
static_assert(is_async_stream<next_layer_type>::value,
"AsyncStream requirements not met");
static_assert(net::is_const_buffer_sequence<
2017-06-20 21:28:17 -07:00
ConstBufferSequence>::value,
"ConstBufferSequence requirements not met");
BOOST_BEAST_HANDLER_INIT(
WriteHandler, void(error_code, std::size_t));
write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
2019-01-20 12:25:30 -08:00
WriteHandler, void(error_code, std::size_t))>(
std::move(init.completion_handler), *this, true, bs);
2017-06-20 21:28:17 -07:00
return init.result.get();
}
} // websocket
} // beast
// 2017-07-20 13:40:34 -07:00
} // boost
#endif