//
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//

#ifndef BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP
#define BOOST_BEAST_WEBSOCKET_IMPL_WRITE_IPP
#include <boost/beast/core/async_op_base.hpp>
#include <boost/beast/core/bind_handler.hpp>
#include <boost/beast/core/buffers_cat.hpp>
#include <boost/beast/core/buffers_prefix.hpp>
#include <boost/beast/core/buffers_range.hpp>
#include <boost/beast/core/buffers_suffix.hpp>
#include <boost/beast/core/flat_static_buffer.hpp>
#include <boost/beast/core/type_traits.hpp>
#include <boost/beast/core/detail/clamp.hpp>
#include <boost/beast/core/detail/config.hpp>
#include <boost/beast/core/detail/get_executor_type.hpp>
#include <boost/beast/websocket/detail/frame.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/write.hpp>
#include <boost/assert.hpp>
#include <boost/config.hpp>
#include <boost/throw_exception.hpp>
#include <algorithm>
#include <memory>
namespace boost {
namespace beast {
namespace websocket {

namespace detail {

// Compress a buffer sequence
// Returns: `true` if more calls are needed
//
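// On return, `out` is adjusted to span only the bytes
// actually produced by the deflater, `total_in` holds the
// number of input bytes consumed, and the same number of
// bytes is removed from the front of `cb`.
//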
template<>
template<class ConstBufferSequence>
bool
stream_base<true>::
deflate(
    net::mutable_buffer& out,
    buffers_suffix<ConstBufferSequence>& cb,
    bool fin,
    std::size_t& total_in,
    error_code& ec)
{
    using net::buffer;
    BOOST_ASSERT(out.size() >= 6);
    auto& zo = this->pmd_->zo;
    zlib::z_params zs;
    zs.avail_in = 0;
    zs.next_in = nullptr;
    zs.avail_out = out.size();
    zs.next_out = out.data();
    for(auto in : beast::buffers_range_ref(cb))
    {
        zs.avail_in = in.size();
        if(zs.avail_in == 0)
            continue;
        zs.next_in = in.data();
        zo.write(zs, zlib::Flush::none, ec);
        if(ec)
        {
            if(ec != zlib::error::need_buffers)
                return false;
            BOOST_ASSERT(zs.avail_out == 0);
            BOOST_ASSERT(zs.total_out == out.size());
            ec.assign(0, ec.category());
            break;
        }
        if(zs.avail_out == 0)
        {
            BOOST_ASSERT(zs.total_out == out.size());
            break;
        }
        BOOST_ASSERT(zs.avail_in == 0);
    }
    total_in = zs.total_in;
    cb.consume(zs.total_in);
    if(zs.avail_out > 0 && fin)
    {
        auto const remain = net::buffer_size(cb);
        if(remain == 0)
        {
            // Inspired by Mark Adler
            // https://github.com/madler/zlib/issues/149
            //
            // VFALCO We could do this flush twice depending
            //        on how much space is in the output.
            zo.write(zs, zlib::Flush::block, ec);
            BOOST_ASSERT(! ec || ec == zlib::error::need_buffers);
            if(ec == zlib::error::need_buffers)
                ec.assign(0, ec.category());
            if(ec)
                return false;
            if(zs.avail_out >= 6)
            {
                zo.write(zs, zlib::Flush::full, ec);
                BOOST_ASSERT(! ec);
                // remove flush marker
                zs.total_out -= 4;
                out = buffer(out.data(), zs.total_out);
                return false;
            }
        }
    }
    ec.assign(0, ec.category());
    out = buffer(out.data(), zs.total_out);
    return true;
}
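
// Reset the deflate stream after a message when the
// negotiated permessage-deflate options specify
// "no context takeover" for our role (RFC 7692).
//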
template<>
inline
void
stream_base<true>::
do_context_takeover_write(role_type role)
{
    if((role == role_type::client &&
        this->pmd_config_.client_no_context_takeover) ||
       (role == role_type::server &&
        this->pmd_config_.server_no_context_takeover))
    {
        this->pmd_->zo.reset();
    }
}
} // detail
//------------------------------------------------------------------------------
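
// A composed asynchronous operation, written as a stackless
// coroutine, which delivers one complete frame (or a run of
// fragments) for a single call to async_write_some.
//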
template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
class stream<NextLayer, deflateSupported>::write_some_op
    : public beast::async_op_base<
        Handler, beast::detail::get_executor_type<stream>>
    , public net::coroutine
{
    stream& ws_;
    buffers_suffix<Buffers> cb_;
    detail::frame_header fh_;
    detail::prepared_key key_;
    std::size_t bytes_transferred_ = 0;
    std::size_t remain_;
    std::size_t in_;
    int how_;
    bool fin_;
    bool more_ = false; // for ubsan
    bool cont_ = false;

public:
    static constexpr int id = 2; // for soft_mutex

    template<class Handler_>
    write_some_op(
        Handler_&& h,
        stream<NextLayer, deflateSupported>& ws,
        bool fin,
        Buffers const& bs)
        : beast::async_op_base<Handler,
            beast::detail::get_executor_type<stream>>(
                std::forward<Handler_>(h), ws.get_executor())
        , ws_(ws)
        , cb_(bs)
        , fin_(fin)
    {
    }

    void operator()(
        error_code ec = {},
        std::size_t bytes_transferred = 0,
        bool cont = true);
};

template<class NextLayer, bool deflateSupported>
template<class Buffers, class Handler>
void
stream<NextLayer, deflateSupported>::
write_some_op<Buffers, Handler>::
operator()(
    error_code ec,
    std::size_t bytes_transferred,
    bool cont)
{
    using beast::detail::clamp;
    using net::buffer;
    using net::buffer_copy;
    using net::buffer_size;
    using net::mutable_buffer;
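
    // One of these algorithms is chosen up front, based on
    // whether the payload requires masking (client role),
    // whether automatic fragmentation is enabled, and
    // whether permessage-deflate is active for the message.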
    enum
    {
        do_nomask_nofrag,
        do_nomask_frag,
        do_mask_nofrag,
        do_mask_frag,
        do_deflate
    };

    std::size_t n;
    net::mutable_buffer b;
    cont_ = cont;
    BOOST_ASIO_CORO_REENTER(*this)
    {
        // Set up the outgoing frame header
        if(! ws_.wr_cont_)
        {
            ws_.begin_msg();
            fh_.rsv1 = ws_.wr_compress_;
        }
        else
        {
            fh_.rsv1 = false;
        }
        fh_.rsv2 = false;
        fh_.rsv3 = false;
        fh_.op = ws_.wr_cont_ ?
            detail::opcode::cont : ws_.wr_opcode_;
        fh_.mask =
            ws_.role_ == role_type::client;

        // Choose a write algorithm
        if(ws_.wr_compress_)
        {
            how_ = do_deflate;
        }
        else if(! fh_.mask)
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_nomask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_nomask_frag;
                else
                    how_ = do_nomask_nofrag;
            }
        }
        else
        {
            if(! ws_.wr_frag_)
            {
                how_ = do_mask_nofrag;
            }
            else
            {
                BOOST_ASSERT(ws_.wr_buf_size_ != 0);
                remain_ = buffer_size(cb_);
                if(remain_ > ws_.wr_buf_size_)
                    how_ = do_mask_frag;
                else
                    how_ = do_mask_nofrag;
            }
        }

        // Maybe suspend
        if(ws_.wr_block_.try_lock(this))
        {
            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }
        else
        {
        do_suspend:
            // Suspend
            BOOST_ASIO_CORO_YIELD
            ws_.paused_wr_.emplace(std::move(*this));

            // Acquire the write block
            ws_.wr_block_.lock(this);

            // Resume
            BOOST_ASIO_CORO_YIELD
            net::post(
                ws_.get_executor(), std::move(*this));
            BOOST_ASSERT(ws_.wr_block_.is_locked(this));

            // Make sure the stream is open
            if(! ws_.check_open(ec))
                goto upcall;
        }

        //------------------------------------------------------------------

        if(how_ == do_nomask_nofrag)
        {
            fh_.fin = fin_;
            fh_.len = buffer_size(cb_);
            ws_.wr_fb_.clear();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            ws_.wr_cont_ = ! fin_;

            // Send frame
            BOOST_ASIO_CORO_YIELD
            net::async_write(ws_.stream_,
                buffers_cat(ws_.wr_fb_.data(), cb_),
                    std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            bytes_transferred_ += clamp(fh_.len);
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_nomask_frag)
        {
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                fh_.len = n;
                remain_ -= n;
                fh_.fin = fin_ ? remain_ == 0 : false;
                ws_.wr_fb_.clear();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;

                // Send frame
                BOOST_ASIO_CORO_YIELD
                net::async_write(
                    ws_.stream_, buffers_cat(
                        ws_.wr_fb_.data(), buffers_prefix(
                            clamp(fh_.len), cb_)),
                                std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = clamp(fh_.len); // locals are not preserved across the yield
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;

                // Allow outgoing control frames to
                // be sent in between message frames
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_nofrag)
        {
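            // In the client role the payload must be masked
            // (RFC 6455 section 5.3), so the input is copied
            // through the write buffer in chunks and masked
            // in place before each partial write.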
            remain_ = buffer_size(cb_);
            fh_.fin = fin_;
            fh_.len = remain_;
            fh_.key = ws_.create_mask();
            detail::prepare_key(key_, fh_.key);
            ws_.wr_fb_.clear();
            detail::write<flat_static_buffer_base>(
                ws_.wr_fb_, fh_);
            n = clamp(remain_, ws_.wr_buf_size_);
            buffer_copy(buffer(
                ws_.wr_buf_.get(), n), cb_);
            detail::mask_inplace(buffer(
                ws_.wr_buf_.get(), n), key_);
            remain_ -= n;
            ws_.wr_cont_ = ! fin_;

            // Send frame header and partial payload
            BOOST_ASIO_CORO_YIELD
            net::async_write(
                ws_.stream_, buffers_cat(ws_.wr_fb_.data(),
                    buffer(ws_.wr_buf_.get(), n)),
                        std::move(*this));
            if(! ws_.check_ok(ec))
                goto upcall;
            bytes_transferred_ +=
                bytes_transferred - ws_.wr_fb_.size();
            while(remain_ > 0)
            {
                cb_.consume(ws_.wr_buf_size_);
                n = clamp(remain_, ws_.wr_buf_size_);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                remain_ -= n;

                // Send partial payload
                BOOST_ASIO_CORO_YIELD
                net::async_write(ws_.stream_,
                    buffer(ws_.wr_buf_.get(), n),
                        std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                bytes_transferred_ += bytes_transferred;
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_mask_frag)
        {
            for(;;)
            {
                n = clamp(remain_, ws_.wr_buf_size_);
                remain_ -= n;
                fh_.len = n;
                fh_.key = ws_.create_mask();
                fh_.fin = fin_ ? remain_ == 0 : false;
                detail::prepare_key(key_, fh_.key);
                buffer_copy(buffer(
                    ws_.wr_buf_.get(), n), cb_);
                detail::mask_inplace(buffer(
                    ws_.wr_buf_.get(), n), key_);
                ws_.wr_fb_.clear();
                detail::write<flat_static_buffer_base>(
                    ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;

                // Send frame
                BOOST_ASIO_CORO_YIELD
                net::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(),
                        buffer(ws_.wr_buf_.get(), n)),
                            std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = bytes_transferred - ws_.wr_fb_.size();
                bytes_transferred_ += n;
                if(remain_ == 0)
                    break;
                cb_.consume(n);
                fh_.op = detail::opcode::cont;

                // Allow outgoing control frames to
                // be sent in between message frames:
                ws_.wr_block_.unlock(this);
                if( ws_.paused_close_.maybe_invoke() ||
                    ws_.paused_rd_.maybe_invoke() ||
                    ws_.paused_ping_.maybe_invoke())
                {
                    BOOST_ASSERT(ws_.wr_block_.is_locked());
                    goto do_suspend;
                }
                ws_.wr_block_.lock(this);
            }
            goto upcall;
        }

        //------------------------------------------------------------------

        else if(how_ == do_deflate)
        {
            for(;;)
            {
                b = buffer(ws_.wr_buf_.get(),
                    ws_.wr_buf_size_);
                more_ = ws_.deflate(b, cb_, fin_, in_, ec);
                if(! ws_.check_ok(ec))
                    goto upcall;
                n = buffer_size(b);
                if(n == 0)
                {
                    // The input was consumed, but there
                    // is no output due to compression
                    // latency.
                    BOOST_ASSERT(! fin_);
                    BOOST_ASSERT(buffer_size(cb_) == 0);
                    goto upcall;
                }
                if(fh_.mask)
                {
                    fh_.key = ws_.create_mask();
                    detail::prepared_key key;
                    detail::prepare_key(key, fh_.key);
                    detail::mask_inplace(b, key);
                }
                fh_.fin = ! more_;
                fh_.len = n;
                ws_.wr_fb_.clear();
                detail::write<
                    flat_static_buffer_base>(ws_.wr_fb_, fh_);
                ws_.wr_cont_ = ! fin_;

                // Send frame
                BOOST_ASIO_CORO_YIELD
                net::async_write(ws_.stream_,
                    buffers_cat(ws_.wr_fb_.data(), b),
                        std::move(*this));
                if(! ws_.check_ok(ec))
                    goto upcall;
                bytes_transferred_ += in_;
                if(more_)
                {
                    fh_.op = detail::opcode::cont;
                    fh_.rsv1 = false;

                    // Allow outgoing control frames to
                    // be sent in between message frames:
                    ws_.wr_block_.unlock(this);
                    if( ws_.paused_close_.maybe_invoke() ||
                        ws_.paused_rd_.maybe_invoke() ||
                        ws_.paused_ping_.maybe_invoke())
                    {
                        BOOST_ASSERT(ws_.wr_block_.is_locked());
                        goto do_suspend;
                    }
                    ws_.wr_block_.lock(this);
                }
                else
                {
                    if(fh_.fin)
                        ws_.do_context_takeover_write(ws_.role_);
                    goto upcall;
                }
            }
        }

    //--------------------------------------------------------------------------

    upcall:
        ws_.wr_block_.unlock(this);
        ws_.paused_close_.maybe_invoke() ||
            ws_.paused_rd_.maybe_invoke() ||
            ws_.paused_ping_.maybe_invoke();
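
        // If this is the first invocation (cont_ == false)
        // the handler cannot be invoked directly, or the
        // call would be re-entrant from within the
        // initiating function; post to the executor instead.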
        if(! cont_)
        {
            BOOST_ASIO_CORO_YIELD
            net::post(
                ws_.get_executor(),
                beast::bind_front_handler(
                    std::move(*this),
                    ec, bytes_transferred_));
        }
        this->invoke(ec, bytes_transferred_);
    }
}

//------------------------------------------------------------------------------
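
// Example (illustrative only): writing a message in two
// frames with the synchronous API. `ws` is assumed to be a
// stream which has already completed the handshake.
//
//     ws.text(true);
//     std::string part1 = "Hello, ";
//     std::string part2 = "world!";
//     ws.write_some(false, net::buffer(part1)); // fin = false
//     ws.write_some(true,  net::buffer(part2)); // fin = true
//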
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin, ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const bytes_transferred =
        write_some(fin, buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write_some(bool fin,
    ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    using beast::detail::clamp;
    using net::buffer;
    using net::buffer_copy;
    using net::buffer_size;
    std::size_t bytes_transferred = 0;
    ec.assign(0, ec.category());

    // Make sure the stream is open
    if(! check_open(ec))
        return bytes_transferred;

    detail::frame_header fh;
    if(! wr_cont_)
    {
        begin_msg();
        fh.rsv1 = wr_compress_;
    }
    else
    {
        fh.rsv1 = false;
    }
    fh.rsv2 = false;
    fh.rsv3 = false;
    fh.op = wr_cont_ ?
        detail::opcode::cont : wr_opcode_;
    fh.mask = role_ == role_type::client;
    auto remain = buffer_size(buffers);
    if(wr_compress_)
    {
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            auto b = buffer(
                wr_buf_.get(), wr_buf_size_);
            auto const more = this->deflate(
                b, cb, fin, bytes_transferred, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            auto const n = buffer_size(b);
            if(n == 0)
            {
                // The input was consumed, but there
                // is no output due to compression
                // latency.
                BOOST_ASSERT(! fin);
                BOOST_ASSERT(buffer_size(cb) == 0);
                fh.fin = false;
                break;
            }
            if(fh.mask)
            {
                fh.key = this->create_mask();
                detail::prepared_key key;
                detail::prepare_key(key, fh.key);
                detail::mask_inplace(b, key);
            }
            fh.fin = ! more;
            fh.len = n;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            net::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            if(! more)
                break;
            fh.op = detail::opcode::cont;
            fh.rsv1 = false;
        }
        if(fh.fin)
            this->do_context_takeover_write(role_);
    }
    else if(! fh.mask)
    {
        if(! wr_frag_)
        {
            // no mask, no autofrag
            fh.fin = fin;
            fh.len = remain;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            wr_cont_ = ! fin;
            net::write(stream_,
                buffers_cat(fh_buf.data(), buffers), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += remain;
        }
        else
        {
            // no mask, autofrag
            BOOST_ASSERT(wr_buf_size_ != 0);
            buffers_suffix<
                ConstBufferSequence> cb{buffers};
            for(;;)
            {
                auto const n = clamp(remain, wr_buf_size_);
                remain -= n;
                fh.len = n;
                fh.fin = fin ? remain == 0 : false;
                detail::fh_buffer fh_buf;
                detail::write<
                    flat_static_buffer_base>(fh_buf, fh);
                wr_cont_ = ! fin;
                net::write(stream_,
                    buffers_cat(fh_buf.data(),
                        buffers_prefix(n, cb)), ec);
                if(! check_ok(ec))
                    return bytes_transferred;
                bytes_transferred += n;
                if(remain == 0)
                    break;
                fh.op = detail::opcode::cont;
                cb.consume(n);
            }
        }
    }
    else if(! wr_frag_)
    {
        // mask, no autofrag
        fh.fin = fin;
        fh.len = remain;
        fh.key = this->create_mask();
        detail::prepared_key key;
        detail::prepare_key(key, fh.key);
        detail::fh_buffer fh_buf;
        detail::write<
            flat_static_buffer_base>(fh_buf, fh);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            wr_cont_ = ! fin;
            net::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
        while(remain > 0)
        {
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            cb.consume(n);
            remain -= n;
            detail::mask_inplace(b, key);
            net::write(stream_, b, ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
        }
    }
    else
    {
        // mask, autofrag
        BOOST_ASSERT(wr_buf_size_ != 0);
        buffers_suffix<
            ConstBufferSequence> cb{buffers};
        for(;;)
        {
            fh.key = this->create_mask();
            detail::prepared_key key;
            detail::prepare_key(key, fh.key);
            auto const n = clamp(remain, wr_buf_size_);
            auto const b = buffer(wr_buf_.get(), n);
            buffer_copy(b, cb);
            detail::mask_inplace(b, key);
            fh.len = n;
            remain -= n;
            fh.fin = fin ? remain == 0 : false;
            wr_cont_ = ! fh.fin;
            detail::fh_buffer fh_buf;
            detail::write<
                flat_static_buffer_base>(fh_buf, fh);
            net::write(stream_,
                buffers_cat(fh_buf.data(), b), ec);
            if(! check_ok(ec))
                return bytes_transferred;
            bytes_transferred += n;
            if(remain == 0)
                break;
            fh.op = detail::opcode::cont;
            cb.consume(n);
        }
    }
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write_some(bool fin,
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, fin, bs}(
                {}, 0, false);
    return init.result.get();
}

//------------------------------------------------------------------------------
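
// Example (illustrative only): sending a complete message
// asynchronously. `ws` is assumed to have completed the
// handshake, and `s` must remain valid until the completion
// handler is invoked.
//
//     ws.async_write(net::buffer(s),
//         [](error_code ec, std::size_t bytes_transferred)
//         {
//             // bytes_transferred is the payload size consumed
//         });
//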
template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    error_code ec;
    auto const bytes_transferred = write(buffers, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
    return bytes_transferred;
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence>
std::size_t
stream<NextLayer, deflateSupported>::
write(ConstBufferSequence const& buffers, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    return write_some(true, buffers, ec);
}

template<class NextLayer, bool deflateSupported>
template<class ConstBufferSequence, class WriteHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    WriteHandler, void(error_code, std::size_t))
stream<NextLayer, deflateSupported>::
async_write(
    ConstBufferSequence const& bs, WriteHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream requirements not met");
    static_assert(net::is_const_buffer_sequence<
        ConstBufferSequence>::value,
        "ConstBufferSequence requirements not met");
    BOOST_BEAST_HANDLER_INIT(
        WriteHandler, void(error_code, std::size_t));
    write_some_op<ConstBufferSequence, BOOST_ASIO_HANDLER_TYPE(
        WriteHandler, void(error_code, std::size_t))>{
            std::move(init.completion_handler), *this, true, bs}(
                {}, 0, false);
    return init.result.get();
}

} // websocket
} // beast
} // boost

#endif