boost_beast/include/boost/beast/websocket/impl/close.hpp

//
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//

#ifndef BOOST_BEAST_WEBSOCKET_IMPL_CLOSE_HPP
#define BOOST_BEAST_WEBSOCKET_IMPL_CLOSE_HPP

#include <boost/beast/websocket/teardown.hpp>
#include <boost/beast/websocket/detail/mask.hpp>
#include <boost/beast/websocket/impl/stream_impl.hpp>
#include <boost/beast/core/async_op_base.hpp>
#include <boost/beast/core/flat_static_buffer.hpp>
#include <boost/beast/core/stream_traits.hpp>
#include <boost/beast/core/detail/bind_continuation.hpp>
#include <boost/asio/coroutine.hpp>
#include <boost/asio/post.hpp>
#include <boost/throw_exception.hpp>
#include <memory>

namespace boost {
namespace beast {
namespace websocket {

/*  Close the WebSocket Connection

    This composed operation sends the close frame if it hasn't already
    been sent, then reads and discards frames until receiving a close
    frame. Finally it invokes the teardown operation to shut down the
    underlying connection.
*/
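
/*  A minimal usage sketch (not part of this header). Assuming an
    already-connected stream named `ws`, the public members defined
    below are typically invoked like this:

        ws.close(websocket::close_code::normal); // throws on error

        ws.async_close(websocket::close_code::normal,
            [](error_code ec)
            {
                // On a clean close, ec indicates success.
            });
*/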
template<class NextLayer, bool deflateSupported>
template<class Handler>
class stream<NextLayer, deflateSupported>::close_op
    : public beast::stable_async_op_base<
        Handler, beast::executor_type<stream>>
    , public net::coroutine
{
    boost::weak_ptr<impl_type> wp_;
    error_code ev_;
    detail::frame_buffer& fb_;

public:
    static constexpr int id = 5; // for soft_mutex

    template<class Handler_>
    close_op(
        Handler_&& h,
        boost::shared_ptr<impl_type> const& sp,
        close_reason const& cr)
        : stable_async_op_base<Handler,
            beast::executor_type<stream>>(
                std::forward<Handler_>(h),
                sp->stream.get_executor())
        , wp_(sp)
        , fb_(beast::allocate_stable<
            detail::frame_buffer>(*this))
    {
        // Serialize the close frame
        sp->template write_close<
            flat_static_buffer_base>(fb_, cr);
        (*this)({}, 0, false);
    }
    void
    operator()(
        error_code ec = {},
        std::size_t bytes_transferred = 0,
        bool cont = true)
    {
        using beast::detail::clamp;
        auto sp = wp_.lock();
        if(! sp)
            return this->invoke(cont,
                net::error::operation_aborted);
        auto& impl = *sp;
        BOOST_ASIO_CORO_REENTER(*this)
        {
            // Acquire the write lock
            if(! impl.wr_block.try_lock(this))
            {
                BOOST_ASIO_CORO_YIELD
                impl.op_close.emplace(std::move(*this));
                impl.wr_block.lock(this);
                BOOST_ASIO_CORO_YIELD
                net::post(std::move(*this));
                BOOST_ASSERT(impl.wr_block.is_locked(this));
            }
            if(impl.check_stop_now(ec))
                goto upcall;

            // Can't call close twice
            // TODO return a custom error code
            BOOST_ASSERT(! impl.wr_close);

            // Send close frame
            impl.wr_close = true;
            impl.change_status(status::closing);
            impl.update_timer(this->get_executor());
            BOOST_ASIO_CORO_YIELD
            net::async_write(impl.stream, fb_.data(),
                beast::detail::bind_continuation(std::move(*this)));
            if(impl.check_stop_now(ec))
                goto upcall;

            if(impl.rd_close)
            {
                // This happens when the read_op gets a close frame
                // at the same time close_op is sending the close frame.
                // The read_op will be suspended on the write block.
                goto teardown;
            }

            // Acquire the read lock
            if(! impl.rd_block.try_lock(this))
            {
                BOOST_ASIO_CORO_YIELD
                impl.op_r_close.emplace(std::move(*this));
                impl.rd_block.lock(this);
                BOOST_ASIO_CORO_YIELD
                net::post(std::move(*this));
                BOOST_ASSERT(impl.rd_block.is_locked(this));
                if(impl.check_stop_now(ec))
                    goto upcall;
                BOOST_ASSERT(! impl.rd_close);
            }

            // Read until receiving a close frame
            // TODO There should be a timeout on this
            if(impl.rd_remain > 0)
                goto read_payload;
            for(;;)
            {
                // Read frame header
                while(! impl.parse_fh(
                    impl.rd_fh, impl.rd_buf, ev_))
                {
                    if(ev_)
                        goto teardown;
                    BOOST_ASIO_CORO_YIELD
                    impl.stream.async_read_some(
                        impl.rd_buf.prepare(read_size(
                            impl.rd_buf, impl.rd_buf.max_size())),
                        beast::detail::bind_continuation(std::move(*this)));
                    impl.rd_buf.commit(bytes_transferred);
                    if(impl.check_stop_now(ec))
                        goto upcall;
                }
                if(detail::is_control(impl.rd_fh.op))
                {
                    // Discard ping or pong frame
                    if(impl.rd_fh.op != detail::opcode::close)
                    {
                        impl.rd_buf.consume(clamp(impl.rd_fh.len));
                        continue;
                    }

                    // Process close frame
                    // TODO Should we invoke the control callback?
                    BOOST_ASSERT(! impl.rd_close);
                    impl.rd_close = true;
                    auto const mb = buffers_prefix(
                        clamp(impl.rd_fh.len),
                        impl.rd_buf.data());
                    if(impl.rd_fh.len > 0 && impl.rd_fh.mask)
                        detail::mask_inplace(mb, impl.rd_key);
                    detail::read_close(impl.cr, mb, ev_);
                    if(ev_)
                        goto teardown;
                    impl.rd_buf.consume(clamp(impl.rd_fh.len));
                    goto teardown;
                }

            read_payload:
                // Discard message frame
                while(impl.rd_buf.size() < impl.rd_remain)
                {
                    impl.rd_remain -= impl.rd_buf.size();
                    impl.rd_buf.consume(impl.rd_buf.size());
                    BOOST_ASIO_CORO_YIELD
                    impl.stream.async_read_some(
                        impl.rd_buf.prepare(read_size(
                            impl.rd_buf, impl.rd_buf.max_size())),
                        beast::detail::bind_continuation(std::move(*this)));
                    impl.rd_buf.commit(bytes_transferred);
                    if(impl.check_stop_now(ec))
                        goto upcall;
                }
                BOOST_ASSERT(impl.rd_buf.size() >= impl.rd_remain);
                impl.rd_buf.consume(clamp(impl.rd_remain));
                impl.rd_remain = 0;
            }

        teardown:
            // Teardown
            BOOST_ASSERT(impl.wr_block.is_locked(this));
            using beast::websocket::async_teardown;
            BOOST_ASIO_CORO_YIELD
            async_teardown(impl.role, impl.stream,
                beast::detail::bind_continuation(std::move(*this)));
            BOOST_ASSERT(impl.wr_block.is_locked(this));
            if(ec == net::error::eof)
            {
                // Rationale:
                // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
                ec = {};
            }
            if(! ec)
                ec = ev_;
            if(ec)
                impl.change_status(status::failed);
            else
                impl.change_status(status::closed);
            impl.close();

        upcall:
            impl.wr_block.unlock(this);
            impl.rd_block.try_unlock(this)
                && impl.op_r_rd.maybe_invoke();
            impl.op_rd.maybe_invoke()
                || impl.op_idle_ping.maybe_invoke()
                || impl.op_ping.maybe_invoke()
                || impl.op_wr.maybe_invoke();
            this->invoke(cont, ec);
        }
    }
};

template<class NextLayer, bool deflateSupported>
struct stream<NextLayer, deflateSupported>::
    run_close_op
{
    template<class CloseHandler>
    void
    operator()(
        CloseHandler&& h,
        boost::shared_ptr<impl_type> const& sp,
        close_reason const& cr)
    {
        // If you get an error on the following line it means
        // that your handler does not meet the documented type
        // requirements for the handler.
        static_assert(
            beast::detail::is_invocable<CloseHandler,
                void(error_code)>::value,
            "CloseHandler type requirements not met");

        close_op<
            typename std::decay<CloseHandler>::type>(
                std::forward<CloseHandler>(h),
                sp,
                cr);
    }
};

//------------------------------------------------------------------------------

template<class NextLayer, bool deflateSupported>
void
stream<NextLayer, deflateSupported>::
close(close_reason const& cr)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream type requirements not met");
    error_code ec;
    close(cr, ec);
    if(ec)
        BOOST_THROW_EXCEPTION(system_error{ec});
}

template<class NextLayer, bool deflateSupported>
void
stream<NextLayer, deflateSupported>::
close(close_reason const& cr, error_code& ec)
{
    static_assert(is_sync_stream<next_layer_type>::value,
        "SyncStream type requirements not met");
    using beast::detail::clamp;
    auto& impl = *impl_;
    ec = {};
    if(impl.check_stop_now(ec))
        return;
    BOOST_ASSERT(! impl.rd_close);

    // Can't call close twice
    // TODO return a custom error code
    BOOST_ASSERT(! impl.wr_close);

    // Send close frame
    {
        impl.wr_close = true;
        impl.change_status(status::closing);
        detail::frame_buffer fb;
        impl.template write_close<flat_static_buffer_base>(fb, cr);
        net::write(impl.stream, fb.data(), ec);
        if(impl.check_stop_now(ec))
            return;
    }

    // Read until receiving a close frame
    error_code ev;
    if(impl.rd_remain > 0)
        goto read_payload;
    for(;;)
    {
        // Read frame header
        while(! impl.parse_fh(
            impl.rd_fh, impl.rd_buf, ev))
        {
            if(ev)
            {
                // Protocol violation
                return do_fail(close_code::none, ev, ec);
            }
            impl.rd_buf.commit(impl.stream.read_some(
                impl.rd_buf.prepare(read_size(
                    impl.rd_buf, impl.rd_buf.max_size())), ec));
            if(impl.check_stop_now(ec))
                return;
        }
        if(detail::is_control(impl.rd_fh.op))
        {
            // Discard ping/pong frame
            if(impl.rd_fh.op != detail::opcode::close)
            {
                impl.rd_buf.consume(clamp(impl.rd_fh.len));
                continue;
            }

            // Handle close frame
            // TODO Should we invoke the control callback?
            BOOST_ASSERT(! impl.rd_close);
            impl.rd_close = true;
            auto const mb = buffers_prefix(
                clamp(impl.rd_fh.len),
                impl.rd_buf.data());
            if(impl.rd_fh.len > 0 && impl.rd_fh.mask)
                detail::mask_inplace(mb, impl.rd_key);
            detail::read_close(impl.cr, mb, ev);
            if(ev)
            {
                // Protocol violation
                return do_fail(close_code::none, ev, ec);
            }
            impl.rd_buf.consume(clamp(impl.rd_fh.len));
            break;
        }

    read_payload:
        // Discard message frame
        while(impl.rd_buf.size() < impl.rd_remain)
        {
            impl.rd_remain -= impl.rd_buf.size();
            impl.rd_buf.consume(impl.rd_buf.size());
            impl.rd_buf.commit(
                impl.stream.read_some(
                    impl.rd_buf.prepare(
                        read_size(
                            impl.rd_buf,
                            impl.rd_buf.max_size())),
                    ec));
            if(impl.check_stop_now(ec))
                return;
        }
        BOOST_ASSERT(
            impl.rd_buf.size() >= impl.rd_remain);
        impl.rd_buf.consume(clamp(impl.rd_remain));
        impl.rd_remain = 0;
    }

    // _Close the WebSocket Connection_
    do_fail(close_code::none, error::closed, ec);
    if(ec == error::closed)
        ec = {};
}
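
/*  Illustrative call of the non-throwing overload above, with `ws`
    an assumed, already-connected stream:

        error_code ec;
        ws.close(websocket::close_code::going_away, ec);
        if(ec)
            std::cerr << "close: " << ec.message() << "\n";
*/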

template<class NextLayer, bool deflateSupported>
template<class CloseHandler>
BOOST_ASIO_INITFN_RESULT_TYPE(
    CloseHandler, void(error_code))
stream<NextLayer, deflateSupported>::
async_close(close_reason const& cr, CloseHandler&& handler)
{
    static_assert(is_async_stream<next_layer_type>::value,
        "AsyncStream type requirements not met");
    return net::async_initiate<
        CloseHandler,
        void(error_code)>(
            run_close_op{},
            handler,
            impl_,
            cr);
}
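
/*  Because async_close is built on net::async_initiate, completion
    tokens other than plain handlers should work for the
    void(error_code) signature. A sketch, assuming `ws` is a
    connected stream:

        std::future<void> f = ws.async_close(
            websocket::close_code::normal, net::use_future);
        f.get(); // rethrows a failed close as system_error
*/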

} // websocket
} // beast
} // boost

#endif