//
// Copyright (c) 2013-2016 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BEAST_WEBSOCKET_IMPL_WRITE_FRAME_OP_HPP
#define BEAST_WEBSOCKET_IMPL_WRITE_FRAME_OP_HPP

#include <beast/core/bind_handler.hpp>
#include <beast/core/buffer_cat.hpp>
#include <beast/core/consuming_buffers.hpp>
#include <beast/core/handler_alloc.hpp>
#include <beast/websocket/detail/frame.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/write.hpp>
#include <cassert>
#include <cstdint>
#include <memory>
#include <utility>

namespace beast {
namespace websocket {

// write a frame
//
template<class NextLayer>
template<class Buffers, class Handler>
class stream<NextLayer>::write_frame_op
{
    using alloc_type =
        handler_alloc<char, Handler>;

    struct data : op
    {
        stream<NextLayer>& ws;
        consuming_buffers<Buffers> cb;
        Handler h;
        detail::frame_header fh;
        detail::fh_streambuf fh_buf;
        detail::prepared_key_type key;
        void* tmp;
        std::size_t tmp_size;
        std::uint64_t remain;
        bool cont;
        int state = 0;

        template<class DeducedHandler>
        data(DeducedHandler&& h_, stream<NextLayer>& ws_,
                bool fin, Buffers const& bs)
            : ws(ws_)
            , cb(bs)
            , h(std::forward<DeducedHandler>(h_))
            , cont(boost_asio_handler_cont_helpers::
                is_continuation(h))
        {
            fh.op = ws.wr_cont_ ?
                opcode::cont : ws.wr_opcode_;
            ws.wr_cont_ = ! fin;
            fh.fin = fin;
            fh.rsv1 = false;
            fh.rsv2 = false;
            fh.rsv3 = false;
            fh.len = boost::asio::buffer_size(cb);
            fh.mask = ws.role_ == detail::role_type::client;
            if(fh.mask)
            {
                // Client frames are masked: prepare the key and a
                // bounded temporary buffer for masking the payload
                // one chunk at a time.
                fh.key = ws.maskgen_();
                detail::prepare_key(key, fh.key);
                tmp_size = detail::clamp(
                    fh.len, ws.mask_buf_size_);
                tmp = boost_asio_handler_alloc_helpers::
                    allocate(tmp_size, h);
                remain = fh.len;
            }
            else
            {
                tmp = nullptr;
            }
            detail::write(fh_buf, fh);
        }

        ~data()
        {
            if(tmp)
                boost_asio_handler_alloc_helpers::
                    deallocate(tmp, tmp_size, h);
        }
    };

    std::shared_ptr<data> d_;

public:
    write_frame_op(write_frame_op&&) = default;
    write_frame_op(write_frame_op const&) = default;

    template<class DeducedHandler, class... Args>
    write_frame_op(DeducedHandler&& h,
            stream<NextLayer>& ws, Args&&... args)
        : d_(std::make_shared<data>(
            std::forward<DeducedHandler>(h), ws,
                std::forward<Args>(args)...))
    {
        (*this)(error_code{}, false);
    }

    void operator()()
    {
        (*this)(error_code{});
    }

    void operator()(error_code ec, std::size_t);

    void operator()(error_code ec, bool again = true);

    friend
    void* asio_handler_allocate(
        std::size_t size, write_frame_op* op)
    {
        return boost_asio_handler_alloc_helpers::
            allocate(size, op->d_->h);
    }

    friend
    void asio_handler_deallocate(
        void* p, std::size_t size, write_frame_op* op)
    {
        return boost_asio_handler_alloc_helpers::
            deallocate(p, size, op->d_->h);
    }

    friend
    bool asio_handler_is_continuation(write_frame_op* op)
    {
        return op->d_->cont;
    }

    template<class Function>
    friend
    void asio_handler_invoke(Function&& f, write_frame_op* op)
    {
        return boost_asio_handler_invoke_helpers::
            invoke(f, op->d_->h);
    }
};

template<class NextLayer>
template<class Buffers, class Handler>
void
stream<NextLayer>::
write_frame_op<Buffers, Handler>::
operator()(error_code ec, std::size_t)
{
    auto& d = *d_;
    if(ec)
        d.ws.failed_ = true;
    (*this)(ec);
}
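
// Writes the frame header followed by the payload. For client (masked)
// frames the payload is copied into the temporary buffer and masked in
// place, one bounded chunk at a time; unmasked frames are written in a
// single composed write. States used by the switch below:
//
//   0  - start: suspend if another write owns the stream, or abort if
//        the stream already failed or a close frame was sent
//   1  - send the header plus the entire payload (unmasked), or the
//        header plus the first masked chunk
//   2  - send the next masked chunk of the payload
//   3  - suspended behind another write; when resumed, repost to the
//        io_service
//   4  - re-check the stream state, then continue at state 1
//   99 - done: free the temporary buffer, release the write block, and
//        invoke the handler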

template<class NextLayer>
template<class Buffers, class Handler>
void
stream<NextLayer>::
write_frame_op<Buffers, Handler>::
operator()(error_code ec, bool again)
{
    using boost::asio::buffer_copy;
    using boost::asio::mutable_buffers_1;
    auto& d = *d_;
    d.cont = d.cont || again;
    if(ec)
        goto upcall;
    for(;;)
    {
        switch(d.state)
        {
        case 0:
            if(d.ws.wr_block_)
            {
                // suspend
                d.state = 3;
                d.ws.wr_op_.template emplace<
                    write_frame_op>(std::move(*this));
                return;
            }
            if(d.ws.failed_ || d.ws.wr_close_)
            {
                // call handler
                d.state = 99;
                d.ws.get_io_service().post(
                    bind_handler(std::move(*this),
                        boost::asio::error::operation_aborted));
                return;
            }
            // fall through

        case 1:
        {
            if(! d.fh.mask)
            {
                // send header and entire payload
                d.state = 99;
                assert(! d.ws.wr_block_);
                d.ws.wr_block_ = &d;
                boost::asio::async_write(d.ws.stream_,
                    buffer_cat(d.fh_buf.data(), d.cb),
                        std::move(*this));
                return;
            }
            auto const n = detail::clamp(d.remain, d.tmp_size);
            mutable_buffers_1 mb{d.tmp, n};
            buffer_copy(mb, d.cb);
            d.cb.consume(n);
            d.remain -= n;
            detail::mask_inplace(mb, d.key);
            // send header and first masked chunk of the payload
            d.state = d.remain > 0 ? 2 : 99;
            assert(! d.ws.wr_block_);
            d.ws.wr_block_ = &d;
            boost::asio::async_write(d.ws.stream_,
                buffer_cat(d.fh_buf.data(), mb),
                    std::move(*this));
            return;
        }

        // sent masked payload
        case 2:
        {
            auto const n = detail::clamp(d.remain, d.tmp_size);
            mutable_buffers_1 mb{d.tmp,
                static_cast<std::size_t>(n)};
            buffer_copy(mb, d.cb);
            d.cb.consume(n);
            d.remain -= n;
            detail::mask_inplace(mb, d.key);
            // send next masked chunk of the payload
            if(d.remain == 0)
                d.state = 99;
            assert(d.ws.wr_block_ == &d);
            boost::asio::async_write(
                d.ws.stream_, mb, std::move(*this));
            return;
        }

        // resumed after suspension
        case 3:
            d.state = 4;
            d.ws.get_io_service().post(bind_handler(
                std::move(*this), ec));
            return;

        case 4:
            if(d.ws.failed_ || d.ws.wr_close_)
            {
                // call handler
                ec = boost::asio::error::operation_aborted;
                goto upcall;
            }
            d.state = 1;
            break;

        case 99:
            goto upcall;
        }
    }
upcall:
    if(d.tmp)
    {
        boost_asio_handler_alloc_helpers::
            deallocate(d.tmp, d.tmp_size, d.h);
        d.tmp = nullptr;
    }
    if(d.ws.wr_block_ == &d)
        d.ws.wr_block_ = nullptr;
    d.ws.rd_op_.maybe_invoke();
    d.h(ec);
}

} // websocket
} // beast

#endif