rpc: keep track of acceptors, and cancel them in StopRPCThreads

Fixes #4156.

The problem is that the boost::asio::io_service destructor
waits for the acceptors to finish (on Windows, with Boost 1.55).

Fix this by keeping track of the acceptors and cancelling them before
stopping the event loops.
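
In isolation, the pattern looks roughly like this (a minimal standalone
sketch, not the Bitcoin Core code itself; the names StartListening and
StopListening and the port number are illustrative):

    #include <vector>
    #include <boost/asio.hpp>
    #include <boost/foreach.hpp>
    #include <boost/shared_ptr.hpp>

    using boost::asio::ip::tcp;

    static boost::asio::io_service* io_service = NULL;
    static std::vector< boost::shared_ptr<tcp::acceptor> > acceptors;

    void StartListening(unsigned short port)
    {
        boost::shared_ptr<tcp::acceptor> acceptor(new tcp::acceptor(*io_service));
        acceptors.push_back(acceptor); // remember the acceptor so shutdown can reach it
        tcp::endpoint endpoint(tcp::v4(), port);
        acceptor->open(endpoint.protocol());
        acceptor->bind(endpoint);
        acceptor->listen();
        // async_accept(...) calls would be issued here
    }

    void StopListening()
    {
        // Cancel outstanding async_accept operations first; their handlers
        // complete with error::operation_aborted. Only then stop the event
        // loop, so the io_service destructor has no acceptor work left to
        // wait on.
        BOOST_FOREACH(const boost::shared_ptr<tcp::acceptor> &acceptor, acceptors)
            acceptor->cancel();
        acceptors.clear();
        io_service->stop();
    }

    int main()
    {
        boost::asio::io_service service;
        io_service = &service;
        StartListening(18332);
        StopListening();
        service.run(); // returns immediately: nothing left after cancellation
        return 0;
    }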
Author: Wladimir J. van der Laan
Date:   2014-05-09 10:01:50 +02:00
parent 381b25dfde
commit cef44941e7

@@ -39,6 +39,7 @@ static ssl::context* rpc_ssl_context = NULL;
 static boost::thread_group* rpc_worker_group = NULL;
 static boost::asio::io_service::work *rpc_dummy_work = NULL;
 static std::vector<CSubNet> rpc_allow_subnets; //!< List of subnets to allow RPC connections from
+static std::vector< boost::shared_ptr<ip::tcp::acceptor> > rpc_acceptors;
 
 void RPCTypeCheck(const Array& params,
                   const list<Value_type>& typesExpected,
@@ -593,12 +594,13 @@ void StartRPCThreads()
         asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
         ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", Params().RPCPort()));
         boost::system::error_code v6_only_error;
-        boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
 
         bool fListening = false;
         std::string strerr;
         try
         {
+            boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+            rpc_acceptors.push_back(acceptor);
             acceptor->open(endpoint.protocol());
             acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
 
@@ -616,7 +618,6 @@ void StartRPCThreads()
         {
             strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
         }
-
         try {
             // If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
             if (!fListening || loopback || v6_only_error)
@@ -624,7 +625,8 @@ void StartRPCThreads()
                 bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
                 endpoint.address(bindAddress);
 
-                acceptor.reset(new ip::tcp::acceptor(*rpc_io_service));
+                boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+                rpc_acceptors.push_back(acceptor);
                 acceptor->open(endpoint.protocol());
                 acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
                 acceptor->bind(endpoint);
@@ -668,7 +670,16 @@ void StopRPCThreads()
 {
     if (rpc_io_service == NULL) return;
 
+    // First, cancel all timers and acceptors
+    // This is not done automatically by ->stop(), and in some cases the destructor of
+    // asio::io_service can hang if this is skipped.
+    BOOST_FOREACH(const boost::shared_ptr<ip::tcp::acceptor> &acceptor, rpc_acceptors)
+        acceptor->cancel();
+    rpc_acceptors.clear();
+    BOOST_FOREACH(const PAIRTYPE(std::string, boost::shared_ptr<deadline_timer>) &timer, deadlineTimers)
+        timer.second->cancel();
     deadlineTimers.clear();
+
     rpc_io_service->stop();
     if (rpc_worker_group != NULL)
         rpc_worker_group->join_all();
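
A note on why cancel() is needed in addition to stop(): stop() only keeps
run() from dispatching further handlers, while cancel() completes each
pending asynchronous operation with boost::asio::error::operation_aborted,
so nothing is left outstanding when the io_service is destroyed. The same
applies to the deadline timers cancelled above. A small standalone
illustration (not Bitcoin Core code):

    #include <iostream>
    #include <boost/asio.hpp>
    #include <boost/date_time/posix_time/posix_time.hpp>

    static void OnTimer(const boost::system::error_code& err)
    {
        if (err == boost::asio::error::operation_aborted)
            std::cout << "timer cancelled, handler returns without rescheduling\n";
        else if (!err)
            std::cout << "timer expired normally\n";
    }

    int main()
    {
        boost::asio::io_service service;
        boost::asio::deadline_timer timer(service, boost::posix_time::seconds(60));
        timer.async_wait(&OnTimer);
        timer.cancel();  // the pending wait now completes with operation_aborted
        service.run();   // drains the aborted handler, then returns
        return 0;
    }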