diff --git a/configure.ac b/configure.ac index 7d0ebbc5bb802..b1e8b7e11ef40 100644 --- a/configure.ac +++ b/configure.ac @@ -1171,13 +1171,6 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include [ AC_MSG_RESULT(no)] ) -AC_MSG_CHECKING(for getentropy) -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[ getentropy(nullptr, 32) ]])], - [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_GETENTROPY, 1,[Define this symbol if the BSD getentropy system call is available]) ], - [ AC_MSG_RESULT(no)] -) - AC_MSG_CHECKING(for getentropy via random.h) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include #include ]], diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 471c38be44ac8..92e2f086ffe9a 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -77,6 +77,7 @@ BITCOIN_TESTS =\ test/addrman_tests.cpp \ test/amount_tests.cpp \ test/allocator_tests.cpp \ + test/banman_tests.cpp \ test/base32_tests.cpp \ test/base58_tests.cpp \ test/base64_tests.cpp \ diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 1c2d4c71b47b6..83b9d7c0b9e1d 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -52,6 +52,7 @@ static constexpr int DEFAULT_WAIT_CLIENT_TIMEOUT = 0; static const bool DEFAULT_NAMED=false; static const int CONTINUE_EXECUTION=-1; static constexpr int8_t UNKNOWN_NETWORK{-1}; +static constexpr std::array NETWORKS{"ipv4", "ipv6", "onion", "i2p", "cjdns"}; /** Default number of blocks to generate for RPC generatetoaddress. */ static const std::string DEFAULT_NBLOCKS = "1"; @@ -259,11 +260,10 @@ class BaseRequestHandler class AddrinfoRequestHandler : public BaseRequestHandler { private: - static constexpr std::array m_networks{"ipv4", "ipv6", "onion", "i2p"}; int8_t NetworkStringToId(const std::string& str) const { - for (size_t i = 0; i < m_networks.size(); ++i) { - if (str == m_networks.at(i)) return i; + for (size_t i = 0; i < NETWORKS.size(); ++i) { + if (str == NETWORKS[i]) return i; } return UNKNOWN_NETWORK; } @@ -286,7 +286,7 @@ class AddrinfoRequestHandler : public BaseRequestHandler throw std::runtime_error("-addrinfo requires dashd server to be running v21.0 and up"); } // Count the number of peers known to our node, by network. - std::array counts{{}}; + std::array counts{{}}; for (const UniValue& node : nodes) { std::string network_name{node["network"].get_str()}; const int8_t network_id{NetworkStringToId(network_name)}; @@ -296,8 +296,8 @@ class AddrinfoRequestHandler : public BaseRequestHandler // Prepare result to return to user. 
UniValue result{UniValue::VOBJ}, addresses{UniValue::VOBJ}; uint64_t total{0}; // Total address count - for (size_t i = 0; i < m_networks.size(); ++i) { - addresses.pushKV(m_networks.at(i), counts.at(i)); + for (size_t i = 0; i < NETWORKS.size(); ++i) { + addresses.pushKV(NETWORKS[i], counts.at(i)); total += counts.at(i); } addresses.pushKV("total", total); @@ -384,14 +384,13 @@ class NetinfoRequestHandler : public BaseRequestHandler { private: static constexpr uint8_t MAX_DETAIL_LEVEL{4}; - static constexpr std::array m_networks{"ipv4", "ipv6", "onion", "i2p"}; - std::array, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total) + std::array, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total) uint8_t m_block_relay_peers_count{0}; uint8_t m_manual_peers_count{0}; int8_t NetworkStringToId(const std::string& str) const { - for (size_t i = 0; i < m_networks.size(); ++i) { - if (str == m_networks.at(i)) return i; + for (size_t i = 0; i < NETWORKS.size(); ++i) { + if (str == NETWORKS[i]) return i; } return UNKNOWN_NETWORK; } @@ -493,9 +492,9 @@ class NetinfoRequestHandler : public BaseRequestHandler const bool is_block_relay{peer["relaytxes"].isNull() ? false : !peer["relaytxes"].get_bool()}; const std::string conn_type{peer["connection_type"].get_str()}; ++m_counts.at(is_outbound).at(network_id); // in/out by network - ++m_counts.at(is_outbound).at(m_networks.size()); // in/out overall + ++m_counts.at(is_outbound).at(NETWORKS.size()); // in/out overall ++m_counts.at(2).at(network_id); // total by network - ++m_counts.at(2).at(m_networks.size()); // total overall + ++m_counts.at(2).at(NETWORKS.size()); // total overall if (is_block_relay) ++m_block_relay_peers_count; if (conn_type == "manual") ++m_manual_peers_count; if (DetailsRequested()) { @@ -592,7 +591,7 @@ class NetinfoRequestHandler : public BaseRequestHandler for (int8_t n : reachable_networks) { result += strprintf("%8i", m_counts.at(i).at(n)); // network peers count } - result += strprintf(" %5i", m_counts.at(i).at(m_networks.size())); // total peers count + result += strprintf(" %5i", m_counts.at(i).at(NETWORKS.size())); // total peers count if (i == 1) { // the outbound row has two extra columns for block relay and manual peer counts result += strprintf(" %5i", m_block_relay_peers_count); if (m_manual_peers_count) result += strprintf(" %5i", m_manual_peers_count); diff --git a/src/chain.h b/src/chain.h index b3622121de90f..7e1296153a12c 100644 --- a/src/chain.h +++ b/src/chain.h @@ -59,37 +59,40 @@ class CBlockFileInfo READWRITE(VARINT(obj.nTimeLast)); } - void SetNull() { - nBlocks = 0; - nSize = 0; - nUndoSize = 0; - nHeightFirst = 0; - nHeightLast = 0; - nTimeFirst = 0; - nTimeLast = 0; - } - - CBlockFileInfo() { - SetNull(); - } - - std::string ToString() const; - - /** update statistics (does not update nSize) */ - void AddBlock(unsigned int nHeightIn, uint64_t nTimeIn) { - if (nBlocks==0 || nHeightFirst > nHeightIn) - nHeightFirst = nHeightIn; - if (nBlocks==0 || nTimeFirst > nTimeIn) - nTimeFirst = nTimeIn; - nBlocks++; - if (nHeightIn > nHeightLast) - nHeightLast = nHeightIn; - if (nTimeIn > nTimeLast) - nTimeLast = nTimeIn; - } + void SetNull() + { + nBlocks = 0; + nSize = 0; + nUndoSize = 0; + nHeightFirst = 0; + nHeightLast = 0; + nTimeFirst = 0; + nTimeLast = 0; + } + + CBlockFileInfo() + { + SetNull(); + } + + std::string ToString() const; + + /** update statistics (does not update nSize) */ + void AddBlock(unsigned int nHeightIn, uint64_t nTimeIn) + { + if (nBlocks == 0 || nHeightFirst 
> nHeightIn) + nHeightFirst = nHeightIn; + if (nBlocks == 0 || nTimeFirst > nTimeIn) + nTimeFirst = nTimeIn; + nBlocks++; + if (nHeightIn > nHeightLast) + nHeightLast = nHeightIn; + if (nTimeIn > nTimeLast) + nTimeLast = nTimeIn; + } }; -enum BlockStatus: uint32_t { +enum BlockStatus : uint32_t { //! Unused. BLOCK_VALID_UNKNOWN = 0, @@ -226,7 +229,7 @@ class CBlockIndex FlatFilePos ret; if (nStatus & BLOCK_HAVE_DATA) { ret.nFile = nFile; - ret.nPos = nDataPos; + ret.nPos = nDataPos; } return ret; } @@ -237,7 +240,7 @@ class CBlockIndex FlatFilePos ret; if (nStatus & BLOCK_HAVE_UNDO) { ret.nFile = nFile; - ret.nPos = nUndoPos; + ret.nPos = nUndoPos; } return ret; } @@ -245,13 +248,13 @@ class CBlockIndex CBlockHeader GetBlockHeader() const { CBlockHeader block; - block.nVersion = nVersion; + block.nVersion = nVersion; if (pprev) block.hashPrevBlock = pprev->GetBlockHash(); block.hashMerkleRoot = hashMerkleRoot; - block.nTime = nTime; - block.nBits = nBits; - block.nNonce = nNonce; + block.nTime = nTime; + block.nBits = nBits; + block.nNonce = nNonce; return block; } @@ -293,7 +296,7 @@ class CBlockIndex *(--pbegin) = pindex->GetBlockTime(); std::sort(pbegin, pend); - return pbegin[(pend - pbegin)/2]; + return pbegin[(pend - pbegin) / 2]; } std::string ToString() const; @@ -360,12 +363,14 @@ class CDiskBlockIndex : public CBlockIndex uint256 hash; uint256 hashPrev; - CDiskBlockIndex() { + CDiskBlockIndex() + { hash = uint256(); hashPrev = uint256(); } - explicit CDiskBlockIndex(const CBlockIndex* pindex) : CBlockIndex(*pindex) { + explicit CDiskBlockIndex(const CBlockIndex* pindex) : CBlockIndex(*pindex) + { hash = (hash == uint256() ? pindex->GetBlockHash() : hash); hashPrev = (pprev ? pprev->GetBlockHash() : uint256()); } @@ -399,12 +404,12 @@ class CDiskBlockIndex : public CBlockIndex if(hash != uint256()) return hash; // should never really get here, keeping this as a fallback CBlockHeader block; - block.nVersion = nVersion; - block.hashPrevBlock = hashPrev; - block.hashMerkleRoot = hashMerkleRoot; - block.nTime = nTime; - block.nBits = nBits; - block.nNonce = nNonce; + block.nVersion = nVersion; + block.hashPrevBlock = hashPrev; + block.hashMerkleRoot = hashMerkleRoot; + block.nTime = nTime; + block.nBits = nBits; + block.nNonce = nNonce; return block.GetHash(); } @@ -413,35 +418,45 @@ class CDiskBlockIndex : public CBlockIndex }; /** An in-memory indexed chain of blocks. */ -class CChain { +class CChain +{ private: std::vector vChain; public: + CChain() = default; + CChain(const CChain&) = delete; + CChain& operator=(const CChain&) = delete; + /** Returns the index entry for the genesis block of this chain, or nullptr if none. */ - CBlockIndex *Genesis() const { + CBlockIndex* Genesis() const + { return vChain.size() > 0 ? vChain[0] : nullptr; } /** Returns the index entry for the tip of this chain, or nullptr if none. */ - CBlockIndex *Tip() const { + CBlockIndex* Tip() const + { return vChain.size() > 0 ? vChain[vChain.size() - 1] : nullptr; } /** Returns the index entry at a particular height in this chain, or nullptr if no such height exists. */ - CBlockIndex *operator[](int nHeight) const { + CBlockIndex* operator[](int nHeight) const + { if (nHeight < 0 || nHeight >= (int)vChain.size()) return nullptr; return vChain[nHeight]; } /** Efficiently check whether a block is present in this chain. 
*/ - bool Contains(const CBlockIndex *pindex) const { + bool Contains(const CBlockIndex* pindex) const + { return (*this)[pindex->nHeight] == pindex; } /** Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip. */ - CBlockIndex *Next(const CBlockIndex *pindex) const { + CBlockIndex* Next(const CBlockIndex* pindex) const + { if (Contains(pindex)) return (*this)[pindex->nHeight + 1]; else @@ -449,18 +464,19 @@ class CChain { } /** Return the maximal height in the chain. Is equal to chain.Tip() ? chain.Tip()->nHeight : -1. */ - int Height() const { + int Height() const + { return vChain.size() - 1; } /** Set/initialize a chain with a given tip. */ - void SetTip(CBlockIndex *pindex); + void SetTip(CBlockIndex* pindex); /** Return a CBlockLocator that refers to a block in this chain (by default the tip). */ - CBlockLocator GetLocator(const CBlockIndex *pindex = nullptr) const; + CBlockLocator GetLocator(const CBlockIndex* pindex = nullptr) const; /** Find the last common block between this chain and a block index entry. */ - const CBlockIndex *FindFork(const CBlockIndex *pindex) const; + const CBlockIndex* FindFork(const CBlockIndex* pindex) const; /** Find the earliest block with timestamp equal or greater than the given time and height equal or greater than the given height. */ CBlockIndex* FindEarliestAtLeast(int64_t nTime, int height) const; diff --git a/src/context.h b/src/context.h index 70fc00cd95f7f..d19694930630b 100644 --- a/src/context.h +++ b/src/context.h @@ -8,6 +8,7 @@ #include #include +class ArgsManager; class ChainstateManager; class CTxMemPool; class CBlockPolicyEstimator; @@ -16,6 +17,7 @@ struct NodeContext; struct WalletContext; using CoreContext = std::variant, std::reference_wrapper, std::reference_wrapper, std::reference_wrapper, diff --git a/src/init.cpp b/src/init.cpp index 1bd565b7605f0..f633ac533fae3 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -2439,6 +2439,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) if (connect.size() != 1 || connect[0] != "0") { connOptions.m_specified_outgoing = connect; } + if (!connOptions.m_specified_outgoing.empty() && !connOptions.vSeedNodes.empty()) { + LogPrintf("-seednode is ignored when -connect is used\n"); + } + + if (args.IsArgSet("-dnsseed") && args.GetBoolArg("-dnsseed", DEFAULT_DNSSEED) && args.IsArgSet("-proxy")) { + LogPrintf("-dnsseed is ignored when -connect is used and -proxy is specified\n"); + } } std::string sem_str = args.GetArg("-socketevents", DEFAULT_SOCKETEVENTS); diff --git a/src/net.cpp b/src/net.cpp index 4fcbbfb3c126e..a54b17b92ab32 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2471,6 +2471,8 @@ void CConnman::ThreadDNSAddressSeed() } LogPrintf("Loading addresses from DNS seed %s\n", seed); + // If -proxy is in use, we make an ADDR_FETCH connection to the DNS resolved peer address + // for the base dns seed domain in chainparams if (HaveNameProxy()) { AddAddrFetch(seed); } else { @@ -2493,8 +2495,9 @@ void CConnman::ThreadDNSAddressSeed() } addrman.Add(vAdd, resolveSource); } else { - // We now avoid directly using results from DNS Seeds which do not support service bit filtering, - // instead using them as a addrfetch to get nodes with our desired service bits. 
+ // If the seed does not support a subdomain with our desired service bits, + // we make an ADDR_FETCH connection to the DNS resolved peer address for the + // base dns seed domain in chainparams AddAddrFetch(seed); } } @@ -2589,7 +2592,6 @@ void CConnman::ThreadOpenConnections(const std::vector connect, CDe { for (int64_t nLoop = 0;; nLoop++) { - ProcessAddrFetch(); for (const std::string& strAddr : connect) { CAddress addr(CService(), NODE_NONE); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 3d134aa7fa1c3..43f151890ba85 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -193,10 +193,10 @@ static constexpr uint64_t CMPCTBLOCKS_VERSION{1}; namespace { /** Blocks that are in flight, and that are in the queue to be downloaded. */ struct QueuedBlock { - uint256 hash; - const CBlockIndex* pindex; //!< Optional. - bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. - std::unique_ptr partialBlock; //!< Optional, used for CMPCTBLOCK downloads + /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */ + const CBlockIndex* pindex; + /** Optional, used for CMPCTBLOCK downloads */ + std::unique_ptr partialBlock; }; /** @@ -398,7 +398,6 @@ struct CNodeState { //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. std::chrono::microseconds m_downloading_since{0us}; int nBlocksInFlight{0}; - int nBlocksInFlightValidHeaders{0}; //! Whether we consider this a preferred download peer. bool fPreferredDownload{false}; //! Whether this peer wants invs or headers (when possible) for block announcements. @@ -898,16 +897,20 @@ class PeerManagerImpl final : public PeerManager /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ int m_highest_fast_announce GUARDED_BY(::cs_main){0}; - /* Returns a bool indicating whether we requested this block. - * Also used if a block was /not/ received and timed out or started with another peer + /** Have we requested this block from a peer */ + bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Remove this block from our tracked requested blocks. Called if: + * - the block has been recieved from a peer + * - the request for the block has timed out */ - bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void RemoveBlockRequest(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /* Mark a block as in flight * Returns false, still setting pit, if the block was already in flight from the same peer * pit will only be valid as long as the same cs_main lock is being held */ - bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -946,7 +949,7 @@ class PeerManagerImpl final : public PeerManager std::list lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); /** Number of peers from which we're downloading blocks. 
*/ - int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + int m_peers_downloading_from GUARDED_BY(cs_main) = 0; /** Storage for orphan information */ TxOrphanage m_orphanage; @@ -1074,32 +1077,42 @@ std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::micros return m_next_inv_to_inbounds; } -bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) +bool PeerManagerImpl::IsBlockRequested(const uint256& hash) { - std::map::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); - if (itInFlight != mapBlocksInFlight.end()) { - CNodeState *state = State(itInFlight->second.first); - assert(state != nullptr); - state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; - if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { - // Last validated block on the queue was received. - nPeersWithValidatedDownloads--; - } - if (state->vBlocksInFlight.begin() == itInFlight->second.second) { - // First block on the queue was received, update the start download time for the next one - state->m_downloading_since = std::max(state->m_downloading_since, GetTime()); - } - state->vBlocksInFlight.erase(itInFlight->second.second); - state->nBlocksInFlight--; - state->m_stalling_since = 0us; - mapBlocksInFlight.erase(itInFlight); - return true; + return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end(); +} + +void PeerManagerImpl::RemoveBlockRequest(const uint256& hash) +{ + auto it = mapBlocksInFlight.find(hash); + if (it == mapBlocksInFlight.end()) { + // Block was not requested + return; } - return false; + + auto [node_id, list_it] = it->second; + CNodeState *state = State(node_id); + assert(state != nullptr); + + if (state->vBlocksInFlight.begin() == list_it) { + // First block on the queue was received, update the start download time for the next one + state->m_downloading_since = std::max(state->m_downloading_since, GetTime()); + } + state->vBlocksInFlight.erase(list_it); + + state->nBlocksInFlight--; + if (state->nBlocksInFlight == 0) { + // Last validated block on the queue was received. + m_peers_downloading_from--; + } + state->m_stalling_since = 0us; + mapBlocksInFlight.erase(it); } -bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex *pindex, std::list::iterator **pit) +bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list::iterator **pit) { + const uint256& hash{block.GetBlockHash()}; + CNodeState *state = State(nodeid); assert(state != nullptr); @@ -1113,22 +1126,20 @@ bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, co } // Make sure it's not listed somewhere already. - MarkBlockAsReceived(hash); + RemoveBlockRequest(hash); std::list::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), - {hash, pindex, pindex != nullptr, std::unique_ptr(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); + {&block, std::unique_ptr(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); state->nBlocksInFlight++; - state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. 
state->m_downloading_since = GetTime(); - } - if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { - nPeersWithValidatedDownloads++; + m_peers_downloading_from++; } itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first; - if (pit) + if (pit) { *pit = &itInFlight->second.second; + } return true; } @@ -1310,7 +1321,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) { if (pindex->HaveTxsDownloaded()) state->pindexLastCommonBlock = pindex; - } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { + } else if (!IsBlockRequested(pindex->GetBlockHash())) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. @@ -1565,12 +1576,12 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) { nSyncStarted--; for (const QueuedBlock& entry : state->vBlocksInFlight) { - mapBlocksInFlight.erase(entry.hash); + mapBlocksInFlight.erase(entry.pindex->GetBlockHash()); } WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid)); m_num_preferred_download_peers -= state->fPreferredDownload; - nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); - assert(nPeersWithValidatedDownloads >= 0); + m_peers_downloading_from -= (state->nBlocksInFlight != 0); + assert(m_peers_downloading_from >= 0); m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(m_outbound_peers_with_protect_from_disconnect >= 0); @@ -1580,7 +1591,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); assert(m_num_preferred_download_peers == 0); - assert(nPeersWithValidatedDownloads == 0); + assert(m_peers_downloading_from == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); } } // cs_main @@ -1811,10 +1822,10 @@ std::optional PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl // Mark block as in-flight unless it already is (for this peer). // If a block was already in-flight for a different peer, its BLOCKTXN // response will be dropped. - const uint256& hash{block_index.GetBlockHash()}; - if (!MarkBlockAsInFlight(peer_id, hash, &block_index)) return "Already requested from this peer"; + if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer"; // Construct message to request the block + const uint256& hash{block_index.GetBlockHash()}; std::vector invs{CInv(MSG_BLOCK, hash)}; // Send block request message to the peer @@ -2797,7 +2808,7 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && - !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { + !IsBlockRequested(pindexWalk->GetBlockHash())) { // We don't have this block, and it's not yet in flight. 
vToFetch.push_back(pindexWalk); } @@ -2820,7 +2831,7 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c break; } vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); - MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex); + BlockRequested(pfrom.GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } @@ -3781,7 +3792,7 @@ void PeerManagerImpl::ProcessMessage( statsClient.inc(strprintf("message.received.inv_%s", inv.GetCommand()), 1.0f); UpdateBlockAvailability(pfrom.GetId(), inv.hash); - if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { + if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) { // Headers-first is the primary method of announcement on // the network. If a node fell back to sending blocks by // inv, it may be for a re-org, or because we haven't @@ -4361,7 +4372,7 @@ void PeerManagerImpl::ProcessMessage( if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list::iterator *queuedBlockIt = nullptr; - if (!MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { + if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); else { @@ -4374,7 +4385,7 @@ void PeerManagerImpl::ProcessMessage( PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect + RemoveBlockRequest(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(pfrom.GetId(), 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { @@ -4468,7 +4479,7 @@ void PeerManagerImpl::ProcessMessage( // process from some other peer. We do this after calling // ProcessNewBlock so that a malleated cmpctblock announcement // can't be used to interfere with block relay. - MarkBlockAsReceived(pblock->GetHash()); + RemoveBlockRequest(pblock->GetHash()); } } return; @@ -4500,7 +4511,7 @@ void PeerManagerImpl::ProcessMessage( PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect + RemoveBlockRequest(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { @@ -4526,7 +4537,7 @@ void PeerManagerImpl::ProcessMessage( // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, etc. 
- MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer + RemoveBlockRequest(resp.blockhash); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and // updating which peers send us compact blocks, so the race @@ -4605,9 +4616,10 @@ void PeerManagerImpl::ProcessMessage( const uint256 hash(pblock->GetHash()); { LOCK(cs_main); - // Also always process if we requested the block explicitly, as we may - // need it even though it is not a candidate for a new best tip. - forceProcessing |= MarkBlockAsReceived(hash); + // Always process the block if we requested it, since we may + // need it even when it's not a candidate for a new best tip. + forceProcessing = IsBlockRequested(hash); + RemoveBlockRequest(hash); // mapBlockSource is only used for punishing peers and setting // which peers send us compact blocks, so the race between here and // cs_main in ProcessNewBlock is fine. @@ -5863,9 +5875,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // to unreasonably increase our timeout. if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); - int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); + int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { - LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId()); + LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId()); pto->fDisconnect = true; return true; } @@ -5917,7 +5929,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); for (const CBlockIndex *pindex : vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); - MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex); + BlockRequested(pto->GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } diff --git a/src/net_types.cpp b/src/net_types.cpp index c8f57fe6c644a..e4101a9876356 100644 --- a/src/net_types.cpp +++ b/src/net_types.cpp @@ -4,12 +4,16 @@ #include +#include #include #include #include +static const char* BANMAN_JSON_VERSION_KEY{"version"}; + CBanEntry::CBanEntry(const UniValue& json) - : nVersion(json["version"].get_int()), nCreateTime(json["ban_created"].get_int64()), + : nVersion(json[BANMAN_JSON_VERSION_KEY].get_int()), + nCreateTime(json["ban_created"].get_int64()), nBanUntil(json["banned_until"].get_int64()) { } @@ -17,7 +21,7 @@ CBanEntry::CBanEntry(const UniValue& json) UniValue CBanEntry::ToJson() const { UniValue json(UniValue::VOBJ); - json.pushKV("version", nVersion); + json.pushKV(BANMAN_JSON_VERSION_KEY, nVersion); json.pushKV("ban_created", nCreateTime); json.pushKV("banned_until", nBanUntil); return json; @@ -54,11 +58,16 @@ UniValue BanMapToJson(const banmap_t& bans) void BanMapFromJson(const UniValue& bans_json, banmap_t& bans) { for (const auto& ban_entry_json : bans_json.getValues()) { + const int version{ban_entry_json[BANMAN_JSON_VERSION_KEY].get_int()}; + if (version != CBanEntry::CURRENT_VERSION) { + 
LogPrintf("Dropping entry with unknown version (%s) from ban list\n", version); + continue; + } CSubNet subnet; const auto& subnet_str = ban_entry_json[BANMAN_JSON_ADDR_KEY].get_str(); if (!LookupSubNet(subnet_str, subnet)) { - throw std::runtime_error( - strprintf("Cannot parse banned address or subnet: %s", subnet_str)); + LogPrintf("Dropping entry with unparseable address or subnet (%s) from ban list\n", subnet_str); + continue; } bans.insert_or_assign(subnet, CBanEntry{ban_entry_json}); } diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 8f7a83b1c2fd5..fc66231389314 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #ifdef ENABLE_WALLET #include @@ -148,11 +149,6 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans QApplication::installTranslator(&translator); } -static std::string JoinErrors(const std::vector& errors) -{ - return Join(errors, "\n", [](const std::string& error) { return "- " + error; }); -} - static bool InitSettings() { if (!gArgs.GetSettingsPath()) { @@ -162,13 +158,13 @@ static bool InitSettings() std::vector errors; if (!gArgs.ReadSettingsFile(&errors)) { bilingual_str error = _("Settings file could not be read"); - InitError(Untranslated(strprintf("%s:\n%s\n", error.original, JoinErrors(errors)))); + InitError(Untranslated(strprintf("%s:\n%s\n", error.original, MakeUnorderedList(errors)))); QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Reset | QMessageBox::Abort); /*: Explanatory text shown on startup when the settings file cannot be read. Prompts user to make a choice between resetting or aborting. */ messagebox.setInformativeText(QObject::tr("Do you want to reset settings to default values, or to abort without making changes?")); - messagebox.setDetailedText(QString::fromStdString(JoinErrors(errors))); + messagebox.setDetailedText(QString::fromStdString(MakeUnorderedList(errors))); messagebox.setTextFormat(Qt::PlainText); messagebox.setDefaultButton(QMessageBox::Reset); switch (messagebox.exec()) { @@ -184,14 +180,14 @@ static bool InitSettings() errors.clear(); if (!gArgs.WriteSettingsFile(&errors)) { bilingual_str error = _("Settings file could not be written"); - InitError(Untranslated(strprintf("%s:\n%s\n", error.original, JoinErrors(errors)))); + InitError(Untranslated(strprintf("%s:\n%s\n", error.original, MakeUnorderedList(errors)))); QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Ok); /*: Explanatory text shown on startup when the settings file could not be written. Prompts user to check that we have the ability to write to the file. Explains that the user has the option of running without a settings file.*/ - messagebox.setInformativeText(QObject::tr("A fatal error occured. Check that settings file is writable, or try running with -nosettings.")); - messagebox.setDetailedText(QString::fromStdString(JoinErrors(errors))); + messagebox.setInformativeText(QObject::tr("A fatal error occurred. 
Check that settings file is writable, or try running with -nosettings.")); + messagebox.setDetailedText(QString::fromStdString(MakeUnorderedList(errors))); messagebox.setTextFormat(Qt::PlainText); messagebox.setDefaultButton(QMessageBox::Ok); messagebox.exec(); diff --git a/src/random.cpp b/src/random.cpp index 3ccbe82c4a1de..21c56c106a46e 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -34,10 +34,8 @@ #include #include #endif -#if defined(HAVE_GETENTROPY) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)) -#include -#endif #if defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) +#include #include #endif #ifdef HAVE_SYSCTL_ARND @@ -307,16 +305,14 @@ void GetOSRand(unsigned char *ent32) RandFailure(); } } -#elif defined(HAVE_GETENTROPY) && defined(__OpenBSD__) - /* On OpenBSD this can return up to 256 bytes of entropy, will return an - * error if more are requested. - * The call cannot return less than the requested number of bytes. - getentropy is explicitly limited to openbsd here, as a similar (but not - the same) function may exist on other platforms via glibc. +#elif defined(__OpenBSD__) + /* OpenBSD. From the arc4random(3) man page: + "Use of these functions is encouraged for almost all random number + consumption because the other interfaces are deficient in either + quality, portability, standardization, or availability." + The function call is always successful. */ - if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { - RandFailure(); - } + arc4random_buf(ent32, NUM_OS_RANDOM_BYTES); // Silence a compiler warning about unused function. (void)GetDevURandom; #elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 16055175fb45b..24898db0cc048 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -1619,7 +1620,7 @@ static RPCHelpMan verifychain() "\nVerifies blockchain database.\n", { {"checklevel", RPCArg::Type::NUM, /* default */ strprintf("%d, range=0-4", DEFAULT_CHECKLEVEL), - strprintf("How thorough the block verification is:\n - %s", Join(CHECKLEVEL_DOC, "\n- "))}, + strprintf("How thorough the block verification is:\n - %s", MakeUnorderedList(CHECKLEVEL_DOC))}, {"nblocks", RPCArg::Type::NUM, /* default */ strprintf("%d, 0=all", DEFAULT_CHECKBLOCKS), "The number of blocks to check."}, }, RPCResult{ @@ -1775,9 +1776,8 @@ RPCHelpMan getblockchaininfo() [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - std::string strChainName = gArgs.IsArgSet("-devnet") ? gArgs.GetDevNetName() : Params().NetworkIDString(); - const NodeContext& node = EnsureAnyNodeContext(request.context); + const ArgsManager& args{EnsureArgsman(node)}; ChainstateManager& chainman = EnsureChainman(node); LOCK(cs_main); @@ -1791,7 +1791,11 @@ RPCHelpMan getblockchaininfo() const auto ehfSignals = node.mnhf_manager->GetSignalsStage(tip); UniValue obj(UniValue::VOBJ); - obj.pushKV("chain", strChainName); + if (args.IsArgSet("-devnet")) { + obj.pushKV("chain", args.GetDevNetName()); + } else { + obj.pushKV("chain", Params().NetworkIDString()); + } obj.pushKV("blocks", height); obj.pushKV("headers", chainman.m_best_header ? chainman.m_best_header->nHeight : -1); obj.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); @@ -1813,7 +1817,7 @@ RPCHelpMan getblockchaininfo() obj.pushKV("pruneheight", block->nHeight); // if 0, execution bypasses the whole if block. 
- bool automatic_pruning = (gArgs.GetArg("-prune", 0) != 1); + bool automatic_pruning{args.GetArg("-prune", 0) != 1}; obj.pushKV("automatic_pruning", automatic_pruning); if (automatic_pruning) { obj.pushKV("prune_target_size", nPruneTarget); @@ -2642,13 +2646,18 @@ static RPCHelpMan savemempool() return RPCHelpMan{"savemempool", "\nDumps the mempool to disk. It will fail until the previous dump is fully loaded.\n", {}, - RPCResult{RPCResult::Type::NONE, "", ""}, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::STR, "filename", "the directory and file where the mempool was saved"}, + }}, RPCExamples{ HelpExampleCli("savemempool", "") + HelpExampleRpc("savemempool", "") }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { + const ArgsManager& args{EnsureAnyArgsman(request.context)}; const CTxMemPool& mempool = EnsureAnyMemPool(request.context); if (!mempool.IsLoaded()) { @@ -2659,7 +2668,10 @@ static RPCHelpMan savemempool() throw JSONRPCError(RPC_MISC_ERROR, "Unable to dump mempool to disk"); } - return NullUniValue; + UniValue ret(UniValue::VOBJ); + ret.pushKV("filename", fs::path((args.GetDataDirNet() / "mempool.dat")).u8string()); + + return ret; }, }; } @@ -3001,10 +3013,11 @@ static RPCHelpMan dumptxoutset() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str())); + const ArgsManager& args{EnsureAnyArgsman(request.context)}; + const fs::path path = fsbridge::AbsPathJoin(args.GetDataDirNet(), fs::u8path(request.params[0].get_str())); // Write to a temporary path and then move into `path` on completion // to avoid confusion due to an interruption. - const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete")); + const fs::path temppath = fsbridge::AbsPathJoin(args.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete")); if (fs::exists(path)) { throw JSONRPCError( diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp index fc79204c1ac8e..d533934075d03 100644 --- a/src/rpc/server_util.cpp +++ b/src/rpc/server_util.cpp @@ -37,6 +37,19 @@ CTxMemPool& EnsureAnyMemPool(const CoreContext& context) return EnsureMemPool(EnsureAnyNodeContext(context)); } +ArgsManager& EnsureArgsman(const NodeContext& node) +{ + if (!node.args) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Node args not found"); + } + return *node.args; +} + +ArgsManager& EnsureAnyArgsman(const CoreContext& context) +{ + return EnsureArgsman(EnsureAnyNodeContext(context)); +} + ChainstateManager& EnsureChainman(const NodeContext& node) { if (!node.chainman) { diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h index 7960dc09c62ea..9bcf1b7d081d2 100644 --- a/src/rpc/server_util.h +++ b/src/rpc/server_util.h @@ -7,10 +7,11 @@ #include +class ArgsManager; class CBlockPolicyEstimator; class CConnman; -class ChainstateManager; class CTxMemPool; +class ChainstateManager; class PeerManager; struct NodeContext; struct LLMQContext; @@ -18,6 +19,8 @@ struct LLMQContext; NodeContext& EnsureAnyNodeContext(const CoreContext& context); CTxMemPool& EnsureMemPool(const NodeContext& node); CTxMemPool& EnsureAnyMemPool(const CoreContext& context); +ArgsManager& EnsureArgsman(const NodeContext& node); +ArgsManager& EnsureAnyArgsman(const CoreContext& context); ChainstateManager& EnsureChainman(const NodeContext& node); ChainstateManager& EnsureAnyChainman(const CoreContext& context); 
CBlockPolicyEstimator& EnsureFeeEstimator(const NodeContext& node); diff --git a/src/test/banman_tests.cpp b/src/test/banman_tests.cpp new file mode 100644 index 0000000000000..27ce9ad63825c --- /dev/null +++ b/src/test/banman_tests.cpp @@ -0,0 +1,43 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include +#include + + +#include + +BOOST_FIXTURE_TEST_SUITE(banman_tests, BasicTestingSetup) + +BOOST_AUTO_TEST_CASE(file) +{ + SetMockTime(777s); + const fs::path banlist_path{m_args.GetDataDirBase() / "banlist_test"}; + { + const std::string entries_write{ + "{ \"banned_nets\": [" + " { \"version\": 1, \"ban_created\": 0, \"banned_until\": 778, \"address\": \"aaaaaaaaa\" }," + " { \"version\": 2, \"ban_created\": 0, \"banned_until\": 778, \"address\": \"bbbbbbbbb\" }," + " { \"version\": 1, \"ban_created\": 0, \"banned_until\": 778, \"address\": \"1.0.0.0/8\" }" + "] }", + }; + assert(WriteBinaryFile(banlist_path + ".json", entries_write)); + { + // The invalid entries will be dropped, but the valid one remains + ASSERT_DEBUG_LOG("Dropping entry with unparseable address or subnet (aaaaaaaaa) from ban list"); + ASSERT_DEBUG_LOG("Dropping entry with unknown version (2) from ban list"); + BanMan banman{banlist_path, /*client_interface=*/nullptr, /*default_ban_time=*/0}; + banmap_t entries_read; + banman.GetBanned(entries_read); + assert(entries_read.size() == 1); + } + } +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/fs_tests.cpp b/src/test/fs_tests.cpp index b06d2b747eb3b..4a8803a205017 100644 --- a/src/test/fs_tests.cpp +++ b/src/test/fs_tests.cpp @@ -118,6 +118,40 @@ BOOST_AUTO_TEST_CASE(fsbridge_fstream) } } +BOOST_AUTO_TEST_CASE(rename) +{ + const fs::path tmpfolder{m_args.GetDataDirBase()}; + + const fs::path path1{GetUniquePath(tmpfolder)}; + const fs::path path2{GetUniquePath(tmpfolder)}; + + const std::string path1_contents{"1111"}; + const std::string path2_contents{"2222"}; + + { + std::ofstream file{path1}; + file << path1_contents; + } + + { + std::ofstream file{path2}; + file << path2_contents; + } + + // Rename path1 -> path2. 
+ BOOST_CHECK(RenameOver(path1, path2)); + + BOOST_CHECK(!fs::exists(path1)); + + { + std::ifstream file{path2}; + std::string contents; + file >> contents; + BOOST_CHECK_EQUAL(contents, path1_contents); + } + fs::remove(path2); +} + #ifndef WIN32 BOOST_AUTO_TEST_CASE(create_directories) { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 823d096bc5df7..9bf3dc3d25048 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -29,23 +29,23 @@ #include #include -CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee, - int64_t _nTime, unsigned int _entryHeight, - bool _spendsCoinbase, unsigned int _sigOps, LockPoints lp) - : tx(_tx), nFee(_nFee), nTxSize(tx->GetTotalSize()), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight), - spendsCoinbase(_spendsCoinbase), sigOpCount(_sigOps), lockPoints(lp) -{ - nCountWithDescendants = 1; - nSizeWithDescendants = GetTxSize(); - nModFeesWithDescendants = nFee; - - feeDelta = 0; - - nCountWithAncestors = 1; - nSizeWithAncestors = GetTxSize(); - nModFeesWithAncestors = nFee; - nSigOpCountWithAncestors = sigOpCount; -} +CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& tx, CAmount fee, + int64_t time, unsigned int entry_height, + bool spends_coinbase, int64_t sigops_count, LockPoints lp) + : tx{tx}, + nFee{fee}, + nTxSize(tx->GetTotalSize()), + nUsageSize{RecursiveDynamicUsage(tx)}, + nTime{time}, + entryHeight{entry_height}, + spendsCoinbase{spends_coinbase}, + sigOpCount{sigops_count}, + lockPoints{lp}, + nSizeWithDescendants{GetTxSize()}, + nModFeesWithDescendants{nFee}, + nSizeWithAncestors{GetTxSize()}, + nModFeesWithAncestors{nFee}, + nSigOpCountWithAncestors{sigOpCount} {} void CTxMemPoolEntry::UpdateFeeDelta(int64_t newFeeDelta) { diff --git a/src/txmempool.h b/src/txmempool.h index e281ddaa4bf6a..d3f878a13d8fa 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -47,19 +47,16 @@ using CBLSLazyPublicKey = CBLSLazyWrapper; /** Fake height value used in Coin to signify they are only in the memory pool (since 0.8) */ static const uint32_t MEMPOOL_HEIGHT = 0x7FFFFFFF; -struct LockPoints -{ +struct LockPoints { // Will be set to the blockchain height and median time past // values that would be necessary to satisfy all relative locktime // constraints (BIP68) of this tx given our view of block chain history - int height; - int64_t time; + int height{0}; + int64_t time{0}; // As long as the current chain descends from the highest height block // containing one of the inputs used in the calculation, then the cached // values are still valid even after a reorg. 
- CBlockIndex* maxInputBlock; - - LockPoints() : height(0), time(0), maxInputBlock(nullptr) { } + CBlockIndex* maxInputBlock{nullptr}; }; struct CompareIteratorByHash { @@ -106,28 +103,28 @@ class CTxMemPoolEntry const int64_t nTime; //!< Local time when entering the mempool const unsigned int entryHeight; //!< Chain height when entering the mempool const bool spendsCoinbase; //!< keep track of transactions that spend a coinbase - const unsigned int sigOpCount; //!< Legacy sig ops plus P2SH sig op count - int64_t feeDelta; //!< Used for determining the priority of the transaction for mining in a block - LockPoints lockPoints; //!< Track the height and time at which tx was final + const int64_t sigOpCount; //!< Legacy sig ops plus P2SH sig op count + int64_t feeDelta{0}; //!< Used for determining the priority of the transaction for mining in a block + LockPoints lockPoints; //!< Track the height and time at which tx was final // Information about descendants of this transaction that are in the // mempool; if we remove this transaction we must remove all of these // descendants as well. - uint64_t nCountWithDescendants; //!< number of descendant transactions + uint64_t nCountWithDescendants{1}; //!< number of descendant transactions uint64_t nSizeWithDescendants; //!< ... and size CAmount nModFeesWithDescendants; //!< ... and total fees (all including us) // Analogous statistics for ancestor transactions - uint64_t nCountWithAncestors; + uint64_t nCountWithAncestors{1}; uint64_t nSizeWithAncestors; CAmount nModFeesWithAncestors; - unsigned int nSigOpCountWithAncestors; + int64_t nSigOpCountWithAncestors; public: - CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee, - int64_t _nTime, unsigned int _entryHeight, - bool spendsCoinbase, - unsigned int nSigOps, LockPoints lp); + CTxMemPoolEntry(const CTransactionRef& tx, CAmount fee, + int64_t time, unsigned int entry_height, + bool spends_coinbase, + int64_t sigops_count, LockPoints lp); const CTransaction& GetTx() const { return *this->tx; } CTransactionRef GetSharedTx() const { return this->tx; } @@ -135,7 +132,7 @@ class CTxMemPoolEntry size_t GetTxSize() const; std::chrono::seconds GetTime() const { return std::chrono::seconds{nTime}; } unsigned int GetHeight() const { return entryHeight; } - unsigned int GetSigOpCount() const { return sigOpCount; } + int64_t GetSigOpCount() const { return sigOpCount; } int64_t GetModifiedFee() const { return nFee + feeDelta; } size_t DynamicMemoryUsage() const { return nUsageSize; } const LockPoints& GetLockPoints() const { return lockPoints; } @@ -159,7 +156,7 @@ class CTxMemPoolEntry uint64_t GetCountWithAncestors() const { return nCountWithAncestors; } uint64_t GetSizeWithAncestors() const { return nSizeWithAncestors; } CAmount GetModFeesWithAncestors() const { return nModFeesWithAncestors; } - unsigned int GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } + int64_t GetSigOpCountWithAncestors() const { return nSigOpCountWithAncestors; } const Parents& GetMemPoolParentsConst() const { return m_parents; } const Children& GetMemPoolChildrenConst() const { return m_children; } diff --git a/src/util/string.h b/src/util/string.h index 72fea9fae8e3a..beb161a2e6ef2 100644 --- a/src/util/string.h +++ b/src/util/string.h @@ -78,6 +78,14 @@ inline std::string Join(const std::vector& list, const std::string& return Join(list, separator); } +/** + * Create an unordered multi-line list of items. 
+ */ +inline std::string MakeUnorderedList(const std::vector& items) +{ + return Join(items, "\n", [](const std::string& item) { return "- " + item; }); +} + /** * Check if a string does not contain any embedded NUL (\0) characters */ diff --git a/src/util/system.cpp b/src/util/system.cpp index 0de01bf84aa63..a65ed64966c38 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -539,11 +539,11 @@ bool ArgsManager::InitSettings(std::string& error) std::vector errors; if (!ReadSettingsFile(&errors)) { - error = strprintf("Failed loading settings file:\n- %s\n", Join(errors, "\n- ")); + error = strprintf("Failed loading settings file:\n%s\n", MakeUnorderedList(errors)); return false; } if (!WriteSettingsFile(&errors)) { - error = strprintf("Failed saving settings file:\n- %s\n", Join(errors, "\n- ")); + error = strprintf("Failed saving settings file:\n%s\n", MakeUnorderedList(errors)); return false; } return true; @@ -1138,9 +1138,20 @@ std::vector ArgsManager::GetSettingsList(const std::string& bool RenameOver(fs::path src, fs::path dest) { +#ifdef __MINGW64__ + // This is a workaround for a bug in libstdc++ which + // implements std::filesystem::rename with _wrename function. + // This bug has been fixed in upstream: + // - GCC 10.3: 8dd1c1085587c9f8a21bb5e588dfe1e8cdbba79e + // - GCC 11.1: 1dfd95f0a0ca1d9e6cbc00e6cbfd1fa20a98f312 + // For more details see the commits mentioned above. + return MoveFileExW(src.wstring().c_str(), dest.wstring().c_str(), + MOVEFILE_REPLACE_EXISTING) != 0; +#else std::error_code error; fs::rename(src, dest, error); return !error; +#endif } /** diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index d176576a4afc2..28abf73e2333d 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -215,11 +215,43 @@ def test_seed_peers(self): ]): self.nodes[0].setmocktime(start + 65) + def test_connect_with_seednode(self): + self.log.info('Test -connect with -seednode') + seednode_ignored = ['-seednode is ignored when -connect is used\n'] + dnsseed_ignored = ['-dnsseed is ignored when -connect is used and -proxy is specified\n'] + addcon_thread_started = ['addcon thread start\n'] + self.stop_node(0) + + # When -connect is supplied, expanding addrman via getaddr calls to ADDR_FETCH(-seednode) + # nodes is irrelevant and -seednode is ignored. + with self.nodes[0].assert_debug_log(expected_msgs=seednode_ignored): + self.start_node(0, extra_args=['-connect=fakeaddress1', '-seednode=fakeaddress2']) + + # With -proxy, an ADDR_FETCH connection is made to a peer that the dns seed resolves to. + # ADDR_FETCH connections are not used when -connect is used. + with self.nodes[0].assert_debug_log(expected_msgs=dnsseed_ignored): + self.restart_node(0, extra_args=['-connect=fakeaddress1', '-dnsseed=1', '-proxy=1.2.3.4']) + + # If the user did not disable -dnsseed, but it was soft-disabled because they provided -connect, + # they shouldn't see a warning about -dnsseed being ignored. 
+ with self.nodes[0].assert_debug_log(expected_msgs=addcon_thread_started, + unexpected_msgs=dnsseed_ignored): + self.restart_node(0, extra_args=['-connect=fakeaddress1', '-proxy=1.2.3.4']) + + # We have to supply expected_msgs as it's a required argument + # The expected_msg must be something we are confident will be logged after the unexpected_msg + # These cases test for -connect being supplied but only to disable it + for connect_arg in ['-connect=0', '-noconnect']: + with self.nodes[0].assert_debug_log(expected_msgs=addcon_thread_started, + unexpected_msgs=seednode_ignored): + self.restart_node(0, extra_args=[connect_arg, '-seednode=fakeaddress2']) + def run_test(self): self.test_log_buffer() self.test_args_log() self.test_seed_peers() self.test_networkactive() + self.test_connect_with_seednode() self.test_config_file_parser() diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 6de7af5a42367..62b9b990f2ea7 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -133,8 +133,9 @@ def run_test(self): mempooldat1 = os.path.join(self.nodes[1].datadir, self.chain, 'mempool.dat') self.log.debug("Remove the mempool.dat file. Verify that savemempool to disk via RPC re-creates it") os.remove(mempooldat0) - self.nodes[0].savemempool() + result0 = self.nodes[0].savemempool() assert os.path.isfile(mempooldat0) + assert_equal(result0['filename'], mempooldat0) self.log.debug("Stop nodes, make node1 use mempool.dat from node0. Verify it has 6 transactions") os.rename(mempooldat0, mempooldat1)
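Note on the shared network table in bitcoin-cli.cpp: both the -addrinfo and -netinfo handlers now index their per-network counters through the single file-scope NETWORKS array (with "cjdns" added) and the UNKNOWN_NETWORK sentinel, instead of keeping per-class m_networks copies. Below is a minimal standalone sketch of that name-to-index lookup; it reuses the same identifiers purely for illustration, is not the patch's code, and builds on its own with C++17:

    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <string>

    static constexpr int8_t UNKNOWN_NETWORK{-1};
    static constexpr std::array NETWORKS{"ipv4", "ipv6", "onion", "i2p", "cjdns"};

    // Map a "network" string (as reported by getpeerinfo/getnodeaddresses) to an
    // index into NETWORKS, or UNKNOWN_NETWORK when the name is not recognized.
    int8_t NetworkStringToId(const std::string& str)
    {
        for (size_t i = 0; i < NETWORKS.size(); ++i) {
            if (str == NETWORKS[i]) return i;
        }
        return UNKNOWN_NETWORK;
    }

    int main()
    {
        std::cout << int{NetworkStringToId("i2p")} << '\n';     // prints 3
        std::cout << int{NetworkStringToId("unknown")} << '\n'; // prints -1
    }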
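Note on the net_types.cpp change: ban-list loading becomes tolerant, so BanMapFromJson now skips entries whose version is not CBanEntry::CURRENT_VERSION or whose address/subnet cannot be parsed, logging each drop instead of throwing on the first bad record (the new banman_tests.cpp exercises exactly this). The following is a rough standalone sketch of that filtering policy only; it uses plain structs and a dummy subnet check in place of UniValue/CSubNet/LookupSubNet, so all names and types here are illustrative assumptions, not the project's API:

    #include <iostream>
    #include <string>
    #include <vector>

    struct BanEntryJson {
        int version;
        std::string address;
    };

    constexpr int CURRENT_VERSION{1};

    // Stand-in for LookupSubNet(): accept only strings that look like "a.b.c.d/prefix".
    bool LooksLikeSubnet(const std::string& s)
    {
        return s.find('/') != std::string::npos && s.find('.') != std::string::npos;
    }

    // Keep valid entries, drop and log the rest rather than aborting the whole load.
    std::vector<std::string> LoadBanList(const std::vector<BanEntryJson>& entries)
    {
        std::vector<std::string> accepted;
        for (const auto& entry : entries) {
            if (entry.version != CURRENT_VERSION) {
                std::cerr << "Dropping entry with unknown version (" << entry.version << ")\n";
                continue;
            }
            if (!LooksLikeSubnet(entry.address)) {
                std::cerr << "Dropping entry with unparseable address or subnet (" << entry.address << ")\n";
                continue;
            }
            accepted.push_back(entry.address);
        }
        return accepted;
    }

    int main()
    {
        const auto kept = LoadBanList({{1, "aaaaaaaaa"}, {2, "2.0.0.0/8"}, {1, "1.0.0.0/8"}});
        std::cout << kept.size() << '\n'; // 1, mirroring the expectation in banman_tests.cpp
    }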
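Note on the new MakeUnorderedList() helper in util/string.h: it renders a vector of strings as a dashed, one-item-per-line list, which ArgsManager::InitSettings() and the GUI settings-file error dialogs now use in place of the removed local JoinErrors(). A simplified standalone reimplementation of that behaviour follows (illustrative only; the actual helper is a one-liner built on Join()):

    #include <iostream>
    #include <string>
    #include <vector>

    // Render items as "- item" lines separated by newlines, e.g. for error reports.
    std::string MakeUnorderedList(const std::vector<std::string>& items)
    {
        std::string out;
        for (size_t i = 0; i < items.size(); ++i) {
            if (i > 0) out += '\n';
            out += "- " + items[i];
        }
        return out;
    }

    int main()
    {
        // Mirrors messages like "Failed loading settings file:\n- <error>\n" in util/system.cpp.
        std::cout << "Failed loading settings file:\n"
                  << MakeUnorderedList({"first error", "second error"}) << '\n';
    }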