54#include <boost/multi_index/hashed_index.hpp>
55#include <boost/multi_index/member.hpp>
56#include <boost/multi_index/ordered_index.hpp>
57#include <boost/multi_index_container.hpp>
115 "Max protocol message length must be greater than largest "
116 "possible INV message");
173 std::chrono::seconds(2),
174 std::chrono::seconds(2),
175 std::chrono::seconds(60),
182 std::chrono::seconds(2),
183 std::chrono::seconds(2),
184 std::chrono::seconds(60),
217 "MAX_BLOCKTXN_DEPTH too high");
276 std::chrono::seconds{1},
277 "INVENTORY_RELAY_MAX too low");
328 std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
333 std::chrono::seconds timeAdded;
335 StalledTxId(
TxId txid_, std::chrono::seconds timeAdded_)
336 : txid(txid_), timeAdded(timeAdded_){};
342using StalledTxIdSet = boost::multi_index_container<
344 boost::multi_index::indexed_by<
346 boost::multi_index::hashed_unique<
347 boost::multi_index::tag<by_txid>,
348 boost::multi_index::member<StalledTxId, TxId, &StalledTxId::txid>,
351 boost::multi_index::ordered_non_unique<
352 boost::multi_index::tag<by_time>,
353 boost::multi_index::member<StalledTxId, std::chrono::seconds,
354 &StalledTxId::timeAdded>>>>;
391 std::atomic<ServiceFlags> m_their_services{
NODE_NONE};
394 Mutex m_misbehavior_mutex;
399 bool m_should_discourage
GUARDED_BY(m_misbehavior_mutex){
false};
402 Mutex m_block_inv_mutex;
408 std::vector<BlockHash> m_blocks_for_inv_relay
GUARDED_BY(m_block_inv_mutex);
414 std::vector<BlockHash>
415 m_blocks_for_headers_relay
GUARDED_BY(m_block_inv_mutex);
426 std::atomic<int> m_starting_height{-1};
429 std::atomic<uint64_t> m_ping_nonce_sent{0};
431 std::atomic<std::chrono::microseconds> m_ping_start{0us};
433 std::atomic<bool> m_ping_queued{
false};
443 std::chrono::microseconds m_next_send_feefilter
456 bool m_relay_txs
GUARDED_BY(m_bloom_filter_mutex){
false};
461 std::unique_ptr<CBloomFilter>
477 GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
483 std::set<TxId> m_tx_inventory_to_send
GUARDED_BY(m_tx_inventory_mutex);
489 bool m_send_mempool
GUARDED_BY(m_tx_inventory_mutex){
false};
491 std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
496 std::chrono::microseconds
497 m_next_inv_send_time
GUARDED_BY(m_tx_inventory_mutex){0};
503 std::atomic<Amount> m_fee_filter_received{
Amount::zero()};
509 m_avalanche_stalled_txids
GUARDED_BY(m_tx_inventory_mutex);
517 LOCK(m_tx_relay_mutex);
519 m_tx_relay = std::make_unique<Peer::TxRelay>();
520 return m_tx_relay.get();
524 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
526 const TxRelay *GetTxRelay() const
528 return WITH_LOCK(m_tx_relay_mutex,
return m_tx_relay.get());
533 std::set<avalanche::ProofId>
534 m_proof_inventory_to_send
GUARDED_BY(m_proof_inventory_mutex);
537 GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
544 std::chrono::microseconds m_next_inv_send_time{0};
548 std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
549 std::atomic<bool> compactproofs_requested{
false};
556 const std::unique_ptr<ProofRelay> m_proof_relay;
561 std::vector<CAddress>
573 std::unique_ptr<CRollingBloomFilter>
591 std::atomic_bool m_addr_relay_enabled{
false};
595 mutable Mutex m_addr_send_times_mutex;
597 std::chrono::microseconds
598 m_next_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
600 std::chrono::microseconds
601 m_next_local_addr_send
GUARDED_BY(m_addr_send_times_mutex){0};
606 std::atomic_bool m_wants_addrv2{
false};
610 mutable Mutex m_addr_token_bucket_mutex;
615 double m_addr_token_bucket
GUARDED_BY(m_addr_token_bucket_mutex){1.0};
617 std::chrono::microseconds
619 GetTime<std::chrono::microseconds>()};
621 std::atomic<uint64_t> m_addr_rate_limited{0};
626 std::atomic<uint64_t> m_addr_processed{0};
632 bool m_inv_triggered_getheaders_before_sync
636 Mutex m_getdata_requests_mutex;
638 std::deque<CInv> m_getdata_requests
GUARDED_BY(m_getdata_requests_mutex);
645 Mutex m_headers_sync_mutex;
650 std::unique_ptr<HeadersSyncState>
655 std::atomic<bool> m_sent_sendheaders{
false};
658 std::chrono::microseconds m_headers_sync_timeout
669 : m_id(id), m_our_services{our_services},
670 m_proof_relay(fRelayProofs ?
std::make_unique<ProofRelay>()
674 mutable Mutex m_tx_relay_mutex;
677 std::unique_ptr<TxRelay> m_tx_relay
GUARDED_BY(m_tx_relay_mutex);
680using PeerRef = std::shared_ptr<Peer>;
698 bool fSyncStarted{
false};
701 std::chrono::microseconds m_stalling_since{0us};
702 std::list<QueuedBlock> vBlocksInFlight;
705 std::chrono::microseconds m_downloading_since{0us};
707 bool fPreferredDownload{
false};
712 bool m_requested_hb_cmpctblocks{
false};
714 bool m_provides_cmpctblocks{
false};
742 struct ChainSyncTimeoutState {
745 std::chrono::seconds m_timeout{0s};
749 bool m_sent_getheaders{
false};
752 bool m_protect{
false};
755 ChainSyncTimeoutState m_chain_sync;
758 int64_t m_last_block_announcement{0};
761 const bool m_is_inbound;
// Construct per-node state; records whether the connection is inbound
// (m_is_inbound is const — fixed for the lifetime of the node's state).
763 CNodeState(
bool is_inbound) : m_is_inbound(is_inbound) {}
774 const std::shared_ptr<const CBlock> &pblock,
782 bool fInitialDownload)
override
788 const std::shared_ptr<const CBlock> &pblock)
override
797 !m_headers_presync_mutex);
799 std::atomic<bool> &interrupt)
override
801 !m_recent_confirmed_transactions_mutex,
802 !m_most_recent_block_mutex, !cs_proofrequest,
803 !m_headers_presync_mutex, g_msgproc_mutex);
806 !m_recent_confirmed_transactions_mutex,
807 !m_most_recent_block_mutex, !cs_proofrequest,
813 std::optional<std::string>
820 void RelayTransaction(const
TxId &txid) override
822 void RelayProof(const
avalanche::ProofId &proofid) override
// Record the current best-known height in the atomic m_best_height
// (read elsewhere, e.g. when building version messages — see orig line 2258).
824 void SetBestHeight(
int height)
override { m_best_height = height; };
827 Misbehaving(*
Assert(GetPeerRef(peer_id)),
"");
830 const std::string &msg_type,
DataStream &vRecv,
831 const std::chrono::microseconds time_received,
832 const std::atomic<bool> &interruptMsgProc)
override
834 !m_recent_confirmed_transactions_mutex,
835 !m_most_recent_block_mutex, !cs_proofrequest,
836 !m_headers_presync_mutex, g_msgproc_mutex);
838 int64_t time_in_seconds)
override;
845 void ConsiderEviction(
CNode &pto, Peer &peer,
846 std::chrono::seconds time_in_seconds)
853 void EvictExtraOutboundPeers(std::chrono::seconds now)
860 void ReattemptInitialBroadcast(
CScheduler &scheduler)
866 void UpdateAvalancheStatistics()
const;
871 void AvalanchePeriodicNetworking(
CScheduler &scheduler)
const;
889 void Misbehaving(Peer &peer,
const std::string &message);
901 void MaybePunishNodeForBlock(
NodeId nodeid,
903 bool via_compact_block,
904 const std::string &message =
"")
912 const
std::
string &message = "")
924 bool MaybeDiscourageAndDisconnect(
CNode &pnode, Peer &peer);
942 bool maybe_add_extra_compact_tx)
945 struct PackageToValidate {
947 const std::vector<NodeId> m_senders;
952 : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}
955 Assume(m_txns.size() == 2);
957 "parent %s (sender=%d) + child %s (sender=%d)",
958 m_txns.front()->GetId().ToString(), m_senders.front(),
959 m_txns.back()->GetId().ToString(), m_senders.back());
968 void ProcessPackageResult(
const PackageToValidate &package_to_validate,
978 std::optional<PackageToValidate> Find1P1CPackage(
const CTransactionRef &ptx,
1005 bool ProcessOrphanTx(
const Config &config, Peer &peer)
1018 void ProcessHeadersMessage(
const Config &config,
CNode &pfrom, Peer &peer,
1019 std::vector<CBlockHeader> &&headers,
1020 bool via_compact_block)
1030 bool CheckHeadersPoW(
const std::vector<CBlockHeader> &headers,
1040 void HandleUnconnectingHeaders(
CNode &pfrom, Peer &peer,
1041 const std::vector<CBlockHeader> &headers)
1045 CheckHeadersAreContinuous(
const std::vector<CBlockHeader> &headers)
const;
1065 bool IsContinuationOfLowWorkHeadersSync(Peer &peer,
CNode &pfrom,
1066 std::vector<CBlockHeader> &headers)
1068 !m_headers_presync_mutex, g_msgproc_mutex);
1082 bool TryLowWorkHeadersSync(Peer &peer,
CNode &pfrom,
1084 std::vector<CBlockHeader> &headers)
1086 !m_headers_presync_mutex, g_msgproc_mutex);
1092 bool IsAncestorOfBestHeaderOrTip(
const CBlockIndex *header)
1106 void HeadersDirectFetchBlocks(
const Config &config,
CNode &pfrom,
1109 void UpdatePeerStateForReceivedHeaders(
CNode &pfrom, Peer &peer,
1111 bool received_new_header,
1112 bool may_have_more_headers)
1115 void SendBlockTransactions(
CNode &pfrom, Peer &peer,
const CBlock &block,
1124 std::chrono::microseconds current_time)
1134 std::chrono::microseconds current_time,
bool preferred)
1139 m_connman.PushMessage(&
node, std::move(msg));
1141 template <
typename... Args>
1142 void MakeAndPushMessage(
CNode &
node, std::string msg_type,
1143 Args &&...args)
const {
1145 std::forward<Args>(args)...));
1149 void PushNodeVersion(
const Config &config,
CNode &pnode,
const Peer &peer);
1157 void MaybeSendPing(
CNode &node_to, Peer &peer,
1158 std::chrono::microseconds now);
1161 void MaybeSendAddr(
CNode &
node, Peer &peer,
1162 std::chrono::microseconds current_time)
1169 void MaybeSendSendHeaders(
CNode &
node, Peer &peer)
1173 void MaybeSendFeefilter(
CNode &
node, Peer &peer,
1174 std::chrono::microseconds current_time)
1186 void RelayAddress(
NodeId originator,
const CAddress &addr,
bool fReachable)
1207 Mutex cs_proofrequest;
1212 std::atomic<int> m_best_height{-1};
1217 const Options m_opts;
1219 bool RejectIncomingTxs(
const CNode &peer)
const;
1231 mutable Mutex m_peer_mutex;
1238 std::map<NodeId, PeerRef> m_peer_map
GUARDED_BY(m_peer_mutex);
1247 const CNodeState *State(
NodeId pnode)
const
1252 std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
1259 m_last_block_inv_triggering_headers_sync
GUARDED_BY(g_msgproc_mutex){};
1267 std::map<BlockHash, std::pair<NodeId, bool>>
1277 std::atomic<std::chrono::seconds> m_block_stalling_timeout{
1291 bool AlreadyHaveTx(
const TxId &txid,
bool include_reconsiderable)
1293 !m_recent_confirmed_transactions_mutex);
1357 mutable Mutex m_recent_confirmed_transactions_mutex;
1359 GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
1368 std::chrono::microseconds
1369 NextInvToInbounds(std::chrono::microseconds now,
1370 std::chrono::seconds average_interval)
1375 mutable Mutex m_most_recent_block_mutex;
1376 std::shared_ptr<const CBlock>
1377 m_most_recent_block
GUARDED_BY(m_most_recent_block_mutex);
1378 std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1379 m_most_recent_compact_block
GUARDED_BY(m_most_recent_block_mutex);
1381 std::unique_ptr<const std::map<TxId, CTransactionRef>>
1382 m_most_recent_block_txs
GUARDED_BY(m_most_recent_block_mutex);
1387 Mutex m_headers_presync_mutex;
// Per-peer headers-presync statistics: accumulated work plus an optional
// (int64_t, uint32_t) pair — presumably the height and time of the last
// received header; TODO(review) confirm field semantics against the
// m_headers_presync_stats usage sites not visible in this chunk.
1398 using HeadersPresyncStats =
1399 std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1401 std::map<NodeId, HeadersPresyncStats>
1402 m_headers_presync_stats
GUARDED_BY(m_headers_presync_mutex){};
1406 std::atomic_bool m_headers_presync_should_signal{
false};
1414 bool IsBlockRequested(
const BlockHash &hash)
1418 bool IsBlockRequestedFromOutbound(
const BlockHash &hash)
1429 void RemoveBlockRequest(
const BlockHash &hash,
1430 std::optional<NodeId> from_peer)
1439 bool BlockRequested(
const Config &config,
NodeId nodeid,
1441 std::list<QueuedBlock>::iterator **pit =
nullptr)
1450 void FindNextBlocksToDownload(const Peer &peer,
unsigned int count,
1456 void TryDownloadingHistoricalBlocks(
1457 const Peer &peer,
unsigned int count,
1491 const Peer &peer, CNodeState *state,
1493 int nWindowEnd, const
CChain *activeChain =
nullptr,
1494 NodeId *nodeStaller =
nullptr)
1504 std::atomic<
std::chrono::seconds> m_last_tip_update{0s};
1511 const std::chrono::seconds mempool_req,
1512 const std::chrono::seconds now)
1517 void ProcessGetData(
const Config &config,
CNode &pfrom, Peer &peer,
1518 const std::atomic<bool> &interruptMsgProc)
1520 peer.m_getdata_requests_mutex,
1526 const std::shared_ptr<const CBlock> &block,
1527 bool force_processing,
bool min_pow_checked);
1535 void MaybeSetPeerAsAnnouncingHeaderAndIDs(
NodeId nodeid)
1554 std::vector<CTransactionRef>
1555 vExtraTxnForCompact
GUARDED_BY(g_msgproc_mutex);
1557 size_t vExtraTxnForCompactIt
GUARDED_BY(g_msgproc_mutex) = 0;
1562 void ProcessBlockAvailability(
NodeId nodeid)
1577 bool BlockRequestAllowed(const
CBlockIndex *pindex)
1579 bool AlreadyHaveBlock(const
BlockHash &block_hash)
1581 bool AlreadyHaveProof(const
avalanche::ProofId &proofid);
1582 void ProcessGetBlockData(const
Config &config,
CNode &pfrom, Peer &peer,
1605 bool PrepareBlockFilterRequest(
CNode &
node, Peer &peer,
1607 uint32_t start_height,
1609 uint32_t max_height_diff,
1651 uint32_t GetAvalancheVoteForBlock(const
BlockHash &hash) const
1662 const
TxId &
id) const
1664 !m_recent_confirmed_transactions_mutex);
1673 bool SetupAddressRelay(const
CNode &
node, Peer &peer)
1676 void AddAddressKnown(Peer &peer, const
CAddress &addr)
1678 void PushAddress(Peer &peer, const
CAddress &addr)
1686 bool ReceivedAvalancheProof(
CNode &
node, Peer &peer,
1692 const
std::chrono::seconds now)
1695 bool isPreferredDownloadPeer(const
CNode &pfrom);
1698const CNodeState *PeerManagerImpl::State(
NodeId pnode) const
1700 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1701 if (it == m_node_states.end()) {
1708CNodeState *PeerManagerImpl::State(
NodeId pnode)
1710 return const_cast<CNodeState *
>(std::as_const(*this).State(pnode));
1718static bool IsAddrCompatible(
const Peer &peer,
const CAddress &addr) {
1722void PeerManagerImpl::AddAddressKnown(Peer &peer,
const CAddress &addr) {
1723 assert(peer.m_addr_known);
1724 peer.m_addr_known->insert(addr.
GetKey());
1727void PeerManagerImpl::PushAddress(Peer &peer,
const CAddress &addr) {
1731 assert(peer.m_addr_known);
1732 if (addr.
IsValid() && !peer.m_addr_known->contains(addr.
GetKey()) &&
1733 IsAddrCompatible(peer, addr)) {
1734 if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
1735 peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
1738 peer.m_addrs_to_send.push_back(addr);
1743static void AddKnownTx(Peer &peer,
const TxId &txid) {
1744 auto tx_relay = peer.GetTxRelay();
1749 LOCK(tx_relay->m_tx_inventory_mutex);
1750 tx_relay->m_tx_inventory_known_filter.insert(txid);
1754 if (peer.m_proof_relay !=
nullptr) {
1755 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
1756 peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
1760bool PeerManagerImpl::isPreferredDownloadPeer(
const CNode &pfrom) {
1762 const CNodeState *state = State(pfrom.
GetId());
1763 return state && state->fPreferredDownload;
1766static bool CanServeBlocks(
const Peer &peer) {
1774static bool IsLimitedPeer(
const Peer &peer) {
1779std::chrono::microseconds
1780PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1781 std::chrono::seconds average_interval) {
1782 if (m_next_inv_to_inbounds.load() < now) {
1787 m_next_inv_to_inbounds =
1788 now + m_rng.rand_exp_duration(average_interval);
1790 return m_next_inv_to_inbounds;
1793bool PeerManagerImpl::IsBlockRequested(
const BlockHash &hash) {
1794 return mapBlocksInFlight.count(hash);
1797bool PeerManagerImpl::IsBlockRequestedFromOutbound(
const BlockHash &hash) {
1798 for (
auto range = mapBlocksInFlight.equal_range(hash);
1799 range.first != range.second; range.first++) {
1800 auto [nodeid, block_it] = range.first->second;
1801 CNodeState &nodestate = *
Assert(State(nodeid));
1802 if (!nodestate.m_is_inbound) {
1810void PeerManagerImpl::RemoveBlockRequest(
const BlockHash &hash,
1811 std::optional<NodeId> from_peer) {
1812 auto range = mapBlocksInFlight.equal_range(hash);
1813 if (range.first == range.second) {
1821 while (range.first != range.second) {
1822 auto [node_id, list_it] = range.first->second;
1824 if (from_peer && *from_peer != node_id) {
1829 CNodeState &state = *
Assert(State(node_id));
1831 if (state.vBlocksInFlight.begin() == list_it) {
1834 state.m_downloading_since =
1835 std::max(state.m_downloading_since,
1836 GetTime<std::chrono::microseconds>());
1838 state.vBlocksInFlight.erase(list_it);
1840 if (state.vBlocksInFlight.empty()) {
1842 m_peers_downloading_from--;
1844 state.m_stalling_since = 0us;
1846 range.first = mapBlocksInFlight.erase(range.first);
1850bool PeerManagerImpl::BlockRequested(
const Config &config,
NodeId nodeid,
1852 std::list<QueuedBlock>::iterator **pit) {
1855 CNodeState *state = State(nodeid);
1856 assert(state !=
nullptr);
1861 for (
auto range = mapBlocksInFlight.equal_range(hash);
1862 range.first != range.second; range.first++) {
1863 if (range.first->second.first == nodeid) {
1865 *pit = &range.first->second.second;
1872 RemoveBlockRequest(hash, nodeid);
1874 std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
1875 state->vBlocksInFlight.end(),
1876 {&block, std::unique_ptr<PartiallyDownloadedBlock>(
1877 pit ? new PartiallyDownloadedBlock(config, &m_mempool)
1879 if (state->vBlocksInFlight.size() == 1) {
1881 state->m_downloading_since = GetTime<std::chrono::microseconds>();
1882 m_peers_downloading_from++;
1885 auto itInFlight = mapBlocksInFlight.insert(
1886 std::make_pair(hash, std::make_pair(nodeid, it)));
1889 *pit = &itInFlight->second.second;
1895void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(
NodeId nodeid) {
1901 if (m_opts.ignore_incoming_txs) {
1905 CNodeState *nodestate = State(nodeid);
1910 if (!nodestate->m_provides_cmpctblocks) {
1913 int num_outbound_hb_peers = 0;
1914 for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
1915 it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1916 if (*it == nodeid) {
1917 lNodesAnnouncingHeaderAndIDs.erase(it);
1918 lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1921 CNodeState *state = State(*it);
1922 if (state !=
nullptr && !state->m_is_inbound) {
1923 ++num_outbound_hb_peers;
1926 if (nodestate->m_is_inbound) {
1929 if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
1930 num_outbound_hb_peers == 1) {
1931 CNodeState *remove_node =
1932 State(lNodesAnnouncingHeaderAndIDs.front());
1933 if (remove_node !=
nullptr && !remove_node->m_is_inbound) {
1936 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
1937 *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1944 if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1948 lNodesAnnouncingHeaderAndIDs.front(), [
this](
CNode *pnodeStop) {
1949 MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT,
1951 CMPCTBLOCKS_VERSION);
1954 pnodeStop->m_bip152_highbandwidth_to = false;
1957 lNodesAnnouncingHeaderAndIDs.pop_front();
1964 lNodesAnnouncingHeaderAndIDs.push_back(pfrom->
GetId());
1969bool PeerManagerImpl::TipMayBeStale() {
1972 if (m_last_tip_update.load() == 0s) {
1973 m_last_tip_update = GetTime<std::chrono::seconds>();
1975 return m_last_tip_update.load() <
1976 GetTime<std::chrono::seconds>() -
1979 mapBlocksInFlight.empty();
1982bool PeerManagerImpl::CanDirectFetch() {
1988static bool PeerHasHeader(CNodeState *state,
const CBlockIndex *pindex)
1990 if (state->pindexBestKnownBlock &&
1991 pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
1994 if (state->pindexBestHeaderSent &&
1995 pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
2001void PeerManagerImpl::ProcessBlockAvailability(
NodeId nodeid) {
2002 CNodeState *state = State(nodeid);
2003 assert(state !=
nullptr);
2005 if (!state->hashLastUnknownBlock.IsNull()) {
2009 if (state->pindexBestKnownBlock ==
nullptr ||
2010 pindex->
nChainWork >= state->pindexBestKnownBlock->nChainWork) {
2011 state->pindexBestKnownBlock = pindex;
2013 state->hashLastUnknownBlock.SetNull();
2018void PeerManagerImpl::UpdateBlockAvailability(
NodeId nodeid,
2020 CNodeState *state = State(nodeid);
2021 assert(state !=
nullptr);
2023 ProcessBlockAvailability(nodeid);
2028 if (state->pindexBestKnownBlock ==
nullptr ||
2029 pindex->
nChainWork >= state->pindexBestKnownBlock->nChainWork) {
2030 state->pindexBestKnownBlock = pindex;
2035 state->hashLastUnknownBlock = hash;
2041void PeerManagerImpl::FindNextBlocksToDownload(
2042 const Peer &peer,
unsigned int count,
2043 std::vector<const CBlockIndex *> &vBlocks,
NodeId &nodeStaller) {
2048 vBlocks.reserve(vBlocks.size() +
count);
2049 CNodeState *state = State(peer.m_id);
2050 assert(state !=
nullptr);
2053 ProcessBlockAvailability(peer.m_id);
2055 if (state->pindexBestKnownBlock ==
nullptr ||
2056 state->pindexBestKnownBlock->nChainWork <
2058 state->pindexBestKnownBlock->nChainWork <
2068 const CBlockIndex *snap_base{m_chainman.GetSnapshotBaseBlock()};
2069 if (snap_base && state->pindexBestKnownBlock->GetAncestor(
2070 snap_base->nHeight) != snap_base) {
2072 "Not downloading blocks from peer=%d, which doesn't have the "
2073 "snapshot block in its best chain.\n",
2082 if (state->pindexLastCommonBlock ==
nullptr ||
2084 state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
2085 state->pindexLastCommonBlock =
2087 .
ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
2094 state->pindexLastCommonBlock, state->pindexBestKnownBlock);
2095 if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
2099 const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
2107 FindNextBlocks(vBlocks, peer, state, pindexWalk,
count, nWindowEnd,
2111void PeerManagerImpl::TryDownloadingHistoricalBlocks(
2112 const Peer &peer,
unsigned int count,
2113 std::vector<const CBlockIndex *> &vBlocks,
const CBlockIndex *from_tip,
2118 if (vBlocks.size() >=
count) {
2122 vBlocks.reserve(
count);
2123 CNodeState *state =
Assert(State(peer.m_id));
2125 if (state->pindexBestKnownBlock ==
nullptr ||
2126 state->pindexBestKnownBlock->GetAncestor(target_block->
nHeight) !=
2141 FindNextBlocks(vBlocks, peer, state, from_tip,
count,
2146void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
2147 const Peer &peer, CNodeState *state,
2149 unsigned int count,
int nWindowEnd,
2150 const CChain *activeChain,
2152 std::vector<const CBlockIndex *> vToFetch;
2154 std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
2156 while (pindexWalk->
nHeight < nMaxHeight) {
2161 int nToFetch = std::min(nMaxHeight - pindexWalk->
nHeight,
2162 std::max<int>(
count - vBlocks.size(), 128));
2163 vToFetch.resize(nToFetch);
2164 pindexWalk = state->pindexBestKnownBlock->
GetAncestor(
2165 pindexWalk->
nHeight + nToFetch);
2166 vToFetch[nToFetch - 1] = pindexWalk;
2167 for (
unsigned int i = nToFetch - 1; i > 0; i--) {
2168 vToFetch[i - 1] = vToFetch[i]->
pprev;
2181 if (pindex->nStatus.hasData() ||
2182 (activeChain && activeChain->
Contains(pindex))) {
2184 state->pindexLastCommonBlock = pindex;
2186 }
else if (!IsBlockRequested(pindex->
GetBlockHash())) {
2188 if (pindex->
nHeight > nWindowEnd) {
2190 if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
2194 *nodeStaller = waitingfor;
2199 vBlocks.push_back(pindex);
2200 if (vBlocks.size() ==
count) {
2203 }
else if (waitingfor == -1) {
2215template <
class InvId>
2219 return !
node.HasPermission(
2232template <
class InvId>
2233static std::chrono::microseconds
2237 std::chrono::microseconds current_time,
bool preferred) {
2238 auto delay = std::chrono::microseconds{0};
2250 return current_time + delay;
2253void PeerManagerImpl::PushNodeVersion(
const Config &config,
CNode &pnode,
2255 uint64_t my_services{peer.m_our_services};
2256 const int64_t nTime{
count_seconds(GetTime<std::chrono::seconds>())};
2258 const int nNodeStartingHeight{m_best_height};
2269 const bool tx_relay{!RejectIncomingTxs(pnode)};
2278 nNodeStartingHeight, tx_relay, extraEntropy);
2282 "send version message: version %d, blocks=%d, them=%s, "
2283 "txrelay=%d, peer=%d\n",
2288 "send version message: version %d, blocks=%d, "
2289 "txrelay=%d, peer=%d\n",
2294void PeerManagerImpl::AddTxAnnouncement(
2296 std::chrono::microseconds current_time) {
2304 const bool preferred = isPreferredDownloadPeer(
node);
2306 current_time, preferred);
2308 m_txrequest.ReceivedInv(
node.GetId(), txid, preferred, reqtime);
2311void PeerManagerImpl::AddProofAnnouncement(
2313 std::chrono::microseconds current_time,
bool preferred) {
2324 m_proofrequest.ReceivedInv(
node.GetId(), proofid, preferred, reqtime);
2327void PeerManagerImpl::UpdateLastBlockAnnounceTime(
NodeId node,
2328 int64_t time_in_seconds) {
2330 CNodeState *state = State(
node);
2332 state->m_last_block_announcement = time_in_seconds;
2336void PeerManagerImpl::InitializeNode(
const Config &config,
CNode &
node,
2341 m_node_states.emplace_hint(m_node_states.end(),
2342 std::piecewise_construct,
2343 std::forward_as_tuple(nodeid),
2344 std::forward_as_tuple(
node.IsInboundConn()));
2345 assert(m_txrequest.Count(nodeid) == 0);
2353 PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
2356 m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
2358 if (!
node.IsInboundConn()) {
2359 PushNodeVersion(config,
node, *peer);
2363void PeerManagerImpl::ReattemptInitialBroadcast(
CScheduler &scheduler) {
2366 for (
const TxId &txid : unbroadcast_txids) {
2368 if (m_mempool.
exists(txid)) {
2369 RelayTransaction(txid);
2380 auto unbroadcasted_proofids =
2384 auto it = unbroadcasted_proofids.begin();
2385 while (it != unbroadcasted_proofids.end()) {
2388 if (!pm.isBoundToPeer(*it)) {
2389 pm.removeUnbroadcastProof(*it);
2390 it = unbroadcasted_proofids.erase(it);
2397 return unbroadcasted_proofids;
2401 for (
const auto &proofid : unbroadcasted_proofids) {
2402 RelayProof(proofid);
2409 const auto reattemptBroadcastInterval =
2411 scheduler.
scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2412 reattemptBroadcastInterval);
2415void PeerManagerImpl::UpdateAvalancheStatistics()
const {
2421void PeerManagerImpl::AvalanchePeriodicNetworking(
CScheduler &scheduler)
const {
2422 const auto now = GetTime<std::chrono::seconds>();
2423 std::vector<NodeId> avanode_ids;
2424 bool fQuorumEstablished;
2425 bool fShouldRequestMoreNodes;
2435 fShouldRequestMoreNodes =
2443 avanode_ids.push_back(pnode->GetId());
2446 PeerRef peer = GetPeerRef(pnode->
GetId());
2447 if (peer ==
nullptr) {
2451 if (peer->m_proof_relay &&
2452 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2454 peer->m_proof_relay->sharedProofs = {};
2458 if (avanode_ids.empty()) {
2466 for (
NodeId avanodeId : avanode_ids) {
2467 const bool sentGetavaaddr =
2470 MakeAndPushMessage(*pavanode, NetMsgType::GETAVAADDR);
2471 PeerRef peer = GetPeerRef(avanodeId);
2472 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2473 peer->m_addr_token_bucket +=
2474 m_opts.max_addr_to_send);
2482 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2497 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2500 for (
NodeId nodeid : avanode_ids) {
2503 PeerRef peer = GetPeerRef(nodeid);
2504 if (peer->m_proof_relay) {
2506 peer->m_proof_relay->compactproofs_requested =
true;
2516 const auto avalanchePeriodicNetworkingInterval =
2518 scheduler.
scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2519 avalanchePeriodicNetworkingInterval);
2522void PeerManagerImpl::FinalizeNode(
const Config &config,
const CNode &
node) {
2532 PeerRef peer = RemovePeer(nodeid);
2535 m_peer_map.erase(nodeid);
2537 CNodeState *state = State(nodeid);
2538 assert(state !=
nullptr);
2540 if (state->fSyncStarted) {
2544 for (
const QueuedBlock &entry : state->vBlocksInFlight) {
2546 mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
2547 while (range.first != range.second) {
2548 auto [node_id, list_it] = range.first->second;
2549 if (node_id != nodeid) {
2552 range.first = mapBlocksInFlight.erase(range.first);
2559 m_txrequest.DisconnectedPeer(nodeid);
2560 m_num_preferred_download_peers -= state->fPreferredDownload;
2561 m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
2562 assert(m_peers_downloading_from >= 0);
2563 m_outbound_peers_with_protect_from_disconnect -=
2564 state->m_chain_sync.m_protect;
2565 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2567 m_node_states.erase(nodeid);
2569 if (m_node_states.empty()) {
2571 assert(mapBlocksInFlight.empty());
2572 assert(m_num_preferred_download_peers == 0);
2573 assert(m_peers_downloading_from == 0);
2574 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2575 assert(m_txrequest.Size() == 0);
2577 return orphanage.Size();
2582 if (
node.fSuccessfullyConnected && !
node.IsBlockOnlyConn() &&
2583 !
node.IsInboundConn()) {
2590 LOCK(m_headers_presync_mutex);
2591 m_headers_presync_stats.erase(nodeid);
2594 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2599PeerRef PeerManagerImpl::GetPeerRef(
NodeId id)
const {
2601 auto it = m_peer_map.find(
id);
2602 return it != m_peer_map.end() ? it->second :
nullptr;
2605PeerRef PeerManagerImpl::RemovePeer(
NodeId id) {
2608 auto it = m_peer_map.find(
id);
2609 if (it != m_peer_map.end()) {
2610 ret = std::move(it->second);
2611 m_peer_map.erase(it);
2616bool PeerManagerImpl::GetNodeStateStats(
NodeId nodeid,
2620 const CNodeState *state = State(nodeid);
2621 if (state ==
nullptr) {
2625 ? state->pindexBestKnownBlock->nHeight
2628 ? state->pindexLastCommonBlock->nHeight
2630 for (
const QueuedBlock &queue : state->vBlocksInFlight) {
2637 PeerRef peer = GetPeerRef(nodeid);
2638 if (peer ==
nullptr) {
2650 auto ping_wait{0us};
2651 if ((0 != peer->m_ping_nonce_sent) &&
2652 (0 != peer->m_ping_start.load().count())) {
2654 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2657 if (
auto tx_relay = peer->GetTxRelay()) {
2659 return tx_relay->m_relay_txs);
2671 LOCK(peer->m_headers_sync_mutex);
2672 if (peer->m_headers_sync) {
2680void PeerManagerImpl::AddToCompactExtraTransactions(
const CTransactionRef &tx) {
2681 if (m_opts.max_extra_txs <= 0) {
2685 if (!vExtraTxnForCompact.size()) {
2686 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
2689 vExtraTxnForCompact[vExtraTxnForCompactIt] = tx;
2690 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
2693void PeerManagerImpl::Misbehaving(Peer &peer,
const std::string &message) {
2694 LOCK(peer.m_misbehavior_mutex);
2696 const std::string message_prefixed =
2697 message.empty() ?
"" : (
": " + message);
2698 peer.m_should_discourage =
true;
2703void PeerManagerImpl::MaybePunishNodeForBlock(
NodeId nodeid,
2705 bool via_compact_block,
2706 const std::string &message) {
2707 PeerRef peer{GetPeerRef(nodeid)};
2718 if (!via_compact_block) {
2720 Misbehaving(*peer, message);
2727 CNodeState *node_state = State(nodeid);
2728 if (node_state ==
nullptr) {
2735 if (!via_compact_block && !node_state->m_is_inbound) {
2737 Misbehaving(*peer, message);
2747 Misbehaving(*peer, message);
2753 Misbehaving(*peer, message);
2759 if (message !=
"") {
2764void PeerManagerImpl::MaybePunishNodeForTx(
NodeId nodeid,
2766 const std::string &message) {
2767 PeerRef peer{GetPeerRef(nodeid)};
2774 Misbehaving(*peer, message);
2792 if (message !=
"") {
2797bool PeerManagerImpl::BlockRequestAllowed(
const CBlockIndex *pindex) {
2803 (m_chainman.m_best_header !=
nullptr) &&
2804 (m_chainman.m_best_header->GetBlockTime() - pindex->
GetBlockTime() <
2807 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2811std::optional<std::string>
2812PeerManagerImpl::FetchBlock(
const Config &config,
NodeId peer_id,
2815 return "Loading blocks ...";
2821 CNodeState *state = State(peer_id);
2822 if (state ==
nullptr) {
2823 return "Peer does not exist";
2827 RemoveBlockRequest(block_index.
GetBlockHash(), std::nullopt);
2830 if (!BlockRequested(config, peer_id, block_index)) {
2831 return "Already requested from this peer";
2840 this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
2843 return "Node not fully connected";
2848 return std::nullopt;
2851std::unique_ptr<PeerManager>
2855 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2864 : m_rng{opts.deterministic_rng},
2866 m_chainparams(chainman.GetParams()), m_connman(connman),
2867 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2868 m_mempool(pool), m_avalanche(
avalanche), m_opts{opts} {}
2870void PeerManagerImpl::StartScheduledTasks(
CScheduler &scheduler) {
2877 "peer eviction timer should be less than stale tip check timer");
2880 this->CheckForStaleTipAndEvictPeers();
2886 const auto reattemptBroadcastInterval =
2888 scheduler.
scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2889 reattemptBroadcastInterval);
2894 UpdateAvalancheStatistics();
2900 const auto avalanchePeriodicNetworkingInterval =
2902 scheduler.
scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2903 avalanchePeriodicNetworkingInterval);
2912void PeerManagerImpl::BlockConnected(
2913 ChainstateRole role,
const std::shared_ptr<const CBlock> &pblock,
2917 m_last_tip_update = GetTime<std::chrono::seconds>();
2921 auto stalling_timeout = m_block_stalling_timeout.load();
2924 const auto new_timeout =
2925 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2926 stalling_timeout * 0.85),
2928 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
2948 LOCK(m_recent_confirmed_transactions_mutex);
2950 m_recent_confirmed_transactions.insert(ptx->GetId());
2955 for (
const auto &ptx : pblock->vtx) {
2956 m_txrequest.ForgetInvId(ptx->GetId());
2961void PeerManagerImpl::BlockDisconnected(
2962 const std::shared_ptr<const CBlock> &block,
const CBlockIndex *pindex) {
2971 LOCK(m_recent_confirmed_transactions_mutex);
2972 m_recent_confirmed_transactions.reset();
2979void PeerManagerImpl::NewPoWValidBlock(
2980 const CBlockIndex *pindex,
const std::shared_ptr<const CBlock> &pblock) {
2981 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2982 std::make_shared<const CBlockHeaderAndShortTxIDs>(
2987 if (pindex->
nHeight <= m_highest_fast_announce) {
2990 m_highest_fast_announce = pindex->
nHeight;
2993 const std::shared_future<CSerializedNetMsg> lazy_ser{
2994 std::async(std::launch::deferred, [&] {
2999 auto most_recent_block_txs =
3000 std::make_unique<std::map<TxId, CTransactionRef>>();
3001 for (
const auto &tx : pblock->vtx) {
3002 most_recent_block_txs->emplace(tx->GetId(), tx);
3005 LOCK(m_most_recent_block_mutex);
3006 m_most_recent_block_hash = hashBlock;
3007 m_most_recent_block = pblock;
3008 m_most_recent_compact_block = pcmpctblock;
3009 m_most_recent_block_txs = std::move(most_recent_block_txs);
3013 [
this, pindex, &lazy_ser, &hashBlock](
CNode *pnode)
3021 ProcessBlockAvailability(pnode->
GetId());
3022 CNodeState &state = *State(pnode->
GetId());
3026 if (state.m_requested_hb_cmpctblocks &&
3027 !PeerHasHeader(&state, pindex) &&
3028 PeerHasHeader(&state, pindex->
pprev)) {
3030 "%s sending header-and-ids %s to peer=%d\n",
3031 "PeerManager::NewPoWValidBlock",
3032 hashBlock.ToString(), pnode->
GetId());
3035 PushMessage(*pnode, ser_cmpctblock.Copy());
3036 state.pindexBestHeaderSent = pindex;
3045void PeerManagerImpl::UpdatedBlockTip(
const CBlockIndex *pindexNew,
3047 bool fInitialDownload) {
3048 SetBestHeight(pindexNew->
nHeight);
3052 if (fInitialDownload) {
3057 std::vector<BlockHash> vHashes;
3059 while (pindexToAnnounce != pindexFork) {
3061 pindexToAnnounce = pindexToAnnounce->
pprev;
3071 for (
auto &it : m_peer_map) {
3072 Peer &peer = *it.second;
3073 LOCK(peer.m_block_inv_mutex);
3075 peer.m_blocks_for_headers_relay.push_back(hash);
3087void PeerManagerImpl::BlockChecked(
const CBlock &block,
3092 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
3093 mapBlockSource.find(hash);
3097 if (state.
IsInvalid() && it != mapBlockSource.end() &&
3098 State(it->second.first)) {
3099 MaybePunishNodeForBlock(it->second.first, state,
3100 !it->second.second);
3109 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
3110 if (it != mapBlockSource.end()) {
3111 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
3115 if (it != mapBlockSource.end()) {
3116 mapBlockSource.erase(it);
3125bool PeerManagerImpl::AlreadyHaveTx(
const TxId &txid,
3126 bool include_reconsiderable) {
3128 hashRecentRejectsChainTip) {
3133 hashRecentRejectsChainTip =
3135 m_recent_rejects.reset();
3136 m_recent_rejects_package_reconsiderable.reset();
3140 return orphanage.HaveTx(txid);
3146 return conflicting.HaveTx(txid);
3151 if (include_reconsiderable &&
3152 m_recent_rejects_package_reconsiderable.contains(txid)) {
3157 LOCK(m_recent_confirmed_transactions_mutex);
3158 if (m_recent_confirmed_transactions.contains(txid)) {
3163 return m_recent_rejects.contains(txid) || m_mempool.
exists(txid);
3166bool PeerManagerImpl::AlreadyHaveBlock(
const BlockHash &block_hash) {
3171 if (!
Assume(m_avalanche)) {
3176 if (localProof && localProof->getId() == proofid) {
3185void PeerManagerImpl::SendPings() {
3187 for (
auto &it : m_peer_map) {
3188 it.second->m_ping_queued =
true;
3192void PeerManagerImpl::RelayTransaction(
const TxId &txid) {
3194 for (
auto &it : m_peer_map) {
3195 Peer &peer = *it.second;
3196 auto tx_relay = peer.GetTxRelay();
3200 LOCK(tx_relay->m_tx_inventory_mutex);
3206 if (tx_relay->m_next_inv_send_time == 0s) {
3210 if (!tx_relay->m_tx_inventory_known_filter.contains(txid) ||
3211 tx_relay->m_avalanche_stalled_txids.count(txid) > 0) {
3212 tx_relay->m_tx_inventory_to_send.insert(txid);
3219 for (
auto &it : m_peer_map) {
3220 Peer &peer = *it.second;
3222 if (!peer.m_proof_relay) {
3225 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
3226 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
3228 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
3233void PeerManagerImpl::RelayAddress(
NodeId originator,
const CAddress &addr,
3249 const auto current_time{GetTime<std::chrono::seconds>()};
3252 const uint64_t time_addr{
3253 (
static_cast<uint64_t
>(
count_seconds(current_time)) + hash_addr) /
3263 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
3264 std::array<std::pair<uint64_t, Peer *>, 2> best{
3265 {{0,
nullptr}, {0,
nullptr}}};
3266 assert(nRelayNodes <= best.size());
3270 for (
auto &[
id, peer] : m_peer_map) {
3271 if (peer->m_addr_relay_enabled &&
id != originator &&
3272 IsAddrCompatible(*peer, addr)) {
3274 for (
unsigned int i = 0; i < nRelayNodes; i++) {
3275 if (hashKey > best[i].first) {
3276 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
3277 best.begin() + i + 1);
3278 best[i] = std::make_pair(hashKey, peer.get());
3285 for (
unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
3286 PushAddress(*best[i].second, addr);
3290void PeerManagerImpl::ProcessGetBlockData(
const Config &config,
CNode &pfrom,
3291 Peer &peer,
const CInv &inv) {
3294 std::shared_ptr<const CBlock> a_recent_block;
3295 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
3297 LOCK(m_most_recent_block_mutex);
3298 a_recent_block = m_most_recent_block;
3299 a_recent_compact_block = m_most_recent_compact_block;
3302 bool need_activate_chain =
false;
3316 need_activate_chain =
true;
3320 if (need_activate_chain) {
3323 state, a_recent_block, m_avalanche)) {
3331 bool can_direct_fetch{
false};
3339 if (!BlockRequestAllowed(pindex)) {
3341 "%s: ignoring request from peer=%i for old "
3342 "block that isn't in the main chain\n",
3343 __func__, pfrom.
GetId());
3349 (((m_chainman.m_best_header !=
nullptr) &&
3350 (m_chainman.m_best_header->GetBlockTime() -
3358 "historical block serving limit reached, disconnect peer=%d\n",
3371 (tip->nHeight - pindex->
nHeight >
3374 "Ignore block request below NODE_NETWORK_LIMITED "
3375 "threshold, disconnect peer=%d\n",
3385 if (!pindex->nStatus.hasData()) {
3388 can_direct_fetch = CanDirectFetch();
3392 std::shared_ptr<const CBlock> pblock;
3393 auto handle_block_read_error = [&]() {
3395 return m_chainman.
m_blockman.IsBlockPruned(*pindex))) {
3397 "Block was pruned before it could be read, disconnect "
3401 LogError(
"Cannot load block from disk, disconnect peer=%d\n",
3407 if (a_recent_block && a_recent_block->GetHash() == pindex->
GetBlockHash()) {
3408 pblock = a_recent_block;
3412 std::vector<uint8_t> block_data;
3414 handle_block_read_error();
3421 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3423 handle_block_read_error();
3426 pblock = pblockRead;
3432 bool sendMerkleBlock =
false;
3434 if (
auto tx_relay = peer.GetTxRelay()) {
3435 LOCK(tx_relay->m_bloom_filter_mutex);
3436 if (tx_relay->m_bloom_filter) {
3437 sendMerkleBlock =
true;
3442 if (sendMerkleBlock) {
3453 typedef std::pair<size_t, uint256> PairType;
3456 *pblock->vtx[pair.first]);
3466 if (can_direct_fetch &&
3468 if (a_recent_compact_block &&
3469 a_recent_compact_block->header.GetHash() ==
3472 *a_recent_compact_block);
3486 LOCK(peer.m_block_inv_mutex);
3489 if (hash == peer.m_continuation_block) {
3493 std::vector<CInv> vInv;
3496 peer.m_continuation_block =
BlockHash();
3502PeerManagerImpl::FindTxForGetData(
const Peer &peer,
const TxId &txid,
3503 const std::chrono::seconds mempool_req,
3504 const std::chrono::seconds now) {
3505 auto txinfo = m_mempool.
info(txid);
3510 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3512 return std::move(txinfo.tx);
3521 Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(txid);
3522 if (recent && txinfo.tx) {
3523 return std::move(txinfo.tx);
3528 LOCK(m_most_recent_block_mutex);
3529 if (m_most_recent_block_txs !=
nullptr) {
3530 auto it = m_most_recent_block_txs->find(txid);
3531 if (it != m_most_recent_block_txs->end()) {
3544PeerManagerImpl::FindProofForGetData(
const Peer &peer,
3546 const std::chrono::seconds now) {
3549 bool send_unconditionally =
3575 if (send_unconditionally) {
3580 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3587void PeerManagerImpl::ProcessGetData(
3589 const std::atomic<bool> &interruptMsgProc) {
3592 auto tx_relay = peer.GetTxRelay();
3594 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3595 std::vector<CInv> vNotFound;
3597 const auto now{GetTime<std::chrono::seconds>()};
3599 const auto mempool_req = tx_relay !=
nullptr
3600 ? tx_relay->m_last_mempool_req.load()
3601 : std::chrono::seconds::min();
3606 while (it != peer.m_getdata_requests.end()) {
3607 if (interruptMsgProc) {
3616 const CInv &inv = *it;
3618 if (it->IsMsgProof()) {
3620 vNotFound.push_back(inv);
3625 auto proof = FindProofForGetData(peer, proofid, now);
3632 vNotFound.push_back(inv);
3639 if (it->IsMsgTx()) {
3640 if (tx_relay ==
nullptr) {
3654 std::vector<TxId> parent_ids_to_add;
3657 auto txiter = m_mempool.
GetIter(tx->GetId());
3659 auto &pentry = *txiter;
3661 (*pentry)->GetMemPoolParentsConst();
3662 parent_ids_to_add.reserve(parents.size());
3663 for (
const auto &parent : parents) {
3664 if (parent.get()->GetTime() >
3666 parent_ids_to_add.push_back(
3667 parent.get()->GetTx().GetId());
3672 for (
const TxId &parent_txid : parent_ids_to_add) {
3675 if (
WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3676 return !tx_relay->m_tx_inventory_known_filter
3677 .contains(parent_txid))) {
3678 tx_relay->m_recently_announced_invs.insert(parent_txid);
3682 vNotFound.push_back(inv);
3695 if (it != peer.m_getdata_requests.end() && !pfrom.
fPauseSend) {
3696 const CInv &inv = *it++;
3698 ProcessGetBlockData(config, pfrom, peer, inv);
3704 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3706 if (!vNotFound.empty()) {
3723void PeerManagerImpl::SendBlockTransactions(
3727 for (
size_t i = 0; i < req.
indices.size(); i++) {
3729 Misbehaving(peer,
"getblocktxn with out-of-bounds tx indices");
3738bool PeerManagerImpl::CheckHeadersPoW(
const std::vector<CBlockHeader> &headers,
3743 Misbehaving(peer,
"header with invalid proof of work");
3748 if (!CheckHeadersAreContinuous(headers)) {
3749 Misbehaving(peer,
"non-continuous headers sequence");
3762 near_chaintip_work =
3775void PeerManagerImpl::HandleUnconnectingHeaders(
3776 CNode &pfrom, Peer &peer,
const std::vector<CBlockHeader> &headers) {
3780 if (MaybeSendGetHeaders(pfrom,
GetLocator(best_header), peer)) {
3783 "received header %s: missing prev block %s, sending getheaders "
3784 "(%d) to end (peer=%d)\n",
3786 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3794 UpdateBlockAvailability(pfrom.
GetId(), headers.back().GetHash()));
3797bool PeerManagerImpl::CheckHeadersAreContinuous(
3798 const std::vector<CBlockHeader> &headers)
const {
3801 if (!hashLastBlock.
IsNull() && header.hashPrevBlock != hashLastBlock) {
3804 hashLastBlock = header.GetHash();
3809bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3810 Peer &peer,
CNode &pfrom, std::vector<CBlockHeader> &headers) {
3811 if (peer.m_headers_sync) {
3812 auto result = peer.m_headers_sync->ProcessNextHeaders(
3816 if (result.success) {
3817 peer.m_last_getheaders_timestamp = {};
3819 if (result.request_more) {
3820 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3823 Assume(!locator.vHave.empty());
3827 if (!locator.vHave.empty()) {
3830 bool sent_getheaders =
3831 MaybeSendGetHeaders(pfrom, locator, peer);
3834 locator.vHave.front().ToString(), pfrom.
GetId());
3839 peer.m_headers_sync.reset(
nullptr);
3844 LOCK(m_headers_presync_mutex);
3845 m_headers_presync_stats.erase(pfrom.
GetId());
3848 HeadersPresyncStats stats;
3849 stats.first = peer.m_headers_sync->GetPresyncWork();
3850 if (peer.m_headers_sync->GetState() ==
3852 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3853 peer.m_headers_sync->GetPresyncTime()};
3857 LOCK(m_headers_presync_mutex);
3858 m_headers_presync_stats[pfrom.
GetId()] = stats;
3860 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3861 bool best_updated =
false;
3862 if (best_it == m_headers_presync_stats.end()) {
3867 const HeadersPresyncStats *stat_best{
nullptr};
3868 for (
const auto &[_peer, _stat] : m_headers_presync_stats) {
3869 if (!stat_best || _stat > *stat_best) {
3874 m_headers_presync_bestpeer = peer_best;
3875 best_updated = (peer_best == pfrom.
GetId());
3876 }
else if (best_it->first == pfrom.
GetId() ||
3877 stats > best_it->second) {
3880 m_headers_presync_bestpeer = pfrom.
GetId();
3881 best_updated =
true;
3883 if (best_updated && stats.second.has_value()) {
3886 m_headers_presync_should_signal =
true;
3890 if (result.success) {
3893 headers.swap(result.pow_validated_headers);
3896 return result.success;
3904bool PeerManagerImpl::TryLowWorkHeadersSync(
3906 std::vector<CBlockHeader> &headers) {
3913 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3917 if (total_work < minimum_chain_work) {
3931 LOCK(peer.m_headers_sync_mutex);
3932 peer.m_headers_sync.reset(
3934 chain_start_header, minimum_chain_work));
3939 (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3942 "Ignoring low-work chain (height=%u) from peer=%d\n",
3943 chain_start_header->
nHeight + headers.size(),
3955bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(
const CBlockIndex *header) {
3956 return header !=
nullptr &&
3957 ((m_chainman.m_best_header !=
nullptr &&
3959 m_chainman.m_best_header->GetAncestor(header->
nHeight)) ||
3963bool PeerManagerImpl::MaybeSendGetHeaders(
CNode &pfrom,
3970 if (current_time - peer.m_last_getheaders_timestamp >
3973 peer.m_last_getheaders_timestamp = current_time;
3985void PeerManagerImpl::HeadersDirectFetchBlocks(
const Config &config,
3989 CNodeState *nodestate = State(pfrom.
GetId());
3993 std::vector<const CBlockIndex *> vToFetch;
3999 if (!pindexWalk->nStatus.hasData() &&
4002 vToFetch.push_back(pindexWalk);
4004 pindexWalk = pindexWalk->
pprev;
4015 std::vector<CInv> vGetData;
4018 if (nodestate->vBlocksInFlight.size() >=
4024 BlockRequested(config, pfrom.
GetId(), *pindex);
4028 if (vGetData.size() > 1) {
4030 "Downloading blocks toward %s (%d) via headers "
4035 if (vGetData.size() > 0) {
4036 if (!m_opts.ignore_incoming_txs &&
4037 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
4038 mapBlocksInFlight.size() == 1 &&
4055void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
4057 bool received_new_header,
bool may_have_more_headers) {
4060 CNodeState *nodestate = State(pfrom.
GetId());
4068 if (received_new_header &&
4070 nodestate->m_last_block_announcement =
GetTime();
4078 if (nodestate->pindexBestKnownBlock &&
4079 nodestate->pindexBestKnownBlock->nChainWork <
4090 LogPrintf(
"Disconnecting outbound peer %d -- headers "
4091 "chain has insufficient work\n",
4105 nodestate->pindexBestKnownBlock !=
nullptr) {
4106 if (m_outbound_peers_with_protect_from_disconnect <
4108 nodestate->pindexBestKnownBlock->nChainWork >=
4110 !nodestate->m_chain_sync.m_protect) {
4113 nodestate->m_chain_sync.m_protect =
true;
4114 ++m_outbound_peers_with_protect_from_disconnect;
4119void PeerManagerImpl::ProcessHeadersMessage(
const Config &config,
CNode &pfrom,
4121 std::vector<CBlockHeader> &&headers,
4122 bool via_compact_block) {
4123 size_t nCount = headers.size();
4131 LOCK(peer.m_headers_sync_mutex);
4132 if (peer.m_headers_sync) {
4133 peer.m_headers_sync.reset(
nullptr);
4134 LOCK(m_headers_presync_mutex);
4135 m_headers_presync_stats.erase(pfrom.
GetId());
4140 peer.m_last_getheaders_timestamp = {};
4148 if (!CheckHeadersPoW(headers, m_chainparams.
GetConsensus(), peer)) {
4163 bool already_validated_work =
false;
4166 bool have_headers_sync =
false;
4168 LOCK(peer.m_headers_sync_mutex);
4170 already_validated_work =
4171 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
4183 if (headers.empty()) {
4187 have_headers_sync = !!peer.m_headers_sync;
4193 headers[0].hashPrevBlock))};
4194 bool headers_connect_blockindex{chain_start_header !=
nullptr};
4196 if (!headers_connect_blockindex) {
4200 HandleUnconnectingHeaders(pfrom, peer, headers);
4208 peer.m_last_getheaders_timestamp = {};
4217 last_received_header =
4219 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
4220 already_validated_work =
true;
4228 already_validated_work =
true;
4234 if (!already_validated_work &&
4235 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
4247 bool received_new_header{last_received_header ==
nullptr};
4252 state, &pindexLast)) {
4254 MaybePunishNodeForBlock(pfrom.
GetId(), state, via_compact_block,
4255 "invalid header received");
4265 if (MaybeSendGetHeaders(pfrom,
GetLocator(pindexLast), peer)) {
4268 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
4269 pindexLast->
nHeight, pfrom.
GetId(), peer.m_starting_height);
4273 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
4274 received_new_header,
4278 HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
4281void PeerManagerImpl::ProcessInvalidTx(
NodeId nodeid,
4284 bool maybe_add_extra_compact_tx) {
4289 const TxId &txid = ptx->GetId();
4309 m_recent_rejects_package_reconsiderable.insert(txid);
4311 m_recent_rejects.insert(txid);
4313 m_txrequest.ForgetInvId(txid);
4316 AddToCompactExtraTransactions(ptx);
4319 MaybePunishNodeForTx(nodeid, state);
4325 return orphanage.EraseTx(txid);
4339 m_txrequest.ForgetInvId(tx->GetId());
4345 orphanage.
EraseTx(tx->GetId());
4350 "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4351 nodeid, tx->GetId().ToString(), m_mempool.
size(),
4354 RelayTransaction(tx->GetId());
4357void PeerManagerImpl::ProcessPackageResult(
4358 const PackageToValidate &package_to_validate,
4364 const auto &
package = package_to_validate.m_txns;
4365 const auto &senders = package_to_validate.m_senders;
4368 m_recent_rejects_package_reconsiderable.insert(
GetPackageHash(package));
4372 if (!
Assume(package.size() == 2)) {
4378 auto package_iter = package.rbegin();
4379 auto senders_iter = senders.rbegin();
4380 while (package_iter != package.rend()) {
4381 const auto &tx = *package_iter;
4382 const NodeId nodeid = *senders_iter;
4383 const auto it_result{package_result.
m_tx_results.find(tx->GetId())};
4387 const auto &tx_result = it_result->second;
4388 switch (tx_result.m_result_type) {
4390 ProcessValidTx(nodeid, tx);
4400 ProcessInvalidTx(nodeid, tx, tx_result.m_state,
4417std::optional<PeerManagerImpl::PackageToValidate>
4423 const auto &parent_txid{ptx->GetId()};
4425 Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
4431 const auto cpfp_candidates_same_peer{
4437 for (
const auto &child : cpfp_candidates_same_peer) {
4438 Package maybe_cpfp_package{ptx, child};
4439 if (!m_recent_rejects_package_reconsiderable.contains(
4441 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
4455 const auto cpfp_candidates_different_peer{
4465 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
4466 std::iota(tx_indices.begin(), tx_indices.end(), 0);
4467 Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
4469 for (
const auto index : tx_indices) {
4472 const auto [child_tx, child_sender] =
4473 cpfp_candidates_different_peer.at(index);
4474 Package maybe_cpfp_package{ptx, child_tx};
4475 if (!m_recent_rejects_package_reconsiderable.contains(
4477 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
4481 return std::nullopt;
4484bool PeerManagerImpl::ProcessOrphanTx(
const Config &config, Peer &peer) {
4490 return orphanage.GetTxToReconsider(peer.m_id);
4495 const TxId &orphanTxId = porphanTx->GetId();
4500 ProcessValidTx(peer.m_id, porphanTx);
4506 " invalid orphan tx %s from peer=%d. %s\n",
4513 ProcessInvalidTx(peer.m_id, porphanTx, state,
4524bool PeerManagerImpl::PrepareBlockFilterRequest(
4526 const BlockHash &stop_hash, uint32_t max_height_diff,
4528 const bool supported_filter_type =
4531 if (!supported_filter_type) {
4533 "peer %d requested unsupported block filter type: %d\n",
4534 node.GetId(),
static_cast<uint8_t
>(filter_type));
4535 node.fDisconnect =
true;
4545 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4548 node.fDisconnect =
true;
4553 uint32_t stop_height = stop_index->
nHeight;
4554 if (start_height > stop_height) {
4557 "peer %d sent invalid getcfilters/getcfheaders with "
4559 "start height %d and stop height %d\n",
4560 node.GetId(), start_height, stop_height);
4561 node.fDisconnect =
true;
4564 if (stop_height - start_height >= max_height_diff) {
4566 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4567 node.GetId(), stop_height - start_height + 1, max_height_diff);
4568 node.fDisconnect =
true;
4573 if (!filter_index) {
4582void PeerManagerImpl::ProcessGetCFilters(
CNode &
node, Peer &peer,
4584 uint8_t filter_type_ser;
4585 uint32_t start_height;
4588 vRecv >> filter_type_ser >> start_height >> stop_hash;
4595 if (!PrepareBlockFilterRequest(
node, peer, filter_type, start_height,
4601 std::vector<BlockFilter> filters;
4604 "Failed to find block filter in index: filter_type=%s, "
4605 "start_height=%d, stop_hash=%s\n",
4611 for (
const auto &filter : filters) {
4616void PeerManagerImpl::ProcessGetCFHeaders(
CNode &
node, Peer &peer,
4618 uint8_t filter_type_ser;
4619 uint32_t start_height;
4622 vRecv >> filter_type_ser >> start_height >> stop_hash;
4629 if (!PrepareBlockFilterRequest(
node, peer, filter_type, start_height,
4636 if (start_height > 0) {
4638 stop_index->
GetAncestor(
static_cast<int>(start_height - 1));
4641 "Failed to find block filter header in index: "
4642 "filter_type=%s, block_hash=%s\n",
4649 std::vector<uint256> filter_hashes;
4653 "Failed to find block filter hashes in index: filter_type=%s, "
4654 "start_height=%d, stop_hash=%s\n",
4661 stop_index->
GetBlockHash(), prev_header, filter_hashes);
4664void PeerManagerImpl::ProcessGetCFCheckPt(
CNode &
node, Peer &peer,
4666 uint8_t filter_type_ser;
4669 vRecv >> filter_type_ser >> stop_hash;
4676 if (!PrepareBlockFilterRequest(
4677 node, peer, filter_type, 0, stop_hash,
4678 std::numeric_limits<uint32_t>::max(),
4679 stop_index, filter_index)) {
4687 for (
int i = headers.size() - 1; i >= 0; i--) {
4693 "Failed to find block filter header in index: "
4694 "filter_type=%s, block_hash=%s\n",
4717PeerManagerImpl::GetAvalancheVoteForBlock(
const BlockHash &hash)
const {
4728 if (pindex->nStatus.isInvalid()) {
4733 if (pindex->nStatus.isOnParkedChain()) {
4741 if (pindex == pindexFork) {
4746 if (pindexFork != pindexTip) {
4751 if (!pindex->nStatus.hasData()) {
4762 const TxId &
id)
const {
4764 if (
WITH_LOCK(m_recent_confirmed_transactions_mutex,
4765 return m_recent_confirmed_transactions.contains(
id))) {
4774 if (m_recent_rejects.contains(
id)) {
4786 if (
auto iter = m_mempool.
GetIter(
id)) {
4787 mempool_tx = (**iter)->GetSharedTx();
4792 return conflicting.HaveTx(id);
4799 return orphanage.HaveTx(id);
4865 const std::shared_ptr<const CBlock> &block,
4866 bool force_processing,
4867 bool min_pow_checked) {
4868 bool new_block{
false};
4870 &new_block, m_avalanche);
4872 node.m_last_block_time = GetTime<std::chrono::seconds>();
4877 RemoveBlockRequest(block->GetHash(), std::nullopt);
4880 mapBlockSource.erase(block->GetHash());
4884void PeerManagerImpl::ProcessMessage(
4885 const Config &config,
CNode &pfrom,
const std::string &msg_type,
4886 DataStream &vRecv,
const std::chrono::microseconds time_received,
4887 const std::atomic<bool> &interruptMsgProc) {
4893 PeerRef peer = GetPeerRef(pfrom.
GetId());
4894 if (peer ==
nullptr) {
4900 "Avalanche is not initialized, ignoring %s message\n",
4915 uint64_t nNonce = 1;
4918 std::string cleanSubVer;
4919 int starting_height = -1;
4921 uint64_t nExtraEntropy = 1;
4923 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4936 "peer=%d does not offer the expected services "
4937 "(%08x offered, %08x expected); disconnecting\n",
4938 pfrom.
GetId(), nServices,
4948 "peer=%d does not offer the avalanche service; disconnecting\n",
4957 "peer=%d using obsolete version %i; disconnecting\n",
4958 pfrom.
GetId(), nVersion);
4963 if (!vRecv.
empty()) {
4972 if (!vRecv.
empty()) {
4973 std::string strSubVer;
4977 if (!vRecv.
empty()) {
4978 vRecv >> starting_height;
4980 if (!vRecv.
empty()) {
4983 if (!vRecv.
empty()) {
4984 vRecv >> nExtraEntropy;
4988 LogPrintf(
"connected to self at %s, disconnecting\n",
5001 PushNodeVersion(config, pfrom, *peer);
5005 const int greatest_common_version =
5017 peer->m_their_services = nServices;
5021 pfrom.cleanSubVer = cleanSubVer;
5023 peer->m_starting_height = starting_height;
5031 (fRelay || (peer->m_our_services &
NODE_BLOOM))) {
5032 auto *
const tx_relay = peer->SetTxRelay();
5034 LOCK(tx_relay->m_bloom_filter_mutex);
5036 tx_relay->m_relay_txs = fRelay;
5049 CNodeState *state = State(pfrom.
GetId());
5050 state->fPreferredDownload =
5054 m_num_preferred_download_peers += state->fPreferredDownload;
5060 bool send_getaddr{
false};
5062 send_getaddr = SetupAddressRelay(pfrom, *peer);
5073 peer->m_getaddr_sent =
true;
5077 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5078 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
5099 std::string remoteAddr;
5105 "receive version message: [%s] %s: version %d, blocks=%d, "
5106 "us=%s, txrelay=%d, peer=%d%s\n",
5109 pfrom.
GetId(), remoteAddr);
5111 int64_t currentTime =
GetTime();
5112 int64_t nTimeOffset = nTime - currentTime;
5117 Misbehaving(*peer,
"Ignoring invalid timestamp in version message");
5127 "feeler connection completed peer=%d; disconnecting\n",
5136 Misbehaving(*peer,
"non-version message before version handshake");
5143 "ignoring redundant verack message from peer=%d\n",
5149 LogPrintf(
"New outbound peer connected: version: %d, blocks=%d, "
5151 pfrom.
nVersion.load(), peer->m_starting_height,
5174 AddKnownProof(*peer, localProof->getId());
5178 peer->m_proof_relay->m_recently_announced_proofs.insert(
5179 localProof->getId());
5184 if (
auto tx_relay = peer->GetTxRelay()) {
5193 return tx_relay->m_tx_inventory_to_send.empty() &&
5194 tx_relay->m_next_inv_send_time == 0s));
5203 Misbehaving(*peer,
"non-verack message before version handshake");
5208 const auto ser_params{
5217 std::vector<CAddress> vAddr;
5221 if (!SetupAddressRelay(pfrom, *peer)) {
5227 if (vAddr.size() > m_opts.max_addr_to_send) {
5228 Misbehaving(*peer,
strprintf(
"%s message size = %u", msg_type,
5234 std::vector<CAddress> vAddrOk;
5235 const auto current_a_time{Now<NodeSeconds>()};
5238 const auto current_time = GetTime<std::chrono::microseconds>();
5240 LOCK(peer->m_addr_token_bucket_mutex);
5243 const auto time_diff =
5244 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5245 const double increment =
5247 peer->m_addr_token_bucket =
5248 std::min<double>(peer->m_addr_token_bucket + increment,
5252 peer->m_addr_token_timestamp = current_time;
5254 const bool rate_limited =
5256 uint64_t num_proc = 0;
5257 uint64_t num_rate_limit = 0;
5258 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5260 if (interruptMsgProc) {
5265 LOCK(peer->m_addr_token_bucket_mutex);
5267 if (peer->m_addr_token_bucket < 1.0) {
5273 peer->m_addr_token_bucket -= 1.0;
5286 addr.
nTime > current_a_time + 10min) {
5287 addr.
nTime = current_a_time - 5 * 24h;
5289 AddAddressKnown(*peer, addr);
5298 if (addr.
nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5301 RelayAddress(pfrom.
GetId(), addr, fReachable);
5305 vAddrOk.push_back(addr);
5308 peer->m_addr_processed += num_proc;
5309 peer->m_addr_rate_limited += num_rate_limit;
5311 "Received addr: %u addresses (%u processed, %u rate-limited) "
5313 vAddr.size(), num_proc, num_rate_limit, pfrom.
GetId());
5315 m_addrman.
Add(vAddrOk, pfrom.
addr, 2h);
5316 if (vAddr.size() < 1000) {
5317 peer->m_getaddr_sent =
false;
5324 "addrfetch connection completed peer=%d; disconnecting\n",
5332 peer->m_wants_addrv2 =
true;
5337 peer->m_prefers_headers =
true;
5342 bool sendcmpct_hb{
false};
5343 uint64_t sendcmpct_version{0};
5344 vRecv >> sendcmpct_hb >> sendcmpct_version;
5351 CNodeState *nodestate = State(pfrom.
GetId());
5352 nodestate->m_provides_cmpctblocks =
true;
5353 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5362 std::vector<CInv> vInv;
5365 Misbehaving(*peer,
strprintf(
"inv message size = %u", vInv.size()));
5369 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5371 const auto current_time{GetTime<std::chrono::microseconds>()};
5372 std::optional<BlockHash> best_block;
5374 auto logInv = [&](
const CInv &inv,
bool fAlreadyHave) {
5376 fAlreadyHave ?
"have" :
"new", pfrom.
GetId());
5379 for (
CInv &inv : vInv) {
5380 if (interruptMsgProc) {
5392 const bool fAlreadyHave = AlreadyHaveBlock(
BlockHash(inv.
hash));
5393 logInv(inv, fAlreadyHave);
5396 UpdateBlockAvailability(pfrom.
GetId(), hash);
5398 !IsBlockRequested(hash)) {
5405 best_block = std::move(hash);
5416 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5417 logInv(inv, fAlreadyHave);
5418 AddKnownProof(*peer, proofid);
5420 if (!fAlreadyHave && m_avalanche &&
5422 const bool preferred = isPreferredDownloadPeer(pfrom);
5424 LOCK(cs_proofrequest);
5425 AddProofAnnouncement(pfrom, proofid, current_time,
5434 const bool fAlreadyHave =
5435 AlreadyHaveTx(txid,
true);
5436 logInv(inv, fAlreadyHave);
5438 AddKnownTx(*peer, txid);
5439 if (reject_tx_invs) {
5441 "transaction (%s) inv sent in violation of "
5442 "protocol, disconnecting peer=%d\n",
5446 }
else if (!fAlreadyHave &&
5448 AddTxAnnouncement(pfrom, txid, current_time);
5455 "Unknown inv type \"%s\" received from peer=%d\n",
5472 if (state.fSyncStarted ||
5473 (!peer->m_inv_triggered_getheaders_before_sync &&
5474 *best_block != m_last_block_inv_triggering_headers_sync)) {
5475 if (MaybeSendGetHeaders(
5476 pfrom,
GetLocator(m_chainman.m_best_header), *peer)) {
5478 m_chainman.m_best_header->nHeight,
5479 best_block->ToString(), pfrom.
GetId());
5481 if (!state.fSyncStarted) {
5482 peer->m_inv_triggered_getheaders_before_sync =
true;
5486 m_last_block_inv_triggering_headers_sync = *best_block;
5495 std::vector<CInv> vInv;
5499 strprintf(
"getdata message size = %u", vInv.size()));
5504 vInv.size(), pfrom.
GetId());
5506 if (vInv.size() > 0) {
5512 LOCK(peer->m_getdata_requests_mutex);
5513 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5514 vInv.begin(), vInv.end());
5515 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5524 vRecv >> locator >> hashStop;
5528 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5542 std::shared_ptr<const CBlock> a_recent_block;
5544 LOCK(m_most_recent_block_mutex);
5545 a_recent_block = m_most_recent_block;
5549 state, a_recent_block, m_avalanche)) {
5567 (pindex ? pindex->
nHeight : -1),
5570 for (; pindex; pindex = m_chainman.
ActiveChain().Next(pindex)) {
5579 const int nPrunedBlocksLikelyToHave =
5583 (!pindex->nStatus.hasData() ||
5585 nPrunedBlocksLikelyToHave)) {
5588 " getblocks stopping, pruned or too old block at %d %s\n",
5593 peer->m_block_inv_mutex,
5594 peer->m_blocks_for_inv_relay.push_back(pindex->
GetBlockHash()));
5595 if (--nLimit <= 0) {
5601 peer->m_continuation_block = pindex->GetBlockHash();
5613 std::shared_ptr<const CBlock> recent_block;
5615 LOCK(m_most_recent_block_mutex);
5616 if (m_most_recent_block_hash == req.
blockhash) {
5617 recent_block = m_most_recent_block;
5622 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5632 if (!pindex || !pindex->nStatus.hasData()) {
5635 "Peer %d sent us a getblocktxn for a block we don't have\n",
5646 if (!block_pos.IsNull()) {
5654 SendBlockTransactions(pfrom, *peer, block, req);
5666 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5671 WITH_LOCK(peer->m_getdata_requests_mutex,
5672 peer->m_getdata_requests.push_back(inv));
5681 vRecv >> locator >> hashStop;
5685 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5694 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5708 if (m_chainman.
ActiveTip() ==
nullptr ||
5713 "Ignoring getheaders from peer=%d because active chain "
5714 "has too little work; sending empty response\n",
5719 std::vector<CBlock>());
5723 CNodeState *nodestate = State(pfrom.
GetId());
5732 if (!BlockRequestAllowed(pindex)) {
5734 "%s: ignoring request from peer=%i for old block "
5735 "header that isn't in the main chain\n",
5736 __func__, pfrom.
GetId());
5750 std::vector<CBlock> vHeaders;
5753 (pindex ? pindex->
nHeight : -1),
5756 for (; pindex; pindex = m_chainman.
ActiveChain().Next(pindex)) {
5758 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5775 nodestate->pindexBestHeaderSent =
5782 if (RejectIncomingTxs(pfrom)) {
5784 "transaction sent in violation of protocol peer=%d\n",
5800 const CTransaction &tx = *ptx;
5801 const TxId &txid = tx.GetId();
5802 AddKnownTx(*peer, txid);
5807 m_txrequest.ReceivedResponse(pfrom.
GetId(), txid);
5809 if (AlreadyHaveTx(txid,
true)) {
5815 if (!m_mempool.
exists(tx.GetId())) {
5817 "Not relaying non-mempool transaction %s from "
5818 "forcerelay peer=%d\n",
5819 tx.GetId().ToString(), pfrom.
GetId());
5821 LogPrintf(
"Force relaying tx %s from peer=%d\n",
5822 tx.GetId().ToString(), pfrom.
GetId());
5823 RelayTransaction(tx.GetId());
5827 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5835 "found tx %s in reconsiderable rejects, looking for "
5836 "child in orphanage\n",
5838 if (
auto package_to_validate{
5839 Find1P1CPackage(ptx, pfrom.
GetId())}) {
5842 package_to_validate->m_txns,
5845 "package evaluation for %s: %s (%s)\n",
5846 package_to_validate->ToString(),
5848 ?
"package accepted"
5849 :
"package rejected",
5851 ProcessPackageResult(package_to_validate.value(),
5880 ProcessValidTx(pfrom.
GetId(), ptx);
5886 bool fRejectedParents =
false;
5890 std::vector<TxId> unique_parents;
5891 unique_parents.reserve(tx.vin.size());
5892 for (
const CTxIn &txin : tx.vin) {
5895 unique_parents.push_back(txin.prevout.GetTxId());
5897 std::sort(unique_parents.begin(), unique_parents.end());
5898 unique_parents.erase(
5899 std::unique(unique_parents.begin(), unique_parents.end()),
5900 unique_parents.end());
5908 std::optional<TxId> rejected_parent_reconsiderable;
5909 for (
const TxId &parent_txid : unique_parents) {
5910 if (m_recent_rejects.contains(parent_txid)) {
5911 fRejectedParents =
true;
5915 if (m_recent_rejects_package_reconsiderable.contains(
5917 !m_mempool.
exists(parent_txid)) {
5922 if (rejected_parent_reconsiderable.has_value()) {
5923 fRejectedParents =
true;
5926 rejected_parent_reconsiderable = parent_txid;
5929 if (!fRejectedParents) {
5930 const auto current_time{
5931 GetTime<std::chrono::microseconds>()};
5933 for (
const TxId &parent_txid : unique_parents) {
5935 AddKnownTx(*peer, parent_txid);
5939 if (!AlreadyHaveTx(parent_txid,
5941 AddTxAnnouncement(pfrom, parent_txid, current_time);
5947 if (
unsigned int nEvicted =
5951 if (orphanage.AddTx(ptx,
5953 AddToCompactExtraTransactions(ptx);
5956 m_opts.max_orphan_txs, m_rng);
5959 "orphanage overflow, removed %u tx\n",
5965 m_txrequest.ForgetInvId(tx.GetId());
5969 "not keeping orphan with rejected parents %s\n",
5970 tx.GetId().ToString());
5973 m_recent_rejects.insert(tx.GetId());
5974 m_txrequest.ForgetInvId(tx.GetId());
5978 ProcessInvalidTx(pfrom.
GetId(), ptx, state,
5988 "tx %s failed but reconsiderable, looking for child in "
5991 if (
auto package_to_validate{
5992 Find1P1CPackage(ptx, pfrom.
GetId())}) {
5995 package_to_validate->m_txns,
false)};
5997 "package evaluation for %s: %s (%s)\n",
5998 package_to_validate->ToString(),
6000 ?
"package accepted"
6001 :
"package rejected",
6003 ProcessPackageResult(package_to_validate.value(),
6012 m_txrequest.ForgetInvId(tx.GetId());
6014 unsigned int nEvicted{0};
6021 m_opts.max_conflicting_txs, m_rng);
6026 "conflicting pool overflow, removed %u tx\n",
6039 "Unexpected cmpctblock message received from peer %d\n",
6046 vRecv >> cmpctblock;
6047 }
catch (std::ios_base::failure &e) {
6049 Misbehaving(*peer,
"cmpctblock-bad-indexes");
6053 bool received_new_header =
false;
6066 MaybeSendGetHeaders(
6067 pfrom,
GetLocator(m_chainman.m_best_header), *peer);
6073 GetAntiDoSWorkThreshold()) {
6077 "Ignoring low-work compact block from peer %d\n",
6083 received_new_header =
true;
6093 MaybePunishNodeForBlock(pfrom.
GetId(), state,
6095 "invalid header via cmpctblock");
6100 if (received_new_header) {
6101 LogInfo(
"Saw new cmpctblock header hash=%s peer=%d\n",
6102 blockhash.ToString(), pfrom.
GetId());
6109 bool fProcessBLOCKTXN =
false;
6115 bool fRevertToHeaderProcessing =
false;
6119 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6120 bool fBlockReconstructed =
false;
6128 CNodeState *nodestate = State(pfrom.
GetId());
6132 if (received_new_header &&
6135 nodestate->m_last_block_announcement =
GetTime();
6138 if (pindex->nStatus.hasData()) {
6145 size_t already_in_flight =
6146 std::distance(range_flight.first, range_flight.second);
6147 bool requested_block_from_this_peer{
false};
6151 bool first_in_flight =
6152 already_in_flight == 0 ||
6153 (range_flight.first->second.first == pfrom.
GetId());
6155 while (range_flight.first != range_flight.second) {
6156 if (range_flight.first->second.first == pfrom.
GetId()) {
6157 requested_block_from_this_peer =
true;
6160 range_flight.first++;
6169 if (requested_block_from_this_peer) {
6173 std::vector<CInv> vInv(1);
6182 if (!already_in_flight && !CanDirectFetch()) {
6190 nodestate->vBlocksInFlight.size() <
6192 requested_block_from_this_peer) {
6193 std::list<QueuedBlock>::iterator *queuedBlockIt =
nullptr;
6194 if (!BlockRequested(config, pfrom.
GetId(), *pindex,
6196 if (!(*queuedBlockIt)->partialBlock) {
6198 ->partialBlock.reset(
6205 "we were already syncing!\n");
6211 *(*queuedBlockIt)->partialBlock;
6213 partialBlock.
InitData(cmpctblock, vExtraTxnForCompact);
6219 Misbehaving(*peer,
"invalid compact block");
6222 if (first_in_flight) {
6225 std::vector<CInv> vInv(1);
6238 for (
size_t i = 0; i < cmpctblock.
BlockTxCount(); i++) {
6249 fProcessBLOCKTXN =
true;
6250 }
else if (first_in_flight) {
6257 IsBlockRequestedFromOutbound(blockhash) ||
6280 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6285 std::vector<CTransactionRef> dummy;
6286 status = tempBlock.FillBlock(*pblock, dummy);
6288 fBlockReconstructed =
true;
6292 if (requested_block_from_this_peer) {
6296 std::vector<CInv> vInv(1);
6303 fRevertToHeaderProcessing =
true;
6308 if (fProcessBLOCKTXN) {
6310 blockTxnMsg, time_received, interruptMsgProc);
6313 if (fRevertToHeaderProcessing) {
6319 return ProcessHeadersMessage(config, pfrom, *peer,
6324 if (fBlockReconstructed) {
6329 mapBlockSource.emplace(pblock->GetHash(),
6330 std::make_pair(pfrom.
GetId(),
false));
6341 ProcessBlock(config, pfrom, pblock,
true,
6350 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6360 "Unexpected blocktxn message received from peer %d\n",
6368 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6369 bool fBlockRead =
false;
6373 auto range_flight = mapBlocksInFlight.equal_range(resp.
blockhash);
6374 size_t already_in_flight =
6375 std::distance(range_flight.first, range_flight.second);
6376 bool requested_block_from_this_peer{
false};
6380 bool first_in_flight =
6381 already_in_flight == 0 ||
6382 (range_flight.first->second.first == pfrom.
GetId());
6384 while (range_flight.first != range_flight.second) {
6385 auto [node_id, block_it] = range_flight.first->second;
6386 if (node_id == pfrom.
GetId() && block_it->partialBlock) {
6387 requested_block_from_this_peer =
true;
6390 range_flight.first++;
6393 if (!requested_block_from_this_peer) {
6395 "Peer %d sent us block transactions for block "
6396 "we weren't expecting\n",
6402 *range_flight.first->second.second->partialBlock;
6410 "invalid compact block/non-matching block transactions");
6413 if (first_in_flight) {
6415 std::vector<CInv> invs;
6422 "Peer %d sent us a compact block but it failed to "
6423 "reconstruct, waiting on first download to complete\n",
6456 std::make_pair(pfrom.
GetId(),
false));
6467 ProcessBlock(config, pfrom, pblock,
true,
6477 "Unexpected headers message received from peer %d\n",
6482 std::vector<CBlockHeader> headers;
6489 strprintf(
"too-many-headers: headers message size = %u",
6493 headers.resize(nCount);
6494 for (
unsigned int n = 0; n < nCount; n++) {
6495 vRecv >> headers[n];
6500 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6506 if (m_headers_presync_should_signal.exchange(
false)) {
6507 HeadersPresyncStats stats;
6509 LOCK(m_headers_presync_mutex);
6511 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6512 if (it != m_headers_presync_stats.end()) {
6518 stats.first, stats.second->first, stats.second->second);
6529 "Unexpected block message received from peer %d\n",
6534 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6538 pblock->GetHash().ToString(), pfrom.
GetId());
6543 pblock->hashPrevBlock))};
6547 "Received mutated block from peer=%d\n", peer->m_id);
6548 Misbehaving(*peer,
"mutated block");
6550 RemoveBlockRequest(pblock->GetHash(), peer->m_id));
6560 const BlockHash hash = pblock->GetHash();
6561 bool min_pow_checked =
false;
6566 forceProcessing = IsBlockRequested(hash);
6567 RemoveBlockRequest(hash, pfrom.
GetId());
6571 mapBlockSource.emplace(hash, std::make_pair(pfrom.
GetId(),
true));
6577 GetAntiDoSWorkThreshold()) {
6578 min_pow_checked =
true;
6581 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6591 if (pfrom.m_avalanche_pubkey.has_value()) {
6594 "Ignoring avahello from peer %d: already in our node set\n",
6600 vRecv >> delegation;
6607 if (!delegation.
verify(state, pubkey)) {
6608 Misbehaving(*peer,
"invalid-delegation");
6611 pfrom.m_avalanche_pubkey = std::move(pubkey);
6614 sighasher << delegation.
getId();
6622 if (!(*pfrom.m_avalanche_pubkey)
6623 .VerifySchnorr(sighasher.GetHash(),
sig)) {
6624 Misbehaving(*peer,
"invalid-avahello-signature");
6631 if (!AlreadyHaveProof(proofid)) {
6632 const bool preferred = isPreferredDownloadPeer(pfrom);
6633 LOCK(cs_proofrequest);
6634 AddProofAnnouncement(pfrom, proofid,
6635 GetTime<std::chrono::microseconds>(),
6653 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6654 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6658 peer->m_proof_relay->compactproofs_requested =
true;
6669 const auto now = Now<SteadyMilliseconds>();
6675 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6677 "Ignoring repeated avapoll from peer %d: cooldown not "
6692 strprintf(
"too-many-ava-poll: poll message size = %u", nCount));
6696 std::vector<avalanche::Vote> votes;
6697 votes.reserve(nCount);
6699 bool fPreconsensus{
false};
6700 bool fStakingPreconsensus{
false};
6705 fStakingPreconsensus =
6709 for (
unsigned int n = 0; n < nCount; n++) {
6717 if (!quorum_established) {
6718 votes.emplace_back(vote, inv.
hash);
6725 if (fPreconsensus) {
6727 GetAvalancheVoteForTx(*m_avalanche,
TxId(inv.
hash));
6739 if (fStakingPreconsensus) {
6746 "poll inv type %d unknown from peer=%d\n",
6751 votes.emplace_back(vote, inv.
hash);
6777 if (!pfrom.m_avalanche_pubkey.has_value() ||
6778 !(*pfrom.m_avalanche_pubkey)
6779 .VerifySchnorr(verifier.GetHash(),
sig)) {
6780 Misbehaving(*peer,
"invalid-ava-response-signature");
6785 auto now = GetTime<std::chrono::seconds>();
6787 std::vector<avalanche::VoteItemUpdate> updates;
6788 bool disconnect{
false};
6791 disconnect, error)) {
6793 Misbehaving(*peer, error);
6811 "Repeated failure to register votes from peer %d: %s\n",
6812 pfrom.
GetId(), error);
6815 Misbehaving(*peer, error);
6828 auto logVoteUpdate = [](
const auto &voteUpdate,
6829 const std::string &voteItemTypeStr,
6830 const auto &voteItemId) {
6831 std::string voteOutcome;
6832 bool alwaysPrint =
false;
6833 switch (voteUpdate.getStatus()) {
6835 voteOutcome =
"invalidated";
6839 voteOutcome =
"rejected";
6842 voteOutcome =
"accepted";
6845 voteOutcome =
"finalized";
6848 alwaysPrint = voteItemTypeStr !=
"tx";
6851 voteOutcome =
"stalled";
6860 alwaysPrint &= (voteItemTypeStr !=
"contender");
6863 LogPrintf(
"Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6864 voteItemId.ToString());
6868 voteItemTypeStr, voteItemId.ToString());
6872 bool shouldActivateBestChain =
false;
6874 bool fPreconsensus{
false};
6875 bool fStakingPreconsensus{
false};
6880 fStakingPreconsensus =
6884 for (
const auto &u : updates) {
6889 if (
auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6893 logVoteUpdate(u,
"proof", proofid);
6895 auto rejectionMode =
6897 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6898 switch (u.getStatus()) {
6914 return pm.rejectProof(proofid,
6918 "ERROR: Failed to reject proof: %s\n",
6924 nextCooldownTimePoint += std::chrono::seconds(
6925 m_opts.avalanche_peer_replacement_cooldown);
6931 avalanche::PeerManager::
6932 RegistrationMode::FORCE_ACCEPT);
6935 [&](const avalanche::Peer &peer) {
6936 pm.updateNextPossibleConflictTime(
6938 nextCooldownTimePoint);
6939 if (u.getStatus() ==
6940 avalanche::VoteStatus::
6942 pm.setFinalized(peer.peerid);
6950 "ERROR: Failed to accept proof: %s\n",
6957 auto getBlockFromIndex = [
this](
const CBlockIndex *pindex) {
6960 std::shared_ptr<const CBlock> pblock =
WITH_LOCK(
6961 m_most_recent_block_mutex,
return m_most_recent_block);
6963 if (!pblock || pblock->GetHash() != pindex->
GetBlockHash()) {
6964 std::shared_ptr<CBlock> pblockRead =
6965 std::make_shared<CBlock>();
6968 assert(!
"cannot load block from disk");
6970 pblock = pblockRead;
6975 if (
auto pitem = std::get_if<const CBlockIndex *>(&item)) {
6978 shouldActivateBestChain =
true;
6982 switch (u.getStatus()) {
6987 LogPrintf(
"ERROR: Database error: %s\n",
6996 LogPrintf(
"ERROR: Database error: %s\n",
7001 auto pblock = getBlockFromIndex(pindex);
7017 std::unique_ptr<node::CBlockTemplate> blockTemplate;
7021 chainstate.UnparkBlock(pindex);
7023 const bool newlyFinalized =
7024 !chainstate.IsBlockAvalancheFinalized(pindex) &&
7025 chainstate.AvalancheFinalizeBlock(pindex,
7030 if (fPreconsensus && newlyFinalized) {
7031 auto pblock = getBlockFromIndex(pindex);
7045 std::unordered_set<TxId, SaltedTxIdHasher>
7046 confirmedTxIdsInNonFinalizedBlocks;
7048 block !=
nullptr && block != pindex;
7049 block = block->
pprev) {
7051 getBlockFromIndex(block);
7053 for (
const auto &tx :
7054 currentBlock->vtx) {
7055 confirmedTxIdsInNonFinalizedBlocks
7056 .insert(tx->GetId());
7064 confirmedTxIdsInNonFinalizedBlocks);
7076 config, chainstate, &m_mempool,
7078 blockAssembler.pblocktemplate.reset(
7081 if (blockAssembler.pblocktemplate) {
7082 blockAssembler.addTxs(m_mempool);
7083 blockTemplate = std::move(
7084 blockAssembler.pblocktemplate);
7090 if (blockTemplate) {
7096 for (
const auto &templateEntry :
7110 if (fStakingPreconsensus) {
7112 std::get_if<const avalanche::StakeContenderId>(&item)) {
7114 logVoteUpdate(u,
"contender", contenderId);
7116 switch (u.getStatus()) {
7137 if (!fPreconsensus) {
7141 if (
auto pitem = std::get_if<const CTransactionRef>(&item)) {
7145 const TxId &txid = tx->GetId();
7146 const auto status{u.getStatus()};
7151 logVoteUpdate(u,
"tx", txid);
7162 if (m_mempool.
exists(txid)) {
7166 std::vector<CTransactionRef> conflictingTxs =
7172 if (conflictingTxs.size() > 0) {
7183 for (
const auto &conflictingTx :
7186 conflictingTx->GetId());
7205 m_recent_rejects.insert(txid);
7212 std::make_shared<const std::vector<Coin>>(
7228 return conflicting.HaveTx(txid);
7231 std::vector<CTransactionRef>
7232 mempool_conflicting_txs;
7233 for (
const auto &txin : tx->vin) {
7238 mempool_conflicting_txs.push_back(
7239 std::move(conflict));
7248 [&txid, &mempool_conflicting_txs](
7253 if (mempool_conflicting_txs.size() >
7256 mempool_conflicting_txs[0],
7265 auto it = m_mempool.
GetIter(txid);
7266 if (!it.has_value()) {
7269 "Error: finalized tx (%s) is not in the "
7275 std::vector<TxId> finalizedTxIds;
7276 m_mempool.setAvalancheFinalized(
7281 for (
const auto &finalized_txid : finalizedTxIds) {
7286 logVoteUpdate(u,
"tx", finalized_txid);
7294 std::vector<CTransactionRef>
7297 for (
const auto &conflictingTx :
7299 m_recent_rejects.insert(
7300 conflictingTx->GetId());
7302 conflictingTx->GetId());
7328 m_txrequest.ForgetInvId(txid);
7334 for (
auto &it : m_peer_map) {
7335 auto tx_relay = (*it.second).GetTxRelay();
7340 LOCK(tx_relay->m_tx_inventory_mutex);
7347 auto &stalled_by_time =
7348 tx_relay->m_avalanche_stalled_txids
7350 if (stalled_by_time.size() >=
7352 stalled_by_time.erase(
7353 stalled_by_time.begin()->timeAdded);
7356 tx_relay->m_avalanche_stalled_txids.insert(
7367 if (shouldActivateBestChain) {
7370 state,
nullptr, m_avalanche)) {
7385 ReceivedAvalancheProof(pfrom, *peer, proof);
7394 if (peer->m_proof_relay ==
nullptr) {
7398 peer->m_proof_relay->lastSharedProofsUpdate =
7399 GetTime<std::chrono::seconds>();
7401 peer->m_proof_relay->sharedProofs =
7407 peer->m_proof_relay->sharedProofs);
7417 if (peer->m_proof_relay ==
nullptr) {
7422 if (!peer->m_proof_relay->compactproofs_requested) {
7426 peer->m_proof_relay->compactproofs_requested =
false;
7430 vRecv >> compactProofs;
7431 }
catch (std::ios_base::failure &e) {
7433 Misbehaving(*peer,
"avaproofs-bad-indexes");
7439 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7469 auto shortIdProcessor =
7473 if (shortIdProcessor.hasOutOfBoundIndex()) {
7476 Misbehaving(*peer,
"avaproofs-bad-indexes");
7479 if (!shortIdProcessor.isEvenlyDistributed()) {
7484 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7491 shortIdProcessor.matchKnownItem(shortid, peer.
proof);
7498 remoteProofsStatus.emplace_back(peer.
getProofId(),
7509 for (
size_t i = 0; i < compactProofs.
size(); i++) {
7510 if (shortIdProcessor.getItem(i) ==
nullptr) {
7526 return pfrom.m_avalanche_pubkey.has_value())) {
7529 for (
const auto &[proofid, present] : remoteProofsStatus) {
7539 if (peer->m_proof_relay ==
nullptr) {
7546 auto requestedIndiceIt = proofreq.
indices.begin();
7547 uint32_t treeIndice = 0;
7548 peer->m_proof_relay->sharedProofs.forEachLeaf([&](
const auto &proof) {
7549 if (requestedIndiceIt == proofreq.
indices.end()) {
7554 if (treeIndice++ == *requestedIndiceIt) {
7556 requestedIndiceIt++;
7562 peer->m_proof_relay->sharedProofs = {};
7575 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7582 Assume(SetupAddressRelay(pfrom, *peer));
7586 if (peer->m_getaddr_recvd) {
7591 peer->m_getaddr_recvd =
true;
7593 peer->m_addrs_to_send.clear();
7594 std::vector<CAddress> vAddr;
7595 const size_t maxAddrToSend = m_opts.max_addr_to_send;
7603 for (
const CAddress &addr : vAddr) {
7604 PushAddress(*peer, addr);
7610 auto now = GetTime<std::chrono::seconds>();
7620 if (!SetupAddressRelay(pfrom, *peer)) {
7622 "Ignoring getavaaddr message from %s peer=%d\n",
7627 auto availabilityScoreComparator = [](
const CNode *lhs,
7630 double scoreRhs = rhs->getAvailabilityScore();
7632 if (scoreLhs != scoreRhs) {
7633 return scoreLhs > scoreRhs;
7642 std::set<
const CNode *,
decltype(availabilityScoreComparator)> avaNodes(
7643 availabilityScoreComparator);
7650 avaNodes.insert(pnode);
7651 if (avaNodes.size() > m_opts.max_addr_to_send) {
7652 avaNodes.erase(std::prev(avaNodes.end()));
7656 peer->m_addrs_to_send.clear();
7657 for (
const CNode *pnode : avaNodes) {
7658 PushAddress(*peer, pnode->
addr);
7669 "mempool request with bloom filters disabled, "
7670 "disconnect peer=%d\n",
7681 "mempool request with bandwidth limit reached, "
7682 "disconnect peer=%d\n",
7689 if (
auto tx_relay = peer->GetTxRelay()) {
7690 LOCK(tx_relay->m_tx_inventory_mutex);
7691 tx_relay->m_send_mempool =
true;
7720 const auto ping_end = time_received;
7723 bool bPingFinished =
false;
7724 std::string sProblem;
7726 if (nAvail >=
sizeof(nonce)) {
7731 if (peer->m_ping_nonce_sent != 0) {
7732 if (nonce == peer->m_ping_nonce_sent) {
7735 bPingFinished =
true;
7736 const auto ping_time = ping_end - peer->m_ping_start.load();
7737 if (ping_time.count() >= 0) {
7742 sProblem =
"Timing mishap";
7746 sProblem =
"Nonce mismatch";
7750 bPingFinished =
true;
7751 sProblem =
"Nonce zero";
7755 sProblem =
"Unsolicited pong without ping";
7760 bPingFinished =
true;
7761 sProblem =
"Short payload";
7764 if (!(sProblem.empty())) {
7766 "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7767 pfrom.
GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7770 if (bPingFinished) {
7771 peer->m_ping_nonce_sent = 0;
7779 "filterload received despite not offering bloom services "
7780 "from peer=%d; disconnecting\n",
7790 Misbehaving(*peer,
"too-large bloom filter");
7791 }
else if (
auto tx_relay = peer->GetTxRelay()) {
7793 LOCK(tx_relay->m_bloom_filter_mutex);
7794 tx_relay->m_bloom_filter.reset(
new CBloomFilter(filter));
7795 tx_relay->m_relay_txs =
true;
7805 "filteradd received despite not offering bloom services "
7806 "from peer=%d; disconnecting\n",
7811 std::vector<uint8_t> vData;
7820 }
else if (
auto tx_relay = peer->GetTxRelay()) {
7821 LOCK(tx_relay->m_bloom_filter_mutex);
7822 if (tx_relay->m_bloom_filter) {
7823 tx_relay->m_bloom_filter->insert(vData);
7831 Misbehaving(*peer,
"bad filteradd message");
7839 "filterclear received despite not offering bloom services "
7840 "from peer=%d; disconnecting\n",
7845 auto tx_relay = peer->GetTxRelay();
7851 LOCK(tx_relay->m_bloom_filter_mutex);
7852 tx_relay->m_bloom_filter =
nullptr;
7853 tx_relay->m_relay_txs =
true;
7862 vRecv >> newFeeFilter;
7864 if (
auto tx_relay = peer->GetTxRelay()) {
7865 tx_relay->m_fee_filter_received = newFeeFilter;
7874 ProcessGetCFilters(pfrom, *peer, vRecv);
7879 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7884 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7889 std::vector<CInv> vInv;
7895 for (
CInv &inv : vInv) {
7901 m_txrequest.ReceivedResponse(pfrom.
GetId(),
TxId(inv.
hash));
7908 LOCK(cs_proofrequest);
7909 m_proofrequest.ReceivedResponse(
// Decide whether a peer previously flagged for misbehavior should be
// discouraged and disconnected. Reads and clears peer.m_should_discourage
// under m_misbehavior_mutex. Peers that must not be punished (noban
// permission, manually connected) are only warned about via LogPrintf.
// NOTE(review): this view is an elided extract — lines between the embedded
// original line numbers are missing; claims below are limited to what is
// visible.
7923bool PeerManagerImpl::MaybeDiscourageAndDisconnect(
CNode &pnode, Peer &peer) {
7925 LOCK(peer.m_misbehavior_mutex);
// Nothing to do unless an earlier misbehavior report set the flag.
7928 if (!peer.m_should_discourage) {
// Consume the flag so the peer is considered for punishment at most once
// per offence.
7932 peer.m_should_discourage =
false;
7938 LogPrintf(
"Warning: not punishing noban peer %d!\n", peer.m_id);
7944 LogPrintf(
"Warning: not punishing manually connected peer %d!\n",
7953 "Warning: disconnecting but not discouraging %s peer %d!\n",
// Top-level per-peer message pump: services any queued getdata requests,
// reconsiders orphan transactions, validates the next received network
// message (netmagic, header, checksum) and dispatches it to ProcessMessage.
// Exceptions from message handling are caught and logged rather than
// propagated. Returns whether more work remains for this peer (fMoreWork /
// orphans to reconsider — exact return paths are elided from this view).
7970bool PeerManagerImpl::ProcessMessages(
const Config &config,
CNode *pfrom,
7971 std::atomic<bool> &interruptMsgProc) {
// Bail out if the peer was already destructed.
7983 PeerRef peer = GetPeerRef(pfrom->
GetId());
7984 if (peer ==
nullptr) {
// Serve any responses queued by a previous getdata before reading new input.
7989 LOCK(peer->m_getdata_requests_mutex);
7990 if (!peer->m_getdata_requests.empty()) {
7991 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
7995 const bool processed_orphan = ProcessOrphanTx(config, *peer);
8001 if (processed_orphan) {
8008 LOCK(peer->m_getdata_requests_mutex);
8009 if (!peer->m_getdata_requests.empty()) {
8026 bool fMoreWork = poll_result->second;
8030 msg.m_recv.size(), msg.m_recv.data());
8032 if (m_opts.capture_messages) {
// Reject messages whose network magic does not match ours.
8038 if (!msg.m_valid_netmagic) {
8040 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
8054 if (!msg.m_valid_header) {
8062 if (!msg.m_valid_checksum) {
// Dispatch the validated message to the per-type handler.
8074 ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
8076 if (interruptMsgProc) {
8081 LOCK(peer->m_getdata_requests_mutex);
8082 if (!peer->m_getdata_requests.empty()) {
8091 return orphanage.HaveTxToReconsider(peer->m_id);
// Handler exceptions are logged with their type name, not rethrown.
8095 }
catch (
const std::exception &e) {
8098 e.what(),
typeid(e).name());
// Chain-sync eviction logic for an outbound peer: tracks, in
// state.m_chain_sync, whether the peer's best known block has at least as
// much work as our benchmark header. If it does, the timeout machinery is
// reset; otherwise a timeout is armed, a getheaders is sent once to give the
// peer a chance to prove its chain work, and a peer that still fails after
// m_sent_getheaders is disconnected for being on an old chain.
// NOTE(review): elided extract — intermediate statements between the embedded
// original line numbers are not visible here.
8107void PeerManagerImpl::ConsiderEviction(
CNode &pto, Peer &peer,
8108 std::chrono::seconds time_in_seconds) {
8111 CNodeState &state = *State(pto.
GetId());
8114 state.fSyncStarted) {
// Peer has (at least) as much work as our benchmark: clear any pending
// timeout and forget the work header.
8121 if (state.pindexBestKnownBlock !=
nullptr &&
8122 state.pindexBestKnownBlock->nChainWork >=
8124 if (state.m_chain_sync.m_timeout != 0s) {
8125 state.m_chain_sync.m_timeout = 0s;
8126 state.m_chain_sync.m_work_header =
nullptr;
8127 state.m_chain_sync.m_sent_getheaders =
false;
// Arm (or re-arm) the timeout against the current tip as the benchmark.
8129 }
else if (state.m_chain_sync.m_timeout == 0s ||
8130 (state.m_chain_sync.m_work_header !=
nullptr &&
8131 state.pindexBestKnownBlock !=
nullptr &&
8132 state.pindexBestKnownBlock->nChainWork >=
8133 state.m_chain_sync.m_work_header->nChainWork)) {
8139 state.m_chain_sync.m_work_header = m_chainman.
ActiveChain().
Tip();
8140 state.m_chain_sync.m_sent_getheaders =
false;
// Timeout expired: either disconnect (getheaders already sent) or send a
// getheaders to verify the peer's chain work.
8141 }
else if (state.m_chain_sync.m_timeout > 0s &&
8142 time_in_seconds > state.m_chain_sync.m_timeout) {
8147 if (state.m_chain_sync.m_sent_getheaders) {
8150 "Disconnecting outbound peer %d for old chain, best known "
8153 state.pindexBestKnownBlock !=
nullptr
8154 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8158 assert(state.m_chain_sync.m_work_header);
// Locator starts at the benchmark header's parent so the peer's reply
// must include the benchmark (or better) to count.
8163 MaybeSendGetHeaders(
8164 pto,
GetLocator(state.m_chain_sync.m_work_header->pprev),
8168 "sending getheaders to outbound peer=%d to verify chain "
8169 "work (current best known block:%s, benchmark blockhash: "
8172 state.pindexBestKnownBlock !=
nullptr
8173 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8175 state.m_chain_sync.m_work_header->GetBlockHash()
8177 state.m_chain_sync.m_sent_getheaders =
true;
8184 state.m_chain_sync.m_timeout =
// Disconnect surplus outbound peers. Two passes are visible: (1) among
// block-relay-only peers, pick the youngest (highest NodeId) — preferring the
// next-youngest when the youngest delivered a block more recently — and
// disconnect it unless it has blocks in flight; (2) among full outbound
// peers, pick the one with the oldest last block announcement (ties broken by
// higher NodeId), sparing peers protected by m_chain_sync.m_protect.
// NOTE(review): elided extract — loop/guard context between the embedded
// original line numbers is not visible here.
8191void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
8200 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
8201 next_youngest_peer{-1, 0};
// Higher NodeId == more recently connected ("youngest").
8207 if (pnode->
GetId() > youngest_peer.first) {
8208 next_youngest_peer = youngest_peer;
8209 youngest_peer.first = pnode->GetId();
8210 youngest_peer.second = pnode->m_last_block_time;
8214 NodeId to_disconnect = youngest_peer.first;
// Keep the youngest if it gave us a block more recently than the runner-up.
8215 if (youngest_peer.second > next_youngest_peer.second) {
8218 to_disconnect = next_youngest_peer.first;
8230 CNodeState *node_state = State(pnode->
GetId());
// Only disconnect when no blocks are in flight from this peer.
8231 if (node_state ==
nullptr ||
8233 node_state->vBlocksInFlight.empty())) {
8236 "disconnecting extra block-relay-only peer=%d "
8237 "(last block received at time %d)\n",
8244 "keeping block-relay-only peer=%d chosen for eviction "
8245 "(connect time: %d, blocks_in_flight: %d)\n",
8247 node_state->vBlocksInFlight.size());
8263 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
8274 CNodeState *state = State(pnode->
GetId());
8275 if (state ==
nullptr) {
// Peers protected by the chain-sync logic are never eviction candidates.
8280 if (state->m_chain_sync.m_protect) {
8283 if (state->m_last_block_announcement < oldest_block_announcement ||
8284 (state->m_last_block_announcement == oldest_block_announcement &&
8285 pnode->
GetId() > worst_peer)) {
8286 worst_peer = pnode->
GetId();
8287 oldest_block_announcement = state->m_last_block_announcement;
8291 if (worst_peer == -1) {
8295 bool disconnected = m_connman.
ForNode(
8303 CNodeState &state = *State(pnode->
GetId());
// Again, spare the candidate while it still has blocks in flight.
8305 state.vBlocksInFlight.empty()) {
8307 "disconnecting extra outbound peer=%d (last block "
8308 "announcement received at time %d)\n",
8309 pnode->
GetId(), oldest_block_announcement);
8314 "keeping outbound peer=%d chosen for eviction "
8315 "(connect time: %d, blocks_in_flight: %d)\n",
8317 state.vBlocksInFlight.size());
// Periodic maintenance: evict extra outbound peers, and when the scheduled
// stale-tip check time has passed, log that the tip looks stale (so an extra
// outbound peer may be tried). Also latches m_initial_sync_finished once the
// node can direct-fetch blocks.
8332void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8335 auto now{GetTime<std::chrono::seconds>()};
8337 EvictExtraOutboundPeers(now);
8339 if (now > m_stale_tip_check_time) {
8345 LogPrintf(
"Potential stale tip detected, will try using extra "
8346 "outbound peer (last tip update: %d seconds ago)\n",
8355 if (!m_initial_sync_finished && CanDirectFetch()) {
// One-way latch: initial sync is considered done from here on.
8357 m_initial_sync_finished =
true;
// Send a ping to the peer when one is due (queued via m_ping_queued or no
// ping outstanding). A fresh nonzero nonce is drawn (retrying while zero,
// since nonce 0 means "no ping outstanding"), the send time is recorded in
// m_ping_start, and the nonce is remembered in m_ping_nonce_sent for
// matching the eventual pong.
// NOTE(review): elided extract — the timeout/disconnect branch around the
// first condition is only partially visible.
8361void PeerManagerImpl::MaybeSendPing(
CNode &node_to, Peer &peer,
8362 std::chrono::microseconds now) {
8364 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8365 peer.m_ping_nonce_sent &&
8376 bool pingSend =
false;
8378 if (peer.m_ping_queued) {
8383 if (peer.m_ping_nonce_sent == 0 &&
// Loop until a nonzero nonce is drawn; zero is reserved for "none sent".
8393 }
while (nonce == 0);
8394 peer.m_ping_queued =
false;
8395 peer.m_ping_start = now;
8397 peer.m_ping_nonce_sent = nonce;
// Peers that can't take a nonce get their outstanding-ping marker cleared.
8402 peer.m_ping_nonce_sent = 0;
// Periodic address relay to one peer. Skipped entirely unless address relay
// is enabled for the peer. Visible behavior: occasionally self-announce our
// local address (resetting m_addr_known so the peer can re-learn it), then —
// when m_next_addr_send has elapsed — trim m_addrs_to_send to the configured
// maximum, drop addresses the peer already knows (inserting the rest into
// its known-address filter), send the remainder as addr or addrv2 depending
// on peer.m_wants_addrv2, and release oversized send-buffer capacity.
8408void PeerManagerImpl::MaybeSendAddr(
CNode &
node, Peer &peer,
8409 std::chrono::microseconds current_time) {
8411 if (!peer.m_addr_relay_enabled) {
8415 LOCK(peer.m_addr_send_times_mutex);
8417 peer.m_next_local_addr_send < current_time) {
// Clear the known-addr filter so a re-announcement of our own address
// is not filtered out as a duplicate.
8424 if (peer.m_next_local_addr_send != 0us) {
8425 peer.m_addr_known->reset();
8428 CAddress local_addr{*local_service, peer.m_our_services,
8429 Now<NodeSeconds>()};
8430 PushAddress(peer, local_addr);
8432 peer.m_next_local_addr_send =
8438 if (current_time <= peer.m_next_addr_send) {
8442 peer.m_next_addr_send =
// Defensive cap: the queue should never exceed the configured maximum.
8445 const size_t max_addr_to_send = m_opts.max_addr_to_send;
8446 if (!
Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8449 peer.m_addrs_to_send.resize(max_addr_to_send);
// Predicate with a side effect: marks unseen addresses as known while
// reporting whether the peer already knew them.
8454 auto addr_already_known =
8457 bool ret = peer.m_addr_known->contains(addr.
GetKey());
8459 peer.m_addr_known->insert(addr.
GetKey());
// Classic erase-remove of everything the peer already knows.
8463 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8464 peer.m_addrs_to_send.end(),
8465 addr_already_known),
8466 peer.m_addrs_to_send.end());
8469 if (peer.m_addrs_to_send.empty()) {
// Pick the wire format based on the peer's advertised addrv2 support.
8473 const char *msg_type;
8475 if (peer.m_wants_addrv2) {
8485 peer.m_addrs_to_send));
8486 peer.m_addrs_to_send.clear();
// Shrink the buffer if a burst left it holding excess capacity.
8489 if (peer.m_addrs_to_send.capacity() > 40) {
8490 peer.m_addrs_to_send.shrink_to_fit();
// Send a one-time "sendheaders" to the peer (asking for headers-first block
// announcements) once the peer's best known block carries enough chain work;
// m_sent_sendheaders latches so the message is sent at most once.
8494void PeerManagerImpl::MaybeSendSendHeaders(
CNode &
node, Peer &peer) {
8499 if (!peer.m_sent_sendheaders &&
8502 CNodeState &state = *State(
node.GetId());
// Only peers with sufficient known chain work qualify.
8503 if (state.pindexBestKnownBlock !=
nullptr &&
8504 state.pindexBestKnownBlock->nChainWork >
8511 peer.m_sent_sendheaders =
true;
// Periodically broadcast our fee filter to the peer. Skipped when incoming
// txs are ignored. Once the maximal filter (rounded MAX_MONEY) has been
// sent, no further sends are scheduled. Otherwise, when the send timer
// expires, a rounded filter value is sent only if it differs from the last
// one; a separate fast-path (bottom branch) pulls the next send forward when
// the current filter has drifted by more than roughly a 3/4–4/3 band from
// what the peer last saw.
8516void PeerManagerImpl::MaybeSendFeefilter(
8517 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8518 if (m_opts.ignore_incoming_txs) {
// Computed once per process: the largest representable filter value.
8542 static const Amount MAX_FILTER{m_fee_filter_rounder.round(
MAX_MONEY)};
8543 if (peer.m_fee_filter_sent == MAX_FILTER) {
// Terminal state: never schedule another feefilter send.
8546 peer.m_next_send_feefilter = 0us;
8549 if (current_time > peer.m_next_send_feefilter) {
8550 Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
// Only send when the rounded value actually changed.
8554 if (filterToSend != peer.m_fee_filter_sent) {
8556 peer.m_fee_filter_sent = filterToSend;
8558 peer.m_next_send_feefilter =
// Large drift from the last-sent value pulls the next send earlier.
8566 peer.m_next_send_feefilter &&
8567 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8568 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8569 peer.m_next_send_feefilter =
// Comparator over m_tx_inventory_to_send iterators, parameterized by a
// (non-owning) CTxMemPool pointer. The comparison body is not visible in
// this elided view — presumably it orders txids for inventory announcement
// using mempool data; confirm against the full source.
8577class CompareInvMempoolOrder {
8581 explicit CompareInvMempoolOrder(
CTxMemPool *_mempool) : mp(_mempool) {}
8583 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
// Whether transactions from this peer should be rejected outright. The
// visible condition gates on m_opts.ignore_incoming_txs; the remaining
// conjunct(s) and any earlier checks are elided from this view.
8593bool PeerManagerImpl::RejectIncomingTxs(
const CNode &peer)
const {
8602 if (m_opts.ignore_incoming_txs &&
// Enable address relay for a peer, once. Block-relay-only connections never
// relay addresses. The atomic exchange on m_addr_relay_enabled ensures the
// one-time initialization (allocating the peer's known-address rolling bloom
// filter) runs only on the first call.
8609bool PeerManagerImpl::SetupAddressRelay(
const CNode &
node, Peer &peer) {
8613 if (
node.IsBlockOnlyConn()) {
// exchange(true) returns the previous value: false exactly once.
8617 if (!peer.m_addr_relay_enabled.exchange(
true)) {
8621 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
8627bool PeerManagerImpl::SendMessages(
const Config &config,
CNode *pto) {
8630 PeerRef peer = GetPeerRef(pto->
GetId());
8639 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8648 const auto current_time{GetTime<std::chrono::microseconds>()};
8653 "addrfetch connection timeout; disconnecting peer=%d\n",
8659 MaybeSendPing(*pto, *peer, current_time);
8666 bool sync_blocks_and_headers_from_peer =
false;
8668 MaybeSendAddr(*pto, *peer, current_time);
8670 MaybeSendSendHeaders(*pto, *peer);
8675 CNodeState &state = *State(pto->
GetId());
8678 if (m_chainman.m_best_header ==
nullptr) {
8685 if (state.fPreferredDownload) {
8686 sync_blocks_and_headers_from_peer =
true;
8697 if (m_num_preferred_download_peers == 0 ||
8698 mapBlocksInFlight.empty()) {
8699 sync_blocks_and_headers_from_peer =
true;
8703 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8707 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8709 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8718 if (pindexStart->
pprev) {
8719 pindexStart = pindexStart->
pprev;
8721 if (MaybeSendGetHeaders(*pto,
GetLocator(pindexStart), *peer)) {
8724 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8726 peer->m_starting_height);
8728 state.fSyncStarted =
true;
8729 peer->m_headers_sync_timeout =
8734 std::chrono::microseconds{
8736 Ticks<std::chrono::seconds>(
8738 m_chainman.m_best_header->Time()) /
8755 LOCK(peer->m_block_inv_mutex);
8756 std::vector<CBlock> vHeaders;
8758 ((!peer->m_prefers_headers &&
8759 (!state.m_requested_hb_cmpctblocks ||
8760 peer->m_blocks_for_headers_relay.size() > 1)) ||
8761 peer->m_blocks_for_headers_relay.size() >
8766 ProcessBlockAvailability(pto->
GetId());
8768 if (!fRevertToInv) {
8769 bool fFoundStartingHeader =
false;
8773 for (
const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8779 fRevertToInv =
true;
8782 if (pBestIndex !=
nullptr && pindex->
pprev != pBestIndex) {
8793 fRevertToInv =
true;
8796 pBestIndex = pindex;
8797 if (fFoundStartingHeader) {
8800 }
else if (PeerHasHeader(&state, pindex)) {
8803 }
else if (pindex->
pprev ==
nullptr ||
8804 PeerHasHeader(&state, pindex->
pprev)) {
8807 fFoundStartingHeader =
true;
8812 fRevertToInv =
true;
8817 if (!fRevertToInv && !vHeaders.empty()) {
8818 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8823 "%s sending header-and-ids %s to peer=%d\n",
8824 __func__, vHeaders.front().GetHash().ToString(),
8827 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8829 LOCK(m_most_recent_block_mutex);
8830 if (m_most_recent_block_hash ==
8832 cached_cmpctblock_msg =
8834 *m_most_recent_compact_block);
8837 if (cached_cmpctblock_msg.has_value()) {
8839 std::move(cached_cmpctblock_msg.value()));
8843 block, *pBestIndex)};
8850 state.pindexBestHeaderSent = pBestIndex;
8851 }
else if (peer->m_prefers_headers) {
8852 if (vHeaders.size() > 1) {
8854 "%s: %u headers, range (%s, %s), to peer=%d\n",
8855 __func__, vHeaders.size(),
8856 vHeaders.front().GetHash().ToString(),
8857 vHeaders.back().GetHash().ToString(),
8861 "%s: sending header %s to peer=%d\n", __func__,
8862 vHeaders.front().GetHash().ToString(),
8866 state.pindexBestHeaderSent = pBestIndex;
8868 fRevertToInv =
true;
8875 if (!peer->m_blocks_for_headers_relay.empty()) {
8877 peer->m_blocks_for_headers_relay.back();
8888 "Announcing block %s not on main chain (tip=%s)\n",
8897 if (!PeerHasHeader(&state, pindex)) {
8898 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8900 "%s: sending inv peer=%d hash=%s\n", __func__,
8905 peer->m_blocks_for_headers_relay.clear();
8912 std::vector<CInv> vInv;
8913 auto addInvAndMaybeFlush = [&](uint32_t type,
const uint256 &hash) {
8914 vInv.emplace_back(type, hash);
8925 LOCK(peer->m_block_inv_mutex);
8927 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
8933 for (
const BlockHash &hash : peer->m_blocks_for_inv_relay) {
8936 peer->m_blocks_for_inv_relay.clear();
8939 auto computeNextInvSendTime =
8940 [&](std::chrono::microseconds &next)
8944 if (next < current_time) {
8945 fSendTrickle =
true;
8947 next = NextInvToInbounds(
8952 next = current_time;
8956 return fSendTrickle;
8960 if (peer->m_proof_relay !=
nullptr) {
8961 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
8963 if (computeNextInvSendTime(
8964 peer->m_proof_relay->m_next_inv_send_time)) {
8966 peer->m_proof_relay->m_proof_inventory_to_send.begin();
8968 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
8971 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
8974 if (peer->m_proof_relay->m_proof_inventory_known_filter
8975 .contains(proofid)) {
8979 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
8982 peer->m_proof_relay->m_recently_announced_proofs.insert(
8988 if (
auto tx_relay = peer->GetTxRelay()) {
8989 LOCK(tx_relay->m_tx_inventory_mutex);
8991 const bool fSendTrickle =
8992 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
8997 LOCK(tx_relay->m_bloom_filter_mutex);
8998 if (!tx_relay->m_relay_txs) {
8999 tx_relay->m_tx_inventory_to_send.clear();
9004 if (fSendTrickle && tx_relay->m_send_mempool) {
9005 auto vtxinfo = m_mempool.
infoAll();
9006 tx_relay->m_send_mempool =
false;
9008 tx_relay->m_fee_filter_received.load()};
9010 LOCK(tx_relay->m_bloom_filter_mutex);
9012 for (
const auto &txinfo : vtxinfo) {
9013 const TxId &txid = txinfo.tx->GetId();
9014 tx_relay->m_tx_inventory_to_send.erase(txid);
9017 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9020 if (tx_relay->m_bloom_filter &&
9021 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9025 tx_relay->m_tx_inventory_known_filter.insert(txid);
9028 addInvAndMaybeFlush(
MSG_TX, txid);
9030 tx_relay->m_last_mempool_req =
9031 std::chrono::duration_cast<std::chrono::seconds>(
9038 std::vector<std::set<TxId>::iterator> vInvTx;
9039 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
9040 for (std::set<TxId>::iterator it =
9041 tx_relay->m_tx_inventory_to_send.begin();
9042 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
9043 vInvTx.push_back(it);
9046 tx_relay->m_fee_filter_received.load()};
9051 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
9052 std::make_heap(vInvTx.begin(), vInvTx.end(),
9053 compareInvMempoolOrder);
9057 unsigned int nRelayedTransactions = 0;
9058 LOCK(tx_relay->m_bloom_filter_mutex);
9059 while (!vInvTx.empty() &&
9064 std::pop_heap(vInvTx.begin(), vInvTx.end(),
9065 compareInvMempoolOrder);
9066 std::set<TxId>::iterator it = vInvTx.back();
9068 const TxId txid = *it;
9070 tx_relay->m_tx_inventory_to_send.erase(it);
9072 if (tx_relay->m_tx_inventory_known_filter.contains(txid) &&
9073 tx_relay->m_avalanche_stalled_txids.count(txid) == 0) {
9077 auto txinfo = m_mempool.
info(txid);
9083 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9086 if (tx_relay->m_bloom_filter &&
9087 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9092 tx_relay->m_recently_announced_invs.insert(txid);
9093 addInvAndMaybeFlush(
MSG_TX, txid);
9094 nRelayedTransactions++;
9095 tx_relay->m_tx_inventory_known_filter.insert(txid);
9096 tx_relay->m_avalanche_stalled_txids.erase(txid);
9102 if (!vInv.empty()) {
9109 CNodeState &state = *State(pto->
GetId());
9112 auto stalling_timeout = m_block_stalling_timeout.load();
9113 if (state.m_stalling_since.count() &&
9114 state.m_stalling_since < current_time - stalling_timeout) {
9119 LogPrintf(
"Peer=%d is stalling block download, disconnecting\n",
9124 const auto new_timeout =
9126 if (stalling_timeout != new_timeout &&
9127 m_block_stalling_timeout.compare_exchange_strong(
9128 stalling_timeout, new_timeout)) {
9131 "Increased stalling timeout temporarily to %d seconds\n",
9143 if (state.vBlocksInFlight.size() > 0) {
9144 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
9145 int nOtherPeersWithValidatedDownloads =
9146 m_peers_downloading_from - 1;
9148 state.m_downloading_since +
9149 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
9152 nOtherPeersWithValidatedDownloads)) {
9153 LogPrintf(
"Timeout downloading block %s from peer=%d, "
9155 queuedBlock.pindex->GetBlockHash().ToString(),
9163 if (state.fSyncStarted &&
9164 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
9167 if (current_time > peer->m_headers_sync_timeout &&
9168 nSyncStarted == 1 &&
9169 (m_num_preferred_download_peers -
9170 state.fPreferredDownload >=
9179 LogPrintf(
"Timeout downloading headers from peer=%d, "
9185 LogPrintf(
"Timeout downloading headers from noban "
9186 "peer=%d, not disconnecting\n",
9192 state.fSyncStarted =
false;
9194 peer->m_headers_sync_timeout = 0us;
9200 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
9206 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
9209 std::vector<CInv> vGetData;
9217 CNodeState &state = *State(pto->
GetId());
9219 if (CanServeBlocks(*peer) &&
9220 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
9223 std::vector<const CBlockIndex *> vToDownload;
9225 auto get_inflight_budget = [&state]() {
9228 static_cast<int>(state.vBlocksInFlight.size()));
9234 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload,
9237 !IsLimitedPeer(*peer)) {
9243 m_chainman.GetSnapshotBaseBlock());
9245 TryDownloadingHistoricalBlocks(
9246 *peer, get_inflight_budget(), vToDownload, from_tip,
9247 Assert(m_chainman.GetSnapshotBaseBlock()));
9251 BlockRequested(config, pto->
GetId(), *pindex);
9256 if (state.vBlocksInFlight.empty() && staller != -1) {
9257 if (State(staller)->m_stalling_since == 0us) {
9258 State(staller)->m_stalling_since = current_time;
9265 auto addGetDataAndMaybeFlush = [&](uint32_t type,
const uint256 &hash) {
9266 CInv inv(type, hash);
9269 vGetData.push_back(std::move(inv));
9280 LOCK(cs_proofrequest);
9281 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
9283 m_proofrequest.GetRequestable(pto->
GetId(), current_time, &expired);
9284 for (
const auto &entry : expired) {
9286 "timeout of inflight proof %s from peer=%d\n",
9287 entry.second.ToString(), entry.first);
9289 for (
const auto &proofid : requestable) {
9290 if (!AlreadyHaveProof(proofid)) {
9292 m_proofrequest.RequestedData(
9293 pto->
GetId(), proofid,
9300 m_proofrequest.ForgetInvId(proofid);
9310 std::vector<std::pair<NodeId, TxId>> expired;
9312 m_txrequest.GetRequestable(pto->
GetId(), current_time, &expired);
9313 for (
const auto &entry : expired) {
9315 entry.second.ToString(), entry.first);
9317 for (
const TxId &txid : requestable) {
9321 if (!AlreadyHaveTx(txid,
false)) {
9322 addGetDataAndMaybeFlush(
MSG_TX, txid);
9323 m_txrequest.RequestedData(
9330 m_txrequest.ForgetInvId(txid);
9334 if (!vGetData.empty()) {
9339 MaybeSendFeefilter(*pto, *peer, current_time);
9343bool PeerManagerImpl::ReceivedAvalancheProof(
CNode &
node, Peer &peer,
9345 assert(proof !=
nullptr);
9349 AddKnownProof(peer, proofid);
9361 return node.m_avalanche_pubkey.has_value());
9362 auto saveProofIfStaker = [
this, isStaker](
const CNode &
node,
9364 const NodeId nodeid) ->
bool {
9376 LOCK(cs_proofrequest);
9377 m_proofrequest.ReceivedResponse(nodeid, proofid);
9379 if (AlreadyHaveProof(proofid)) {
9380 m_proofrequest.ForgetInvId(proofid);
9381 saveProofIfStaker(
node, proofid, nodeid);
9391 return pm.registerProof(proof, state);
9393 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9394 RelayProof(proofid);
9396 node.m_last_proof_time = GetTime<std::chrono::seconds>();
9399 nodeid, proofid.ToString());
9421 "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9422 state.
IsValid() ?
"not-worth-polling"
9424 nodeid, proofid.ToString());
9427 saveProofIfStaker(
node, proofid, nodeid);
bool MoneyRange(const Amount nValue)
static constexpr Amount MAX_MONEY
No amount larger than this (in satoshi) is valid.
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
@ CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ SCRIPTS
Scripts & signatures ok.
@ TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
arith_uint256 GetBlockProof(const CBlockIndex &block)
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params ¶ms)
Return the time it would take to redo the work difference between from and to, assuming the current h...
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
#define Assert(val)
Identity function.
#define Assume(val)
Assume is the identity function.
Stochastic address manager.
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
void Good(const CService &addr, bool test_before_evict=true, NodeSeconds time=Now< NodeSeconds >())
Mark an entry as accessible, possibly moving it from "new" to "tried".
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
void Discourage(const CNetAddr &net_addr)
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint32_t > indices
A CService with information about it as peer.
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
static constexpr SerParams V1_NETWORK
NodeSeconds nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
static constexpr SerParams V2_NETWORK
size_t BlockTxCount() const
std::vector< CTransactionRef > vtx
The block chain is a tree shaped structure starting with the genesis block at the root,...
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
CBlockIndex * pprev
pointer to the index of the predecessor of this block
CBlockHeader GetBlockHeader() const
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
int64_t GetBlockTime() const
unsigned int nTx
Number of transactions in this block.
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
BlockHash GetBlockHash() const
int nHeight
height of the entry in the chain. The genesis block has height 0
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
An in-memory indexed chain of blocks.
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
int Height() const
Return the maximal height in the chain.
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
const CBlock & GenesisBlock() const
const Consensus::Params & GetConsensus() const
CCoinsView that adds a memory cache for transactions to another CCoinsView.
CCoinsView that brings transactions from a mempool into view.
void ForEachNode(const NodeFn &func)
bool OutboundTargetReached(bool historicalBlockServingLimit) const
check if the outbound target is reached.
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
bool GetNetworkActive() const
bool GetTryNewOutboundPeer() const
void SetTryNewOutboundPeer(bool flag)
int GetExtraBlockRelayCount() const
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
void StartExtraBlockRelayPeers()
bool DisconnectNode(const std::string &node)
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
int GetExtraFullOutboundCount() const
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network) const
Return all or many randomly selected addresses, optionally by network.
bool CheckIncomingNonce(uint64_t nonce)
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
bool GetUseAddrmanOutgoing() const
Fee rate in satoshis per kilobyte: Amount / kB.
Amount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Inv(ventory) message data.
bool IsMsgCmpctBlk() const
std::string ToString() const
bool IsMsgStakeContender() const
bool IsMsgFilteredBlk() const
void TransactionInvalidated(const CTransactionRef &tx, std::shared_ptr< const std::vector< Coin > > spent_coins)
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
static constexpr SerParams V1
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Transport protocol agnostic message container.
Information about a peer.
Mutex cs_avalanche_pubkey
bool IsFeelerConn() const
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
bool ExpectServicesFromConn() const
std::atomic< int > nVersion
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
bool IsInboundConn() const
bool HasPermission(NetPermissionFlags permission) const
bool IsOutboundOrBlockRelayConn() const
bool IsManualConn() const
std::atomic< int64_t > nTimeOffset
const std::string m_addr_name
std::string ConnectionTypeAsString() const
void SetCommonVersion(int greatest_common_version)
std::atomic< bool > m_bip152_highbandwidth_to
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
std::atomic< bool > m_bip152_highbandwidth_from
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
std::atomic_bool fSuccessfullyConnected
bool IsAddrFetchConn() const
uint64_t GetLocalNonce() const
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
bool IsBlockOnlyConn() const
int GetCommonVersion() const
bool IsFullOutboundConn() const
uint64_t nRemoteHostNonce
Mutex m_subver_mutex
cleanSubVer is a sanitized string of the user agent byte array we read from the wire.
std::atomic_bool fPauseSend
std::chrono::seconds m_nextGetAvaAddr
uint64_t nRemoteExtraEntropy
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
uint64_t GetLocalExtraEntropy() const
SteadyMilliseconds m_last_poll
double getAvailabilityScore() const
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
void updateAvailabilityScore(double decayFactor)
The availability score is calculated using an exponentially weighted average.
std::atomic< std::chrono::seconds > m_avalanche_last_message_fault
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e.
std::atomic< int > m_avalanche_message_fault_counter
How much faulty messages did this node accumulate.
std::atomic< bool > m_avalanche_enabled
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
std::atomic_bool fDisconnect
std::atomic< int > m_avalanche_message_fault_score
This score is incremented for every new faulty message received when m_avalanche_message_fault_counte...
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
void invsVoted(uint32_t count)
The node voted for count invs.
bool IsAvalancheOutboundConnection() const
An encapsulated public key.
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Simple class for background tasks that should be run periodically or once "after a while".
void scheduleEvery(Predicate p, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat p until it return false.
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
A combination of a network address (CNetAddr) and a (TCP) port.
std::vector< uint8_t > GetKey() const
std::string ToStringAddrPort() const
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
std::set< std::reference_wrapper< const CTxMemPoolEntryRef >, CompareIteratorById > Parents
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs)
void RemoveUnbroadcastTx(const TxId &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
CFeeRate GetMinFee() const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
void removeRecursive(const CTransaction &tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs)
bool CompareTopologically(const TxId &txida, const TxId &txidb) const
TxMempoolInfo info(const TxId &txid) const
size_t DynamicMemoryUsage() const
bool setAvalancheFinalized(const CTxMemPoolEntryRef &tx, const Consensus::Params ¶ms, const CBlockIndex &active_chain_tip, std::vector< TxId > &finalizedTxIds) EXCLUSIVE_LOCKS_REQUIRED(bool isAvalancheFinalizedPreConsensus(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
std::vector< TxMempoolInfo > infoAll() const
CTransactionRef GetConflictTx(const COutPoint &prevout) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Get the transaction in the pool that spends the same prevout.
bool exists(const TxId &txid) const
std::set< TxId > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
auto withOrphanage(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_orphanage)
const CFeeRate m_min_relay_feerate
auto withConflicting(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_conflicting)
void removeForFinalizedBlock(const std::unordered_set< TxId, SaltedTxIdHasher > &confirmedTxIdsInNonFinalizedBlocks) EXCLUSIVE_LOCKS_REQUIRED(cs)
unsigned long size() const
std::optional< txiter > GetIter(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given txid, if found.
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockConnected(ChainstateRole role, const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block, avalanche::Processor *const avalanche=nullptr) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr, const std::optional< CCheckpointData > &test_checkpoints=std::nullopt) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
Check to see if caches are out of balance and if so, call ResizeCoinsCaches() as needed.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
virtual uint64_t GetMaxBlockSize() const =0
Double ended buffer combining vector and stream-like interfaces.
void ignore(size_t num_ignore)
uint64_t rand64() noexcept
Generate a random 64-bit integer.
Reads data from an underlying stream, while hashing the read data.
A writer stream (for serialization) that computes a 256-bit hash.
size_t Count(NodeId peer) const
Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined).
size_t CountInFlight(NodeId peer) const
Count how many REQUESTED announcements a peer has.
Interface for message handling.
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual bool SendMessages(const Config &config, CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void InitializeNode(const Config &config, CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
virtual void FinalizeNode(const Config &config, const CNode &node)=0
Handle removal of a peer (clear state)
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< CTransactionRef > &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
virtual std::optional< std::string > FetchBlock(const Config &config, NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual void SendPings()=0
Send ping message to all peers.
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, avalanche::Processor *const avalanche, Options opts)
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
virtual void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, DataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UnitTestMisbehaving(const NodeId peer_id)=0
Public for unit testing.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
static RCUPtr make(Args &&...args)
Construct a new object that is owned by the pointer.
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
A Span is an object that can refer to a contiguous sequence of objects.
int EraseTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase a tx by txid.
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs announced by a peer (eg, after that peer disconnects)
std::vector< CTransactionRef > GetChildrenFromSamePeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx and were received from nodeid.
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new transaction to the pool.
unsigned int LimitTxs(unsigned int max_txs, FastRandomContext &rng) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the txs to the given maximum.
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs included in or invalidated by a new block.
std::vector< CTransactionRef > GetConflictTxs(const CTransactionRef &tx) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any tx that list a particular tx as a parent into the from peer's work set.
std::vector< std::pair< CTransactionRef, NodeId > > GetChildrenFromDifferentPeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx but were not received from nodeid.
std::string GetRejectReason() const
std::string ToString() const
256-bit unsigned big integer.
const std::vector< PrefilledProof > & getPrefilledProofs() const
uint64_t getShortID(const ProofId &proofid) const
const std::vector< uint64_t > & getShortIDs() const
ProofId getProofId() const
bool verify(DelegationState &state, CPubKey &auth) const
const DelegationId & getId() const
const LimitedProofId & getLimitedProofId() const
bool shouldRequestMoreNodes()
Returns true if we encountered a lack of node since the last call.
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
bool forPeer(const ProofId &proofid, Callable &&func) const
bool addNode(NodeId nodeid, const ProofId &proofid)
Node API.
void removeUnbroadcastProof(const ProofId &proofid)
const ProofRadixTree & getShareableProofsSnapshot() const
bool isBoundToPeer(const ProofId &proofid) const
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
void forEachPeer(Callable &&func) const
void setInvalid(const ProofId &proofid)
bool isInvalid(const ProofId &proofid) const
bool isImmature(const ProofId &proofid) const
auto getUnbroadcastProofs() const
bool isInConflictingPool(const ProofId &proofid) const
void sendResponse(CNode *pfrom, Response response) const
bool addToReconcile(const AnyVoteItem &item) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
bool isStakingPreconsensusActivated(const CBlockIndex *pprev) const
int64_t getAvaproofsNodeCounter() const
bool sendHello(CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
Send a avahello message.
void setRecentlyFinalized(const uint256 &itemId) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
void cleanupStakingRewards(const int minHeight) EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards
ProofRef getLocalProof() const
void acceptStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
bool reconcileOrFinalize(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Wrapper around the addToReconcile for proofs that adds back the finalization flag to the peer if it i...
int getStakeContenderStatus(const StakeContenderId &contenderId) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Track votes on stake contenders.
void sendDelayedAvahello() EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
void finalizeStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
bool isPreconsensusActivated(const CBlockIndex *pprev) const
auto withPeerManager(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
bool registerVotes(NodeId nodeid, const Response &response, std::vector< VoteItemUpdate > &updates, bool &disconnect, std::string &error) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
void rejectStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
std::vector< uint32_t > indices
std::string ToString() const
std::string GetHex() const
Generate a new block, without valid proof-of-work.
bool ReadRawBlock(std::vector< uint8_t > &block, const FlatFilePos &pos) const
CBlockIndex * LookupBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
bool IsPruneMode() const
Whether running in -prune mode.
bool ReadBlock(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
static const uint256 ZERO
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_CHILD_BEFORE_PARENT
This tx outputs are already spent in the mempool.
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/etc limits
@ TX_PACKAGE_RECONSIDERABLE
fails some policy, but might be acceptable if submitted in a (different) package
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_DUPLICATE
Tx already in mempool or in the chain.
@ TX_INPUTS_NOT_STANDARD
inputs failed policy rules
@ TX_CONFLICT
Tx conflicts with a finalized tx, i.e.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_AVALANCHE_RECONSIDERABLE
fails some policy, but might be reconsidered by avalanche voting
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
static size_t RecursiveDynamicUsage(const CScript &script)
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
std::array< uint8_t, CPubKey::SCHNORR_SIZE > SchnorrSig
a Schnorr signature
#define LogPrintLevel(category, level,...)
#define LogPrint(category,...)
#define LogDebug(category,...)
CSerializedNetMsg Make(std::string msg_type, Args &&...args)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
const char * AVAPROOFSREQ
Request for missing avalanche proofs after an avaproofs message has been processed.
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
const char * BLOCK
The block message transmits a single serialized block.
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
const char * AVAPROOFS
The avaproofs message the proof short ids of all the valid proofs that we know.
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
const char * GETAVAPROOFS
The getavaproofs message requests an avaproofs message that provides the proof short ids of all the v...
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
const char * GETAVAADDR
The getavaaddr message requests an addr message from the receiving node, containing IP addresses of t...
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
const char * TX
The tx message transmits a single transaction.
const char * AVAHELLO
Contains a delegation and a signature.
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
const char * AVARESPONSE
Contains an avalanche::Response.
const char * GETDATA
The getdata message requests one or more data objects from another node.
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
const char * BLOCKTXN
Contains a BlockTransactions.
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
const char * AVAPOLL
Contains an avalanche::Poll.
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
const char * AVAPROOF
Contains an avalanche::Proof.
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
ShortIdProcessor< PrefilledProof, ShortIdProcessorPrefilledProofAdapter, ProofRefCompare > ProofShortIdProcessor
std::variant< const ProofRef, const CBlockIndex *, const StakeContenderId, const CTransactionRef > AnyVoteItem
RCUPtr< const Proof > ProofRef
Implement std::hash so RCUPtr can be used as a key for maps or sets.
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const uint8_t > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
std::string userAgent(const Config &config)
bool IsReachable(enum Network net)
bool SeenLocal(const CService &addr)
vote for a local address
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
@ BypassProofRequestLimits
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static constexpr size_t MAX_AVALANCHE_STALLED_TXIDS_PER_PEER
Maximum number of stalled avalanche txids to store per peer.
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto GETAVAADDR_INTERVAL
Minimum time between 2 successives getavaaddr messages from the same peer.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically relayed before uncondi...
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche, const avalanche::ProofId &id)
Decide a response for an Avalanche poll about the given proof.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
bool IsAvalancheMessageType(const std::string &msg_type)
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain.
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement, current time plus delays for:
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr DataRequestParameters TX_REQUEST_PARAMS
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT
If no proof was requested from a compact proof message after this timeout expired,...
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed.
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static bool TooManyAnnouncements(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams)
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
bool IsProxy(const CNetAddr &addr)
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
uint256 GetPackageHash(const Package &package)
std::vector< CTransactionRef > Package
A package is an ordered list of transactions.
static constexpr Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
std::shared_ptr< const CTransaction > CTransactionRef
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL
Maximum item that can be polled at once.
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services),...
@ MSG_AVA_STAKE_CONTENDER
@ MSG_CMPCT_BLOCK
Defined in BIP152.
ServiceFlags
nServices flags.
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
reverse_range< T > reverse_iterate(T &x)
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
void Unserialize(Stream &, V)=delete
#define LIMITED_STRING(obj, n)
static auto WithParams(const Params ¶ms, T &&t)
Return a wrapper around t that (de)serializes it with specified parameter params.
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) uint8_t member types only.
static const double AVALANCHE_STATISTICS_DECAY_FACTOR
Pre-computed decay factor for the avalanche statistics computation.
static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD
Refresh period for the avalanche statistics computation.
std::string ToString(const T &t)
Locale-independent version of std::to_string.
static constexpr Amount zero() noexcept
A BlockHash is a unqiue identifier for a block.
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
std::vector< BlockHash > vHave
std::chrono::microseconds m_ping_wait
Amount m_fee_filter_received
std::vector< int > vHeightInFlight
bool m_addr_relay_enabled
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
ServiceFlags their_services
Parameters that influence chain consensus.
int64_t nPowTargetSpacing
std::chrono::seconds PowTargetSpacing() const
const std::chrono::seconds overloaded_peer_delay
How long to delay requesting data from overloaded peers (see max_peer_request_in_flight).
const size_t max_peer_announcements
Maximum number of inventories to consider for requesting, per peer.
const std::chrono::seconds nonpref_peer_delay
How long to delay requesting data from non-preferred peers.
const NetPermissionFlags bypass_request_limits_permissions
Permission flags a peer requires to bypass the request limits tracking limits and delay penalty.
const std::chrono::microseconds getdata_interval
How long to wait (in microseconds) before a data request from an additional peer.
const size_t max_peer_request_in_flight
Maximum number of in-flight data requests from a peer.
Validation result for a transaction evaluated by MemPoolAccept (single or package).
const ResultType m_result_type
Result type.
const TxValidationState m_state
Contains information about why the transaction failed.
@ MEMPOOL_ENTRY
Valid, transaction was already in the mempool.
@ VALID
Fully validated, valid.
static time_point now() noexcept
Return current system time or mocked time, if set.
std::chrono::time_point< NodeClock > time_point
Validation result for package mempool acceptance.
PackageValidationState m_state
std::map< TxId, MempoolAcceptResult > m_tx_results
Map from txid to finished MempoolAcceptResults.
This is a radix tree storing values identified by a unique key.
A TxId is the identifier of a transaction.
std::chrono::seconds registration_time
const ProofId & getProofId() const
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
#define AssertLockNotHeld(cs)
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#define LOCKS_EXCLUDED(...)
#define NO_THREAD_SAFETY_ANALYSIS
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
constexpr int64_t count_microseconds(std::chrono::microseconds t)
constexpr int64_t count_seconds(std::chrono::seconds t)
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
NodeClock::time_point GetAdjustedTime()
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
#define TRACE6(context, event, a, b, c, d, e, f)
@ AVALANCHE
Removed by avalanche vote.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check with the proof of work on each blockheader matches the value in nBits.
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept)
Validate (and maybe submit) a package to the mempool.
bool IsBlockMutated(const CBlock &block)
Check if a block has been mutated (with respect to its merkle root).
std::vector< Coin > GetSpentCoins(const CTransactionRef &ptx, const CCoinsViewCache &coins_view)
Get the coins spent by ptx from the coins_view.
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
CMainSignals & GetMainSignals()