Bitcoin ABC 0.33.3 - net_processing.cpp
1// Copyright (c) 2009-2010 Satoshi Nakamoto
2// Copyright (c) 2009-2016 The Bitcoin Core developers
3// Distributed under the MIT software license, see the accompanying
4// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5
6#include <net_processing.h>
7
8#include <addrman.h>
11#include <avalanche/processor.h>
12#include <avalanche/proof.h>
16#include <banman.h>
17#include <blockencodings.h>
18#include <blockfilter.h>
19#include <blockvalidity.h>
20#include <chain.h>
21#include <chainparams.h>
22#include <config.h>
23#include <consensus/amount.h>
25#include <hash.h>
26#include <headerssync.h>
28#include <invrequest.h>
29#include <kernel/chain.h>
31#include <merkleblock.h>
32#include <netbase.h>
33#include <netmessagemaker.h>
34#include <node/blockstorage.h>
35#include <node/miner.h>
36#include <policy/fees.h>
37#include <policy/policy.h>
38#include <policy/settings.h>
39#include <primitives/block.h>
41#include <random.h>
42#include <reverse_iterator.h>
43#include <scheduler.h>
44#include <streams.h>
45#include <timedata.h>
46#include <tinyformat.h>
47#include <txmempool.h>
48#include <txorphanage.h>
49#include <util/check.h>
50#include <util/strencodings.h>
51#include <util/trace.h>
52#include <validation.h>
53
54#include <boost/multi_index/hashed_index.hpp>
55#include <boost/multi_index/member.hpp>
56#include <boost/multi_index/ordered_index.hpp>
57#include <boost/multi_index_container.hpp>
58
59#include <algorithm>
60#include <atomic>
61#include <chrono>
62#include <functional>
63#include <future>
64#include <memory>
65#include <numeric>
66#include <typeinfo>
67#include <utility>
68
73static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
78static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
79static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
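
// Illustrative sketch (not part of net_processing.cpp): how a base-plus-per-header
// timeout composes. The exact expression used when headers are requested is not
// reproduced here; this only demonstrates the arithmetic of the two constants
// above, assuming the expected header count is estimated elsewhere (e.g. from
// the time since our best known header).
#include <chrono>
#include <cstdint>

static std::chrono::microseconds
ExampleHeadersDownloadTimeout(int64_t expected_headers) {
    using namespace std::chrono_literals;
    constexpr auto base = 15min;     // HEADERS_DOWNLOAD_TIMEOUT_BASE
    constexpr auto per_header = 1ms; // HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
    // e.g. 800'000 expected headers -> 15min + 800s, roughly 28 minutes
    return std::chrono::duration_cast<std::chrono::microseconds>(
        base + expected_headers * per_header);
}
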
81static constexpr auto HEADERS_RESPONSE_TIME{2min};
88static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
90static constexpr auto STALE_CHECK_INTERVAL{10min};
92static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
97static constexpr auto MINIMUM_CONNECT_TIME{30s};
99static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
102static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
105static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
109static constexpr auto PING_INTERVAL{2min};
111static const unsigned int MAX_LOCATOR_SZ = 101;
113static const unsigned int MAX_INV_SZ = 50000;
114static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
115 "Max protocol message length must be greater than largest "
116 "possible INV message");
117
119static constexpr auto GETAVAADDR_INTERVAL{2min};
120
125static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};
126
128static constexpr size_t MAX_AVALANCHE_STALLED_TXIDS_PER_PEER{100};
129
struct DataRequestParameters {
    /** Maximum number of simultaneous in-flight requests allowed from a peer. */
    const size_t max_peer_request_in_flight;

    /** Maximum number of announcements tracked for a peer. */
    const size_t max_peer_announcements;

    /** Extra delay applied to requests sent to non-preferred peers. */
    const std::chrono::seconds nonpref_peer_delay;

    /** Extra delay applied when the peer already has
     *  max_peer_request_in_flight (or more) requests in flight. */
    const std::chrono::seconds overloaded_peer_delay;

    /** How long an outstanding request may remain unanswered before another
     *  peer is given a chance to serve it. */
    const std::chrono::microseconds getdata_interval;

    /** Permission flags that allow a peer to bypass the request limits. */
    const NetPermissionFlags bypass_request_limits_permissions;
};

static constexpr DataRequestParameters TX_REQUEST_PARAMS{
    100,                       // max_peer_request_in_flight
    5000,                      // max_peer_announcements
    std::chrono::seconds(2),   // nonpref_peer_delay
    std::chrono::seconds(2),   // overloaded_peer_delay
    std::chrono::seconds(60),  // getdata_interval
    NetPermissionFlags::Relay, // bypass_request_limits_permissions
};

static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
    100,                      // max_peer_request_in_flight
    5000,                     // max_peer_announcements
    std::chrono::seconds(2),  // nonpref_peer_delay
    std::chrono::seconds(2),  // overloaded_peer_delay
    std::chrono::seconds(60), // getdata_interval
    BypassProofRequestLimits, // bypass_request_limits_permissions
};
188
193static const unsigned int MAX_GETDATA_SZ = 1000;
197static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
203static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
205static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
210static const int MAX_CMPCTBLOCK_DEPTH = 5;
215static const int MAX_BLOCKTXN_DEPTH = 10;
217 "MAX_BLOCKTXN_DEPTH too high");
225static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
230static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
234static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
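
// Illustrative sketch (not part of net_processing.cpp): the two factors above
// are expressed in multiples of the block interval. Assuming a 10 minute
// target spacing, a lone downloading peer gets roughly 10 minutes before the
// request times out, and each additional peer we are downloading validated
// blocks from adds roughly 5 minutes; the precise count used by the caller is
// not shown in this file, and the parameter name below is hypothetical.
#include <chrono>

static std::chrono::seconds
ExampleBlockDownloadTimeout(std::chrono::seconds target_spacing,
                            int other_peers_downloading) {
    const double multiplier =
        1.0 /* BLOCK_DOWNLOAD_TIMEOUT_BASE */ +
        0.5 /* BLOCK_DOWNLOAD_TIMEOUT_PER_PEER */ * other_peers_downloading;
    // 10min spacing and 2 other downloading peers -> 10 * (1 + 0.5 * 2) = 20min
    return std::chrono::duration_cast<std::chrono::seconds>(target_spacing *
                                                            multiplier);
}
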
239static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
241static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
245static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
249static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
251static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
256static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
261static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
263static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
267static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
276 std::chrono::seconds{1},
277 "INVENTORY_RELAY_MAX too low");
278
282static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
286static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
291static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
296static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
301static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
306static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
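
// Illustrative sketch (not part of net_processing.cpp): the rate limit above
// is enforced with a token bucket (see Peer::m_addr_token_bucket further
// down): tokens refill at MAX_ADDR_RATE_PER_SECOND and each processed address
// spends one. The bucket capacity and the refill site used here are
// assumptions of this sketch, not values taken from this file.
#include <algorithm>
#include <chrono>
#include <cstddef>

struct ExampleAddrTokenBucket {
    double tokens{1.0};
    double capacity{1000.0}; // hypothetical cap on bursts

    // Refill for the elapsed time, then accept at most one address per token.
    // Returns how many of n_addrs pass the rate limit.
    size_t Process(size_t n_addrs, std::chrono::microseconds elapsed,
                   double rate_per_second = 0.1 /* MAX_ADDR_RATE_PER_SECOND */) {
        tokens = std::min(
            capacity,
            tokens + rate_per_second *
                         std::chrono::duration<double>(elapsed).count());
        size_t accepted = 0;
        while (accepted < n_addrs && tokens >= 1.0) {
            tokens -= 1.0;
            ++accepted;
        }
        return accepted;
    }
};
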
314static constexpr uint64_t CMPCTBLOCKS_VERSION{1};
315
316// Internal stuff
317namespace {
321struct QueuedBlock {
326 const CBlockIndex *pindex;
328 std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
329};
330
331struct StalledTxId {
332 TxId txid;
333 std::chrono::seconds timeAdded;
334
335 StalledTxId(TxId txid_, std::chrono::seconds timeAdded_)
336 : txid(txid_), timeAdded(timeAdded_){};
337};
338
339struct by_txid {};
340struct by_time {};
341
342using StalledTxIdSet = boost::multi_index_container<
343 StalledTxId,
344 boost::multi_index::indexed_by<
345 // sort by txid
346 boost::multi_index::hashed_unique<
347 boost::multi_index::tag<by_txid>,
348 boost::multi_index::member<StalledTxId, TxId, &StalledTxId::txid>,
350 // sort by timeAdded
351 boost::multi_index::ordered_non_unique<
352 boost::multi_index::tag<by_time>,
353 boost::multi_index::member<StalledTxId, std::chrono::seconds,
354 &StalledTxId::timeAdded>>>>;
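
// Illustrative sketch (not part of net_processing.cpp): how a container like
// StalledTxIdSet is typically used. The hashed index answers "is this txid
// tracked?" while the ordered-by-time index lets old entries be expired from
// the front. Simplified stand-in types (uint64_t ids) are used here instead of
// TxId.
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index_container.hpp>
#include <chrono>
#include <cstdint>

namespace stalled_example {
struct Entry {
    uint64_t id;
    std::chrono::seconds added;
};
struct by_id {};
struct by_added {};
using EntrySet = boost::multi_index_container<
    Entry,
    boost::multi_index::indexed_by<
        boost::multi_index::hashed_unique<
            boost::multi_index::tag<by_id>,
            boost::multi_index::member<Entry, uint64_t, &Entry::id>>,
        boost::multi_index::ordered_non_unique<
            boost::multi_index::tag<by_added>,
            boost::multi_index::member<Entry, std::chrono::seconds,
                                       &Entry::added>>>>;

// Drop entries added before `cutoff`, then check membership by id.
inline bool ExpireAndContains(EntrySet &set, std::chrono::seconds cutoff,
                              uint64_t id) {
    auto &by_time_index = set.get<by_added>();
    by_time_index.erase(by_time_index.begin(),
                        by_time_index.lower_bound(cutoff));
    return set.get<by_id>().count(id) > 0;
}
} // namespace stalled_example
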
355
369struct Peer {
371 const NodeId m_id{0};
372
388 const ServiceFlags m_our_services;
389
391 std::atomic<ServiceFlags> m_their_services{NODE_NONE};
392
394 Mutex m_misbehavior_mutex;
399 bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
400
402 Mutex m_block_inv_mutex;
408 std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
414 std::vector<BlockHash>
415 m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
416
423 BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};
424
426 std::atomic<int> m_starting_height{-1};
427
429 std::atomic<uint64_t> m_ping_nonce_sent{0};
431 std::atomic<std::chrono::microseconds> m_ping_start{0us};
433 std::atomic<bool> m_ping_queued{false};
434
442 Amount::zero()};
443 std::chrono::microseconds m_next_send_feefilter
445
446 struct TxRelay {
447 mutable RecursiveMutex m_bloom_filter_mutex;
456 bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
461 std::unique_ptr<CBloomFilter>
462 m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
463 GUARDED_BY(m_bloom_filter_mutex){nullptr};
464
466 CRollingBloomFilter m_recently_announced_invs GUARDED_BY(
468 0.000001};
469
470 mutable RecursiveMutex m_tx_inventory_mutex;
476 CRollingBloomFilter m_tx_inventory_known_filter
477 GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
483 std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
489 bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
491 std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
496 std::chrono::microseconds
497 m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};
498
503 std::atomic<Amount> m_fee_filter_received{Amount::zero()};
504
508 StalledTxIdSet
509 m_avalanche_stalled_txids GUARDED_BY(m_tx_inventory_mutex);
510 };
511
512 /*
513 * Initializes a TxRelay struct for this peer. Can be called at most once
514 * for a peer.
515 */
516 TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
517 LOCK(m_tx_relay_mutex);
518 Assume(!m_tx_relay);
519 m_tx_relay = std::make_unique<Peer::TxRelay>();
520 return m_tx_relay.get();
521 };
522
523 TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
524 return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
525 };
526 const TxRelay *GetTxRelay() const
527 EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
528 return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
529 };
530
531 struct ProofRelay {
532 mutable RecursiveMutex m_proof_inventory_mutex;
533 std::set<avalanche::ProofId>
534 m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
535 // Prevent sending proof invs if the peer already knows about them
536 CRollingBloomFilter m_proof_inventory_known_filter
537 GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
541 CRollingBloomFilter m_recently_announced_proofs GUARDED_BY(
543 0.000001};
544 std::chrono::microseconds m_next_inv_send_time{0};
545
547 sharedProofs;
548 std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
549 std::atomic<bool> compactproofs_requested{false};
550 };
551
556 const std::unique_ptr<ProofRelay> m_proof_relay;
557
561 std::vector<CAddress>
573 std::unique_ptr<CRollingBloomFilter>
591 std::atomic_bool m_addr_relay_enabled{false};
593 bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
595 mutable Mutex m_addr_send_times_mutex;
597 std::chrono::microseconds
598 m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
600 std::chrono::microseconds
601 m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
606 std::atomic_bool m_wants_addrv2{false};
608 bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
610 mutable Mutex m_addr_token_bucket_mutex;
615 double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
617 std::chrono::microseconds
618 m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
619 GetTime<std::chrono::microseconds>()};
621 std::atomic<uint64_t> m_addr_rate_limited{0};
626 std::atomic<uint64_t> m_addr_processed{0};
627
632 bool m_inv_triggered_getheaders_before_sync
634
636 Mutex m_getdata_requests_mutex;
638 std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
639
641 NodeClock::time_point m_last_getheaders_timestamp
643
645 Mutex m_headers_sync_mutex;
650 std::unique_ptr<HeadersSyncState>
651 m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex)
652 GUARDED_BY(m_headers_sync_mutex){};
653
655 std::atomic<bool> m_sent_sendheaders{false};
656
658 std::chrono::microseconds m_headers_sync_timeout
660
665 bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
666 false};
667
668 explicit Peer(NodeId id, ServiceFlags our_services, bool fRelayProofs)
669 : m_id(id), m_our_services{our_services},
670 m_proof_relay(fRelayProofs ? std::make_unique<ProofRelay>()
671 : nullptr) {}
672
673private:
674 mutable Mutex m_tx_relay_mutex;
675
677 std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
678};
679
680using PeerRef = std::shared_ptr<Peer>;
681
688struct CNodeState {
690 const CBlockIndex *pindexBestKnownBlock{nullptr};
692 BlockHash hashLastUnknownBlock{};
694 const CBlockIndex *pindexLastCommonBlock{nullptr};
696 const CBlockIndex *pindexBestHeaderSent{nullptr};
698 bool fSyncStarted{false};
701 std::chrono::microseconds m_stalling_since{0us};
702 std::list<QueuedBlock> vBlocksInFlight;
705 std::chrono::microseconds m_downloading_since{0us};
707 bool fPreferredDownload{false};
712 bool m_requested_hb_cmpctblocks{false};
714 bool m_provides_cmpctblocks{false};
715
742 struct ChainSyncTimeoutState {
745 std::chrono::seconds m_timeout{0s};
747 const CBlockIndex *m_work_header{nullptr};
749 bool m_sent_getheaders{false};
752 bool m_protect{false};
753 };
754
755 ChainSyncTimeoutState m_chain_sync;
756
758 int64_t m_last_block_announcement{0};
759
761 const bool m_is_inbound;
762
763 CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
764};
765
766class PeerManagerImpl final : public PeerManager {
767public:
768 PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
769 ChainstateManager &chainman, CTxMemPool &pool,
770 avalanche::Processor *const avalanche, Options opts);
771
774 const std::shared_ptr<const CBlock> &pblock,
775 const CBlockIndex *pindexConnected) override
776 EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
777 void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
778 const CBlockIndex *pindex) override
779 EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
780 void UpdatedBlockTip(const CBlockIndex *pindexNew,
781 const CBlockIndex *pindexFork,
782 bool fInitialDownload) override
783 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
784 void BlockChecked(const CBlock &block,
785 const BlockValidationState &state) override
786 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
787 void NewPoWValidBlock(const CBlockIndex *pindex,
788 const std::shared_ptr<const CBlock> &pblock) override
789 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
790
792 void InitializeNode(const Config &config, CNode &node,
793 ServiceFlags our_services) override
794 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
795 void FinalizeNode(const Config &config, const CNode &node) override
796 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest,
797 !m_headers_presync_mutex);
798 bool ProcessMessages(const Config &config, CNode *pfrom,
799 std::atomic<bool> &interrupt) override
800 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
801 !m_recent_confirmed_transactions_mutex,
802 !m_most_recent_block_mutex, !cs_proofrequest,
803 !m_headers_presync_mutex, g_msgproc_mutex);
804 bool SendMessages(const Config &config, CNode *pto) override
805 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
806 !m_recent_confirmed_transactions_mutex,
807 !m_most_recent_block_mutex, !cs_proofrequest,
808 g_msgproc_mutex);
809
811 void StartScheduledTasks(CScheduler &scheduler) override;
812 void CheckForStaleTipAndEvictPeers() override;
813 std::optional<std::string>
814 FetchBlock(const Config &config, NodeId peer_id,
815 const CBlockIndex &block_index) override;
816 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
817 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
818 bool IgnoresIncomingTxs() override { return m_opts.ignore_incoming_txs; }
819 void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
820 void RelayTransaction(const TxId &txid) override
821 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
822 void RelayProof(const avalanche::ProofId &proofid) override
823 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
824 void SetBestHeight(int height) override { m_best_height = height; };
825 void UnitTestMisbehaving(NodeId peer_id) override
826 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
827 Misbehaving(*Assert(GetPeerRef(peer_id)), "");
828 }
829 void ProcessMessage(const Config &config, CNode &pfrom,
830 const std::string &msg_type, DataStream &vRecv,
831 const std::chrono::microseconds time_received,
832 const std::atomic<bool> &interruptMsgProc) override
833 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
834 !m_recent_confirmed_transactions_mutex,
835 !m_most_recent_block_mutex, !cs_proofrequest,
836 !m_headers_presync_mutex, g_msgproc_mutex);
    void UpdateLastBlockAnnounceTime(NodeId node,
                                     int64_t time_in_seconds) override;
839
840private:
845 void ConsiderEviction(CNode &pto, Peer &peer,
846 std::chrono::seconds time_in_seconds)
847 EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
848
853 void EvictExtraOutboundPeers(std::chrono::seconds now)
855
860 void ReattemptInitialBroadcast(CScheduler &scheduler)
861 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
862
866 void UpdateAvalancheStatistics() const;
867
871 void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
872
877 PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
878
883 PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
884
889 void Misbehaving(Peer &peer, const std::string &message);
890
901 void MaybePunishNodeForBlock(NodeId nodeid,
902 const BlockValidationState &state,
903 bool via_compact_block,
904 const std::string &message = "")
905 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
906
911 void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
912 const std::string &message = "")
913 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
914
924 bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);
925
940 void ProcessInvalidTx(NodeId nodeid, const CTransactionRef &tx,
941 const TxValidationState &result,
942 bool maybe_add_extra_compact_tx)
943 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
944
945 struct PackageToValidate {
946 const Package m_txns;
947 const std::vector<NodeId> m_senders;
949 explicit PackageToValidate(const CTransactionRef &parent,
950 const CTransactionRef &child,
951 NodeId parent_sender, NodeId child_sender)
952 : m_txns{parent, child}, m_senders{parent_sender, child_sender} {}
953
954 std::string ToString() const {
955 Assume(m_txns.size() == 2);
956 return strprintf(
957 "parent %s (sender=%d) + child %s (sender=%d)",
958 m_txns.front()->GetId().ToString(), m_senders.front(),
959 m_txns.back()->GetId().ToString(), m_senders.back());
960 }
961 };
962
968 void ProcessPackageResult(const PackageToValidate &package_to_validate,
969 const PackageMempoolAcceptResult &package_result)
970 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
971
978 std::optional<PackageToValidate> Find1P1CPackage(const CTransactionRef &ptx,
979 NodeId nodeid)
980 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
981
987 void ProcessValidTx(NodeId nodeid, const CTransactionRef &tx)
988 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, cs_main);
989
1005 bool ProcessOrphanTx(const Config &config, Peer &peer)
1006 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
1007
1018 void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
1019 std::vector<CBlockHeader> &&headers,
1020 bool via_compact_block)
1021 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex,
1022 g_msgproc_mutex);
1023
1024 // Various helpers for headers processing, invoked by
1025 // ProcessHeadersMessage()
1030 bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
1031 const Consensus::Params &consensusParams, Peer &peer);
1033 arith_uint256 GetAntiDoSWorkThreshold();
1040 void HandleUnconnectingHeaders(CNode &pfrom, Peer &peer,
1041 const std::vector<CBlockHeader> &headers)
1042 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1044 bool
1045 CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
1065 bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
1066 std::vector<CBlockHeader> &headers)
1067 EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex,
1068 !m_headers_presync_mutex, g_msgproc_mutex);
1082 bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
1083 const CBlockIndex *chain_start_header,
1084 std::vector<CBlockHeader> &headers)
1085 EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex,
1086 !m_headers_presync_mutex, g_msgproc_mutex);
1087
1092 bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
1094
1100 bool MaybeSendGetHeaders(CNode &pfrom, const CBlockLocator &locator,
1101 Peer &peer)
1102 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1106 void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
1107 const CBlockIndex &last_header);
1109 void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
1110 const CBlockIndex &last_header,
1111 bool received_new_header,
1112 bool may_have_more_headers)
1113 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1114
1115 void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
1116 const BlockTransactionsRequest &req);
1117
1123 void AddTxAnnouncement(const CNode &node, const TxId &txid,
1124 std::chrono::microseconds current_time)
1126
1132 void
1133 AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
1134 std::chrono::microseconds current_time, bool preferred)
1135 EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);
1136
1138 void PushMessage(CNode &node, CSerializedNetMsg &&msg) const {
1139 m_connman.PushMessage(&node, std::move(msg));
1140 }
1141 template <typename... Args>
1142 void MakeAndPushMessage(CNode &node, std::string msg_type,
1143 Args &&...args) const {
1144 m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type),
1145 std::forward<Args>(args)...));
1146 }
1147
1149 void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
1150
1157 void MaybeSendPing(CNode &node_to, Peer &peer,
1158 std::chrono::microseconds now);
1159
1161 void MaybeSendAddr(CNode &node, Peer &peer,
1162 std::chrono::microseconds current_time)
1163 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1164
1169 void MaybeSendSendHeaders(CNode &node, Peer &peer)
1170 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1171
1173 void MaybeSendFeefilter(CNode &node, Peer &peer,
1174 std::chrono::microseconds current_time)
1175 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1176
1186 void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
1187 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
1188
1190
1192 m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
1193
1194 const CChainParams &m_chainparams;
1195 CConnman &m_connman;
1196 AddrMan &m_addrman;
1201 BanMan *const m_banman;
1202 ChainstateManager &m_chainman;
1203 CTxMemPool &m_mempool;
1204 avalanche::Processor *const m_avalanche;
1206
1207 Mutex cs_proofrequest;
1209 m_proofrequest GUARDED_BY(cs_proofrequest);
1210
1212 std::atomic<int> m_best_height{-1};
1213
1215 std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
1216
1217 const Options m_opts;
1218
1219 bool RejectIncomingTxs(const CNode &peer) const;
1220
1225 bool m_initial_sync_finished GUARDED_BY(cs_main){false};
1226
1231 mutable Mutex m_peer_mutex;
1238 std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
1239
1241 std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
1242
1247 const CNodeState *State(NodeId pnode) const
1250 CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1251
1252 std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
1253
1255 int nSyncStarted GUARDED_BY(cs_main) = 0;
1256
1258 BlockHash
1259 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
1260
1267 std::map<BlockHash, std::pair<NodeId, bool>>
1268 mapBlockSource GUARDED_BY(cs_main);
1269
1271 int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
1272
1274 int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
1275
1277 std::atomic<std::chrono::seconds> m_block_stalling_timeout{
1279
1291 bool AlreadyHaveTx(const TxId &txid, bool include_reconsiderable)
1293 !m_recent_confirmed_transactions_mutex);
1294
1314 CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
1315 0.000'001};
1316
1322 uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
1323
1349 CRollingBloomFilter m_recent_rejects_package_reconsiderable
1350 GUARDED_BY(::cs_main){120'000, 0.000'001};
1351
1357 mutable Mutex m_recent_confirmed_transactions_mutex;
1358 CRollingBloomFilter m_recent_confirmed_transactions
1359 GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
1360
1368 std::chrono::microseconds
1369 NextInvToInbounds(std::chrono::microseconds now,
1370 std::chrono::seconds average_interval)
1371 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1372
1373 // All of the following cache a recent block, and are protected by
1374 // m_most_recent_block_mutex
1375 mutable Mutex m_most_recent_block_mutex;
1376 std::shared_ptr<const CBlock>
1377 m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
1378 std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1379 m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
1380 BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
1381 std::unique_ptr<const std::map<TxId, CTransactionRef>>
1382 m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex);
1383
1384 // Data about the low-work headers synchronization, aggregated from all
1385 // peers' HeadersSyncStates.
1387 Mutex m_headers_presync_mutex;
1398 using HeadersPresyncStats =
1399 std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
1401 std::map<NodeId, HeadersPresyncStats>
1402 m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
1404 NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex){-1};
1406 std::atomic_bool m_headers_presync_should_signal{false};
1407
1411 int m_highest_fast_announce GUARDED_BY(::cs_main){0};
1412
1414 bool IsBlockRequested(const BlockHash &hash)
1416
1418 bool IsBlockRequestedFromOutbound(const BlockHash &hash)
1420
1429 void RemoveBlockRequest(const BlockHash &hash,
1430 std::optional<NodeId> from_peer)
1432
1439 bool BlockRequested(const Config &config, NodeId nodeid,
1440 const CBlockIndex &block,
1441 std::list<QueuedBlock>::iterator **pit = nullptr)
1443
1444 bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1445
1450 void FindNextBlocksToDownload(const Peer &peer, unsigned int count,
1451 std::vector<const CBlockIndex *> &vBlocks,
1452 NodeId &nodeStaller)
1454
1456 void TryDownloadingHistoricalBlocks(
1457 const Peer &peer, unsigned int count,
1458 std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
1459 const CBlockIndex *target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1460
1490 void FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
1491 const Peer &peer, CNodeState *state,
1492 const CBlockIndex *pindexWalk, unsigned int count,
1493 int nWindowEnd, const CChain *activeChain = nullptr,
1494 NodeId *nodeStaller = nullptr)
1496
1498 typedef std::multimap<BlockHash,
1499 std::pair<NodeId, std::list<QueuedBlock>::iterator>>
1500 BlockDownloadMap;
1501 BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
1502
1504 std::atomic<std::chrono::seconds> m_last_tip_update{0s};
1505
1510 CTransactionRef FindTxForGetData(const Peer &peer, const TxId &txid,
1511 const std::chrono::seconds mempool_req,
1512 const std::chrono::seconds now)
1514 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
1516
1517 void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
1518 const std::atomic<bool> &interruptMsgProc)
1519 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
1520 peer.m_getdata_requests_mutex,
1523
1525 void ProcessBlock(const Config &config, CNode &node,
1526 const std::shared_ptr<const CBlock> &block,
1527 bool force_processing, bool min_pow_checked);
1528
1535 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
1537
1539 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
1540
1542 int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
1543
1544 void AddToCompactExtraTransactions(const CTransactionRef &tx)
1545 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1546
1554 std::vector<CTransactionRef>
1555 vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
1557 size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
1558
1562 void ProcessBlockAvailability(NodeId nodeid)
1567 void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
1569 bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
1570
1577 bool BlockRequestAllowed(const CBlockIndex *pindex)
1579 bool AlreadyHaveBlock(const BlockHash &block_hash)
1581 bool AlreadyHaveProof(const avalanche::ProofId &proofid);
1582 void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
1583 const CInv &inv)
1584 EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
1585
1605 bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
1606 BlockFilterType filter_type,
1607 uint32_t start_height,
1608 const BlockHash &stop_hash,
1609 uint32_t max_height_diff,
1610 const CBlockIndex *&stop_index,
1611 BlockFilterIndex *&filter_index);
1612
1622 void ProcessGetCFilters(CNode &node, Peer &peer, DataStream &vRecv);
1632 void ProcessGetCFHeaders(CNode &node, Peer &peer, DataStream &vRecv);
1633
1643 void ProcessGetCFCheckPt(CNode &node, Peer &peer, DataStream &vRecv);
1644
1651 uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
1653
1661 uint32_t GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
1662 const TxId &id) const
1663 EXCLUSIVE_LOCKS_REQUIRED(!m_mempool.cs,
1664 !m_recent_confirmed_transactions_mutex);
1665
1673 bool SetupAddressRelay(const CNode &node, Peer &peer)
1674 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1675
1676 void AddAddressKnown(Peer &peer, const CAddress &addr)
1677 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1678 void PushAddress(Peer &peer, const CAddress &addr)
1679 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
1680
1686 bool ReceivedAvalancheProof(CNode &node, Peer &peer,
1687 const avalanche::ProofRef &proof)
1688 EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);
1689
1690 avalanche::ProofRef FindProofForGetData(const Peer &peer,
1691 const avalanche::ProofId &proofid,
1692 const std::chrono::seconds now)
1694
1695 bool isPreferredDownloadPeer(const CNode &pfrom);
1696};
1697
1698const CNodeState *PeerManagerImpl::State(NodeId pnode) const
1700 std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
1701 if (it == m_node_states.end()) {
1702 return nullptr;
1703 }
1704
1705 return &it->second;
1706}
1707
1708CNodeState *PeerManagerImpl::State(NodeId pnode)
1710 return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
1711}
1712
1718static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
1719 return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
1720}
1721
1722void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
1723 assert(peer.m_addr_known);
1724 peer.m_addr_known->insert(addr.GetKey());
1725}
1726
1727void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr) {
1728 // Known checking here is only to save space from duplicates.
1729 // Before sending, we'll filter it again for known addresses that were
1730 // added after addresses were pushed.
1731 assert(peer.m_addr_known);
1732 if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
1733 IsAddrCompatible(peer, addr)) {
1734 if (peer.m_addrs_to_send.size() >= m_opts.max_addr_to_send) {
1735 peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] =
1736 addr;
1737 } else {
1738 peer.m_addrs_to_send.push_back(addr);
1739 }
1740 }
1741}
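
// Illustrative sketch (not part of net_processing.cpp): PushAddress() above
// bounds its queue by overwriting a uniformly random slot once
// max_addr_to_send is reached, rather than always dropping the newest entry.
// A minimal stand-alone version of that policy, assuming any uniform RNG:
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

template <typename T>
void ExamplePushBounded(std::vector<T> &queue, T value, size_t max_size,
                        std::mt19937_64 &rng) {
    if (max_size > 0 && queue.size() >= max_size) {
        // Replace a random existing entry so every announcement keeps a
        // chance of surviving, independent of arrival order.
        std::uniform_int_distribution<size_t> pick(0, queue.size() - 1);
        queue[pick(rng)] = std::move(value);
    } else {
        queue.push_back(std::move(value));
    }
}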
1742
1743static void AddKnownTx(Peer &peer, const TxId &txid) {
1744 auto tx_relay = peer.GetTxRelay();
1745 if (!tx_relay) {
1746 return;
1747 }
1748
1749 LOCK(tx_relay->m_tx_inventory_mutex);
1750 tx_relay->m_tx_inventory_known_filter.insert(txid);
1751}
1752
1753static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
1754 if (peer.m_proof_relay != nullptr) {
1755 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
1756 peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
1757 }
1758}
1759
1760bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
1761 LOCK(cs_main);
1762 const CNodeState *state = State(pfrom.GetId());
1763 return state && state->fPreferredDownload;
1764}
1766static bool CanServeBlocks(const Peer &peer) {
1767 return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
1768}
1769
1774static bool IsLimitedPeer(const Peer &peer) {
1775 return (!(peer.m_their_services & NODE_NETWORK) &&
1776 (peer.m_their_services & NODE_NETWORK_LIMITED));
1777}
1778
1779std::chrono::microseconds
1780PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
1781 std::chrono::seconds average_interval) {
1782 if (m_next_inv_to_inbounds.load() < now) {
1783 // If this function were called from multiple threads simultaneously
1784 // it would be possible that both update the next send variable, and return
1785 // a different result to their caller. This is not possible in practice
1786 // as only the net processing thread invokes this function.
1787 m_next_inv_to_inbounds =
1788 now + m_rng.rand_exp_duration(average_interval);
1789 }
1790 return m_next_inv_to_inbounds;
1791}
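
// Illustrative sketch (not part of net_processing.cpp): the shared inbound INV
// timer above is advanced by an exponentially distributed interval
// (m_rng.rand_exp_duration), which makes announcement times a Poisson process:
// an observer cannot infer from the announcement time when a transaction was
// actually received. A stand-alone equivalent using <random>:
#include <chrono>
#include <random>

static std::chrono::microseconds
ExampleExponentialInterval(std::chrono::seconds average_interval,
                           std::mt19937_64 &rng) {
    // exponential_distribution takes the rate, i.e. 1 / mean.
    std::exponential_distribution<double> dist(
        1.0 / std::chrono::duration<double>(average_interval).count());
    return std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::duration<double>(dist(rng)));
}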
1792
1793bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
1794 return mapBlocksInFlight.count(hash);
1795}
1796
1797bool PeerManagerImpl::IsBlockRequestedFromOutbound(const BlockHash &hash) {
1798 for (auto range = mapBlocksInFlight.equal_range(hash);
1799 range.first != range.second; range.first++) {
1800 auto [nodeid, block_it] = range.first->second;
1801 CNodeState &nodestate = *Assert(State(nodeid));
1802 if (!nodestate.m_is_inbound) {
1803 return true;
1804 }
1805 }
1806
1807 return false;
1808}
1809
1810void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash,
1811 std::optional<NodeId> from_peer) {
1812 auto range = mapBlocksInFlight.equal_range(hash);
1813 if (range.first == range.second) {
1814 // Block was not requested from any peer
1815 return;
1816 }
1817
1818 // We should not have requested too many of this block
1819 Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1820
1821 while (range.first != range.second) {
1822 auto [node_id, list_it] = range.first->second;
1823
1824 if (from_peer && *from_peer != node_id) {
1825 range.first++;
1826 continue;
1827 }
1828
1829 CNodeState &state = *Assert(State(node_id));
1830
1831 if (state.vBlocksInFlight.begin() == list_it) {
1832 // First block on the queue was received, update the start download
1833 // time for the next one
1834 state.m_downloading_since =
1835 std::max(state.m_downloading_since,
1836 GetTime<std::chrono::microseconds>());
1837 }
1838 state.vBlocksInFlight.erase(list_it);
1839
1840 if (state.vBlocksInFlight.empty()) {
1841 // Last validated block on the queue for this peer was received.
1842 m_peers_downloading_from--;
1843 }
1844 state.m_stalling_since = 0us;
1845
1846 range.first = mapBlocksInFlight.erase(range.first);
1847 }
1848}
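
// Illustrative sketch (not part of net_processing.cpp): the loop above follows
// the standard pattern for selectively erasing entries from a std::multimap
// while walking an equal_range: erase() hands back the next valid iterator,
// and non-matching entries are skipped by advancing manually. Stand-alone
// form:
#include <cstddef>
#include <map>

template <typename K, typename V, typename Pred>
size_t ExampleEraseMatchingInRange(std::multimap<K, V> &map, const K &key,
                                   Pred pred) {
    size_t erased = 0;
    auto range = map.equal_range(key);
    while (range.first != range.second) {
        if (pred(range.first->second)) {
            range.first = map.erase(range.first); // returns the next iterator
            ++erased;
        } else {
            ++range.first;
        }
    }
    return erased;
}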
1849
1850bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
1851 const CBlockIndex &block,
1852 std::list<QueuedBlock>::iterator **pit) {
1853 const BlockHash &hash{block.GetBlockHash()};
1854
1855 CNodeState *state = State(nodeid);
1856 assert(state != nullptr);
1857
1858 Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
1859
1860 // Short-circuit most stuff in case it is from the same node
1861 for (auto range = mapBlocksInFlight.equal_range(hash);
1862 range.first != range.second; range.first++) {
1863 if (range.first->second.first == nodeid) {
1864 if (pit) {
1865 *pit = &range.first->second.second;
1866 }
1867 return false;
1868 }
1869 }
1870
1871 // Make sure it's not being fetched already from same peer.
1872 RemoveBlockRequest(hash, nodeid);
1873
1874 std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
1875 state->vBlocksInFlight.end(),
1876 {&block, std::unique_ptr<PartiallyDownloadedBlock>(
1877 pit ? new PartiallyDownloadedBlock(config, &m_mempool)
1878 : nullptr)});
1879 if (state->vBlocksInFlight.size() == 1) {
1880 // We're starting a block download (batch) from this peer.
1881 state->m_downloading_since = GetTime<std::chrono::microseconds>();
1882 m_peers_downloading_from++;
1883 }
1884
1885 auto itInFlight = mapBlocksInFlight.insert(
1886 std::make_pair(hash, std::make_pair(nodeid, it)));
1887
1888 if (pit) {
1889 *pit = &itInFlight->second.second;
1890 }
1891
1892 return true;
1893}
1894
1895void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
1897
1898 // When in -blocksonly mode, never request high-bandwidth mode from peers.
1899 // Our mempool will not contain the transactions necessary to reconstruct
1900 // the compact block.
1901 if (m_opts.ignore_incoming_txs) {
1902 return;
1903 }
1904
1905 CNodeState *nodestate = State(nodeid);
1906 if (!nodestate) {
1907 LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
1908 return;
1909 }
1910 if (!nodestate->m_provides_cmpctblocks) {
1911 return;
1912 }
1913 int num_outbound_hb_peers = 0;
1914 for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
1915 it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
1916 if (*it == nodeid) {
1917 lNodesAnnouncingHeaderAndIDs.erase(it);
1918 lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
1919 return;
1920 }
1921 CNodeState *state = State(*it);
1922 if (state != nullptr && !state->m_is_inbound) {
1923 ++num_outbound_hb_peers;
1924 }
1925 }
1926 if (nodestate->m_is_inbound) {
1927 // If we're adding an inbound HB peer, make sure we're not removing
1928 // our last outbound HB peer in the process.
1929 if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
1930 num_outbound_hb_peers == 1) {
1931 CNodeState *remove_node =
1932 State(lNodesAnnouncingHeaderAndIDs.front());
1933 if (remove_node != nullptr && !remove_node->m_is_inbound) {
1934 // Put the HB outbound peer in the second slot, so that it
1935 // doesn't get removed.
1936 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
1937 *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
1938 }
1939 }
1940 }
1941 m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
1942 ::cs_main) {
1944 if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
1945 // As per BIP152, we only get 3 of our peers to announce
1946 // blocks using compact encodings.
1947 m_connman.ForNode(
1948 lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
1949 MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT,
1950 /*high_bandwidth=*/false,
1951 /*version=*/CMPCTBLOCKS_VERSION);
1952 // save BIP152 bandwidth state: we select peer to be
1953 // low-bandwidth
1954 pnodeStop->m_bip152_highbandwidth_to = false;
1955 return true;
1956 });
1957 lNodesAnnouncingHeaderAndIDs.pop_front();
1958 }
1959 MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT,
1960 /*high_bandwidth=*/true,
1961 /*version=*/CMPCTBLOCKS_VERSION);
1962 // save BIP152 bandwidth state: we select peer to be high-bandwidth
1963 pfrom->m_bip152_highbandwidth_to = true;
1964 lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
1965 return true;
1966 });
1967}
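
// Illustrative sketch (not part of net_processing.cpp): the rotation policy
// above, distilled. At most 3 peers are kept as BIP152 high-bandwidth
// announcers, selecting a new one evicts the oldest, and the sole remaining
// outbound announcer is shielded by swapping it out of the eviction slot
// first. Simplified stand-in types; the real code also sends SENDCMPCT
// messages to the demoted and promoted peers.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <list>

namespace hb_example {
struct Announcer {
    int64_t node_id;
    bool is_inbound;
};

inline void SelectHighBandwidth(std::list<Announcer> &announcers,
                                Announcer candidate, size_t max_size = 3) {
    // Already selected: just move it to the back (most recently chosen).
    auto it = std::find_if(
        announcers.begin(), announcers.end(),
        [&](const Announcer &a) { return a.node_id == candidate.node_id; });
    if (it != announcers.end()) {
        announcers.splice(announcers.end(), announcers, it);
        return;
    }
    const auto outbound =
        std::count_if(announcers.begin(), announcers.end(),
                      [](const Announcer &a) { return !a.is_inbound; });
    if (candidate.is_inbound && announcers.size() >= max_size &&
        outbound == 1 && !announcers.front().is_inbound) {
        // Keep the last outbound announcer out of the eviction slot.
        std::swap(announcers.front(), *std::next(announcers.begin()));
    }
    if (announcers.size() >= max_size) {
        announcers.pop_front(); // demote the oldest announcer
    }
    announcers.push_back(candidate);
}
} // namespace hb_example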
1968
1969bool PeerManagerImpl::TipMayBeStale() {
1971 const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
1972 if (m_last_tip_update.load() == 0s) {
1973 m_last_tip_update = GetTime<std::chrono::seconds>();
1974 }
1975 return m_last_tip_update.load() <
1976 GetTime<std::chrono::seconds>() -
1977 std::chrono::seconds{consensusParams.nPowTargetSpacing *
1978 3} &&
1979 mapBlocksInFlight.empty();
1980}
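
// Illustrative sketch (not part of net_processing.cpp): TipMayBeStale() above
// flags a potentially stale tip when no block has arrived for more than three
// target spacings and nothing is being downloaded; with a 10 minute target
// spacing that is a 30 minute threshold.
#include <chrono>

static bool ExampleTipMayBeStale(std::chrono::seconds now,
                                 std::chrono::seconds last_tip_update,
                                 std::chrono::seconds target_spacing,
                                 bool blocks_in_flight) {
    return last_tip_update < now - 3 * target_spacing && !blocks_in_flight;
}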
1981
1982bool PeerManagerImpl::CanDirectFetch() {
1983 return m_chainman.ActiveChain().Tip()->Time() >
1984 GetAdjustedTime() -
1985 m_chainparams.GetConsensus().PowTargetSpacing() * 20;
1986}
1987
1988static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
1990 if (state->pindexBestKnownBlock &&
1991 pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
1992 return true;
1993 }
1994 if (state->pindexBestHeaderSent &&
1995 pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
1996 return true;
1997 }
1998 return false;
1999}
2000
2001void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
2002 CNodeState *state = State(nodeid);
2003 assert(state != nullptr);
2004
2005 if (!state->hashLastUnknownBlock.IsNull()) {
2006 const CBlockIndex *pindex =
2007 m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
2008 if (pindex && pindex->nChainWork > 0) {
2009 if (state->pindexBestKnownBlock == nullptr ||
2010 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
2011 state->pindexBestKnownBlock = pindex;
2012 }
2013 state->hashLastUnknownBlock.SetNull();
2014 }
2015 }
2016}
2017
2018void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
2019 const BlockHash &hash) {
2020 CNodeState *state = State(nodeid);
2021 assert(state != nullptr);
2022
2023 ProcessBlockAvailability(nodeid);
2024
2025 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
2026 if (pindex && pindex->nChainWork > 0) {
2027 // An actually better block was announced.
2028 if (state->pindexBestKnownBlock == nullptr ||
2029 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
2030 state->pindexBestKnownBlock = pindex;
2031 }
2032 } else {
2033 // An unknown block was announced; just assume that the latest one is
2034 // the best one.
2035 state->hashLastUnknownBlock = hash;
2036 }
2037}
2038
2039// Logic for calculating which blocks to download from a given peer, given
2040// our current tip.
2041void PeerManagerImpl::FindNextBlocksToDownload(
2042 const Peer &peer, unsigned int count,
2043 std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
2044 if (count == 0) {
2045 return;
2046 }
2047
2048 vBlocks.reserve(vBlocks.size() + count);
2049 CNodeState *state = State(peer.m_id);
2050 assert(state != nullptr);
2051
2052 // Make sure pindexBestKnownBlock is up to date, we'll need it.
2053 ProcessBlockAvailability(peer.m_id);
2054
2055 if (state->pindexBestKnownBlock == nullptr ||
2056 state->pindexBestKnownBlock->nChainWork <
2057 m_chainman.ActiveChain().Tip()->nChainWork ||
2058 state->pindexBestKnownBlock->nChainWork <
2059 m_chainman.MinimumChainWork()) {
2060 // This peer has nothing interesting.
2061 return;
2062 }
2063
2064 // When we sync with AssumeUtxo and discover the snapshot is not in the
2065 // peer's best chain, abort: We can't reorg to this chain due to missing
2066 // undo data until the background sync has finished, so downloading blocks
2067 // from it would be futile.
2068 const CBlockIndex *snap_base{m_chainman.GetSnapshotBaseBlock()};
2069 if (snap_base && state->pindexBestKnownBlock->GetAncestor(
2070 snap_base->nHeight) != snap_base) {
2072 "Not downloading blocks from peer=%d, which doesn't have the "
2073 "snapshot block in its best chain.\n",
2074 peer.m_id);
2075 return;
2076 }
2077
2078 // Bootstrap quickly by guessing a parent of our best tip is the forking
2079 // point. Guessing wrong in either direction is not a problem. Also reset
2080 // pindexLastCommonBlock after a snapshot was loaded, so that blocks after
2081 // the snapshot will be prioritised for download.
2082 if (state->pindexLastCommonBlock == nullptr ||
2083 (snap_base &&
2084 state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) {
2085 state->pindexLastCommonBlock =
2086 m_chainman
2087 .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
2088 m_chainman.ActiveChain().Height())];
2089 }
2090
2091 // If the peer reorganized, our previous pindexLastCommonBlock may not be an
2092 // ancestor of its current tip anymore. Go back enough to fix that.
2093 state->pindexLastCommonBlock = LastCommonAncestor(
2094 state->pindexLastCommonBlock, state->pindexBestKnownBlock);
2095 if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
2096 return;
2097 }
2098
2099 const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
2100 // Never fetch further than the best block we know the peer has, or more
2101 // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
2102 // common with this peer. The +1 is so we can detect stalling, namely if we
2103 // would be able to download that next block if the window were 1 larger.
2104 int nWindowEnd =
2105 state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
2106
2107 FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd,
2108 &m_chainman.ActiveChain(), &nodeStaller);
2109}
2110
2111void PeerManagerImpl::TryDownloadingHistoricalBlocks(
2112 const Peer &peer, unsigned int count,
2113 std::vector<const CBlockIndex *> &vBlocks, const CBlockIndex *from_tip,
2114 const CBlockIndex *target_block) {
2115 Assert(from_tip);
2116 Assert(target_block);
2117
2118 if (vBlocks.size() >= count) {
2119 return;
2120 }
2121
2122 vBlocks.reserve(count);
2123 CNodeState *state = Assert(State(peer.m_id));
2124
2125 if (state->pindexBestKnownBlock == nullptr ||
2126 state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) !=
2127 target_block) {
2128 // This peer can't provide us the complete series of blocks leading up
2129 // to the assumeutxo snapshot base.
2130 //
2131 // Presumably this peer's chain has less work than our ActiveChain()'s
2132 // tip, or else we will eventually crash when we try to reorg to it. Let
2133 // other logic deal with whether we disconnect this peer.
2134 //
2135 // TODO at some point in the future, we might choose to request what
2136 // blocks this peer does have from the historical chain, despite it not
2137 // having a complete history beneath the snapshot base.
2138 return;
2139 }
2140
2141 FindNextBlocks(vBlocks, peer, state, from_tip, count,
2142 std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW,
2143 target_block->nHeight));
2144}
2145
2146void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex *> &vBlocks,
2147 const Peer &peer, CNodeState *state,
2148 const CBlockIndex *pindexWalk,
2149 unsigned int count, int nWindowEnd,
2150 const CChain *activeChain,
2151 NodeId *nodeStaller) {
2152 std::vector<const CBlockIndex *> vToFetch;
2153 int nMaxHeight =
2154 std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
2155 NodeId waitingfor = -1;
2156 while (pindexWalk->nHeight < nMaxHeight) {
2157 // Read up to 128 (or more, if more blocks than that are needed)
2158 // successors of pindexWalk (towards pindexBestKnownBlock) into
2159 // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
2160 // expensive as iterating over ~100 CBlockIndex* entries anyway.
2161 int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
2162 std::max<int>(count - vBlocks.size(), 128));
2163 vToFetch.resize(nToFetch);
2164 pindexWalk = state->pindexBestKnownBlock->GetAncestor(
2165 pindexWalk->nHeight + nToFetch);
2166 vToFetch[nToFetch - 1] = pindexWalk;
2167 for (unsigned int i = nToFetch - 1; i > 0; i--) {
2168 vToFetch[i - 1] = vToFetch[i]->pprev;
2169 }
2170
2171 // Iterate over those blocks in vToFetch (in forward direction), adding
2172 // the ones that are not yet downloaded and not in flight to vBlocks. In
2173 // the meantime, update pindexLastCommonBlock as long as all ancestors
2174 // are already downloaded, or if it's already part of our chain (and
2175 // therefore we don't need it even if pruned).
2176 for (const CBlockIndex *pindex : vToFetch) {
2177 if (!pindex->IsValid(BlockValidity::TREE)) {
2178 // We consider the chain that this peer is on invalid.
2179 return;
2180 }
2181 if (pindex->nStatus.hasData() ||
2182 (activeChain && activeChain->Contains(pindex))) {
2183 if (activeChain && pindex->HaveNumChainTxs()) {
2184 state->pindexLastCommonBlock = pindex;
2185 }
2186 } else if (!IsBlockRequested(pindex->GetBlockHash())) {
2187 // The block is not already downloaded, and not yet in flight.
2188 if (pindex->nHeight > nWindowEnd) {
2189 // We reached the end of the window.
2190 if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
2191 // We aren't able to fetch anything, but we would be if
2192 // the download window was one larger.
2193 if (nodeStaller) {
2194 *nodeStaller = waitingfor;
2195 }
2196 }
2197 return;
2198 }
2199 vBlocks.push_back(pindex);
2200 if (vBlocks.size() == count) {
2201 return;
2202 }
2203 } else if (waitingfor == -1) {
2204 // This is the first already-in-flight block.
2205 waitingfor =
2206 mapBlocksInFlight.lower_bound(pindex->GetBlockHash())
2207 ->second.first;
2208 }
2209 }
2210 }
2211}
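
// Illustrative sketch (not part of net_processing.cpp): the moving download
// window used above, reduced to its height arithmetic. Starting from the last
// block we have in common with the peer, only heights within
// BLOCK_DOWNLOAD_WINDOW (1024) blocks are eligible, capped by the peer's best
// known height. The extra "+ 1" probing used for stall detection is left out.
static bool ExampleHeightInDownloadWindow(int last_common_height,
                                          int candidate_height,
                                          int best_known_height,
                                          int window = 1024) {
    const int window_end = last_common_height + window;
    return candidate_height <= window_end &&
           candidate_height <= best_known_height;
}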
2212
2213} // namespace
2214
2215template <class InvId>
2217 const InvRequestTracker<InvId> &requestTracker,
2218 const DataRequestParameters &requestParams) {
2219 return !node.HasPermission(
2220 requestParams.bypass_request_limits_permissions) &&
2221 requestTracker.Count(node.GetId()) >=
2222 requestParams.max_peer_announcements;
2223}
2224
2232template <class InvId>
2233static std::chrono::microseconds
2235 const InvRequestTracker<InvId> &requestTracker,
2236 const DataRequestParameters &requestParams,
2237 std::chrono::microseconds current_time, bool preferred) {
2238 auto delay = std::chrono::microseconds{0};
2239
2240 if (!preferred) {
2241 delay += requestParams.nonpref_peer_delay;
2242 }
2243
2244 if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
2245 requestTracker.CountInFlight(node.GetId()) >=
2246 requestParams.max_peer_request_in_flight) {
2247 delay += requestParams.overloaded_peer_delay;
2248 }
2249
2250 return current_time + delay;
2251}
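
// Illustrative sketch (not part of net_processing.cpp): with the
// TX_REQUEST_PARAMS above, ComputeRequestTime() works out to no extra delay
// for a preferred, non-overloaded peer; +2s if the peer is not preferred; and
// a further +2s if it already has 100 or more requests in flight and may not
// bypass the limits. Worked stand-alone version:
#include <chrono>
#include <cstddef>

static std::chrono::microseconds
ExampleTxRequestTime(std::chrono::microseconds now, bool preferred,
                     size_t requests_in_flight, bool may_bypass_limits) {
    using namespace std::chrono_literals;
    std::chrono::microseconds delay{0};
    if (!preferred) {
        delay += 2s; // nonpref_peer_delay
    }
    if (!may_bypass_limits &&
        requests_in_flight >= 100 /* max_peer_request_in_flight */) {
        delay += 2s; // overloaded_peer_delay
    }
    return now + delay;
}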
2252
2253void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
2254 const Peer &peer) {
2255 uint64_t my_services{peer.m_our_services};
2256 const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
2257 uint64_t nonce = pnode.GetLocalNonce();
2258 const int nNodeStartingHeight{m_best_height};
2259 NodeId nodeid = pnode.GetId();
2260 CAddress addr = pnode.addr;
2261 uint64_t extraEntropy = pnode.GetLocalExtraEntropy();
2262
2263 CService addr_you =
2264 addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
2265 ? addr
2266 : CService();
2267 uint64_t your_services{addr.nServices};
2268
2269 const bool tx_relay{!RejectIncomingTxs(pnode)};
2270 MakeAndPushMessage(
2271 // your_services, addr_you: Together the pre-version-31402 serialization
2272 // of CAddress "addrYou" (without nTime)
2273 // my_services, CService(): Together the pre-version-31402 serialization
2274 // of CAddress "addrMe" (without nTime)
2275 pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
2276 your_services, WithParams(CNetAddr::V1, addr_you), my_services,
2277 WithParams(CNetAddr::V1, CService{}), nonce, userAgent(config),
2278 nNodeStartingHeight, tx_relay, extraEntropy);
2279
2280 if (fLogIPs) {
2282 "send version message: version %d, blocks=%d, them=%s, "
2283 "txrelay=%d, peer=%d\n",
2284 PROTOCOL_VERSION, nNodeStartingHeight,
2285 addr_you.ToStringAddrPort(), tx_relay, nodeid);
2286 } else {
2288 "send version message: version %d, blocks=%d, "
2289 "txrelay=%d, peer=%d\n",
2290 PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
2291 }
2292}
2293
2294void PeerManagerImpl::AddTxAnnouncement(
2295 const CNode &node, const TxId &txid,
2296 std::chrono::microseconds current_time) {
2297 // For m_txrequest and state
2299
2300 if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
2301 return;
2302 }
2303
2304 const bool preferred = isPreferredDownloadPeer(node);
2305 auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
2306 current_time, preferred);
2307
2308 m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
2309}
2310
2311void PeerManagerImpl::AddProofAnnouncement(
2312 const CNode &node, const avalanche::ProofId &proofid,
2313 std::chrono::microseconds current_time, bool preferred) {
2314 // For m_proofrequest
2315 AssertLockHeld(cs_proofrequest);
2316
2317 if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
2318 return;
2319 }
2320
2321 auto reqtime = ComputeRequestTime(
2322 node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);
2323
2324 m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
2325}
2326
2327void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
2328 int64_t time_in_seconds) {
2329 LOCK(cs_main);
2330 CNodeState *state = State(node);
2331 if (state) {
2332 state->m_last_block_announcement = time_in_seconds;
2333 }
2334}
2335
2336void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
2337 ServiceFlags our_services) {
2338 NodeId nodeid = node.GetId();
2339 {
2340 LOCK(cs_main);
2341 m_node_states.emplace_hint(m_node_states.end(),
2342 std::piecewise_construct,
2343 std::forward_as_tuple(nodeid),
2344 std::forward_as_tuple(node.IsInboundConn()));
2345 assert(m_txrequest.Count(nodeid) == 0);
2346 }
2347
2348 if (NetPermissions::HasFlag(node.m_permission_flags,
2350 our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM);
2351 }
2352
2353 PeerRef peer = std::make_shared<Peer>(nodeid, our_services, !!m_avalanche);
2354 {
2355 LOCK(m_peer_mutex);
2356 m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
2357 }
2358 if (!node.IsInboundConn()) {
2359 PushNodeVersion(config, node, *peer);
2360 }
2361}
2362
2363void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
2364 std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
2365
2366 for (const TxId &txid : unbroadcast_txids) {
2367 // Sanity check: all unbroadcast txns should exist in the mempool
2368 if (m_mempool.exists(txid)) {
2369 RelayTransaction(txid);
2370 } else {
2371 m_mempool.RemoveUnbroadcastTx(txid, true);
2372 }
2373 }
2374
2375 if (m_avalanche) {
2376 // Get and sanitize the list of proofids to broadcast. The RelayProof
2377 // call is done in a second loop to avoid locking cs_vNodes while
2378 // cs_peerManager is locked which would cause a potential deadlock due
2379 // to reversed lock order.
2380 auto unbroadcasted_proofids =
2381 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2382 auto unbroadcasted_proofids = pm.getUnbroadcastProofs();
2383
2384 auto it = unbroadcasted_proofids.begin();
2385 while (it != unbroadcasted_proofids.end()) {
2386 // Sanity check: all unbroadcast proofs should be bound to a
2387 // peer in the peermanager
2388 if (!pm.isBoundToPeer(*it)) {
2389 pm.removeUnbroadcastProof(*it);
2390 it = unbroadcasted_proofids.erase(it);
2391 continue;
2392 }
2393
2394 ++it;
2395 }
2396
2397 return unbroadcasted_proofids;
2398 });
2399
2400 // Remaining proofids are the ones to broadcast
2401 for (const auto &proofid : unbroadcasted_proofids) {
2402 RelayProof(proofid);
2403 }
2404 }
2405
2406 // Schedule next run for 10-15 minutes in the future.
2407 // We add randomness on every cycle to avoid the possibility of P2P
2408 // fingerprinting.
2409 const auto reattemptBroadcastInterval =
2410 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
2411 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2412 reattemptBroadcastInterval);
2413}
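
// Illustrative sketch (not part of net_processing.cpp): the rescheduling above
// draws a fresh random offset on every cycle (10 minutes plus up to 5 minutes)
// so the rebroadcast never settles into a fingerprintable rhythm. A
// stand-alone way to compute such a jittered interval with <random>:
#include <chrono>
#include <cstdint>
#include <random>

static std::chrono::milliseconds
ExampleJitteredInterval(std::chrono::milliseconds base,
                        std::chrono::milliseconds jitter,
                        std::mt19937_64 &rng) {
    std::uniform_int_distribution<int64_t> dist(0, jitter.count());
    return base + std::chrono::milliseconds{dist(rng)};
}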
2414
2415void PeerManagerImpl::UpdateAvalancheStatistics() const {
2416 m_connman.ForEachNode([](CNode *pnode) {
2418 });
2419}
2420
2421void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
2422 const auto now = GetTime<std::chrono::seconds>();
2423 std::vector<NodeId> avanode_ids;
2424 bool fQuorumEstablished;
2425 bool fShouldRequestMoreNodes;
2426
2427 if (!m_avalanche) {
2428 // Not enabled or not ready yet, retry later
2429 goto scheduleLater;
2430 }
2431
2432 m_avalanche->sendDelayedAvahello();
2433
2434 fQuorumEstablished = m_avalanche->isQuorumEstablished();
2435 fShouldRequestMoreNodes =
2436 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2437 return pm.shouldRequestMoreNodes();
2438 });
2439
2440 m_connman.ForEachNode([&](CNode *pnode) {
2441 // Build a list of the avalanche peers' node ids
2442 if (pnode->m_avalanche_enabled) {
2443 avanode_ids.push_back(pnode->GetId());
2444 }
2445
2446 PeerRef peer = GetPeerRef(pnode->GetId());
2447 if (peer == nullptr) {
2448 return;
2449 }
2450 // If a proof radix tree timed out, cleanup
2451 if (peer->m_proof_relay &&
2452 now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
2454 peer->m_proof_relay->sharedProofs = {};
2455 }
2456 });
2457
2458 if (avanode_ids.empty()) {
2459 // No node is available for messaging, retry later
2460 goto scheduleLater;
2461 }
2462
2463 Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
2464
2465 // Request avalanche addresses from our peers
2466 for (NodeId avanodeId : avanode_ids) {
2467 const bool sentGetavaaddr =
2468 m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
2469 if (!fQuorumEstablished || !pavanode->IsInboundConn()) {
2470 MakeAndPushMessage(*pavanode, NetMsgType::GETAVAADDR);
2471 PeerRef peer = GetPeerRef(avanodeId);
2472 WITH_LOCK(peer->m_addr_token_bucket_mutex,
2473 peer->m_addr_token_bucket +=
2474 m_opts.max_addr_to_send);
2475 return true;
2476 }
2477 return false;
2478 });
2479
2480 // If we have no reason to believe that we need more nodes, only request
2481 // addresses from one of our peers.
2482 if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
2483 break;
2484 }
2485 }
2486
2487 if (m_chainman.IsInitialBlockDownload()) {
2488 // Don't request proofs while in IBD. We're likely to orphan them
2489 // because we don't have the UTXOs.
2490 goto scheduleLater;
2491 }
2492
2493 // If we have never received an avaproofs message yet, be kind and only
2494 // request from a subset of our peers, as we expect a ton of avaproofs
2495 // messages in the process.
2496 if (m_avalanche->getAvaproofsNodeCounter() == 0) {
2497 avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
2498 }
2499
2500 for (NodeId nodeid : avanode_ids) {
2501 // Send a getavaproofs to all of our peers
2502 m_connman.ForNode(nodeid, [&](CNode *pavanode) {
2503 PeerRef peer = GetPeerRef(nodeid);
2504 if (peer->m_proof_relay) {
2505 MakeAndPushMessage(*pavanode, NetMsgType::GETAVAPROOFS);
2506 peer->m_proof_relay->compactproofs_requested = true;
2507 }
2508 return true;
2509 });
2510 }
2511
2512scheduleLater:
2513 // Schedule next run for 2-5 minutes in the future.
2514 // We add randomness on every cycle to avoid the possibility of P2P
2515 // fingerprinting.
2516 const auto avalanchePeriodicNetworkingInterval =
2517 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2518 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2519 avalanchePeriodicNetworkingInterval);
2520}
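
The first round of getavaproofs requests above is limited to a random subset of peers. A rough standalone equivalent of that shuffle-then-truncate selection is sketched below; the helper name pick_random_subset and the subset size are made up for illustration.

#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

using NodeId = int64_t;

// Hypothetical helper: shuffle the candidate ids, then keep at most
// max_peers of them, mirroring the resize(min(size, 3)) pattern above.
std::vector<NodeId> pick_random_subset(std::vector<NodeId> ids,
                                       size_t max_peers) {
    std::mt19937_64 rng{std::random_device{}()};
    std::shuffle(ids.begin(), ids.end(), rng);
    ids.resize(std::min(ids.size(), max_peers));
    return ids;
}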
2521
2522void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
2523 NodeId nodeid = node.GetId();
2524 {
2525 LOCK(cs_main);
2526 {
2527 // We remove the PeerRef from g_peer_map here, but we don't always
2528 // destruct the Peer. Sometimes another thread is still holding a
2529 // PeerRef, so the refcount is >= 1. Be careful not to do any
2530 // processing here that assumes Peer won't be changed before it's
2531 // destructed.
2532 PeerRef peer = RemovePeer(nodeid);
2533 assert(peer != nullptr);
2534 LOCK(m_peer_mutex);
2535 m_peer_map.erase(nodeid);
2536 }
2537 CNodeState *state = State(nodeid);
2538 assert(state != nullptr);
2539
2540 if (state->fSyncStarted) {
2541 nSyncStarted--;
2542 }
2543
2544 for (const QueuedBlock &entry : state->vBlocksInFlight) {
2545 auto range =
2546 mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
2547 while (range.first != range.second) {
2548 auto [node_id, list_it] = range.first->second;
2549 if (node_id != nodeid) {
2550 range.first++;
2551 } else {
2552 range.first = mapBlocksInFlight.erase(range.first);
2553 }
2554 }
2555 }
2556 m_mempool.withOrphanage([nodeid](TxOrphanage &orphanage) {
2557 orphanage.EraseForPeer(nodeid);
2558 });
2559 m_txrequest.DisconnectedPeer(nodeid);
2560 m_num_preferred_download_peers -= state->fPreferredDownload;
2561 m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
2562 assert(m_peers_downloading_from >= 0);
2563 m_outbound_peers_with_protect_from_disconnect -=
2564 state->m_chain_sync.m_protect;
2565 assert(m_outbound_peers_with_protect_from_disconnect >= 0);
2566
2567 m_node_states.erase(nodeid);
2568
2569 if (m_node_states.empty()) {
2570 // Do a consistency check after the last peer is removed.
2571 assert(mapBlocksInFlight.empty());
2572 assert(m_num_preferred_download_peers == 0);
2573 assert(m_peers_downloading_from == 0);
2574 assert(m_outbound_peers_with_protect_from_disconnect == 0);
2575 assert(m_txrequest.Size() == 0);
2576 assert(m_mempool.withOrphanage([](const TxOrphanage &orphanage) {
2577 return orphanage.Size();
2578 }) == 0);
2579 }
2580 }
2581
2582 if (node.fSuccessfullyConnected && !node.IsBlockOnlyConn() &&
2583 !node.IsInboundConn()) {
2584 // Only change visible addrman state for full outbound peers. We don't
2585 // call Connected() for feeler connections since they don't have
2586 // fSuccessfullyConnected set.
2587 m_addrman.Connected(node.addr);
2588 }
2589 {
2590 LOCK(m_headers_presync_mutex);
2591 m_headers_presync_stats.erase(nodeid);
2592 }
2593
2594 WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
2595
2596 LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
2597}
2598
2599PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
2600 LOCK(m_peer_mutex);
2601 auto it = m_peer_map.find(id);
2602 return it != m_peer_map.end() ? it->second : nullptr;
2603}
2604
2605PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
2606 PeerRef ret;
2607 LOCK(m_peer_mutex);
2608 auto it = m_peer_map.find(id);
2609 if (it != m_peer_map.end()) {
2610 ret = std::move(it->second);
2611 m_peer_map.erase(it);
2612 }
2613 return ret;
2614}
2615
2616bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
2617 CNodeStateStats &stats) const {
2618 {
2619 LOCK(cs_main);
2620 const CNodeState *state = State(nodeid);
2621 if (state == nullptr) {
2622 return false;
2623 }
2624 stats.nSyncHeight = state->pindexBestKnownBlock
2625 ? state->pindexBestKnownBlock->nHeight
2626 : -1;
2627 stats.nCommonHeight = state->pindexLastCommonBlock
2628 ? state->pindexLastCommonBlock->nHeight
2629 : -1;
2630 for (const QueuedBlock &queue : state->vBlocksInFlight) {
2631 if (queue.pindex) {
2632 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
2633 }
2634 }
2635 }
2636
2637 PeerRef peer = GetPeerRef(nodeid);
2638 if (peer == nullptr) {
2639 return false;
2640 }
2641 stats.their_services = peer->m_their_services;
2642 stats.m_starting_height = peer->m_starting_height;
2643 // It is common for nodes with good ping times to suddenly become lagged,
2644 // due to a new block arriving or other large transfer.
2645 // Merely reporting pingtime might fool the caller into thinking the node
2646 // was still responsive, since pingtime does not update until the ping is
2647 // complete, which might take a while. So, if a ping is taking an unusually
2648 // long time in flight, the caller can immediately detect that this is
2649 // happening.
2650 auto ping_wait{0us};
2651 if ((0 != peer->m_ping_nonce_sent) &&
2652 (0 != peer->m_ping_start.load().count())) {
2653 ping_wait =
2654 GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
2655 }
2656
2657 if (auto tx_relay = peer->GetTxRelay()) {
2658 stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
2659 return tx_relay->m_relay_txs);
2660 stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
2661 } else {
2662 stats.m_relay_txs = false;
2664 }
2665
2666 stats.m_ping_wait = ping_wait;
2667 stats.m_addr_processed = peer->m_addr_processed.load();
2668 stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
2669 stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
2670 {
2671 LOCK(peer->m_headers_sync_mutex);
2672 if (peer->m_headers_sync) {
2673 stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
2674 }
2675 }
2676
2677 return true;
2678}
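
To illustrate the ping_wait idea in isolation: report how long the outstanding ping has been in flight, rather than only the last completed round-trip, so a caller can spot a lagging peer immediately. The struct and function names below are illustrative only and make no claim about the real Peer layout.

#include <chrono>
#include <cstdint>

// Hypothetical, simplified peer ping state.
struct PingState {
    uint64_t nonce_sent{0};                  // non-zero while a ping is outstanding
    std::chrono::microseconds ping_start{0}; // when that ping was sent
};

// Time the current ping has been in flight; zero if no ping is outstanding.
std::chrono::microseconds ping_wait(const PingState &p,
                                    std::chrono::microseconds now) {
    if (p.nonce_sent != 0 && p.ping_start.count() != 0) {
        return now - p.ping_start;
    }
    return std::chrono::microseconds{0};
}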
2679
2680void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
2681 if (m_opts.max_extra_txs <= 0) {
2682 return;
2683 }
2684
2685 if (!vExtraTxnForCompact.size()) {
2686 vExtraTxnForCompact.resize(m_opts.max_extra_txs);
2687 }
2688
2689 vExtraTxnForCompact[vExtraTxnForCompactIt] = tx;
2690 vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
2691}
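
AddToCompactExtraTransactions keeps a fixed-size ring of recently seen transactions for compact-block reconstruction: storage is allocated lazily, and once full the write cursor wraps around and overwrites the oldest entry. A generic sketch of the same pattern, with invented names and a plain string standing in for a transaction reference, follows.

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical ring buffer mirroring the vExtraTxnForCompact pattern.
class ExtraTxnRing {
public:
    explicit ExtraTxnRing(size_t capacity) : m_capacity(capacity) {}

    void add(const std::string &tx) {
        if (m_capacity == 0) {
            return;                     // feature disabled
        }
        if (m_slots.empty()) {
            m_slots.resize(m_capacity); // lazy allocation on first use
        }
        m_slots[m_next] = tx;
        m_next = (m_next + 1) % m_capacity;
    }

private:
    size_t m_capacity;
    size_t m_next{0};
    std::vector<std::string> m_slots;
};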
2692
2693void PeerManagerImpl::Misbehaving(Peer &peer, const std::string &message) {
2694 LOCK(peer.m_misbehavior_mutex);
2695
2696 const std::string message_prefixed =
2697 message.empty() ? "" : (": " + message);
2698 peer.m_should_discourage = true;
2699 LogPrint(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id,
2700 message_prefixed);
2701}
2702
2703void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
2704 const BlockValidationState &state,
2705 bool via_compact_block,
2706 const std::string &message) {
2707 PeerRef peer{GetPeerRef(nodeid)};
2708 switch (state.GetResult()) {
2710 break;
2712 // We didn't try to process the block because the header chain may
2713 // have too little work.
2714 break;
2715 // The node is providing invalid data:
2718 if (!via_compact_block) {
2719 if (peer) {
2720 Misbehaving(*peer, message);
2721 }
2722 return;
2723 }
2724 break;
2726 LOCK(cs_main);
2727 CNodeState *node_state = State(nodeid);
2728 if (node_state == nullptr) {
2729 break;
2730 }
2731
2732 // Ban outbound (but not inbound) peers if on an invalid chain.
2733 // Exempt HB compact block peers. Manual connections are always
2734 // protected from discouragement.
2735 if (!via_compact_block && !node_state->m_is_inbound) {
2736 if (peer) {
2737 Misbehaving(*peer, message);
2738 }
2739 return;
2740 }
2741 break;
2742 }
2746 if (peer) {
2747 Misbehaving(*peer, message);
2748 }
2749 return;
2750 // Conflicting (but not necessarily invalid) data or different policy:
2752 if (peer) {
2753 Misbehaving(*peer, message);
2754 }
2755 return;
2757 break;
2758 }
2759 if (message != "") {
2760 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2761 }
2762}
2763
2764void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
2765 const TxValidationState &state,
2766 const std::string &message) {
2767 PeerRef peer{GetPeerRef(nodeid)};
2768 switch (state.GetResult()) {
2770 break;
2771 // The node is providing invalid data:
2773 if (peer) {
2774 Misbehaving(*peer, message);
2775 }
2776 return;
2777 // Conflicting (but not necessarily invalid) data or different policy:
2790 break;
2791 }
2792 if (message != "") {
2793 LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
2794 }
2795}
2796
2797bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
2799 if (m_chainman.ActiveChain().Contains(pindex)) {
2800 return true;
2801 }
2802 return pindex->IsValid(BlockValidity::SCRIPTS) &&
2803 (m_chainman.m_best_header != nullptr) &&
2804 (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
2807 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
2808 m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
2809}
2810
2811std::optional<std::string>
2812PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
2813 const CBlockIndex &block_index) {
2814 if (m_chainman.m_blockman.LoadingBlocks()) {
2815 return "Loading blocks ...";
2816 }
2817
2818 LOCK(cs_main);
2819
2820 // Ensure this peer exists and hasn't been disconnected
2821 CNodeState *state = State(peer_id);
2822 if (state == nullptr) {
2823 return "Peer does not exist";
2824 }
2825
2826 // Forget about all prior requests
2827 RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
2828
2829 // Mark block as in-flight
2830 if (!BlockRequested(config, peer_id, block_index)) {
2831 return "Already requested from this peer";
2832 }
2833
2834 // Construct message to request the block
2835 const BlockHash &hash{block_index.GetBlockHash()};
2836 const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
2837
2838 // Send block request message to the peer
2839 if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
2840 this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs);
2841 return true;
2842 })) {
2843 return "Node not fully connected";
2844 }
2845
2846 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
2847 peer_id);
2848 return std::nullopt;
2849}
2850
2851std::unique_ptr<PeerManager>
2852PeerManager::make(CConnman &connman, AddrMan &addrman, BanMan *banman,
2853 ChainstateManager &chainman, CTxMemPool &pool,
2854 avalanche::Processor *const avalanche, Options opts) {
2855 return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
2856 pool, avalanche, opts);
2857}
2858
2859PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
2860 BanMan *banman, ChainstateManager &chainman,
2861 CTxMemPool &pool,
2863 Options opts)
2864 : m_rng{opts.deterministic_rng},
2865 m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}, m_rng},
2866 m_chainparams(chainman.GetParams()), m_connman(connman),
2867 m_addrman(addrman), m_banman(banman), m_chainman(chainman),
2868 m_mempool(pool), m_avalanche(avalanche), m_opts{opts} {}
2869
2870void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
2871 // Stale tip checking and peer eviction are on two different timers, but we
2872 // don't want them to get out of sync due to drift in the scheduler, so we
2873 // combine them in one function and schedule at the quicker (peer-eviction)
2874 // timer.
2875 static_assert(
2877 "peer eviction timer should be less than stale tip check timer");
2878 scheduler.scheduleEvery(
2879 [this]() {
2880 this->CheckForStaleTipAndEvictPeers();
2881 return true;
2882 },
2883 std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
2884
2885 // schedule next run for 10-15 minutes in the future
2886 const auto reattemptBroadcastInterval =
2887 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min);
2888 scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
2889 reattemptBroadcastInterval);
2890
2891 // Update the avalanche statistics on a schedule
2892 scheduler.scheduleEvery(
2893 [this]() {
2894 UpdateAvalancheStatistics();
2895 return true;
2896 },
2898
2899 // schedule next run for 2-5 minutes in the future
2900 const auto avalanchePeriodicNetworkingInterval =
2901 2min + FastRandomContext().randrange<std::chrono::milliseconds>(3min);
2902 scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
2903 avalanchePeriodicNetworkingInterval);
2904}
2905
2912void PeerManagerImpl::BlockConnected(
2913 ChainstateRole role, const std::shared_ptr<const CBlock> &pblock,
2914 const CBlockIndex *pindex) {
2915 // Update this for all chainstate roles so that we don't mistakenly see
2916 // peers helping us do background IBD as having a stale tip.
2917 m_last_tip_update = GetTime<std::chrono::seconds>();
2918
2919 // In case the dynamic timeout was doubled once or more, reduce it slowly
2920 // back to its default value
2921 auto stalling_timeout = m_block_stalling_timeout.load();
2922 Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
2923 if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
2924 const auto new_timeout =
2925 std::max(std::chrono::duration_cast<std::chrono::seconds>(
2926 stalling_timeout * 0.85),
2928 if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
2929 new_timeout)) {
2930 LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n",
2931 count_seconds(new_timeout));
2932 }
2933 }
2934
2935 // The following tasks can be skipped since we don't maintain a mempool for
2936 // the IBD/background chainstate.
2937 if (role == ChainstateRole::BACKGROUND) {
2938 return;
2939 }
2940 m_mempool.withOrphanage([&pblock](TxOrphanage &orphanage) {
2941 orphanage.EraseForBlock(*pblock);
2942 });
2943 m_mempool.withConflicting([&pblock](TxConflicting &conflicting) {
2944 conflicting.EraseForBlock(*pblock);
2945 });
2946
2947 {
2948 LOCK(m_recent_confirmed_transactions_mutex);
2949 for (const CTransactionRef &ptx : pblock->vtx) {
2950 m_recent_confirmed_transactions.insert(ptx->GetId());
2951 }
2952 }
2953 {
2954 LOCK(cs_main);
2955 for (const auto &ptx : pblock->vtx) {
2956 m_txrequest.ForgetInvId(ptx->GetId());
2957 }
2958 }
2959}
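
The stalling-timeout adjustment above shrinks the timeout by a factor of 0.85 on each connected block, never dropping below the default, and uses compare_exchange so a concurrent change is not clobbered. A standalone sketch of that decay step is shown here; the names and the 2-second default are assumptions made for illustration.

#include <algorithm>
#include <atomic>
#include <chrono>

using namespace std::chrono_literals;

constexpr auto kStallingTimeoutDefault = 2s; // assumed default for illustration

// Decay the timeout by 15% toward the default. Returns true if this call
// performed the update (another thread may have changed the value first).
bool decay_stalling_timeout(std::atomic<std::chrono::seconds> &timeout) {
    auto current = timeout.load();
    if (current == kStallingTimeoutDefault) {
        return false; // already at the floor
    }
    const auto reduced =
        std::chrono::duration_cast<std::chrono::seconds>(current * 0.85);
    const auto next = std::max(reduced, kStallingTimeoutDefault);
    return timeout.compare_exchange_strong(current, next);
}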
2960
2961void PeerManagerImpl::BlockDisconnected(
2962 const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
2963 // To avoid relay problems with transactions that were previously
2964 // confirmed, clear our filter of recently confirmed transactions whenever
2965 // there's a reorg.
2966 // This means that in a 1-block reorg (where 1 block is disconnected and
2967 // then another block reconnected), our filter will drop to having only one
2968 // block's worth of transactions in it, but that should be fine, since
2969 // presumably the most common case of relaying a confirmed transaction
2970 // should be just after a new block containing it is found.
2971 LOCK(m_recent_confirmed_transactions_mutex);
2972 m_recent_confirmed_transactions.reset();
2973}
2974
2979void PeerManagerImpl::NewPoWValidBlock(
2980 const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
2981 std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2982 std::make_shared<const CBlockHeaderAndShortTxIDs>(
2983 *pblock, FastRandomContext().rand64());
2984
2985 LOCK(cs_main);
2986
2987 if (pindex->nHeight <= m_highest_fast_announce) {
2988 return;
2989 }
2990 m_highest_fast_announce = pindex->nHeight;
2991
2992 BlockHash hashBlock(pblock->GetHash());
2993 const std::shared_future<CSerializedNetMsg> lazy_ser{
2994 std::async(std::launch::deferred, [&] {
2995 return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
2996 })};
2997
2998 {
2999 auto most_recent_block_txs =
3000 std::make_unique<std::map<TxId, CTransactionRef>>();
3001 for (const auto &tx : pblock->vtx) {
3002 most_recent_block_txs->emplace(tx->GetId(), tx);
3003 }
3004
3005 LOCK(m_most_recent_block_mutex);
3006 m_most_recent_block_hash = hashBlock;
3007 m_most_recent_block = pblock;
3008 m_most_recent_compact_block = pcmpctblock;
3009 m_most_recent_block_txs = std::move(most_recent_block_txs);
3010 }
3011
3012 m_connman.ForEachNode(
3013 [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
3016
3018 pnode->fDisconnect) {
3019 return;
3020 }
3021 ProcessBlockAvailability(pnode->GetId());
3022 CNodeState &state = *State(pnode->GetId());
3023 // If the peer already has the previous block, or we have announced it
3024 // to them, but we don't think they have this one, go ahead and
3025 // announce it.
3026 if (state.m_requested_hb_cmpctblocks &&
3027 !PeerHasHeader(&state, pindex) &&
3028 PeerHasHeader(&state, pindex->pprev)) {
3030 "%s sending header-and-ids %s to peer=%d\n",
3031 "PeerManager::NewPoWValidBlock",
3032 hashBlock.ToString(), pnode->GetId());
3033
3034 const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
3035 PushMessage(*pnode, ser_cmpctblock.Copy());
3036 state.pindexBestHeaderSent = pindex;
3037 }
3038 });
3039}
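
NewPoWValidBlock defers serialization of the compact-block announcement: a std::shared_future built from std::async with std::launch::deferred runs the lambda at most once, on the first peer that actually needs the bytes, and every later .get() reuses the same result. A self-contained illustration of that pattern, with a plain string standing in for the serialized message, is below.

#include <future>
#include <iostream>
#include <string>

int main() {
    int serializations = 0;

    // Deferred: the lambda runs at most once, on the first get().
    std::shared_future<std::string> lazy_msg{
        std::async(std::launch::deferred, [&] {
            ++serializations;
            return std::string{"serialized cmpctblock"};
        })};

    // Simulate announcing to several peers; the expensive work happens once.
    for (int peer = 0; peer < 3; ++peer) {
        const std::string &msg = lazy_msg.get();
        std::cout << "send to peer " << peer << ": " << msg << '\n';
    }
    std::cout << "serializations: " << serializations << '\n'; // prints 1
}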
3040
3045void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
3046 const CBlockIndex *pindexFork,
3047 bool fInitialDownload) {
3048 SetBestHeight(pindexNew->nHeight);
3049 SetServiceFlagsIBDCache(!fInitialDownload);
3050
3051 // Don't relay inventory during initial block download.
3052 if (fInitialDownload) {
3053 return;
3054 }
3055
3056 // Find the hashes of all blocks that weren't previously in the best chain.
3057 std::vector<BlockHash> vHashes;
3058 const CBlockIndex *pindexToAnnounce = pindexNew;
3059 while (pindexToAnnounce != pindexFork) {
3060 vHashes.push_back(pindexToAnnounce->GetBlockHash());
3061 pindexToAnnounce = pindexToAnnounce->pprev;
3062 if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
3063 // Limit announcements in case of a huge reorganization. Rely on the
3064 // peer's synchronization mechanism in that case.
3065 break;
3066 }
3067 }
3068
3069 {
3070 LOCK(m_peer_mutex);
3071 for (auto &it : m_peer_map) {
3072 Peer &peer = *it.second;
3073 LOCK(peer.m_block_inv_mutex);
3074 for (const BlockHash &hash : reverse_iterate(vHashes)) {
3075 peer.m_blocks_for_headers_relay.push_back(hash);
3076 }
3077 }
3078 }
3079
3080 m_connman.WakeMessageHandler();
3081}
3082
3087void PeerManagerImpl::BlockChecked(const CBlock &block,
3088 const BlockValidationState &state) {
3089 LOCK(cs_main);
3090
3091 const BlockHash hash = block.GetHash();
3092 std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
3093 mapBlockSource.find(hash);
3094
3095 // If the block failed validation, we know where it came from and we're
3096 // still connected to that peer, maybe punish.
3097 if (state.IsInvalid() && it != mapBlockSource.end() &&
3098 State(it->second.first)) {
3099 MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
3100 /*via_compact_block=*/!it->second.second);
3101 }
3102 // Check that:
3103 // 1. The block is valid
3104 // 2. We're not in initial block download
3105 // 3. This is currently the best block we're aware of. We haven't updated
3106 // the tip yet so we have no way to check this directly here. Instead we
3107 // just check that there are currently no other blocks in flight.
3108 else if (state.IsValid() && !m_chainman.IsInitialBlockDownload() &&
3109 mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
3110 if (it != mapBlockSource.end()) {
3111 MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
3112 }
3113 }
3114
3115 if (it != mapBlockSource.end()) {
3116 mapBlockSource.erase(it);
3117 }
3118}
3119
3121//
3122// Messages
3123//
3124
3125bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid,
3126 bool include_reconsiderable) {
3127 if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
3128 hashRecentRejectsChainTip) {
3129 // If the chain tip has changed, previously rejected transactions
3130 // might now be valid, e.g. due to a nLockTime'd tx becoming
3131 // valid, or a double-spend. Reset the rejects filter and give
3132 // those txs a second chance.
3133 hashRecentRejectsChainTip =
3134 m_chainman.ActiveChain().Tip()->GetBlockHash();
3135 m_recent_rejects.reset();
3136 m_recent_rejects_package_reconsiderable.reset();
3137 }
3138
3139 if (m_mempool.withOrphanage([&txid](const TxOrphanage &orphanage) {
3140 return orphanage.HaveTx(txid);
3141 })) {
3142 return true;
3143 }
3144
3145 if (m_mempool.withConflicting([&txid](const TxConflicting &conflicting) {
3146 return conflicting.HaveTx(txid);
3147 })) {
3148 return true;
3149 }
3150
3151 if (include_reconsiderable &&
3152 m_recent_rejects_package_reconsiderable.contains(txid)) {
3153 return true;
3154 }
3155
3156 {
3157 LOCK(m_recent_confirmed_transactions_mutex);
3158 if (m_recent_confirmed_transactions.contains(txid)) {
3159 return true;
3160 }
3161 }
3162
3163 return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
3164}
3165
3166bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
3167 return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
3168}
3169
3170bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
3171 if (!Assume(m_avalanche)) {
3172 return false;
3173 }
3174
3175 auto localProof = m_avalanche->getLocalProof();
3176 if (localProof && localProof->getId() == proofid) {
3177 return true;
3178 }
3179
3180 return m_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
3181 return pm.exists(proofid) || pm.isInvalid(proofid);
3182 });
3183}
3184
3185void PeerManagerImpl::SendPings() {
3186 LOCK(m_peer_mutex);
3187 for (auto &it : m_peer_map) {
3188 it.second->m_ping_queued = true;
3189 }
3190}
3191
3192void PeerManagerImpl::RelayTransaction(const TxId &txid) {
3193 LOCK(m_peer_mutex);
3194 for (auto &it : m_peer_map) {
3195 Peer &peer = *it.second;
3196 auto tx_relay = peer.GetTxRelay();
3197 if (!tx_relay) {
3198 continue;
3199 }
3200 LOCK(tx_relay->m_tx_inventory_mutex);
3201 // Only queue transactions for announcement once the version handshake
3202 // is completed. The time of arrival for these transactions is
3203 // otherwise at risk of leaking to a spy, if the spy is able to
3204 // distinguish transactions received during the handshake from the rest
3205 // in the announcement.
3206 if (tx_relay->m_next_inv_send_time == 0s) {
3207 continue;
3208 }
3209
3210 if (!tx_relay->m_tx_inventory_known_filter.contains(txid) ||
3211 tx_relay->m_avalanche_stalled_txids.count(txid) > 0) {
3212 tx_relay->m_tx_inventory_to_send.insert(txid);
3213 }
3214 }
3215}
3216
3217void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
3218 LOCK(m_peer_mutex);
3219 for (auto &it : m_peer_map) {
3220 Peer &peer = *it.second;
3221
3222 if (!peer.m_proof_relay) {
3223 continue;
3224 }
3225 LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
3226 if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
3227 proofid)) {
3228 peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
3229 }
3230 }
3231}
3232
3233void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
3234 bool fReachable) {
3235 // We choose the same nodes within a given 24h window (if the list of
3236 // connected nodes does not change) and we don't relay to nodes that already
3237 // know an address. So within 24h we will likely relay a given address once.
3238 // This is to prevent a peer from unjustly giving their address better
3239 // propagation by sending it to us repeatedly.
3240
3241 if (!fReachable && !addr.IsRelayable()) {
3242 return;
3243 }
3244
3245 // Relay to a limited number of other nodes
3246 // Use deterministic randomness to send to the same nodes for 24 hours
3247 // at a time so the m_addr_knowns of the chosen nodes prevent repeats
3248 const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
3249 const auto current_time{GetTime<std::chrono::seconds>()};
3250 // Adding address hash makes exact rotation time different per address,
3251 // while preserving periodicity.
3252 const uint64_t time_addr{
3253 (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
3255
3256 const CSipHasher hasher{
3258 .Write(hash_addr)
3259 .Write(time_addr)};
3260
3261 // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
3262 // randomly to 1 or 2 peers.
3263 unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
3264 std::array<std::pair<uint64_t, Peer *>, 2> best{
3265 {{0, nullptr}, {0, nullptr}}};
3266 assert(nRelayNodes <= best.size());
3267
3268 LOCK(m_peer_mutex);
3269
3270 for (auto &[id, peer] : m_peer_map) {
3271 if (peer->m_addr_relay_enabled && id != originator &&
3272 IsAddrCompatible(*peer, addr)) {
3273 uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
3274 for (unsigned int i = 0; i < nRelayNodes; i++) {
3275 if (hashKey > best[i].first) {
3276 std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
3277 best.begin() + i + 1);
3278 best[i] = std::make_pair(hashKey, peer.get());
3279 break;
3280 }
3281 }
3282 }
3283 };
3284
3285 for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
3286 PushAddress(*best[i].second, addr);
3287 }
3288}
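
RelayAddress keys its peer choice on a hash of the address plus a 24-hour time bucket, so the same one or two peers are chosen for a given address for roughly a day, while different addresses rotate at different moments. A simplified, self-contained sketch of that bucketing and top-2 selection is given below; std::hash stands in for the keyed SipHasher, and all names are invented for illustration.

#include <array>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Pick up to two relay targets deterministically for ~24 hours.
std::vector<int64_t> pick_addr_relay_peers(const std::string &addr,
                                           const std::vector<int64_t> &peer_ids,
                                           uint64_t now_seconds,
                                           unsigned relay_count /* 1 or 2 */) {
    const uint64_t hash_addr = std::hash<std::string>{}(addr);
    // Offsetting by hash_addr staggers the rotation time per address while
    // keeping the 24h periodicity.
    const uint64_t time_bucket = (now_seconds + hash_addr) / (24 * 60 * 60);

    std::array<std::pair<uint64_t, int64_t>, 2> best{{{0, -1}, {0, -1}}};
    for (int64_t id : peer_ids) {
        const uint64_t key = std::hash<std::string>{}(
            addr + ':' + std::to_string(time_bucket) + ':' + std::to_string(id));
        for (unsigned i = 0; i < relay_count && i < best.size(); ++i) {
            if (key > best[i].first) {
                // Shift weaker candidates down and insert here.
                for (unsigned j = best.size() - 1; j > i; --j) {
                    best[j] = best[j - 1];
                }
                best[i] = {key, id};
                break;
            }
        }
    }

    std::vector<int64_t> out;
    for (unsigned i = 0; i < relay_count && i < best.size(); ++i) {
        if (best[i].second != -1) {
            out.push_back(best[i].second);
        }
    }
    return out;
}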
3289
3290void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
3291 Peer &peer, const CInv &inv) {
3292 const BlockHash hash(inv.hash);
3293
3294 std::shared_ptr<const CBlock> a_recent_block;
3295 std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
3296 {
3297 LOCK(m_most_recent_block_mutex);
3298 a_recent_block = m_most_recent_block;
3299 a_recent_compact_block = m_most_recent_compact_block;
3300 }
3301
3302 bool need_activate_chain = false;
3303 {
3304 LOCK(cs_main);
3305 const CBlockIndex *pindex =
3306 m_chainman.m_blockman.LookupBlockIndex(hash);
3307 if (pindex) {
3308 if (pindex->HaveNumChainTxs() &&
3309 !pindex->IsValid(BlockValidity::SCRIPTS) &&
3310 pindex->IsValid(BlockValidity::TREE)) {
3311 // If we have the block and all of its parents, but have not yet
3312 // validated it, we might be in the middle of connecting it (ie
3313 // in the unlock of cs_main before ActivateBestChain but after
3314 // AcceptBlock). In this case, we need to run ActivateBestChain
3315 // prior to checking the relay conditions below.
3316 need_activate_chain = true;
3317 }
3318 }
3319 } // release cs_main before calling ActivateBestChain
3320 if (need_activate_chain) {
3322 if (!m_chainman.ActiveChainstate().ActivateBestChain(
3323 state, a_recent_block, m_avalanche)) {
3324 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
3325 state.ToString());
3326 }
3327 }
3328
3329 const CBlockIndex *pindex{nullptr};
3330 const CBlockIndex *tip{nullptr};
3331 bool can_direct_fetch{false};
3332 FlatFilePos block_pos{};
3333 {
3334 LOCK(cs_main);
3335 pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
3336 if (!pindex) {
3337 return;
3338 }
3339 if (!BlockRequestAllowed(pindex)) {
3341 "%s: ignoring request from peer=%i for old "
3342 "block that isn't in the main chain\n",
3343 __func__, pfrom.GetId());
3344 return;
3345 }
3346 // Disconnect node in case we have reached the outbound limit for
3347 // serving historical blocks.
3348 if (m_connman.OutboundTargetReached(true) &&
3349 (((m_chainman.m_best_header != nullptr) &&
3350 (m_chainman.m_best_header->GetBlockTime() -
3351 pindex->GetBlockTime() >
3353 inv.IsMsgFilteredBlk()) &&
3354 // nodes with the download permission may exceed target
3356 LogPrint(
3357 BCLog::NET,
3358 "historical block serving limit reached, disconnect peer=%d\n",
3359 pfrom.GetId());
3360 pfrom.fDisconnect = true;
3361 return;
3362 }
3363 tip = m_chainman.ActiveChain().Tip();
3364 // Avoid leaking prune-height by never sending blocks below the
3365 // NODE_NETWORK_LIMITED threshold.
3366 // Add a two-block buffer extension for possible races
3368 ((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
3370 ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
3371 (tip->nHeight - pindex->nHeight >
3372 (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
3374 "Ignore block request below NODE_NETWORK_LIMITED "
3375 "threshold, disconnect peer=%d\n",
3376 pfrom.GetId());
3377
3378 // disconnect node and prevent it from stalling (would otherwise
3379 // wait for the missing block)
3380 pfrom.fDisconnect = true;
3381 return;
3382 }
3383 // Pruned nodes may have deleted the block, so check whether it's
3384 // available before trying to send.
3385 if (!pindex->nStatus.hasData()) {
3386 return;
3387 }
3388 can_direct_fetch = CanDirectFetch();
3389 block_pos = pindex->GetBlockPos();
3390 }
3391
3392 std::shared_ptr<const CBlock> pblock;
3393 auto handle_block_read_error = [&]() {
3394 if (WITH_LOCK(m_chainman.GetMutex(),
3395 return m_chainman.m_blockman.IsBlockPruned(*pindex))) {
3397 "Block was pruned before it could be read, disconnect "
3398 "peer=%d\n",
3399 pfrom.GetId());
3400 } else {
3401 LogError("Cannot load block from disk, disconnect peer=%d\n",
3402 pfrom.GetId());
3403 }
3404 pfrom.fDisconnect = true;
3405 };
3406
3407 if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
3408 pblock = a_recent_block;
3409 } else if (!inv.IsMsgCmpctBlk()) {
3410 // Fast-path: in this case it is possible to serve the block directly
3411 // from disk, as the network format matches the format on disk
3412 std::vector<uint8_t> block_data;
3413 if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) {
3414 handle_block_read_error();
3415 return;
3416 }
3417 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data});
3418 // Don't set pblock as we've sent the block
3419 } else {
3420 // Send block from disk
3421 std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
3422 if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) {
3423 handle_block_read_error();
3424 return;
3425 }
3426 pblock = pblockRead;
3427 }
3428 if (pblock) {
3429 if (inv.IsMsgBlk()) {
3430 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, *pblock);
3431 } else if (inv.IsMsgFilteredBlk()) {
3432 bool sendMerkleBlock = false;
3433 CMerkleBlock merkleBlock;
3434 if (auto tx_relay = peer.GetTxRelay()) {
3435 LOCK(tx_relay->m_bloom_filter_mutex);
3436 if (tx_relay->m_bloom_filter) {
3437 sendMerkleBlock = true;
3438 merkleBlock =
3439 CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
3440 }
3441 }
3442 if (sendMerkleBlock) {
3443 MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
3444 // CMerkleBlock just contains hashes, so also push any
3445 // transactions in the block the client did not see. This avoids
3446 // hurting performance by pointlessly requiring a round-trip.
3447 // Note that there is currently no way for a node to request any
3448 // single transactions we didn't send here - they must either
3449 // disconnect and retry or request the full block. Thus, the
3450 // protocol spec allows us to provide duplicate transactions here;
3451 // however, we MUST always provide at least what the remote peer
3452 // needs.
3453 typedef std::pair<size_t, uint256> PairType;
3454 for (PairType &pair : merkleBlock.vMatchedTxn) {
3455 MakeAndPushMessage(pfrom, NetMsgType::TX,
3456 *pblock->vtx[pair.first]);
3457 }
3458 }
3459 // else
3460 // no response
3461 } else if (inv.IsMsgCmpctBlk()) {
3462 // If a peer is asking for old blocks, we're almost guaranteed they
3463 // won't have a useful mempool to match against a compact block, and
3464 // we don't feel like constructing the object for them, so instead
3465 // we respond with the full, non-compact block.
3466 if (can_direct_fetch &&
3467 pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) {
3468 if (a_recent_compact_block &&
3469 a_recent_compact_block->header.GetHash() ==
3470 pindex->GetBlockHash()) {
3471 MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK,
3472 *a_recent_compact_block);
3473 } else {
3474 CBlockHeaderAndShortTxIDs cmpctblock(
3475 *pblock, FastRandomContext().rand64());
3476 MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK,
3477 cmpctblock);
3478 }
3479 } else {
3480 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, *pblock);
3481 }
3482 }
3483 }
3484
3485 {
3486 LOCK(peer.m_block_inv_mutex);
3487 // Trigger the peer node to send a getblocks request for the next
3488 // batch of inventory.
3489 if (hash == peer.m_continuation_block) {
3490 // Send immediately. This must send even if redundant, and
3491 // we want it right after the last block so they don't wait for
3492 // other stuff first.
3493 std::vector<CInv> vInv;
3494 vInv.push_back(CInv(MSG_BLOCK, tip->GetBlockHash()));
3495 MakeAndPushMessage(pfrom, NetMsgType::INV, vInv);
3496 peer.m_continuation_block = BlockHash();
3497 }
3498 }
3499}
3500
3502PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
3503 const std::chrono::seconds mempool_req,
3504 const std::chrono::seconds now) {
3505 auto txinfo = m_mempool.info(txid);
3506 if (txinfo.tx) {
3507 // If a TX could have been INVed in reply to a MEMPOOL request,
3508 // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
3509 // unconditionally.
3510 if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
3511 txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
3512 return std::move(txinfo.tx);
3513 }
3514 }
3515
3516 {
3517 LOCK(cs_main);
3518
3519 // Otherwise, the transaction might have been announced recently.
3520 bool recent =
3521 Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(txid);
3522 if (recent && txinfo.tx) {
3523 return std::move(txinfo.tx);
3524 }
3525
3526 // Or it might be from the most recent block
3527 {
3528 LOCK(m_most_recent_block_mutex);
3529 if (m_most_recent_block_txs != nullptr) {
3530 auto it = m_most_recent_block_txs->find(txid);
3531 if (it != m_most_recent_block_txs->end()) {
3532 return it->second;
3533 }
3534 }
3535 }
3536 }
3537
3538 return {};
3539}
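
FindTxForGetData serves a mempool transaction unconditionally only once it is old enough, or was already in the mempool at the time of the peer's last MEMPOOL request; anything more recent must have been announced to that peer. A small sketch of just that gate, using invented types and a plain string as a stand-in for a transaction reference, follows.

#include <chrono>
#include <optional>
#include <string>
#include <unordered_set>

using namespace std::chrono_literals;

constexpr auto kUnconditionalRelayDelay = 2min; // matches the 2min constant above

struct MempoolTxInfo {
    std::string tx;                  // stand-in for CTransactionRef
    std::chrono::seconds entry_time; // when the tx entered the mempool
};

// Decide whether to serve a GETDATA for a mempool tx to this peer.
std::optional<std::string>
tx_for_getdata(const MempoolTxInfo &info, std::chrono::seconds now,
               std::chrono::seconds last_mempool_request, // 0 if never
               const std::unordered_set<std::string> &recently_announced) {
    const bool old_enough = info.entry_time <= now - kUnconditionalRelayDelay;
    const bool covered_by_mempool_req =
        last_mempool_request.count() != 0 &&
        info.entry_time <= last_mempool_request;
    if (old_enough || covered_by_mempool_req ||
        recently_announced.count(info.tx) > 0) {
        return info.tx;
    }
    return std::nullopt; // don't reveal txs the peer shouldn't know about yet
}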
3540
3544PeerManagerImpl::FindProofForGetData(const Peer &peer,
3545 const avalanche::ProofId &proofid,
3546 const std::chrono::seconds now) {
3547 avalanche::ProofRef proof;
3548
3549 bool send_unconditionally =
3550 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
3551 return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
3552 proof = peer.proof;
3553
3554 // If we have known that proof for long enough, allow requesting
3555 // it.
3556 return peer.registration_time <=
3558 });
3559 });
3560
3561 if (!proof) {
3562 // Always send our local proof if it gets requested, assuming it's
3563 // valid. This will make it easier to bind with peers upon startup, where
3564 // the status of our proof is still unknown, pending a block. Note that it
3565 // still needs to have been announced first (presumably via an avahello
3566 // message).
3567 proof = m_avalanche->getLocalProof();
3568 }
3569
3570 // We don't have this proof
3571 if (!proof) {
3572 return avalanche::ProofRef();
3573 }
3574
3575 if (send_unconditionally) {
3576 return proof;
3577 }
3578
3579 // Otherwise, the proof must have been announced recently.
3580 if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
3581 return proof;
3582 }
3583
3584 return avalanche::ProofRef();
3585}
3586
3587void PeerManagerImpl::ProcessGetData(
3588 const Config &config, CNode &pfrom, Peer &peer,
3589 const std::atomic<bool> &interruptMsgProc) {
3591
3592 auto tx_relay = peer.GetTxRelay();
3593
3594 std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
3595 std::vector<CInv> vNotFound;
3596
3597 const auto now{GetTime<std::chrono::seconds>()};
3598 // Get last mempool request time
3599 const auto mempool_req = tx_relay != nullptr
3600 ? tx_relay->m_last_mempool_req.load()
3601 : std::chrono::seconds::min();
3602
3603 // Process as many TX or AVA_PROOF items from the front of the getdata
3604 // queue as possible, since they're common and it's efficient to batch
3605 // process them.
3606 while (it != peer.m_getdata_requests.end()) {
3607 if (interruptMsgProc) {
3608 return;
3609 }
3610 // The send buffer provides backpressure. If there's no space in
3611 // the buffer, pause processing until the next call.
3612 if (pfrom.fPauseSend) {
3613 break;
3614 }
3615
3616 const CInv &inv = *it;
3617
3618 if (it->IsMsgProof()) {
3619 if (!m_avalanche) {
3620 vNotFound.push_back(inv);
3621 ++it;
3622 continue;
3623 }
3624 const avalanche::ProofId proofid(inv.hash);
3625 auto proof = FindProofForGetData(peer, proofid, now);
3626 if (proof) {
3627 MakeAndPushMessage(pfrom, NetMsgType::AVAPROOF, *proof);
3628 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
3629 pm.removeUnbroadcastProof(proofid);
3630 });
3631 } else {
3632 vNotFound.push_back(inv);
3633 }
3634
3635 ++it;
3636 continue;
3637 }
3638
3639 if (it->IsMsgTx()) {
3640 if (tx_relay == nullptr) {
3641 // Ignore GETDATA requests for transactions from
3642 // block-relay-only peers and peers that asked us not to
3643 // announce transactions.
3644 continue;
3645 }
3646
3647 const TxId txid(inv.hash);
3648 CTransactionRef tx = FindTxForGetData(peer, txid, mempool_req, now);
3649 if (tx) {
3650 MakeAndPushMessage(pfrom, NetMsgType::TX, *tx);
3651 m_mempool.RemoveUnbroadcastTx(txid);
3652 // As we're going to send tx, make sure its unconfirmed parents
3653 // are made requestable.
3654 std::vector<TxId> parent_ids_to_add;
3655 {
3656 LOCK(m_mempool.cs);
3657 auto txiter = m_mempool.GetIter(tx->GetId());
3658 if (txiter) {
3659 auto &pentry = *txiter;
3660 const CTxMemPoolEntry::Parents &parents =
3661 (*pentry)->GetMemPoolParentsConst();
3662 parent_ids_to_add.reserve(parents.size());
3663 for (const auto &parent : parents) {
3664 if (parent.get()->GetTime() >
3666 parent_ids_to_add.push_back(
3667 parent.get()->GetTx().GetId());
3668 }
3669 }
3670 }
3671 }
3672 for (const TxId &parent_txid : parent_ids_to_add) {
3673 // Relaying a transaction with a recent but unconfirmed
3674 // parent.
3675 if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
3676 return !tx_relay->m_tx_inventory_known_filter
3677 .contains(parent_txid))) {
3678 tx_relay->m_recently_announced_invs.insert(parent_txid);
3679 }
3680 }
3681 } else {
3682 vNotFound.push_back(inv);
3683 }
3684
3685 ++it;
3686 continue;
3687 }
3688
3689 // It's neither a proof nor a transaction
3690 break;
3691 }
3692
3693 // Only process one BLOCK item per call, since they're uncommon and can be
3694 // expensive to process.
3695 if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
3696 const CInv &inv = *it++;
3697 if (inv.IsGenBlkMsg()) {
3698 ProcessGetBlockData(config, pfrom, peer, inv);
3699 }
3700 // else: If the first item on the queue is an unknown type, we erase it
3701 // and continue processing the queue on the next call.
3702 }
3703
3704 peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
3705
3706 if (!vNotFound.empty()) {
3707 // Let the peer know that we didn't find what it asked for, so it
3708 // doesn't have to wait around forever. SPV clients care about this
3709 // message: it's needed when they are recursively walking the
3710 // dependencies of relevant unconfirmed transactions. SPV clients want
3711 // to do that because they want to know about (and store and rebroadcast
3712 // and risk analyze) the dependencies of transactions relevant to them,
3713 // without having to download the entire memory pool. Also, other nodes
3714 // can use these messages to automatically request a transaction from
3715 // some other peer that announced it, and stop waiting for us to
3716 // respond. In normal operation, we often send NOTFOUND messages for
3717 // parents of transactions that we relay; if a peer is missing a parent,
3718 // they may assume we have them and request the parents from us.
3719 MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
3720 }
3721}
3722
3723void PeerManagerImpl::SendBlockTransactions(
3724 CNode &pfrom, Peer &peer, const CBlock &block,
3725 const BlockTransactionsRequest &req) {
3726 BlockTransactions resp(req);
3727 for (size_t i = 0; i < req.indices.size(); i++) {
3728 if (req.indices[i] >= block.vtx.size()) {
3729 Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
3730 return;
3731 }
3732 resp.txn[i] = block.vtx[req.indices[i]];
3733 }
3734 LOCK(cs_main);
3735 MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp);
3736}
3737
3738bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
3739 const Consensus::Params &consensusParams,
3740 Peer &peer) {
3741 // Do these headers have proof-of-work matching what's claimed?
3742 if (!HasValidProofOfWork(headers, consensusParams)) {
3743 Misbehaving(peer, "header with invalid proof of work");
3744 return false;
3745 }
3746
3747 // Are these headers connected to each other?
3748 if (!CheckHeadersAreContinuous(headers)) {
3749 Misbehaving(peer, "non-continuous headers sequence");
3750 return false;
3751 }
3752 return true;
3753}
3754
3755arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() {
3756 arith_uint256 near_chaintip_work = 0;
3757 LOCK(cs_main);
3758 if (m_chainman.ActiveChain().Tip() != nullptr) {
3759 const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
3760 // Use a 144 block buffer, so that we'll accept headers that fork from
3761 // near our tip.
3762 near_chaintip_work =
3763 tip->nChainWork -
3764 std::min<arith_uint256>(144 * GetBlockProof(*tip), tip->nChainWork);
3765 }
3766 return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
3767}
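
GetAntiDoSWorkThreshold subtracts roughly 144 blocks' worth of proof-of-work from the tip's cumulative work (clamped at zero) and never returns less than the configured minimum chain work. The same arithmetic on plain unsigned integers, with made-up values standing in for arith_uint256 chain work, looks like this.

#include <algorithm>
#include <cstdint>

// Work values are illustrative stand-ins for arith_uint256 chain work.
uint64_t anti_dos_work_threshold(uint64_t tip_chain_work,
                                 uint64_t tip_block_proof,
                                 uint64_t minimum_chain_work) {
    // Allow headers that fork up to ~144 blocks below the tip.
    const uint64_t buffer = std::min(144 * tip_block_proof, tip_chain_work);
    const uint64_t near_chaintip_work = tip_chain_work - buffer;
    return std::max(near_chaintip_work, minimum_chain_work);
}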
3768
3775void PeerManagerImpl::HandleUnconnectingHeaders(
3776 CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
3777 // Try to fill in the missing headers.
3778 const CBlockIndex *best_header{
3779 WITH_LOCK(cs_main, return m_chainman.m_best_header)};
3780 if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
3781 LogPrint(
3782 BCLog::NET,
3783 "received header %s: missing prev block %s, sending getheaders "
3784 "(%d) to end (peer=%d)\n",
3785 headers[0].GetHash().ToString(),
3786 headers[0].hashPrevBlock.ToString(), best_header->nHeight,
3787 pfrom.GetId());
3788 }
3789
3790 // Set hashLastUnknownBlock for this peer, so that if we
3791 // eventually get the headers - even from a different peer -
3792 // we can use this peer to download.
3794 UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
3795}
3796
3797bool PeerManagerImpl::CheckHeadersAreContinuous(
3798 const std::vector<CBlockHeader> &headers) const {
3799 BlockHash hashLastBlock;
3800 for (const CBlockHeader &header : headers) {
3801 if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
3802 return false;
3803 }
3804 hashLastBlock = header.GetHash();
3805 }
3806 return true;
3807}
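
CheckHeadersAreContinuous verifies that each header's hashPrevBlock equals the hash of the header immediately before it. The same walk over a simplified header type, with strings standing in for block hashes, is shown below.

#include <string>
#include <vector>

struct SimpleHeader {
    std::string hash;      // this header's hash (precomputed for the sketch)
    std::string prev_hash; // hash of the parent header
};

// True if every header links to the one immediately before it.
bool headers_are_continuous(const std::vector<SimpleHeader> &headers) {
    std::string last_hash; // empty for the first header: nothing to check yet
    for (const SimpleHeader &h : headers) {
        if (!last_hash.empty() && h.prev_hash != last_hash) {
            return false;
        }
        last_hash = h.hash;
    }
    return true;
}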
3808
3809bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
3810 Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
3811 if (peer.m_headers_sync) {
3812 auto result = peer.m_headers_sync->ProcessNextHeaders(
3813 headers, headers.size() == MAX_HEADERS_RESULTS);
3814 // If it is a valid continuation, we should treat the existing
3815 // getheaders request as responded to.
3816 if (result.success) {
3817 peer.m_last_getheaders_timestamp = {};
3818 }
3819 if (result.request_more) {
3820 auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
3821 // If we were instructed to ask for a locator, it should not be
3822 // empty.
3823 Assume(!locator.vHave.empty());
3824 // We can only be instructed to request more if processing was
3825 // successful.
3826 Assume(result.success);
3827 if (!locator.vHave.empty()) {
3828 // It should be impossible for the getheaders request to fail,
3829 // because we just cleared the last getheaders timestamp.
3830 bool sent_getheaders =
3831 MaybeSendGetHeaders(pfrom, locator, peer);
3832 Assume(sent_getheaders);
3833 LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
3834 locator.vHave.front().ToString(), pfrom.GetId());
3835 }
3836 }
3837
3838 if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
3839 peer.m_headers_sync.reset(nullptr);
3840
3841 // Delete this peer's entry in m_headers_presync_stats.
3842 // If this is m_headers_presync_bestpeer, it will be replaced later
3843 // by the next peer that triggers the else{} branch below.
3844 LOCK(m_headers_presync_mutex);
3845 m_headers_presync_stats.erase(pfrom.GetId());
3846 } else {
3847 // Build statistics for this peer's sync.
3848 HeadersPresyncStats stats;
3849 stats.first = peer.m_headers_sync->GetPresyncWork();
3850 if (peer.m_headers_sync->GetState() ==
3852 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
3853 peer.m_headers_sync->GetPresyncTime()};
3854 }
3855
3856 // Update statistics in stats.
3857 LOCK(m_headers_presync_mutex);
3858 m_headers_presync_stats[pfrom.GetId()] = stats;
3859 auto best_it =
3860 m_headers_presync_stats.find(m_headers_presync_bestpeer);
3861 bool best_updated = false;
3862 if (best_it == m_headers_presync_stats.end()) {
3863 // If the cached best peer is outdated, iterate over all
3864 // remaining ones (including newly updated one) to find the best
3865 // one.
3866 NodeId peer_best{-1};
3867 const HeadersPresyncStats *stat_best{nullptr};
3868 for (const auto &[_peer, _stat] : m_headers_presync_stats) {
3869 if (!stat_best || _stat > *stat_best) {
3870 peer_best = _peer;
3871 stat_best = &_stat;
3872 }
3873 }
3874 m_headers_presync_bestpeer = peer_best;
3875 best_updated = (peer_best == pfrom.GetId());
3876 } else if (best_it->first == pfrom.GetId() ||
3877 stats > best_it->second) {
3878 // pfrom was and remains the best peer, or pfrom just became
3879 // best.
3880 m_headers_presync_bestpeer = pfrom.GetId();
3881 best_updated = true;
3882 }
3883 if (best_updated && stats.second.has_value()) {
3884 // If the best peer updated, and it is in its first phase,
3885 // signal.
3886 m_headers_presync_should_signal = true;
3887 }
3888 }
3889
3890 if (result.success) {
3891 // We only overwrite the headers passed in if processing was
3892 // successful.
3893 headers.swap(result.pow_validated_headers);
3894 }
3895
3896 return result.success;
3897 }
3898 // Either we didn't have a sync in progress, or something went wrong
3899 // processing these headers, or we are returning headers to the caller to
3900 // process.
3901 return false;
3902}
3903
3904bool PeerManagerImpl::TryLowWorkHeadersSync(
3905 Peer &peer, CNode &pfrom, const CBlockIndex *chain_start_header,
3906 std::vector<CBlockHeader> &headers) {
3907 // Calculate the total work on this chain.
3908 arith_uint256 total_work =
3909 chain_start_header->nChainWork + CalculateHeadersWork(headers);
3910
3911 // Our dynamic anti-DoS threshold (minimum work required on a headers chain
3912 // before we'll store it)
3913 arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
3914
3915 // Avoid DoS via low-difficulty-headers by only processing if the headers
3916 // are part of a chain with sufficient work.
3917 if (total_work < minimum_chain_work) {
3918 // Only try to sync with this peer if their headers message was full;
3919 // otherwise they don't have more headers after this so no point in
3920 // trying to sync their too-little-work chain.
3921 if (headers.size() == MAX_HEADERS_RESULTS) {
3922 // Note: we could advance to the last header in this set that is
3923 // known to us, rather than starting at the first header (which we
3924 // may already have); however this is unlikely to matter much since
3925 // ProcessHeadersMessage() already handles the case where all
3926 // headers in a received message are already known and are
3927 // ancestors of m_best_header or chainActive.Tip(), by skipping
3928 // this logic in that case. So even if the first header in this set
3929 // of headers is known, some header in this set must be new, so
3930 // advancing to the first unknown header would be a small effect.
3931 LOCK(peer.m_headers_sync_mutex);
3932 peer.m_headers_sync.reset(
3933 new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
3934 chain_start_header, minimum_chain_work));
3935
3936 // Now that a HeadersSyncState object for tracking this synchronization
3937 // has been created, process the headers using it as normal. Failures are
3938 // handled inside of IsContinuationOfLowWorkHeadersSync.
3939 (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
3940 } else {
3942 "Ignoring low-work chain (height=%u) from peer=%d\n",
3943 chain_start_header->nHeight + headers.size(),
3944 pfrom.GetId());
3945 }
3946 // The peer has not yet given us a chain that meets our work threshold,
3947 // so we want to prevent further processing of the headers in any case.
3948 headers = {};
3949 return true;
3950 }
3951
3952 return false;
3953}
3954
3955bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
3956 return header != nullptr &&
3957 ((m_chainman.m_best_header != nullptr &&
3958 header ==
3959 m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
3960 m_chainman.ActiveChain().Contains(header));
3961}
3962
3963bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
3964 const CBlockLocator &locator,
3965 Peer &peer) {
3966 const auto current_time = NodeClock::now();
3967
3968 // Only allow a new getheaders message to go out if we don't have a recent
3969 // one already in-flight
3970 if (current_time - peer.m_last_getheaders_timestamp >
3972 MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256());
3973 peer.m_last_getheaders_timestamp = current_time;
3974 return true;
3975 }
3976 return false;
3977}
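
MaybeSendGetHeaders only lets a new getheaders request go out if enough time has passed since the previous one to that peer; otherwise the caller is told nothing was sent. A tiny sketch of that gate follows, reusing the 2min HEADERS_RESPONSE_TIME value from the constants above; the type and function names are invented for illustration.

#include <chrono>

using namespace std::chrono_literals;

constexpr auto kHeadersResponseTime = 2min; // matches HEADERS_RESPONSE_TIME

struct GetHeadersState {
    std::chrono::steady_clock::time_point last_sent{}; // epoch if never sent
};

// Returns true (and records the send time) only if no getheaders request
// is considered to still be in flight to this peer.
bool maybe_send_getheaders(GetHeadersState &state,
                           std::chrono::steady_clock::time_point now) {
    if (now - state.last_sent > kHeadersResponseTime) {
        state.last_sent = now; // the caller would push the message here
        return true;
    }
    return false;
}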
3978
3985void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
3986 CNode &pfrom,
3987 const CBlockIndex &last_header) {
3988 LOCK(cs_main);
3989 CNodeState *nodestate = State(pfrom.GetId());
3990
3991 if (CanDirectFetch() && last_header.IsValid(BlockValidity::TREE) &&
3992 m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
3993 std::vector<const CBlockIndex *> vToFetch;
3994 const CBlockIndex *pindexWalk{&last_header};
3995 // Calculate all the blocks we'd need to switch to last_header, up to
3996 // a limit.
3997 while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
3998 vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3999 if (!pindexWalk->nStatus.hasData() &&
4000 !IsBlockRequested(pindexWalk->GetBlockHash())) {
4001 // We don't have this block, and it's not yet in flight.
4002 vToFetch.push_back(pindexWalk);
4003 }
4004 pindexWalk = pindexWalk->pprev;
4005 }
4006 // If pindexWalk still isn't on our main chain, we're looking at a
4007 // very large reorg at a time we think we're close to caught up to
4008 // the main chain -- this shouldn't really happen. Bail out on the
4009 // direct fetch and rely on parallel download instead.
4010 if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
4011 LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
4012 last_header.GetBlockHash().ToString(),
4013 last_header.nHeight);
4014 } else {
4015 std::vector<CInv> vGetData;
4016 // Download as much as possible, from earliest to latest.
4017 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
4018 if (nodestate->vBlocksInFlight.size() >=
4020 // Can't download any more from this peer
4021 break;
4022 }
4023 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
4024 BlockRequested(config, pfrom.GetId(), *pindex);
4025 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
4026 pindex->GetBlockHash().ToString(), pfrom.GetId());
4027 }
4028 if (vGetData.size() > 1) {
4030 "Downloading blocks toward %s (%d) via headers "
4031 "direct fetch\n",
4032 last_header.GetBlockHash().ToString(),
4033 last_header.nHeight);
4034 }
4035 if (vGetData.size() > 0) {
4036 if (!m_opts.ignore_incoming_txs &&
4037 nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
4038 mapBlocksInFlight.size() == 1 &&
4039 last_header.pprev->IsValid(BlockValidity::CHAIN)) {
4040 // In any case, we want to download using a compact
4041 // block, not a regular one.
4042 vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
4043 }
4044 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData);
4045 }
4046 }
4047 }
4048}
4049
4055void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
4056 CNode &pfrom, Peer &peer, const CBlockIndex &last_header,
4057 bool received_new_header, bool may_have_more_headers) {
4058 LOCK(cs_main);
4059
4060 CNodeState *nodestate = State(pfrom.GetId());
4061
4062 UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
4063
4064 // From here, pindexBestKnownBlock should be guaranteed to be non-null,
4065 // because it is set in UpdateBlockAvailability. Some nullptr checks are
4066 // still present, however, as belt-and-suspenders.
4067
4068 if (received_new_header &&
4069 last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
4070 nodestate->m_last_block_announcement = GetTime();
4071 }
4072
4073 // If we're in IBD, we want outbound peers that will serve us a useful
4074 // chain. Disconnect peers that are on chains with insufficient work.
4075 if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) {
4076 // When nCount < MAX_HEADERS_RESULTS, we know we have no more
4077 // headers to fetch from this peer.
4078 if (nodestate->pindexBestKnownBlock &&
4079 nodestate->pindexBestKnownBlock->nChainWork <
4080 m_chainman.MinimumChainWork()) {
4081 // This peer has too little work on their headers chain to help
4082 // us sync -- disconnect if it is an outbound disconnection
4083 // candidate.
4084 // Note: We compare their tip to the minimum chain work (rather than
4085 // m_chainman.ActiveChain().Tip()) because we won't start block
4086 // download until we have a headers chain that has at least
4087 // the minimum chain work, even if a peer has a chain past our tip,
4088 // as an anti-DoS measure.
4089 if (pfrom.IsOutboundOrBlockRelayConn()) {
4090 LogPrintf("Disconnecting outbound peer %d -- headers "
4091 "chain has insufficient work\n",
4092 pfrom.GetId());
4093 pfrom.fDisconnect = true;
4094 }
4095 }
4096 }
4097
4098 // If this is an outbound full-relay peer, check to see if we should
4099 // protect it from the bad/lagging chain logic.
4100 // Note that outbound block-relay peers are excluded from this
4101 // protection, and thus always subject to eviction under the bad/lagging
4102 // chain logic.
4103 // See ChainSyncTimeoutState.
4104 if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
4105 nodestate->pindexBestKnownBlock != nullptr) {
4106 if (m_outbound_peers_with_protect_from_disconnect <
4108 nodestate->pindexBestKnownBlock->nChainWork >=
4109 m_chainman.ActiveChain().Tip()->nChainWork &&
4110 !nodestate->m_chain_sync.m_protect) {
4111 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n",
4112 pfrom.GetId());
4113 nodestate->m_chain_sync.m_protect = true;
4114 ++m_outbound_peers_with_protect_from_disconnect;
4115 }
4116 }
4117}
4118
4119void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
4120 Peer &peer,
4121 std::vector<CBlockHeader> &&headers,
4122 bool via_compact_block) {
4123 size_t nCount = headers.size();
4124
4125 if (nCount == 0) {
4126 // Nothing interesting. Stop asking this peer for more headers.
4127 // If we were in the middle of headers sync, receiving an empty headers
4128 // message suggests that the peer suddenly has nothing to give us
4129 // (perhaps it reorged to our chain). Clear download state for this
4130 // peer.
4131 LOCK(peer.m_headers_sync_mutex);
4132 if (peer.m_headers_sync) {
4133 peer.m_headers_sync.reset(nullptr);
4134 LOCK(m_headers_presync_mutex);
4135 m_headers_presync_stats.erase(pfrom.GetId());
4136 }
4137 // A headers message with no headers cannot be an announcement, so
4138 // assume it is a response to our last getheaders request, if there is
4139 // one.
4140 peer.m_last_getheaders_timestamp = {};
4141 return;
4142 }
4143
4144 // Before we do any processing, make sure these pass basic sanity checks.
4145 // We'll rely on headers having valid proof-of-work further down, as an
4146 // anti-DoS criteria (note: this check is required before passing any
4147 // headers into HeadersSyncState).
4148 if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
4149 // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
4150 // just return. (Note that even if a header is announced via compact
4151 // block, the header itself should be valid, so this type of error can
4152 // always be punished.)
4153 return;
4154 }
4155
4156 const CBlockIndex *pindexLast = nullptr;
4157
4158 // We'll set already_validated_work to true if these headers are
4159 // successfully processed as part of a low-work headers sync in progress
4160 // (either in PRESYNC or REDOWNLOAD phase).
4161 // If true, this will mean that any headers returned to us (ie during
4162 // REDOWNLOAD) can be validated without further anti-DoS checks.
4163 bool already_validated_work = false;
4164
4165 // If we're in the middle of headers sync, let it do its magic.
4166 bool have_headers_sync = false;
4167 {
4168 LOCK(peer.m_headers_sync_mutex);
4169
4170 already_validated_work =
4171 IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
4172
4173 // The headers we passed in may have been:
4174 // - untouched, perhaps if no headers-sync was in progress, or some
4175 // failure occurred
4176 // - erased, such as if the headers were successfully processed and no
4177 // additional headers processing needs to take place (such as if we
4178 // are still in PRESYNC)
4179 // - replaced with headers that are now ready for validation, such as
4180 // during the REDOWNLOAD phase of a low-work headers sync.
4181 // So just check whether we still have headers that we need to process,
4182 // or not.
4183 if (headers.empty()) {
4184 return;
4185 }
4186
4187 have_headers_sync = !!peer.m_headers_sync;
4188 }
4189
4190 // Do these headers connect to something in our block index?
4191     const CBlockIndex *chain_start_header{
4192         WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(
4193                                  headers[0].hashPrevBlock))};
4194 bool headers_connect_blockindex{chain_start_header != nullptr};
4195
4196 if (!headers_connect_blockindex) {
4197         // This could be a BIP 130 block announcement; use the
4198         // special logic for handling headers that don't connect, as this
4199         // could be benign.
4200 HandleUnconnectingHeaders(pfrom, peer, headers);
4201 return;
4202 }
4203
4204 // If headers connect, assume that this is in response to any outstanding
4205 // getheaders request we may have sent, and clear out the time of our last
4206 // request. Non-connecting headers cannot be a response to a getheaders
4207 // request.
4208 peer.m_last_getheaders_timestamp = {};
4209
4210 // If the headers we received are already in memory and an ancestor of
4211 // m_best_header or our tip, skip anti-DoS checks. These headers will not
4212 // use any more memory (and we are not leaking information that could be
4213 // used to fingerprint us).
4214 const CBlockIndex *last_received_header{nullptr};
4215 {
4216 LOCK(cs_main);
4217 last_received_header =
4218 m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
4219 if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
4220 already_validated_work = true;
4221 }
4222 }
4223
4224 // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
4225 // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
4226     // on startup).
4227     if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
4228         already_validated_work = true;
4229 }
4230
4231 // At this point, the headers connect to something in our block index.
4232 // Do anti-DoS checks to determine if we should process or store for later
4233 // processing.
4234 if (!already_validated_work &&
4235 TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
4236 // If we successfully started a low-work headers sync, then there
4237 // should be no headers to process any further.
4238 Assume(headers.empty());
4239 return;
4240 }
4241
4242 // At this point, we have a set of headers with sufficient work on them
4243 // which can be processed.
4244
4245 // If we don't have the last header, then this peer will have given us
4246 // something new (if these headers are valid).
4247 bool received_new_header{last_received_header == nullptr};
4248
4249     // Now process all the headers.
4250     BlockValidationState state;
4251     if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true,
4252 state, &pindexLast)) {
4253 if (state.IsInvalid()) {
4254 MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
4255 "invalid header received");
4256 return;
4257 }
4258 }
4259 assert(pindexLast);
4260
4261 // Consider fetching more headers if we are not using our headers-sync
4262 // mechanism.
4263 if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
4264 // Headers message had its maximum size; the peer may have more headers.
4265 if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
4266 LogPrint(
4267 BCLog::NET,
4268 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
4269 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
4270 }
4271 }
4272
4273 UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast,
4274 received_new_header,
4275 nCount == MAX_HEADERS_RESULTS);
4276
4277 // Consider immediately downloading blocks.
4278 HeadersDirectFetchBlocks(config, pfrom, *pindexLast);
4279}
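// Illustrative sketch (not part of this file): the pagination rule used a few
// lines above. A headers batch that is exactly full (MAX_HEADERS_RESULTS
// entries, commonly 2000) is ambiguous, so the requester keeps asking from the
// last received header unless the dedicated low-work headers-sync state
// machine is already driving the download. The function name is hypothetical.
#include <cstddef>

static bool ShouldRequestMoreHeaders(size_t batch_size,
                                     size_t max_headers_results,
                                     bool headers_sync_in_progress) {
    // A short batch means the peer has reached its tip; nothing more to ask.
    return batch_size == max_headers_results && !headers_sync_in_progress;
}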
4280
4281void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid,
4282 const CTransactionRef &ptx,
4283 const TxValidationState &state,
4284 bool maybe_add_extra_compact_tx) {
4285 AssertLockNotHeld(m_peer_mutex);
4286 AssertLockHeld(g_msgproc_mutex);
4288
4289 const TxId &txid = ptx->GetId();
4290
4291 LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n",
4292 txid.ToString(), nodeid, state.ToString());
4293
4295 return;
4296 }
4297
4298 if (m_avalanche &&
4299 m_avalanche->isPreconsensusActivated(m_chainman.ActiveTip()) &&
4301 return;
4302 }
4303
4304     if (state.GetResult() == TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
4305     // If the result is TX_PACKAGE_RECONSIDERABLE, add it to
4306 // m_recent_rejects_package_reconsiderable because we should not
4307 // download or submit this transaction by itself again, but may submit
4308 // it as part of a package later.
4309 m_recent_rejects_package_reconsiderable.insert(txid);
4310 } else {
4311 m_recent_rejects.insert(txid);
4312 }
4313 m_txrequest.ForgetInvId(txid);
4314
4315 if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
4316 AddToCompactExtraTransactions(ptx);
4317 }
4318
4319 MaybePunishNodeForTx(nodeid, state);
4320
4321 // If the tx failed in ProcessOrphanTx, it should be removed from the
4322 // orphanage unless the tx was still missing inputs. If the tx was not in
4323 // the orphanage, EraseTx does nothing and returns 0.
4324 if (m_mempool.withOrphanage([&txid](TxOrphanage &orphanage) {
4325 return orphanage.EraseTx(txid);
4326 }) > 0) {
4327 LogPrint(BCLog::TXPACKAGES, " removed orphan tx %s\n",
4328 txid.ToString());
4329 }
4330}
4331
4332void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef &tx) {
4333 AssertLockNotHeld(m_peer_mutex);
4334 AssertLockHeld(g_msgproc_mutex);
4336
4337 // As this version of the transaction was acceptable, we can forget about
4338 // any requests for it. No-op if the tx is not in txrequest.
4339 m_txrequest.ForgetInvId(tx->GetId());
4340
4341 m_mempool.withOrphanage([&tx](TxOrphanage &orphanage) {
4342 orphanage.AddChildrenToWorkSet(*tx);
4343 // If it came from the orphanage, remove it. No-op if the tx is not in
4344 // txorphanage.
4345 orphanage.EraseTx(tx->GetId());
4346 });
4347
4348 LogPrint(
4350 "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
4351 nodeid, tx->GetId().ToString(), m_mempool.size(),
4352 m_mempool.DynamicMemoryUsage() / 1000);
4353
4354 RelayTransaction(tx->GetId());
4355}
4356
4357void PeerManagerImpl::ProcessPackageResult(
4358 const PackageToValidate &package_to_validate,
4359 const PackageMempoolAcceptResult &package_result) {
4360 AssertLockNotHeld(m_peer_mutex);
4361 AssertLockHeld(g_msgproc_mutex);
4363
4364 const auto &package = package_to_validate.m_txns;
4365 const auto &senders = package_to_validate.m_senders;
4366
4367 if (package_result.m_state.IsInvalid()) {
4368 m_recent_rejects_package_reconsiderable.insert(GetPackageHash(package));
4369 }
4370 // We currently only expect to process 1-parent-1-child packages. Remove if
4371 // this changes.
4372 if (!Assume(package.size() == 2)) {
4373 return;
4374 }
4375
4376 // Iterate backwards to erase in-package descendants from the orphanage
4377 // before they become relevant in AddChildrenToWorkSet.
4378 auto package_iter = package.rbegin();
4379 auto senders_iter = senders.rbegin();
4380 while (package_iter != package.rend()) {
4381 const auto &tx = *package_iter;
4382 const NodeId nodeid = *senders_iter;
4383 const auto it_result{package_result.m_tx_results.find(tx->GetId())};
4384
4385 // It is not guaranteed that a result exists for every transaction.
4386 if (it_result != package_result.m_tx_results.end()) {
4387 const auto &tx_result = it_result->second;
4388                switch (tx_result.m_result_type) {
4389                    case MempoolAcceptResult::ResultType::VALID: {
4390                        ProcessValidTx(nodeid, tx);
4391 break;
4392                    }
4393                    case MempoolAcceptResult::ResultType::INVALID: {
4394                        // Don't add to vExtraTxnForCompact, as these transactions
4395 // should have already been added there when added to the
4396 // orphanage or rejected for TX_PACKAGE_RECONSIDERABLE.
4397 // This should be updated if package submission is ever used
4398 // for transactions that haven't already been validated
4399 // before.
4400 ProcessInvalidTx(nodeid, tx, tx_result.m_state,
4401 /*maybe_add_extra_compact_tx=*/false);
4402 break;
4403                    }
4404                    case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: {
4405                        // AlreadyHaveTx() should be catching transactions that are
4406 // already in mempool.
4407 Assume(false);
4408 break;
4409 }
4410 }
4411 }
4412 package_iter++;
4413 senders_iter++;
4414 }
4415}
4416
4417std::optional<PeerManagerImpl::PackageToValidate>
4418PeerManagerImpl::Find1P1CPackage(const CTransactionRef &ptx, NodeId nodeid) {
4419 AssertLockNotHeld(m_peer_mutex);
4420 AssertLockHeld(g_msgproc_mutex);
4422
4423 const auto &parent_txid{ptx->GetId()};
4424
4425 Assume(m_recent_rejects_package_reconsiderable.contains(parent_txid));
4426
4427 // Prefer children from this peer. This helps prevent censorship attempts in
4428 // which an attacker sends lots of fake children for the parent, and we
4429 // (unluckily) keep selecting the fake children instead of the real one
4430 // provided by the honest peer.
4431 const auto cpfp_candidates_same_peer{
4432 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4433 return orphanage.GetChildrenFromSamePeer(ptx, nodeid);
4434 })};
4435
4436 // These children should be sorted from newest to oldest.
4437 for (const auto &child : cpfp_candidates_same_peer) {
4438 Package maybe_cpfp_package{ptx, child};
4439 if (!m_recent_rejects_package_reconsiderable.contains(
4440 GetPackageHash(maybe_cpfp_package))) {
4441 return PeerManagerImpl::PackageToValidate{ptx, child, nodeid,
4442 nodeid};
4443 }
4444 }
4445
4446 // If no suitable candidate from the same peer is found, also try children
4447 // that were provided by a different peer. This is useful because sometimes
4448 // multiple peers announce both transactions to us, and we happen to
4449 // download them from different peers (we wouldn't have known that these 2
4450 // transactions are related). We still want to find 1p1c packages then.
4451 //
4452 // If we start tracking all announcers of orphans, we can restrict this
4453 // logic to parent + child pairs in which both were provided by the same
4454 // peer, i.e. delete this step.
4455 const auto cpfp_candidates_different_peer{
4456 m_mempool.withOrphanage([&ptx, nodeid](const TxOrphanage &orphanage) {
4457 return orphanage.GetChildrenFromDifferentPeer(ptx, nodeid);
4458 })};
4459
4460 // Find the first 1p1c that hasn't already been rejected. We randomize the
4461 // order to not create a bias that attackers can use to delay package
4462 // acceptance.
4463 //
4464 // Create a random permutation of the indices.
4465 std::vector<size_t> tx_indices(cpfp_candidates_different_peer.size());
4466 std::iota(tx_indices.begin(), tx_indices.end(), 0);
4467 Shuffle(tx_indices.begin(), tx_indices.end(), m_rng);
4468
4469 for (const auto index : tx_indices) {
4470 // If we already tried a package and failed for any reason, the combined
4471 // hash was cached in m_recent_rejects_package_reconsiderable.
4472 const auto [child_tx, child_sender] =
4473 cpfp_candidates_different_peer.at(index);
4474 Package maybe_cpfp_package{ptx, child_tx};
4475 if (!m_recent_rejects_package_reconsiderable.contains(
4476 GetPackageHash(maybe_cpfp_package))) {
4477 return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid,
4478 child_sender};
4479 }
4480 }
4481 return std::nullopt;
4482}
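// Illustrative sketch (not part of this file): the unbiased selection used by
// Find1P1CPackage for children supplied by other peers. Visiting candidates in
// a uniformly random order denies an attacker a predictable slot to occupy
// with fake children. Plain <random> stands in for the node's internal RNG.
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <random>
#include <vector>

template <typename Candidate, typename Predicate>
static const Candidate *
FirstAcceptableInRandomOrder(const std::vector<Candidate> &candidates,
                             Predicate acceptable) {
    std::vector<size_t> order(candidates.size());
    std::iota(order.begin(), order.end(), 0); // 0, 1, 2, ...
    std::mt19937_64 rng{std::random_device{}()};
    std::shuffle(order.begin(), order.end(), rng);
    for (size_t index : order) {
        if (acceptable(candidates[index])) {
            return &candidates[index];
        }
    }
    return nullptr; // every candidate was previously rejected
}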
4483
4484bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
4485 AssertLockHeld(g_msgproc_mutex);
4486 LOCK(cs_main);
4487
4488 while (CTransactionRef porphanTx =
4489 m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
4490 return orphanage.GetTxToReconsider(peer.m_id);
4491 })) {
4492 const MempoolAcceptResult result =
4493 m_chainman.ProcessTransaction(porphanTx);
4494 const TxValidationState &state = result.m_state;
4495 const TxId &orphanTxId = porphanTx->GetId();
4496
4497         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4498             LogPrint(BCLog::TXPACKAGES, " accepted orphan tx %s\n",
4499 orphanTxId.ToString());
4500 ProcessValidTx(peer.m_id, porphanTx);
4501 return true;
4502 }
4503
4506 " invalid orphan tx %s from peer=%d. %s\n",
4507 orphanTxId.ToString(), peer.m_id, state.ToString());
4508
4509 if (Assume(state.IsInvalid() &&
4511 state.GetResult() !=
4513 ProcessInvalidTx(peer.m_id, porphanTx, state,
4514 /*maybe_add_extra_compact_tx=*/false);
4515 }
4516
4517 return true;
4518 }
4519 }
4520
4521 return false;
4522}
4523
4524bool PeerManagerImpl::PrepareBlockFilterRequest(
4525 CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
4526 const BlockHash &stop_hash, uint32_t max_height_diff,
4527 const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
4528 const bool supported_filter_type =
4529 (filter_type == BlockFilterType::BASIC &&
4530 (peer.m_our_services & NODE_COMPACT_FILTERS));
4531 if (!supported_filter_type) {
4533 "peer %d requested unsupported block filter type: %d\n",
4534 node.GetId(), static_cast<uint8_t>(filter_type));
4535 node.fDisconnect = true;
4536 return false;
4537 }
4538
4539 {
4540 LOCK(cs_main);
4541 stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
4542
4543 // Check that the stop block exists and the peer would be allowed to
4544 // fetch it.
4545 if (!stop_index || !BlockRequestAllowed(stop_index)) {
4546 LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
4547 node.GetId(), stop_hash.ToString());
4548 node.fDisconnect = true;
4549 return false;
4550 }
4551 }
4552
4553 uint32_t stop_height = stop_index->nHeight;
4554 if (start_height > stop_height) {
4555 LogPrint(
4556 BCLog::NET,
4557 "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
4558 */
4559 "start height %d and stop height %d\n",
4560 node.GetId(), start_height, stop_height);
4561 node.fDisconnect = true;
4562 return false;
4563 }
4564 if (stop_height - start_height >= max_height_diff) {
4566 "peer %d requested too many cfilters/cfheaders: %d / %d\n",
4567 node.GetId(), stop_height - start_height + 1, max_height_diff);
4568 node.fDisconnect = true;
4569 return false;
4570 }
4571
4572 filter_index = GetBlockFilterIndex(filter_type);
4573 if (!filter_index) {
4574 LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
4575 BlockFilterTypeName(filter_type));
4576 return false;
4577 }
4578
4579 return true;
4580}
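// Illustrative sketch (not part of this file): the height-range rule enforced
// by PrepareBlockFilterRequest. With max_height_diff = 1000 (the BIP157 limit
// for getcfilters), a request covering heights 1000..1999 spans exactly 1000
// filters and is accepted, while 1000..2000 spans 1001 and is rejected. The
// helper name is hypothetical.
#include <cstdint>

static bool FilterRangeAllowed(uint32_t start_height, uint32_t stop_height,
                               uint32_t max_height_diff) {
    if (start_height > stop_height) {
        return false; // inverted range
    }
    // The request asks for stop - start + 1 filters, so the difference must
    // stay strictly below max_height_diff.
    return stop_height - start_height < max_height_diff;
}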
4581
4582void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
4583 DataStream &vRecv) {
4584 uint8_t filter_type_ser;
4585 uint32_t start_height;
4586 BlockHash stop_hash;
4587
4588 vRecv >> filter_type_ser >> start_height >> stop_hash;
4589
4590 const BlockFilterType filter_type =
4591 static_cast<BlockFilterType>(filter_type_ser);
4592
4593 const CBlockIndex *stop_index;
4594 BlockFilterIndex *filter_index;
4595 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4596 stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
4597 filter_index)) {
4598 return;
4599 }
4600
4601 std::vector<BlockFilter> filters;
4602 if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
4604 "Failed to find block filter in index: filter_type=%s, "
4605 "start_height=%d, stop_hash=%s\n",
4606 BlockFilterTypeName(filter_type), start_height,
4607 stop_hash.ToString());
4608 return;
4609 }
4610
4611 for (const auto &filter : filters) {
4612 MakeAndPushMessage(node, NetMsgType::CFILTER, filter);
4613 }
4614}
4615
4616void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
4617 DataStream &vRecv) {
4618 uint8_t filter_type_ser;
4619 uint32_t start_height;
4620 BlockHash stop_hash;
4621
4622 vRecv >> filter_type_ser >> start_height >> stop_hash;
4623
4624 const BlockFilterType filter_type =
4625 static_cast<BlockFilterType>(filter_type_ser);
4626
4627 const CBlockIndex *stop_index;
4628 BlockFilterIndex *filter_index;
4629 if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
4630 stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
4631 filter_index)) {
4632 return;
4633 }
4634
4635 uint256 prev_header;
4636 if (start_height > 0) {
4637 const CBlockIndex *const prev_block =
4638 stop_index->GetAncestor(static_cast<int>(start_height - 1));
4639 if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
4641 "Failed to find block filter header in index: "
4642 "filter_type=%s, block_hash=%s\n",
4643 BlockFilterTypeName(filter_type),
4644 prev_block->GetBlockHash().ToString());
4645 return;
4646 }
4647 }
4648
4649 std::vector<uint256> filter_hashes;
4650 if (!filter_index->LookupFilterHashRange(start_height, stop_index,
4651 filter_hashes)) {
4653 "Failed to find block filter hashes in index: filter_type=%s, "
4654 "start_height=%d, stop_hash=%s\n",
4655 BlockFilterTypeName(filter_type), start_height,
4656 stop_hash.ToString());
4657 return;
4658 }
4659
4660 MakeAndPushMessage(node, NetMsgType::CFHEADERS, filter_type_ser,
4661 stop_index->GetBlockHash(), prev_header, filter_hashes);
4662}
4663
4664void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
4665 DataStream &vRecv) {
4666 uint8_t filter_type_ser;
4667 BlockHash stop_hash;
4668
4669 vRecv >> filter_type_ser >> stop_hash;
4670
4671 const BlockFilterType filter_type =
4672 static_cast<BlockFilterType>(filter_type_ser);
4673
4674 const CBlockIndex *stop_index;
4675 BlockFilterIndex *filter_index;
4676 if (!PrepareBlockFilterRequest(
4677 node, peer, filter_type, /*start_height=*/0, stop_hash,
4678 /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
4679 stop_index, filter_index)) {
4680 return;
4681 }
4682
4683 std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
4684
4685 // Populate headers.
4686 const CBlockIndex *block_index = stop_index;
4687 for (int i = headers.size() - 1; i >= 0; i--) {
4688 int height = (i + 1) * CFCHECKPT_INTERVAL;
4689 block_index = block_index->GetAncestor(height);
4690
4691 if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
4693 "Failed to find block filter header in index: "
4694 "filter_type=%s, block_hash=%s\n",
4695 BlockFilterTypeName(filter_type),
4696 block_index->GetBlockHash().ToString());
4697 return;
4698 }
4699 }
4700
4701 MakeAndPushMessage(node, NetMsgType::CFCHECKPT, filter_type_ser,
4702 stop_index->GetBlockHash(), headers);
4703}
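// Illustrative sketch (not part of this file): the checkpoint spacing used by
// the handler above. With CFCHECKPT_INTERVAL = 1000 and a stop block at height
// 2500, the response carries two filter headers, for heights 1000 and 2000;
// the stop block itself only contributes a checkpoint when its height is a
// multiple of the interval. The helper name is hypothetical.
#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<uint32_t> CheckpointHeights(uint32_t stop_height,
                                               uint32_t interval) {
    std::vector<uint32_t> heights(stop_height / interval);
    for (size_t i = 0; i < heights.size(); ++i) {
        heights[i] = static_cast<uint32_t>((i + 1) * interval);
    }
    return heights;
}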
4704
4705bool IsAvalancheMessageType(const std::string &msg_type) {
4706 return msg_type == NetMsgType::AVAHELLO ||
4707 msg_type == NetMsgType::AVAPOLL ||
4708 msg_type == NetMsgType::AVARESPONSE ||
4709 msg_type == NetMsgType::AVAPROOF ||
4710 msg_type == NetMsgType::GETAVAADDR ||
4711 msg_type == NetMsgType::GETAVAPROOFS ||
4712 msg_type == NetMsgType::AVAPROOFS ||
4713 msg_type == NetMsgType::AVAPROOFSREQ;
4714}
4715
4716uint32_t
4717PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
4719
4720 const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
4721
4722 // Unknown block.
4723 if (!pindex) {
4724 return -1;
4725 }
4726
4727 // Invalid block
4728 if (pindex->nStatus.isInvalid()) {
4729 return 1;
4730 }
4731
4732 // Parked block
4733 if (pindex->nStatus.isOnParkedChain()) {
4734 return 2;
4735 }
4736
4737 const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
4738 const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
4739
4740 // Active block.
4741 if (pindex == pindexFork) {
4742 return 0;
4743 }
4744
4745 // Fork block.
4746 if (pindexFork != pindexTip) {
4747 return 3;
4748 }
4749
4750 // Missing block data.
4751 if (!pindex->nStatus.hasData()) {
4752 return -2;
4753 }
4754
4755     // This block is built on top of the tip, we have the data, and it
4756     // is pending connection or rejection.
4757 return -3;
4758};
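// Illustrative sketch (not part of this file): a human-readable view of the
// vote codes returned by GetAvalancheVoteForBlock, e.g. for debug logging.
// The helper name is hypothetical; the values simply mirror the function
// directly above.
#include <cstdint>

static const char *DescribeBlockVote(uint32_t vote) {
    switch (static_cast<int32_t>(vote)) {
        case 0:  return "accepted (on the active chain)";
        case 1:  return "invalid";
        case 2:  return "parked";
        case 3:  return "fork of the active chain";
        case -1: return "unknown block";
        case -2: return "missing block data";
        case -3: return "pending connection or rejection";
        default: return "unrecognized vote";
    }
}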
4759
4760uint32_t
4761PeerManagerImpl::GetAvalancheVoteForTx(const avalanche::Processor &avalanche,
4762 const TxId &id) const {
4763 // Recently confirmed
4764 if (WITH_LOCK(m_recent_confirmed_transactions_mutex,
4765 return m_recent_confirmed_transactions.contains(id))) {
4766 return 0;
4767 }
4768
4769 CTransactionRef mempool_tx;
4770 {
4771 LOCK(::cs_main);
4772
4773 // Invalid tx. m_recent_rejects needs cs_main
4774 if (m_recent_rejects.contains(id)) {
4775 return 1;
4776 }
4777
4778 LOCK(m_mempool.cs);
4779
4780 // Finalized
4781 if (m_mempool.isAvalancheFinalizedPreConsensus(id)) {
4782 return 0;
4783 }
4784
4785 // Accepted in mempool
4786 if (auto iter = m_mempool.GetIter(id)) {
4787 mempool_tx = (**iter)->GetSharedTx();
4788 } else {
4789 // Conflicting tx
4790 if (m_mempool.withConflicting(
4791 [&id](const TxConflicting &conflicting) {
4792 return conflicting.HaveTx(id);
4793 })) {
4794 return 2;
4795 }
4796
4797 // Orphan tx
4798 if (m_mempool.withOrphanage([&id](const TxOrphanage &orphanage) {
4799 return orphanage.HaveTx(id);
4800 })) {
4801 return -2;
4802 }
4803 }
4804 } // release cs_main and mempool.cs locks
4805
4806     // isPolled() accesses the vote records, and should be called with
4807     // cs_main released.
4808 // If the tx is in the mempool...
4809 if (mempool_tx) {
4810 // ... and in the polled list
4811 if (avalanche.isPolled(mempool_tx)) {
4812 return 0;
4813 }
4814
4815 // ... but not in the polled list
4816 return -3;
4817 }
4818
4819 // Unknown tx
4820 return -1;
4821};
4822
4830 const avalanche::ProofId &id) {
4831 return avalanche.withPeerManager([&id](avalanche::PeerManager &pm) {
4832 // Rejected proof
4833 if (pm.isInvalid(id)) {
4834 return 1;
4835 }
4836
4837 // The proof is actively bound to a peer
4838 if (pm.isBoundToPeer(id)) {
4839 return 0;
4840 }
4841
4842 // Unknown proof
4843 if (!pm.exists(id)) {
4844 return -1;
4845 }
4846
4847 // Immature proof
4848 if (pm.isImmature(id)) {
4849 return 2;
4850 }
4851
4852 // Not immature, but in conflict with an actively bound proof
4853 if (pm.isInConflictingPool(id)) {
4854 return 3;
4855 }
4856
4857 // The proof is known, not rejected, not immature, not a conflict, but
4858 // for some reason unbound. This should not happen if the above pools
4859 // are managed correctly, but added for robustness.
4860 return -2;
4861 });
4862};
4863
4864void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
4865 const std::shared_ptr<const CBlock> &block,
4866 bool force_processing,
4867 bool min_pow_checked) {
4868 bool new_block{false};
4869 m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
4870 &new_block, m_avalanche);
4871 if (new_block) {
4872 node.m_last_block_time = GetTime<std::chrono::seconds>();
4873 // In case this block came from a different peer than we requested
4874 // from, we can erase the block request now anyway (as we just stored
4875 // this block to disk).
4876 LOCK(cs_main);
4877 RemoveBlockRequest(block->GetHash(), std::nullopt);
4878 } else {
4879 LOCK(cs_main);
4880 mapBlockSource.erase(block->GetHash());
4881 }
4882}
4883
4884void PeerManagerImpl::ProcessMessage(
4885 const Config &config, CNode &pfrom, const std::string &msg_type,
4886 DataStream &vRecv, const std::chrono::microseconds time_received,
4887 const std::atomic<bool> &interruptMsgProc) {
4888 AssertLockHeld(g_msgproc_mutex);
4889
4890 LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
4891 SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
4892
4893 PeerRef peer = GetPeerRef(pfrom.GetId());
4894 if (peer == nullptr) {
4895 return;
4896 }
4897
4898 if (!m_avalanche && IsAvalancheMessageType(msg_type)) {
4900 "Avalanche is not initialized, ignoring %s message\n",
4901 msg_type);
4902 return;
4903 }
4904
4905 if (msg_type == NetMsgType::VERSION) {
4906 // Each connection can only send one version message
4907 if (pfrom.nVersion != 0) {
4908 LogPrint(BCLog::NET, "redundant version message from peer=%d\n",
4909 pfrom.GetId());
4910 return;
4911 }
4912
4913 int64_t nTime;
4914 CService addrMe;
4915 uint64_t nNonce = 1;
4916 ServiceFlags nServices;
4917 int nVersion;
4918 std::string cleanSubVer;
4919 int starting_height = -1;
4920 bool fRelay = true;
4921 uint64_t nExtraEntropy = 1;
4922
4923 vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
4924 if (nTime < 0) {
4925 nTime = 0;
4926 }
4927 // Ignore the addrMe service bits sent by the peer
4928 vRecv.ignore(8);
4929 vRecv >> WithParams(CNetAddr::V1, addrMe);
4930 if (!pfrom.IsInboundConn()) {
4931 m_addrman.SetServices(pfrom.addr, nServices);
4932 }
4933 if (pfrom.ExpectServicesFromConn() &&
4934 !HasAllDesirableServiceFlags(nServices)) {
4936 "peer=%d does not offer the expected services "
4937 "(%08x offered, %08x expected); disconnecting\n",
4938 pfrom.GetId(), nServices,
4939 GetDesirableServiceFlags(nServices));
4940 pfrom.fDisconnect = true;
4941 return;
4942 }
4943
4944 if (pfrom.IsAvalancheOutboundConnection() &&
4945 !(nServices & NODE_AVALANCHE)) {
4946 LogPrint(
4948 "peer=%d does not offer the avalanche service; disconnecting\n",
4949 pfrom.GetId());
4950 pfrom.fDisconnect = true;
4951 return;
4952 }
4953
4954 if (nVersion < MIN_PEER_PROTO_VERSION) {
4955 // disconnect from peers older than this proto version
4957 "peer=%d using obsolete version %i; disconnecting\n",
4958 pfrom.GetId(), nVersion);
4959 pfrom.fDisconnect = true;
4960 return;
4961 }
4962
4963 if (!vRecv.empty()) {
4964 // The version message includes information about the sending node
4965 // which we don't use:
4966 // - 8 bytes (service bits)
4967 // - 16 bytes (ipv6 address)
4968 // - 2 bytes (port)
4969 vRecv.ignore(26);
4970 vRecv >> nNonce;
4971 }
4972 if (!vRecv.empty()) {
4973 std::string strSubVer;
4974 vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
4975 cleanSubVer = SanitizeString(strSubVer);
4976 }
4977 if (!vRecv.empty()) {
4978 vRecv >> starting_height;
4979 }
4980 if (!vRecv.empty()) {
4981 vRecv >> fRelay;
4982 }
4983 if (!vRecv.empty()) {
4984 vRecv >> nExtraEntropy;
4985 }
4986         // Disconnect if we connected to ourselves
4987 if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
4988 LogPrintf("connected to self at %s, disconnecting\n",
4989 pfrom.addr.ToStringAddrPort());
4990 pfrom.fDisconnect = true;
4991 return;
4992 }
4993
4994 if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
4995 SeenLocal(addrMe);
4996 }
4997
4998 // Inbound peers send us their version message when they connect.
4999 // We send our version message in response.
5000 if (pfrom.IsInboundConn()) {
5001 PushNodeVersion(config, pfrom, *peer);
5002 }
5003
5004 // Change version
5005 const int greatest_common_version =
5006 std::min(nVersion, PROTOCOL_VERSION);
5007 pfrom.SetCommonVersion(greatest_common_version);
5008 pfrom.nVersion = nVersion;
5009
5010 MakeAndPushMessage(pfrom, NetMsgType::VERACK);
5011
5012 // Signal ADDRv2 support (BIP155).
5013 MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2);
5014
5016 HasAllDesirableServiceFlags(nServices);
5017 peer->m_their_services = nServices;
5018 pfrom.SetAddrLocal(addrMe);
5019 {
5020 LOCK(pfrom.m_subver_mutex);
5021 pfrom.cleanSubVer = cleanSubVer;
5022 }
5023 peer->m_starting_height = starting_height;
5024
5025 // Only initialize the m_tx_relay data structure if:
5026 // - this isn't an outbound block-relay-only connection; and
5027 // - this isn't an outbound feeler connection, and
5028 // - fRelay=true or we're offering NODE_BLOOM to this peer
5029 // (NODE_BLOOM means that the peer may turn on tx relay later)
5030 if (!pfrom.IsBlockOnlyConn() && !pfrom.IsFeelerConn() &&
5031 (fRelay || (peer->m_our_services & NODE_BLOOM))) {
5032 auto *const tx_relay = peer->SetTxRelay();
5033 {
5034 LOCK(tx_relay->m_bloom_filter_mutex);
5035 // set to true after we get the first filter* message
5036 tx_relay->m_relay_txs = fRelay;
5037 }
5038 if (fRelay) {
5039 pfrom.m_relays_txs = true;
5040 }
5041 }
5042
5043 pfrom.nRemoteHostNonce = nNonce;
5044 pfrom.nRemoteExtraEntropy = nExtraEntropy;
5045
5046 // Potentially mark this peer as a preferred download peer.
5047 {
5048 LOCK(cs_main);
5049 CNodeState *state = State(pfrom.GetId());
5050             state->fPreferredDownload =
5051                 (!pfrom.IsInboundConn() ||
5052                  pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
5053                 !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
5054 m_num_preferred_download_peers += state->fPreferredDownload;
5055 }
5056
5057 // Attempt to initialize address relay for outbound peers and use result
5058 // to decide whether to send GETADDR, so that we don't send it to
5059 // inbound or outbound block-relay-only peers.
5060 bool send_getaddr{false};
5061 if (!pfrom.IsInboundConn()) {
5062 send_getaddr = SetupAddressRelay(pfrom, *peer);
5063 }
5064 if (send_getaddr) {
5065 // Do a one-time address fetch to help populate/update our addrman.
5066 // If we're starting up for the first time, our addrman may be
5067 // pretty empty, so this mechanism is important to help us connect
5068 // to the network.
5069 // We skip this for block-relay-only peers. We want to avoid
5070 // potentially leaking addr information and we do not want to
5071 // indicate to the peer that we will participate in addr relay.
5072 MakeAndPushMessage(pfrom, NetMsgType::GETADDR);
5073 peer->m_getaddr_sent = true;
5074 // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
5075 // addresses in response (bypassing the
5076 // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
5077 WITH_LOCK(peer->m_addr_token_bucket_mutex,
5078 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
5079 }
5080
5081 if (!pfrom.IsInboundConn()) {
5082 // For non-inbound connections, we update the addrman to record
5083 // connection success so that addrman will have an up-to-date
5084 // notion of which peers are online and available.
5085 //
5086 // While we strive to not leak information about block-relay-only
5087 // connections via the addrman, not moving an address to the tried
5088 // table is also potentially detrimental because new-table entries
5089 // are subject to eviction in the event of addrman collisions. We
5090 // mitigate the information-leak by never calling
5091 // AddrMan::Connected() on block-relay-only peers; see
5092 // FinalizeNode().
5093 //
5094 // This moves an address from New to Tried table in Addrman,
5095 // resolves tried-table collisions, etc.
5096 m_addrman.Good(pfrom.addr);
5097 }
5098
5099 std::string remoteAddr;
5100 if (fLogIPs) {
5101 remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort();
5102 }
5103
5105 "receive version message: [%s] %s: version %d, blocks=%d, "
5106 "us=%s, txrelay=%d, peer=%d%s\n",
5107 pfrom.addr.ToStringAddrPort(), cleanSubVer, pfrom.nVersion,
5108 peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay,
5109 pfrom.GetId(), remoteAddr);
5110
5111 int64_t currentTime = GetTime();
5112 int64_t nTimeOffset = nTime - currentTime;
5113 pfrom.nTimeOffset = nTimeOffset;
5114 if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
5115 // Ignore time offsets that are improbable (before the Genesis
5116 // block) and may underflow our adjusted time.
5117 Misbehaving(*peer, "Ignoring invalid timestamp in version message");
5118 } else if (!pfrom.IsInboundConn()) {
5119 // Don't use timedata samples from inbound peers to make it
5120 // harder for others to tamper with our adjusted time.
5121 AddTimeData(pfrom.addr, nTimeOffset);
5122 }
5123
5124         // Feeler connections exist only to verify if an address is online.
5125 if (pfrom.IsFeelerConn()) {
5127 "feeler connection completed peer=%d; disconnecting\n",
5128 pfrom.GetId());
5129 pfrom.fDisconnect = true;
5130 }
5131 return;
5132 }
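    // Illustrative sketch (not part of this file, shown as a comment so it
    // does not alter the handler above): the self-connection check relies on
    // the random 64-bit nonce carried in every VERSION message we send. If an
    // inbound VERSION echoes a nonce belonging to one of our own outbound
    // connections still in handshake, the "peer" is our own node:
    //
    //     bool IsSelfConnection(uint64_t inbound_nonce,
    //                           const std::set<uint64_t> &our_pending_nonces) {
    //         return our_pending_nonces.count(inbound_nonce) > 0;
    //     }
    //
    // CheckIncomingNonce() plays this role in the handler above.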
5133
5134 if (pfrom.nVersion == 0) {
5135 // Must have a version message before anything else
5136 Misbehaving(*peer, "non-version message before version handshake");
5137 return;
5138 }
5139
5140 if (msg_type == NetMsgType::VERACK) {
5141 if (pfrom.fSuccessfullyConnected) {
5143 "ignoring redundant verack message from peer=%d\n",
5144 pfrom.GetId());
5145 return;
5146 }
5147
5148 if (!pfrom.IsInboundConn()) {
5149 LogPrintf("New outbound peer connected: version: %d, blocks=%d, "
5150 "peer=%d%s (%s)\n",
5151 pfrom.nVersion.load(), peer->m_starting_height,
5152 pfrom.GetId(),
5153 (fLogIPs ? strprintf(", peeraddr=%s",
5154 pfrom.addr.ToStringAddrPort())
5155 : ""),
5156 pfrom.ConnectionTypeAsString());
5157 }
5158
5160 // Tell our peer we are willing to provide version 1
5161 // cmpctblocks. However, we do not request new block announcements
5162 // using cmpctblock messages. We send this to non-NODE NETWORK peers
5163 // as well, because they may wish to request compact blocks from us.
5164 MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT,
5165 /*high_bandwidth=*/false,
5166 /*version=*/CMPCTBLOCKS_VERSION);
5167 }
5168
5169 if (m_avalanche) {
5170 if (m_avalanche->sendHello(&pfrom)) {
5171 auto localProof = m_avalanche->getLocalProof();
5172
5173 if (localProof) {
5174 AddKnownProof(*peer, localProof->getId());
5175                    // Add our proof id to the list of proof INVs recently
5176                    // announced to this peer. This is used for filtering which
5177 // INV can be requested for download.
5178 peer->m_proof_relay->m_recently_announced_proofs.insert(
5179 localProof->getId());
5180 }
5181 }
5182 }
5183
5184 if (auto tx_relay = peer->GetTxRelay()) {
5185 // `TxRelay::m_tx_inventory_to_send` must be empty before the
5186 // version handshake is completed as
5187 // `TxRelay::m_next_inv_send_time` is first initialised in
5188 // `SendMessages` after the verack is received. Any transactions
5189 // received during the version handshake would otherwise
5190 // immediately be advertised without random delay, potentially
5191 // leaking the time of arrival to a spy.
5192 Assume(WITH_LOCK(tx_relay->m_tx_inventory_mutex,
5193 return tx_relay->m_tx_inventory_to_send.empty() &&
5194 tx_relay->m_next_inv_send_time == 0s));
5195 }
5196
5197 pfrom.fSuccessfullyConnected = true;
5198 return;
5199 }
5200
5201 if (!pfrom.fSuccessfullyConnected) {
5202 // Must have a verack message before anything else
5203 Misbehaving(*peer, "non-verack message before version handshake");
5204 return;
5205 }
5206
5207 if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
5208 const auto ser_params{
5209 msg_type == NetMsgType::ADDRV2
5210 ?
5211 // Set V2 param so that the CNetAddr and CAddress unserialize
5212 // methods know that an address in v2 format is coming.
5215 };
5216
5217 std::vector<CAddress> vAddr;
5218
5219 vRecv >> WithParams(ser_params, vAddr);
5220
5221 if (!SetupAddressRelay(pfrom, *peer)) {
5222 LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
5223 msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
5224 return;
5225 }
5226
5227 if (vAddr.size() > m_opts.max_addr_to_send) {
5228 Misbehaving(*peer, strprintf("%s message size = %u", msg_type,
5229 vAddr.size()));
5230 return;
5231 }
5232
5233 // Store the new addresses
5234 std::vector<CAddress> vAddrOk;
5235 const auto current_a_time{Now<NodeSeconds>()};
5236
5237 // Update/increment addr rate limiting bucket.
5238 const auto current_time = GetTime<std::chrono::microseconds>();
5239 {
5240 LOCK(peer->m_addr_token_bucket_mutex);
5241 if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
5242 // Don't increment bucket if it's already full
5243 const auto time_diff =
5244 std::max(current_time - peer->m_addr_token_timestamp, 0us);
5245 const double increment =
5247 peer->m_addr_token_bucket =
5248 std::min<double>(peer->m_addr_token_bucket + increment,
5250 }
5251 }
5252 peer->m_addr_token_timestamp = current_time;
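        // Illustrative worked example (not part of this file): the update
        // above is a classic token bucket. Assuming a refill rate of
        // MAX_ADDR_RATE_PER_SECOND = 0.1 tokens per second and a cap of
        // MAX_ADDR_PROCESSING_TOKEN_BUCKET = 1000 tokens, a peer that has
        // been silent for 50 seconds accrues
        //     increment = 50 s * 0.1 tokens/s = 5 tokens,
        // and each address accepted below costs exactly 1 token, so at most 5
        // of its addresses are processed before rate limiting applies (unless
        // the peer is exempt via the relevant permission).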
5253
5254         const bool rate_limited =
5255             !pfrom.HasPermission(NetPermissionFlags::Addr);
5256         uint64_t num_proc = 0;
5257 uint64_t num_rate_limit = 0;
5258 Shuffle(vAddr.begin(), vAddr.end(), m_rng);
5259 for (CAddress &addr : vAddr) {
5260 if (interruptMsgProc) {
5261 return;
5262 }
5263
5264 {
5265 LOCK(peer->m_addr_token_bucket_mutex);
5266 // Apply rate limiting.
5267 if (peer->m_addr_token_bucket < 1.0) {
5268 if (rate_limited) {
5269 ++num_rate_limit;
5270 continue;
5271 }
5272 } else {
5273 peer->m_addr_token_bucket -= 1.0;
5274 }
5275 }
5276
5277 // We only bother storing full nodes, though this may include things
5278 // which we would not make an outbound connection to, in part
5279 // because we may make feeler connections to them.
5280             if (!MayHaveUsefulAddressDB(addr.nServices) &&
5281                 !HasAllDesirableServiceFlags(addr.nServices)) {
5282                 continue;
5283 }
5284
5285 if (addr.nTime <= NodeSeconds{100000000s} ||
5286 addr.nTime > current_a_time + 10min) {
5287 addr.nTime = current_a_time - 5 * 24h;
5288 }
5289 AddAddressKnown(*peer, addr);
5290 if (m_banman &&
5291 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
5292 // Do not process banned/discouraged addresses beyond
5293 // remembering we received them
5294 continue;
5295 }
5296 ++num_proc;
5297 bool fReachable = IsReachable(addr);
5298 if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
5299 vAddr.size() <= 10 && addr.IsRoutable()) {
5300 // Relay to a limited number of other nodes
5301 RelayAddress(pfrom.GetId(), addr, fReachable);
5302 }
5303 // Do not store addresses outside our network
5304 if (fReachable) {
5305 vAddrOk.push_back(addr);
5306 }
5307 }
5308 peer->m_addr_processed += num_proc;
5309 peer->m_addr_rate_limited += num_rate_limit;
5311 "Received addr: %u addresses (%u processed, %u rate-limited) "
5312 "from peer=%d\n",
5313 vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
5314
5315 m_addrman.Add(vAddrOk, pfrom.addr, 2h);
5316 if (vAddr.size() < 1000) {
5317 peer->m_getaddr_sent = false;
5318 }
5319
5320 // AddrFetch: Require multiple addresses to avoid disconnecting on
5321 // self-announcements
5322 if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
5324 "addrfetch connection completed peer=%d; disconnecting\n",
5325 pfrom.GetId());
5326 pfrom.fDisconnect = true;
5327 }
5328 return;
5329 }
5330
5331 if (msg_type == NetMsgType::SENDADDRV2) {
5332 peer->m_wants_addrv2 = true;
5333 return;
5334 }
5335
5336 if (msg_type == NetMsgType::SENDHEADERS) {
5337 peer->m_prefers_headers = true;
5338 return;
5339 }
5340
5341 if (msg_type == NetMsgType::SENDCMPCT) {
5342 bool sendcmpct_hb{false};
5343 uint64_t sendcmpct_version{0};
5344 vRecv >> sendcmpct_hb >> sendcmpct_version;
5345
5346 if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
5347 return;
5348 }
5349
5350 LOCK(cs_main);
5351 CNodeState *nodestate = State(pfrom.GetId());
5352 nodestate->m_provides_cmpctblocks = true;
5353 nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
5354 // save whether peer selects us as BIP152 high-bandwidth peer
5355 // (receiving sendcmpct(1) signals high-bandwidth,
5356 // sendcmpct(0) low-bandwidth)
5357 pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
5358 return;
5359 }
5360
5361 if (msg_type == NetMsgType::INV) {
5362 std::vector<CInv> vInv;
5363 vRecv >> vInv;
5364 if (vInv.size() > MAX_INV_SZ) {
5365 Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
5366 return;
5367 }
5368
5369 const bool reject_tx_invs{RejectIncomingTxs(pfrom)};
5370
5371 const auto current_time{GetTime<std::chrono::microseconds>()};
5372 std::optional<BlockHash> best_block;
5373
5374 auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
5375 LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
5376 fAlreadyHave ? "have" : "new", pfrom.GetId());
5377 };
5378
5379 for (CInv &inv : vInv) {
5380 if (interruptMsgProc) {
5381 return;
5382 }
5383
5384 if (inv.IsMsgStakeContender()) {
5385 // Ignore invs with stake contenders. This type is only used for
5386 // polling.
5387 continue;
5388 }
5389
5390 if (inv.IsMsgBlk()) {
5391 LOCK(cs_main);
5392 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
5393 logInv(inv, fAlreadyHave);
5394
5395 BlockHash hash{inv.hash};
5396 UpdateBlockAvailability(pfrom.GetId(), hash);
5397 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
5398 !IsBlockRequested(hash)) {
5399 // Headers-first is the primary method of announcement on
5400 // the network. If a node fell back to sending blocks by
5401 // inv, it may be for a re-org, or because we haven't
5402 // completed initial headers sync. The final block hash
5403 // provided should be the highest, so send a getheaders and
5404 // then fetch the blocks we need to catch up.
5405 best_block = std::move(hash);
5406 }
5407
5408 continue;
5409 }
5410
5411 if (inv.IsMsgProof()) {
5412 if (!m_avalanche) {
5413 continue;
5414 }
5415 const avalanche::ProofId proofid(inv.hash);
5416 const bool fAlreadyHave = AlreadyHaveProof(proofid);
5417 logInv(inv, fAlreadyHave);
5418 AddKnownProof(*peer, proofid);
5419
5420 if (!fAlreadyHave && m_avalanche &&
5421 !m_chainman.IsInitialBlockDownload()) {
5422 const bool preferred = isPreferredDownloadPeer(pfrom);
5423
5424 LOCK(cs_proofrequest);
5425 AddProofAnnouncement(pfrom, proofid, current_time,
5426 preferred);
5427 }
5428 continue;
5429 }
5430
5431 if (inv.IsMsgTx()) {
5432 LOCK(cs_main);
5433 const TxId txid(inv.hash);
5434 const bool fAlreadyHave =
5435 AlreadyHaveTx(txid, /*include_reconsiderable=*/true);
5436 logInv(inv, fAlreadyHave);
5437
5438 AddKnownTx(*peer, txid);
5439 if (reject_tx_invs) {
5441 "transaction (%s) inv sent in violation of "
5442 "protocol, disconnecting peer=%d\n",
5443 txid.ToString(), pfrom.GetId());
5444 pfrom.fDisconnect = true;
5445 return;
5446 } else if (!fAlreadyHave &&
5447 !m_chainman.IsInitialBlockDownload()) {
5448 AddTxAnnouncement(pfrom, txid, current_time);
5449 }
5450
5451 continue;
5452 }
5453
5455 "Unknown inv type \"%s\" received from peer=%d\n",
5456 inv.ToString(), pfrom.GetId());
5457 }
5458
5459 if (best_block) {
5460 // If we haven't started initial headers-sync with this peer, then
5461 // consider sending a getheaders now. On initial startup, there's a
5462 // reliability vs bandwidth tradeoff, where we are only trying to do
5463 // initial headers sync with one peer at a time, with a long
5464 // timeout (at which point, if the sync hasn't completed, we will
5465 // disconnect the peer and then choose another). In the meantime,
5466 // as new blocks are found, we are willing to add one new peer per
5467 // block to sync with as well, to sync quicker in the case where
5468 // our initial peer is unresponsive (but less bandwidth than we'd
5469 // use if we turned on sync with all peers).
5470 LOCK(::cs_main);
5471 CNodeState &state{*Assert(State(pfrom.GetId()))};
5472 if (state.fSyncStarted ||
5473 (!peer->m_inv_triggered_getheaders_before_sync &&
5474 *best_block != m_last_block_inv_triggering_headers_sync)) {
5475 if (MaybeSendGetHeaders(
5476 pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
5477 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
5478 m_chainman.m_best_header->nHeight,
5479 best_block->ToString(), pfrom.GetId());
5480 }
5481 if (!state.fSyncStarted) {
5482 peer->m_inv_triggered_getheaders_before_sync = true;
5483 // Update the last block hash that triggered a new headers
5484 // sync, so that we don't turn on headers sync with more
5485 // than 1 new peer every new block.
5486 m_last_block_inv_triggering_headers_sync = *best_block;
5487 }
5488 }
5489 }
5490
5491 return;
5492 }
5493
5494 if (msg_type == NetMsgType::GETDATA) {
5495 std::vector<CInv> vInv;
5496 vRecv >> vInv;
5497 if (vInv.size() > MAX_INV_SZ) {
5498 Misbehaving(*peer,
5499 strprintf("getdata message size = %u", vInv.size()));
5500 return;
5501 }
5502
5503 LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
5504 vInv.size(), pfrom.GetId());
5505
5506 if (vInv.size() > 0) {
5507 LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
5508 vInv[0].ToString(), pfrom.GetId());
5509 }
5510
5511 {
5512 LOCK(peer->m_getdata_requests_mutex);
5513 peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
5514 vInv.begin(), vInv.end());
5515 ProcessGetData(config, pfrom, *peer, interruptMsgProc);
5516 }
5517
5518 return;
5519 }
5520
5521 if (msg_type == NetMsgType::GETBLOCKS) {
5522 CBlockLocator locator;
5523 uint256 hashStop;
5524 vRecv >> locator >> hashStop;
5525
5526 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5528 "getblocks locator size %lld > %d, disconnect peer=%d\n",
5529 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5530 pfrom.fDisconnect = true;
5531 return;
5532 }
5533
5534 // We might have announced the currently-being-connected tip using a
5535 // compact block, which resulted in the peer sending a getblocks
5536 // request, which we would otherwise respond to without the new block.
5537 // To avoid this situation we simply verify that we are on our best
5538 // known chain now. This is super overkill, but we handle it better
5539 // for getheaders requests, and there are no known nodes which support
5540 // compact blocks but still use getblocks to request blocks.
5541 {
5542 std::shared_ptr<const CBlock> a_recent_block;
5543 {
5544 LOCK(m_most_recent_block_mutex);
5545 a_recent_block = m_most_recent_block;
5546             }
5547             BlockValidationState state;
5548             if (!m_chainman.ActiveChainstate().ActivateBestChain(
5549 state, a_recent_block, m_avalanche)) {
5550 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
5551 state.ToString());
5552 }
5553 }
5554
5555 LOCK(cs_main);
5556
5557 // Find the last block the caller has in the main chain
5558 const CBlockIndex *pindex =
5559 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5560
5561 // Send the rest of the chain
5562 if (pindex) {
5563 pindex = m_chainman.ActiveChain().Next(pindex);
5564 }
5565 int nLimit = 500;
5566 LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
5567 (pindex ? pindex->nHeight : -1),
5568 hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
5569 pfrom.GetId());
5570 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5571 if (pindex->GetBlockHash() == hashStop) {
5572 LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
5573 pindex->nHeight, pindex->GetBlockHash().ToString());
5574 break;
5575 }
5576             // If pruning, don't inv blocks unless we have them on disk and
5577             // are likely to still have them for some reasonable time window
5578             // (1 hour) that block relay might require.
5579             const int nPrunedBlocksLikelyToHave =
5580                 MIN_BLOCKS_TO_KEEP -
5581                 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
5582 if (m_chainman.m_blockman.IsPruneMode() &&
5583 (!pindex->nStatus.hasData() ||
5584 pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
5585 nPrunedBlocksLikelyToHave)) {
5586 LogPrint(
5587 BCLog::NET,
5588 " getblocks stopping, pruned or too old block at %d %s\n",
5589 pindex->nHeight, pindex->GetBlockHash().ToString());
5590 break;
5591 }
5592 WITH_LOCK(
5593 peer->m_block_inv_mutex,
5594 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
5595 if (--nLimit <= 0) {
5596 // When this block is requested, we'll send an inv that'll
5597 // trigger the peer to getblocks the next batch of inventory.
5598 LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
5599 pindex->nHeight, pindex->GetBlockHash().ToString());
5600 WITH_LOCK(peer->m_block_inv_mutex, {
5601 peer->m_continuation_block = pindex->GetBlockHash();
5602 });
5603 break;
5604 }
5605 }
5606 return;
5607 }
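    // Illustrative worked example (not part of this file): the pruning window
    // used in the getblocks handler above. Assuming MIN_BLOCKS_TO_KEEP = 288
    // and a 10-minute target spacing (nPowTargetSpacing = 600), an hour of
    // blocks is 3600 / 600 = 6, so
    //     nPrunedBlocksLikelyToHave = 288 - 6 = 282,
    // and a pruned node stops announcing blocks that sit 282 or more blocks
    // below its tip, since it may no longer have them on disk by the time the
    // peer requests them.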
5608
5609     if (msg_type == NetMsgType::GETBLOCKTXN) {
5610         BlockTransactionsRequest req;
5611         vRecv >> req;
5612
5613 std::shared_ptr<const CBlock> recent_block;
5614 {
5615 LOCK(m_most_recent_block_mutex);
5616 if (m_most_recent_block_hash == req.blockhash) {
5617 recent_block = m_most_recent_block;
5618 }
5619 // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
5620 }
5621 if (recent_block) {
5622 SendBlockTransactions(pfrom, *peer, *recent_block, req);
5623 return;
5624 }
5625
5626 FlatFilePos block_pos{};
5627 {
5628 LOCK(cs_main);
5629
5630 const CBlockIndex *pindex =
5631 m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
5632 if (!pindex || !pindex->nStatus.hasData()) {
5633 LogPrint(
5634 BCLog::NET,
5635 "Peer %d sent us a getblocktxn for a block we don't have\n",
5636 pfrom.GetId());
5637 return;
5638 }
5639
5640 if (pindex->nHeight >=
5641 m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
5642 block_pos = pindex->GetBlockPos();
5643 }
5644 }
5645
5646 if (!block_pos.IsNull()) {
5647 CBlock block;
5648 const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)};
5649 // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get
5650 // pruned after we release cs_main above, so this read should never
5651 // fail.
5652 assert(ret);
5653
5654 SendBlockTransactions(pfrom, *peer, block, req);
5655 return;
5656 }
5657
5658 // If an older block is requested (should never happen in practice,
5659 // but can happen in tests) send a block response instead of a
5660 // blocktxn response. Sending a full block response instead of a
5661 // small blocktxn response is preferable in the case where a peer
5662 // might maliciously send lots of getblocktxn requests to trigger
5663 // expensive disk reads, because it will require the peer to
5664 // actually receive all the data read from disk over the network.
5666 "Peer %d sent us a getblocktxn for a block > %i deep\n",
5667 pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
5668 CInv inv;
5669 inv.type = MSG_BLOCK;
5670 inv.hash = req.blockhash;
5671 WITH_LOCK(peer->m_getdata_requests_mutex,
5672 peer->m_getdata_requests.push_back(inv));
5673 // The message processing loop will go around again (without pausing)
5674 // and we'll respond then (without cs_main)
5675 return;
5676 }
5677
5678 if (msg_type == NetMsgType::GETHEADERS) {
5679 CBlockLocator locator;
5680 BlockHash hashStop;
5681 vRecv >> locator >> hashStop;
5682
5683 if (locator.vHave.size() > MAX_LOCATOR_SZ) {
5685 "getheaders locator size %lld > %d, disconnect peer=%d\n",
5686 locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
5687 pfrom.fDisconnect = true;
5688 return;
5689 }
5690
5691 if (m_chainman.m_blockman.LoadingBlocks()) {
5692 LogPrint(
5693 BCLog::NET,
5694 "Ignoring getheaders from peer=%d while importing/reindexing\n",
5695 pfrom.GetId());
5696 return;
5697 }
5698
5699 LOCK(cs_main);
5700
5701 // Note that if we were to be on a chain that forks from the
5702 // checkpointed chain, then serving those headers to a peer that has
5703 // seen the checkpointed chain would cause that peer to disconnect us.
5704 // Requiring that our chainwork exceed the minimum chainwork is a
5705 // protection against being fed a bogus chain when we started up for
5706 // the first time and getting partitioned off the honest network for
5707 // serving that chain to others.
5708 if (m_chainman.ActiveTip() == nullptr ||
5709 (m_chainman.ActiveTip()->nChainWork <
5710 m_chainman.MinimumChainWork() &&
5713 "Ignoring getheaders from peer=%d because active chain "
5714 "has too little work; sending empty response\n",
5715 pfrom.GetId());
5716 // Just respond with an empty headers message, to tell the peer to
5717 // go away but not treat us as unresponsive.
5718 MakeAndPushMessage(pfrom, NetMsgType::HEADERS,
5719 std::vector<CBlock>());
5720 return;
5721 }
5722
5723 CNodeState *nodestate = State(pfrom.GetId());
5724 const CBlockIndex *pindex = nullptr;
5725 if (locator.IsNull()) {
5726 // If locator is null, return the hashStop block
5727 pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
5728 if (!pindex) {
5729 return;
5730 }
5731
5732 if (!BlockRequestAllowed(pindex)) {
5734 "%s: ignoring request from peer=%i for old block "
5735 "header that isn't in the main chain\n",
5736 __func__, pfrom.GetId());
5737 return;
5738 }
5739 } else {
5740 // Find the last block the caller has in the main chain
5741 pindex =
5742 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
5743 if (pindex) {
5744 pindex = m_chainman.ActiveChain().Next(pindex);
5745 }
5746 }
5747
5748 // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
5749 // count at the end
5750 std::vector<CBlock> vHeaders;
5751 int nLimit = MAX_HEADERS_RESULTS;
5752 LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
5753 (pindex ? pindex->nHeight : -1),
5754 hashStop.IsNull() ? "end" : hashStop.ToString(),
5755 pfrom.GetId());
5756 for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
5757 vHeaders.push_back(pindex->GetBlockHeader());
5758 if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
5759 break;
5760 }
5761 }
5762 // pindex can be nullptr either if we sent
5763 // m_chainman.ActiveChain().Tip() OR if our peer has
5764 // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
5765 // headers message). In both cases it's safe to update
5766 // pindexBestHeaderSent to be our tip.
5767 //
5768 // It is important that we simply reset the BestHeaderSent value here,
5769 // and not max(BestHeaderSent, newHeaderSent). We might have announced
5770 // the currently-being-connected tip using a compact block, which
5771 // resulted in the peer sending a headers request, which we respond to
5772 // without the new block. By resetting the BestHeaderSent, we ensure we
5773 // will re-announce the new block via headers (or compact blocks again)
5774 // in the SendMessages logic.
5775 nodestate->pindexBestHeaderSent =
5776 pindex ? pindex : m_chainman.ActiveChain().Tip();
5777 MakeAndPushMessage(pfrom, NetMsgType::HEADERS, vHeaders);
5778 return;
5779 }
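    // Illustrative worked example (not part of this file): the getheaders
    // pagination above. If the peer's locator matches our chain at height
    // 100000 and our tip is at height 150000, the response carries the
    // headers for heights 100001 through 102000 (assuming the usual
    // MAX_HEADERS_RESULTS of 2000), and the peer is expected to follow up
    // with another getheaders whose locator starts from the last header it
    // received.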
5780
5781 if (msg_type == NetMsgType::TX) {
5782 if (RejectIncomingTxs(pfrom)) {
5784 "transaction sent in violation of protocol peer=%d\n",
5785 pfrom.GetId());
5786 pfrom.fDisconnect = true;
5787 return;
5788 }
5789
5790 // Stop processing the transaction early if we are still in IBD since we
5791 // don't have enough information to validate it yet. Sending unsolicited
5792 // transactions is not considered a protocol violation, so don't punish
5793 // the peer.
5794 if (m_chainman.IsInitialBlockDownload()) {
5795 return;
5796 }
5797
5798 CTransactionRef ptx;
5799 vRecv >> ptx;
5800 const CTransaction &tx = *ptx;
5801 const TxId &txid = tx.GetId();
5802 AddKnownTx(*peer, txid);
5803
5804 {
5805 LOCK(cs_main);
5806
5807 m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
5808
5809 if (AlreadyHaveTx(txid, /*include_reconsiderable=*/true)) {
5811 // Always relay transactions received from peers with
5812 // forcerelay permission, even if they were already in the
5813 // mempool, allowing the node to function as a gateway for
5814 // nodes hidden behind it.
5815 if (!m_mempool.exists(tx.GetId())) {
5816 LogPrintf(
5817 "Not relaying non-mempool transaction %s from "
5818 "forcerelay peer=%d\n",
5819 tx.GetId().ToString(), pfrom.GetId());
5820 } else {
5821 LogPrintf("Force relaying tx %s from peer=%d\n",
5822 tx.GetId().ToString(), pfrom.GetId());
5823 RelayTransaction(tx.GetId());
5824 }
5825 }
5826
5827 if (m_recent_rejects_package_reconsiderable.contains(txid)) {
5828 // When a transaction is already in
5829 // m_recent_rejects_package_reconsiderable, we shouldn't
5830 // submit it by itself again. However, look for a matching
5831 // child in the orphanage, as it is possible that they
5832 // succeed as a package.
5833 LogPrint(
5835 "found tx %s in reconsiderable rejects, looking for "
5836 "child in orphanage\n",
5837 txid.ToString());
5838 if (auto package_to_validate{
5839 Find1P1CPackage(ptx, pfrom.GetId())}) {
5840 const auto package_result{ProcessNewPackage(
5841 m_chainman.ActiveChainstate(), m_mempool,
5842 package_to_validate->m_txns,
5843 /*test_accept=*/false)};
5845 "package evaluation for %s: %s (%s)\n",
5846 package_to_validate->ToString(),
5847 package_result.m_state.IsValid()
5848 ? "package accepted"
5849 : "package rejected",
5850 package_result.m_state.ToString());
5851 ProcessPackageResult(package_to_validate.value(),
5852 package_result);
5853 }
5854 }
5855 // If a tx is detected by m_recent_rejects it is ignored.
5856 // Because we haven't submitted the tx to our mempool, we won't
5857 // have computed a DoS score for it or determined exactly why we
5858 // consider it invalid.
5859 //
5860 // This means we won't penalize any peer subsequently relaying a
5861 // DoSy tx (even if we penalized the first peer who gave it to
5862 // us) because we have to account for m_recent_rejects showing
5863 // false positives. In other words, we shouldn't penalize a peer
5864 // if we aren't *sure* they submitted a DoSy tx.
5865 //
5866 // Note that m_recent_rejects doesn't just record DoSy or
5867 // invalid transactions, but any tx not accepted by the mempool,
5868 // which may be due to node policy (vs. consensus). So we can't
5869 // blanket penalize a peer simply for relaying a tx that our
5870 // m_recent_rejects has caught, regardless of false positives.
5871 return;
5872 }
5873
5874 const MempoolAcceptResult result =
5875 m_chainman.ProcessTransaction(ptx);
5876 const TxValidationState &state = result.m_state;
5877
5878         if (result.m_result_type ==
5879             MempoolAcceptResult::ResultType::VALID) {
5880             ProcessValidTx(pfrom.GetId(), ptx);
5881 pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
5882         } else if (state.GetResult() ==
5883                    TxValidationResult::TX_MISSING_INPUTS) {
5884             // It may be the case that the orphan's parents have all been
5885             // rejected.
5886 bool fRejectedParents = false;
5887
5888 // Deduplicate parent txids, so that we don't have to loop over
5889 // the same parent txid more than once down below.
5890 std::vector<TxId> unique_parents;
5891 unique_parents.reserve(tx.vin.size());
5892 for (const CTxIn &txin : tx.vin) {
5893 // We start with all parents, and then remove duplicates
5894 // below.
5895 unique_parents.push_back(txin.prevout.GetTxId());
5896 }
5897 std::sort(unique_parents.begin(), unique_parents.end());
5898 unique_parents.erase(
5899 std::unique(unique_parents.begin(), unique_parents.end()),
5900 unique_parents.end());
5901
5902 // Distinguish between parents in m_recent_rejects and
5903 // m_recent_rejects_package_reconsiderable. We can tolerate
5904 // having up to 1 parent in
5905 // m_recent_rejects_package_reconsiderable since we submit 1p1c
5906 // packages. However, fail immediately if any are in
5907 // m_recent_rejects.
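// For example, a parent that was previously rejected only for having too
// low a feerate can still be accepted together with this child as a
// 1-parent-1-child package (the child may pay for it via CPFP), whereas a
// second such parent could not be rescued this way, so we give up.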
5908 std::optional<TxId> rejected_parent_reconsiderable;
5909 for (const TxId &parent_txid : unique_parents) {
5910 if (m_recent_rejects.contains(parent_txid)) {
5911 fRejectedParents = true;
5912 break;
5913 }
5914
5915 if (m_recent_rejects_package_reconsiderable.contains(
5916 parent_txid) &&
5917 !m_mempool.exists(parent_txid)) {
5918 // More than 1 parent in
5919 // m_recent_rejects_package_reconsiderable:
5920 // 1p1c will not be sufficient to accept this package,
5921 // so just give up here.
5922 if (rejected_parent_reconsiderable.has_value()) {
5923 fRejectedParents = true;
5924 break;
5925 }
5926 rejected_parent_reconsiderable = parent_txid;
5927 }
5928 }
5929 if (!fRejectedParents) {
5930 const auto current_time{
5931 GetTime<std::chrono::microseconds>()};
5932
5933 for (const TxId &parent_txid : unique_parents) {
5934 // FIXME: MSG_TX should use a TxHash, not a TxId.
5935 AddKnownTx(*peer, parent_txid);
5936 // Exclude m_recent_rejects_package_reconsiderable: the
5937 // missing parent may have been previously rejected for
5938 // being too low feerate. This orphan might CPFP it.
5939 if (!AlreadyHaveTx(parent_txid,
5940 /*include_reconsiderable=*/false)) {
5941 AddTxAnnouncement(pfrom, parent_txid, current_time);
5942 }
5943 }
5944
5945 // NO_THREAD_SAFETY_ANALYSIS because we can't annotate for
5946 // g_msgproc_mutex
5947 if (unsigned int nEvicted =
5948 m_mempool.withOrphanage(
5949 [&](TxOrphanage &orphanage)
5950 NO_THREAD_SAFETY_ANALYSIS {
5951 if (orphanage.AddTx(ptx,
5952 pfrom.GetId())) {
5953 AddToCompactExtraTransactions(ptx);
5954 }
5955 return orphanage.LimitTxs(
5956 m_opts.max_orphan_txs, m_rng);
5957 }) > 0) {
5959 "orphanage overflow, removed %u tx\n",
5960 nEvicted);
5961 }
5962
5963 // Once added to the orphan pool, a tx is considered
5964 // AlreadyHave, and we shouldn't request it anymore.
5965 m_txrequest.ForgetInvId(tx.GetId());
5966
5967 } else {
5969 "not keeping orphan with rejected parents %s\n",
5970 tx.GetId().ToString());
5971 // We will continue to reject this tx since it has rejected
5972 // parents so avoid re-requesting it from other peers.
5973 m_recent_rejects.insert(tx.GetId());
5974 m_txrequest.ForgetInvId(tx.GetId());
5975 }
5976 }
5977 if (state.IsInvalid()) {
5978 ProcessInvalidTx(pfrom.GetId(), ptx, state,
5979 /*maybe_add_extra_compact_tx=*/true);
5980 }
5981 // When a transaction fails for TX_PACKAGE_RECONSIDERABLE, look for
5982 // a matching child in the orphanage, as it is possible that they
5983 // succeed as a package.
5984 if (state.GetResult() ==
5985 TxValidationResult::TX_PACKAGE_RECONSIDERABLE) {
5986 LogPrint(
5987 BCLog::TXPACKAGES,
5988 "tx %s failed but reconsiderable, looking for child in "
5989 "orphanage\n",
5990 txid.ToString());
5991 if (auto package_to_validate{
5992 Find1P1CPackage(ptx, pfrom.GetId())}) {
5993 const auto package_result{ProcessNewPackage(
5994 m_chainman.ActiveChainstate(), m_mempool,
5995 package_to_validate->m_txns, /*test_accept=*/false)};
5996 LogPrint(BCLog::TXPACKAGES,
5997 "package evaluation for %s: %s (%s)\n",
5998 package_to_validate->ToString(),
5999 package_result.m_state.IsValid()
6000 ? "package accepted"
6001 : "package rejected",
6002 package_result.m_state.ToString());
6003 ProcessPackageResult(package_to_validate.value(),
6004 package_result);
6005 }
6006 }
6007
6008 if (state.GetResult() ==
6010 // Once added to the conflicting pool, a tx is considered
6011 // AlreadyHave, and we shouldn't request it anymore.
6012 m_txrequest.ForgetInvId(tx.GetId());
6013
6014 unsigned int nEvicted{0};
6015 // NO_THREAD_SAFETY_ANALYSIS because of g_msgproc_mutex required
6016 // in the lambda for m_rng
6017 m_mempool.withConflicting(
6018 [&](TxConflicting &conflicting) NO_THREAD_SAFETY_ANALYSIS {
6019 conflicting.AddTx(ptx, pfrom.GetId());
6020 nEvicted = conflicting.LimitTxs(
6021 m_opts.max_conflicting_txs, m_rng);
6022 });
6023
6024 if (nEvicted > 0) {
6026 "conflicting pool overflow, removed %u tx\n",
6027 nEvicted);
6028 }
6029 }
6030 } // Release cs_main
6031
6032 return;
6033 }
6034
6035 if (msg_type == NetMsgType::CMPCTBLOCK) {
6036 // Ignore cmpctblock received while importing
6037 if (m_chainman.m_blockman.LoadingBlocks()) {
6038 LogPrint(BCLog::NET,
6039 "Unexpected cmpctblock message received from peer %d\n",
6040 pfrom.GetId());
6041 return;
6042 }
6043
6044 CBlockHeaderAndShortTxIDs cmpctblock;
6045 try {
6046 vRecv >> cmpctblock;
6047 } catch (std::ios_base::failure &e) {
6048 // This block has non-contiguous or overflowing indexes
6049 Misbehaving(*peer, "cmpctblock-bad-indexes");
6050 return;
6051 }
6052
6053 bool received_new_header = false;
6054 const auto blockhash = cmpctblock.header.GetHash();
6055
6056 {
6057 LOCK(cs_main);
6058
6059 const CBlockIndex *prev_block =
6060 m_chainman.m_blockman.LookupBlockIndex(
6061 cmpctblock.header.hashPrevBlock);
6062 if (!prev_block) {
6063 // Doesn't connect (or is genesis), instead of DoSing in
6064 // AcceptBlockHeader, request deeper headers
6065 if (!m_chainman.IsInitialBlockDownload()) {
6066 MaybeSendGetHeaders(
6067 pfrom, GetLocator(m_chainman.m_best_header), *peer);
6068 }
6069 return;
6070 }
6071 if (prev_block->nChainWork +
6072 CalculateHeadersWork({cmpctblock.header}) <
6073 GetAntiDoSWorkThreshold()) {
6074 // If we get a low-work header in a compact block, we can ignore
6075 // it.
6076 LogPrint(BCLog::NET,
6077 "Ignoring low-work compact block from peer %d\n",
6078 pfrom.GetId());
6079 return;
6080 }
6081
6082 if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) {
6083 received_new_header = true;
6084 }
6085 }
6086
6087 const CBlockIndex *pindex = nullptr;
6088 BlockValidationState state;
6089 if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
6090 /*min_pow_checked=*/true, state,
6091 &pindex)) {
6092 if (state.IsInvalid()) {
6093 MaybePunishNodeForBlock(pfrom.GetId(), state,
6094 /*via_compact_block*/ true,
6095 "invalid header via cmpctblock");
6096 return;
6097 }
6098 }
6099
6100 if (received_new_header) {
6101 LogInfo("Saw new cmpctblock header hash=%s peer=%d\n",
6102 blockhash.ToString(), pfrom.GetId());
6103 }
6104
6105 // When we succeed in decoding a block's txids from a cmpctblock
6106 // message we typically jump to the BLOCKTXN handling code, with a
6107 // dummy (empty) BLOCKTXN message, to re-use the logic there in
6108 // completing processing of the putative block (without cs_main).
6109 bool fProcessBLOCKTXN = false;
6110 DataStream blockTxnMsg{};
6111
6112 // If we end up treating this as a plain headers message, call that as
6113 // well
6114 // without cs_main.
6115 bool fRevertToHeaderProcessing = false;
6116
6117 // Keep a CBlock for "optimistic" compactblock reconstructions (see
6118 // below)
6119 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6120 bool fBlockReconstructed = false;
6121
6122 {
6123 LOCK(cs_main);
6124 // If AcceptBlockHeader returned true, it set pindex
6125 assert(pindex);
6126 UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
6127
6128 CNodeState *nodestate = State(pfrom.GetId());
6129
6130 // If this was a new header with more work than our tip, update the
6131 // peer's last block announcement time
6132 if (received_new_header &&
6133 pindex->nChainWork >
6134 m_chainman.ActiveChain().Tip()->nChainWork) {
6135 nodestate->m_last_block_announcement = GetTime();
6136 }
6137
6138 if (pindex->nStatus.hasData()) {
6139 // Nothing to do here
6140 return;
6141 }
6142
6143 auto range_flight =
6144 mapBlocksInFlight.equal_range(pindex->GetBlockHash());
6145 size_t already_in_flight =
6146 std::distance(range_flight.first, range_flight.second);
6147 bool requested_block_from_this_peer{false};
6148
6149 // Multimap ensures ordering of outstanding requests. It's either
6150 // empty or first in line.
6151 bool first_in_flight =
6152 already_in_flight == 0 ||
6153 (range_flight.first->second.first == pfrom.GetId());
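// In other words, this peer counts as first in line either when no
// request for this block is outstanding at all, or when the oldest
// outstanding request for it came from this peer.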
6154
6155 while (range_flight.first != range_flight.second) {
6156 if (range_flight.first->second.first == pfrom.GetId()) {
6157 requested_block_from_this_peer = true;
6158 break;
6159 }
6160 range_flight.first++;
6161 }
6162
6163 if (pindex->nChainWork <=
6164 m_chainman.ActiveChain()
6165 .Tip()
6166 ->nChainWork || // We know something better
6167 pindex->nTx != 0) {
6168 // We had this block at some point, but pruned it
6169 if (requested_block_from_this_peer) {
6170 // We requested this block for some reason, but our mempool
6171 // will probably be useless so we just grab the block via
6172 // normal getdata.
6173 std::vector<CInv> vInv(1);
6174 vInv[0] = CInv(MSG_BLOCK, blockhash);
6175 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
6176 }
6177 return;
6178 }
6179
6180 // If we're not close to tip yet, give up and let parallel block
6181 // fetch work its magic.
6182 if (!already_in_flight && !CanDirectFetch()) {
6183 return;
6184 }
6185
6186 // We want to be a bit conservative just to be extra careful about
6187 // DoS possibilities in compact block processing...
6188 if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
6189 if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK &&
6190 nodestate->vBlocksInFlight.size() <
6191 MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
6192 requested_block_from_this_peer) {
6193 std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
6194 if (!BlockRequested(config, pfrom.GetId(), *pindex,
6195 &queuedBlockIt)) {
6196 if (!(*queuedBlockIt)->partialBlock) {
6197 (*queuedBlockIt)
6198 ->partialBlock.reset(
6199 new PartiallyDownloadedBlock(config,
6200 &m_mempool));
6201 } else {
6202 // The block was already in flight using compact
6203 // blocks from the same peer.
6204 LogPrint(BCLog::NET, "Peer sent us compact block "
6205 "we were already syncing!\n");
6206 return;
6207 }
6208 }
6209
6210 PartiallyDownloadedBlock &partialBlock =
6211 *(*queuedBlockIt)->partialBlock;
6212 ReadStatus status =
6213 partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
6214 if (status == READ_STATUS_INVALID) {
6215 // Reset in-flight state in case Misbehaving does not
6216 // result in a disconnect
6217 RemoveBlockRequest(pindex->GetBlockHash(),
6218 pfrom.GetId());
6219 Misbehaving(*peer, "invalid compact block");
6220 return;
6221 } else if (status == READ_STATUS_FAILED) {
6222 if (first_in_flight) {
6223 // Duplicate txindices, the block is now in-flight,
6224 // so just request it.
6225 std::vector<CInv> vInv(1);
6226 vInv[0] = CInv(MSG_BLOCK, blockhash);
6227 MakeAndPushMessage(pfrom, NetMsgType::GETDATA,
6228 vInv);
6229 } else {
6230 // Give up for this peer and wait for other peer(s)
6231 RemoveBlockRequest(pindex->GetBlockHash(),
6232 pfrom.GetId());
6233 }
6234 return;
6235 }
6236
6237 BlockTransactionsRequest req;
6238 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
6239 if (!partialBlock.IsTxAvailable(i)) {
6240 req.indices.push_back(i);
6241 }
6242 }
6243 if (req.indices.empty()) {
6244 // Dirty hack to jump to BLOCKTXN code (TODO: move
6245 // message handling into their own functions)
6246 BlockTransactions txn;
6247 txn.blockhash = blockhash;
6248 blockTxnMsg << txn;
6249 fProcessBLOCKTXN = true;
6250 } else if (first_in_flight) {
6251 // We will try to round-trip any compact blocks we get
6252 // on failure, as long as it's first...
6253 req.blockhash = pindex->GetBlockHash();
6254 MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
6255 } else if (pfrom.m_bip152_highbandwidth_to &&
6256 (!pfrom.IsInboundConn() ||
6257 IsBlockRequestedFromOutbound(blockhash) ||
6258 already_in_flight <
6259 MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK)) {
6260 // ... or it's a hb relay peer and:
6261 // - peer is outbound, or
6262 // - we already have an outbound attempt in flight (so
6263 // we'll take what we can get), or
6264 // - it's not the final parallel download slot (which we
6265 // may reserve for first outbound)
6266 req.blockhash = pindex->GetBlockHash();
6267 MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
6268 } else {
6269 // Give up for this peer and wait for other peer(s)
6270 RemoveBlockRequest(pindex->GetBlockHash(),
6271 pfrom.GetId());
6272 }
6273 } else {
6274 // This block is either already in flight from a different
6275 // peer, or this peer has too many blocks outstanding to
6276 // download from. Optimistically try to reconstruct anyway
6277 // since we might be able to without any round trips.
6278 PartiallyDownloadedBlock tempBlock(config, &m_mempool);
6279 ReadStatus status =
6280 tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
6281 if (status != READ_STATUS_OK) {
6282 // TODO: don't ignore failures
6283 return;
6284 }
6285 std::vector<CTransactionRef> dummy;
6286 status = tempBlock.FillBlock(*pblock, dummy);
6287 if (status == READ_STATUS_OK) {
6288 fBlockReconstructed = true;
6289 }
6290 }
6291 } else {
6292 if (requested_block_from_this_peer) {
6293 // We requested this block, but it's far into the future, so
6294 // our mempool will probably be useless - request the block
6295 // normally.
6296 std::vector<CInv> vInv(1);
6297 vInv[0] = CInv(MSG_BLOCK, blockhash);
6298 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv);
6299 return;
6300 } else {
6301 // If this was an announce-cmpctblock, we want the same
6302 // treatment as a header message.
6303 fRevertToHeaderProcessing = true;
6304 }
6305 }
6306 } // cs_main
6307
6308 if (fProcessBLOCKTXN) {
6309 return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
6310 blockTxnMsg, time_received, interruptMsgProc);
6311 }
6312
6313 if (fRevertToHeaderProcessing) {
6314 // Headers received from HB compact block peers are permitted to be
6315 // relayed before full validation (see BIP 152), so we don't want to
6316 // disconnect the peer if the header turns out to be for an invalid
6317 // block. Note that if a peer tries to build on an invalid chain,
6318 // that will be detected and the peer will be banned.
6319 return ProcessHeadersMessage(config, pfrom, *peer,
6320 {cmpctblock.header},
6321 /*via_compact_block=*/true);
6322 }
6323
6324 if (fBlockReconstructed) {
6325 // If we got here, we were able to optimistically reconstruct a
6326 // block that is in flight from some other peer.
6327 {
6328 LOCK(cs_main);
6329 mapBlockSource.emplace(pblock->GetHash(),
6330 std::make_pair(pfrom.GetId(), false));
6331 }
6332 // Setting force_processing to true means that we bypass some of
6333 // our anti-DoS protections in AcceptBlock, which filters
6334 // unrequested blocks that might be trying to waste our resources
6335 // (eg disk space). Because we only try to reconstruct blocks when
6336 // we're close to caught up (via the CanDirectFetch() requirement
6337 // above, combined with the behavior of not requesting blocks until
6338 // we have a chain with at least the minimum chain work), and we
6339 // ignore compact blocks with less work than our tip, it is safe to
6340 // treat reconstructed compact blocks as having been requested.
6341 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6342 /*min_pow_checked=*/true);
6343 // hold cs_main for CBlockIndex::IsValid()
6344 LOCK(cs_main);
6345 if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
6346 // Clear download state for this block, which is in process from
6347 // some other peer. We do this after calling ProcessNewBlock so
6348 // that a malleated cmpctblock announcement can't be used to
6349 // interfere with block relay.
6350 RemoveBlockRequest(pblock->GetHash(), std::nullopt);
6351 }
6352 }
6353 return;
6354 }
6355
6356 if (msg_type == NetMsgType::BLOCKTXN) {
6357 // Ignore blocktxn received while importing
6358 if (m_chainman.m_blockman.LoadingBlocks()) {
6359 LogPrint(BCLog::NET,
6360 "Unexpected blocktxn message received from peer %d\n",
6361 pfrom.GetId());
6362 return;
6363 }
6364
6365 BlockTransactions resp;
6366 vRecv >> resp;
6367
6368 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6369 bool fBlockRead = false;
6370 {
6371 LOCK(cs_main);
6372
6373 auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
6374 size_t already_in_flight =
6375 std::distance(range_flight.first, range_flight.second);
6376 bool requested_block_from_this_peer{false};
6377
6378 // Multimap ensures ordering of outstanding requests. It's either
6379 // empty or first in line.
6380 bool first_in_flight =
6381 already_in_flight == 0 ||
6382 (range_flight.first->second.first == pfrom.GetId());
6383
6384 while (range_flight.first != range_flight.second) {
6385 auto [node_id, block_it] = range_flight.first->second;
6386 if (node_id == pfrom.GetId() && block_it->partialBlock) {
6387 requested_block_from_this_peer = true;
6388 break;
6389 }
6390 range_flight.first++;
6391 }
6392
6393 if (!requested_block_from_this_peer) {
6394 LogPrint(BCLog::NET,
6395 "Peer %d sent us block transactions for block "
6396 "we weren't expecting\n",
6397 pfrom.GetId());
6398 return;
6399 }
6400
6401 PartiallyDownloadedBlock &partialBlock =
6402 *range_flight.first->second.second->partialBlock;
6403 ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
6404 if (status == READ_STATUS_INVALID) {
6405 // Reset in-flight state in case Misbehaving does not
6406 // result in a disconnect.
6407 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6408 Misbehaving(
6409 *peer,
6410 "invalid compact block/non-matching block transactions");
6411 return;
6412 } else if (status == READ_STATUS_FAILED) {
6413 if (first_in_flight) {
6414 // Might have collided, fall back to getdata now :(
6415 std::vector<CInv> invs;
6416 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
6417 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
6418 } else {
6419 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6420 LogPrint(
6421 BCLog::NET,
6422 "Peer %d sent us a compact block but it failed to "
6423 "reconstruct, waiting on first download to complete\n",
6424 pfrom.GetId());
6425 return;
6426 }
6427 } else {
6428 // Block is either okay, or possibly we received
6429 // READ_STATUS_CHECKBLOCK_FAILED.
6430 // Note that CheckBlock can only fail for one of a few reasons:
6431 // 1. bad-proof-of-work (impossible here, because we've already
6432 // accepted the header)
6433 // 2. merkleroot doesn't match the transactions given (already
6434 // caught in FillBlock with READ_STATUS_FAILED, so
6435 // impossible here)
6436 // 3. the block is otherwise invalid (eg invalid coinbase,
6437 // block is too big, too many sigChecks, etc).
6438 // So if CheckBlock failed, #3 is the only possibility.
6439 // Under BIP 152, we don't DoS-ban unless proof of work is
6440 // invalid (we don't require all the stateless checks to have
6441 // been run). This is handled below, so just treat this as
6442 // though the block was successfully read, and rely on the
6443 // handling in ProcessNewBlock to ensure the block index is
6444 // updated, etc.
6445
6446 // it is now an empty pointer
6447 RemoveBlockRequest(resp.blockhash, pfrom.GetId());
6448 fBlockRead = true;
6449 // mapBlockSource is used for potentially punishing peers and
6450 // updating which peers send us compact blocks, so the race
6451 // between here and cs_main in ProcessNewBlock is fine.
6452 // BIP 152 permits peers to relay compact blocks after
6453 // validating the header only; we should not punish peers
6454 // if the block turns out to be invalid.
6455 mapBlockSource.emplace(resp.blockhash,
6456 std::make_pair(pfrom.GetId(), false));
6457 }
6458 } // Don't hold cs_main when we call into ProcessNewBlock
6459 if (fBlockRead) {
6460 // Since we requested this block (it was in mapBlocksInFlight),
6461 // force it to be processed, even if it would not be a candidate for
6462 // new tip (missing previous block, chain not long enough, etc)
6463 // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
6464 // disk-space attacks), but this should be safe due to the
6465 // protections in the compact block handler -- see related comment
6466 // in compact block optimistic reconstruction handling.
6467 ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
6468 /*min_pow_checked=*/true);
6469 }
6470 return;
6471 }
6472
6473 if (msg_type == NetMsgType::HEADERS) {
6474 // Ignore headers received while importing
6475 if (m_chainman.m_blockman.LoadingBlocks()) {
6476 LogPrint(BCLog::NET,
6477 "Unexpected headers message received from peer %d\n",
6478 pfrom.GetId());
6479 return;
6480 }
6481
6482 std::vector<CBlockHeader> headers;
6483
6484 // Bypass the normal CBlock deserialization, as we don't want to risk
6485 // deserializing 2000 full blocks.
6486 unsigned int nCount = ReadCompactSize(vRecv);
6487 if (nCount > MAX_HEADERS_RESULTS) {
6488 Misbehaving(*peer,
6489 strprintf("too-many-headers: headers message size = %u",
6490 nCount));
6491 return;
6492 }
6493 headers.resize(nCount);
6494 for (unsigned int n = 0; n < nCount; n++) {
6495 vRecv >> headers[n];
6496 // Ignore tx count; assume it is 0.
6497 ReadCompactSize(vRecv);
6498 }
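// Each entry on the wire is a serialized block header followed by a
// compact-size transaction count; compliant peers send that count as 0,
// which is why it is read and discarded above.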
6499
6500 ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
6501 /*via_compact_block=*/false);
6502
6503 // Check if the headers presync progress needs to be reported to
6504 // validation. This needs to be done without holding the
6505 // m_headers_presync_mutex lock.
6506 if (m_headers_presync_should_signal.exchange(false)) {
6507 HeadersPresyncStats stats;
6508 {
6509 LOCK(m_headers_presync_mutex);
6510 auto it =
6511 m_headers_presync_stats.find(m_headers_presync_bestpeer);
6512 if (it != m_headers_presync_stats.end()) {
6513 stats = it->second;
6514 }
6515 }
6516 if (stats.second) {
6517 m_chainman.ReportHeadersPresync(
6518 stats.first, stats.second->first, stats.second->second);
6519 }
6520 }
6521
6522 return;
6523 }
6524
6525 if (msg_type == NetMsgType::BLOCK) {
6526 // Ignore block received while importing
6527 if (m_chainman.m_blockman.LoadingBlocks()) {
6528 LogPrint(BCLog::NET,
6529 "Unexpected block message received from peer %d\n",
6530 pfrom.GetId());
6531 return;
6532 }
6533
6534 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
6535 vRecv >> *pblock;
6536
6537 LogPrint(BCLog::NET, "received block %s peer=%d\n",
6538 pblock->GetHash().ToString(), pfrom.GetId());
6539
6540 const CBlockIndex *prev_block{
6541 WITH_LOCK(m_chainman.GetMutex(),
6542 return m_chainman.m_blockman.LookupBlockIndex(
6543 pblock->hashPrevBlock))};
6544
6545 if (IsBlockMutated(/*block=*/*pblock)) {
6546 LogPrint(BCLog::NET,
6547 "Received mutated block from peer=%d\n", peer->m_id);
6548 Misbehaving(*peer, "mutated block");
6549 WITH_LOCK(cs_main,
6550 RemoveBlockRequest(pblock->GetHash(), peer->m_id));
6551 return;
6552 }
6553
6554 // Process all blocks from whitelisted peers, even if not requested,
6555 // unless we're still syncing with the network. Such an unrequested
6556 // block may still be processed, subject to the conditions in
6557 // AcceptBlock().
6558 bool forceProcessing = pfrom.HasPermission(NetPermissionFlags::NoBan) &&
6559 !m_chainman.IsInitialBlockDownload();
6560 const BlockHash hash = pblock->GetHash();
6561 bool min_pow_checked = false;
6562 {
6563 LOCK(cs_main);
6564 // Always process the block if we requested it, since we may
6565 // need it even when it's not a candidate for a new best tip.
6566 forceProcessing = IsBlockRequested(hash);
6567 RemoveBlockRequest(hash, pfrom.GetId());
6568 // mapBlockSource is only used for punishing peers and setting
6569 // which peers send us compact blocks, so the race between here and
6570 // cs_main in ProcessNewBlock is fine.
6571 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
6572
6573 // Check work on this block against our anti-dos thresholds.
6574 if (prev_block &&
6575 prev_block->nChainWork +
6576 CalculateHeadersWork({pblock->GetBlockHeader()}) >=
6577 GetAntiDoSWorkThreshold()) {
6578 min_pow_checked = true;
6579 }
6580 }
6581 ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
6582 return;
6583 }
6584
6585 if (msg_type == NetMsgType::AVAHELLO) {
6586 if (!m_avalanche) {
6587 return;
6588 }
6589 {
6590 LOCK(pfrom.cs_avalanche_pubkey);
6591 if (pfrom.m_avalanche_pubkey.has_value()) {
6592 LogPrint(
6593 BCLog::AVALANCHE,
6594 "Ignoring avahello from peer %d: already in our node set\n",
6595 pfrom.GetId());
6596 return;
6597 }
6598
6599 avalanche::Delegation delegation;
6600 vRecv >> delegation;
6601
6602 // A delegation with an all zero limited id indicates that the peer
6603 // has no proof, so we're done.
6604 if (delegation.getLimitedProofId() != uint256::ZERO) {
6605 avalanche::DelegationState state;
6606 CPubKey pubkey;
6607 if (!delegation.verify(state, pubkey)) {
6608 Misbehaving(*peer, "invalid-delegation");
6609 return;
6610 }
6611 pfrom.m_avalanche_pubkey = std::move(pubkey);
6612
6613 HashWriter sighasher{};
6614 sighasher << delegation.getId();
6615 sighasher << pfrom.nRemoteHostNonce;
6616 sighasher << pfrom.GetLocalNonce();
6617 sighasher << pfrom.nRemoteExtraEntropy;
6618 sighasher << pfrom.GetLocalExtraEntropy();
6619
6620 SchnorrSig sig;
6621 vRecv >> sig;
6622 if (!(*pfrom.m_avalanche_pubkey)
6623 .VerifySchnorr(sighasher.GetHash(), sig)) {
6624 Misbehaving(*peer, "invalid-avahello-signature");
6625 return;
6626 }
6627
6628 // If we don't know this proof already, add it to the tracker so
6629 // it can be requested.
6630 const avalanche::ProofId proofid(delegation.getProofId());
6631 if (!AlreadyHaveProof(proofid)) {
6632 const bool preferred = isPreferredDownloadPeer(pfrom);
6633 LOCK(cs_proofrequest);
6634 AddProofAnnouncement(pfrom, proofid,
6635 GetTime<std::chrono::microseconds>(),
6636 preferred);
6637 }
6638
6639 uint32_t max_elements{AVALANCHE_MAX_ELEMENT_POLL_LEGACY};
6640 if (pfrom.GetCommonVersion() >=
6642 !vRecv.empty()) {
6643 vRecv >> max_elements;
6644 // max_elements below AVALANCHE_MAX_ELEMENT_POLL_LEGACY is
6645 // invalid
6646 if (max_elements < AVALANCHE_MAX_ELEMENT_POLL_LEGACY) {
6647 Misbehaving(*peer, "avahello-max-elements-too-low");
6648 return;
6649 }
6650 }
6651
6652 // Don't check the return value. If it fails we probably don't
6653 // know about the proof yet.
6654 m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
6655 return pm.addNode(pfrom.GetId(), proofid, max_elements);
6656 });
6657 }
6658
6659 pfrom.m_avalanche_enabled = true;
6660 }
6661
6662 // Send getavaaddr and getavaproofs to our avalanche outbound or
6663 // manual connections
6664 if (!pfrom.IsInboundConn()) {
6665 MakeAndPushMessage(pfrom, NetMsgType::GETAVAADDR);
6666 WITH_LOCK(peer->m_addr_token_bucket_mutex,
6667 peer->m_addr_token_bucket += m_opts.max_addr_to_send);
6668
6669 if (peer->m_proof_relay && !m_chainman.IsInitialBlockDownload()) {
6670 MakeAndPushMessage(pfrom, NetMsgType::GETAVAPROOFS);
6671 peer->m_proof_relay->compactproofs_requested = true;
6672 }
6673 }
6674
6675 return;
6676 }
6677
6678 if (msg_type == NetMsgType::AVAPOLL) {
6679 if (!m_avalanche) {
6680 return;
6681 }
6682 const auto now = Now<SteadyMilliseconds>();
6683
6684 const auto last_poll = pfrom.m_last_poll;
6685 pfrom.m_last_poll = now;
6686
6687 if (now <
6688 last_poll + std::chrono::milliseconds(m_opts.avalanche_cooldown)) {
6689 LogPrint(BCLog::AVALANCHE,
6690 "Ignoring repeated avapoll from peer %d: cooldown not "
6691 "elapsed\n",
6692 pfrom.GetId());
6693 return;
6694 }
6695
6696 const bool quorum_established = m_avalanche->isQuorumEstablished();
6697
6698 uint64_t round;
6699 Unserialize(vRecv, round);
6700
6701 unsigned int nCount = ReadCompactSize(vRecv);
6702 if (nCount > m_avalanche->getMaxElementPoll()) {
6703 Misbehaving(
6704 *peer,
6705 strprintf("too-many-ava-poll: poll message size = %u", nCount));
6706 return;
6707 }
6708
6709 std::vector<avalanche::Vote> votes;
6710 votes.reserve(nCount);
6711
6712 bool fPreconsensus{false};
6713 bool fStakingPreconsensus{false};
6714 {
6715 LOCK(::cs_main);
6716 const CBlockIndex *tip = m_chainman.ActiveTip();
6717 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6718 fStakingPreconsensus =
6719 m_avalanche->isStakingPreconsensusActivated(tip);
6720 }
6721
6722 for (unsigned int n = 0; n < nCount; n++) {
6723 CInv inv;
6724 vRecv >> inv;
6725
6726 // Default vote for unknown inv type
6727 uint32_t vote = -1;
6728
6729 // We don't vote definitively until we have an established quorum
6730 if (!quorum_established) {
6731 votes.emplace_back(vote, inv.hash);
6732 continue;
6733 }
6734
6735 // If inv's type is known, get a vote for its hash
6736 switch (inv.type) {
6737 case MSG_TX: {
6738 if (fPreconsensus) {
6739 vote =
6740 GetAvalancheVoteForTx(*m_avalanche, TxId(inv.hash));
6741 }
6742 } break;
6743 case MSG_BLOCK: {
6744 vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
6745 BlockHash(inv.hash)));
6746 } break;
6747 case MSG_AVA_PROOF: {
6748 vote = GetAvalancheVoteForProof(
6749 *m_avalanche, avalanche::ProofId(inv.hash));
6750 } break;
6751 case MSG_AVA_STAKE_CONTENDER: {
6752 if (fStakingPreconsensus) {
6753 vote = m_avalanche->getStakeContenderStatus(
6754 avalanche::StakeContenderId(inv.hash));
6755 }
6756 } break;
6757 default: {
6758 LogPrint(BCLog::AVALANCHE,
6759 "poll inv type %d unknown from peer=%d\n",
6760 inv.type, pfrom.GetId());
6761 }
6762 }
6763
6764 votes.emplace_back(vote, inv.hash);
6765 }
6766
6767 // Send the response to the node.
6768 m_avalanche->sendResponse(
6769 &pfrom, avalanche::Response(round, m_opts.avalanche_cooldown,
6770 std::move(votes)));
6771 return;
6772 }
6773
6774 if (msg_type == NetMsgType::AVARESPONSE) {
6775 if (!m_avalanche) {
6776 return;
6777 }
6778 // As long as QUIC is not implemented, we need to sign responses and
6779 // verify their signatures in order to avoid any manipulation of
6780 // messages at the transport level.
6781 HashVerifier verifier(vRecv);
6782 avalanche::Response response;
6783 verifier >> response;
6784
6785 SchnorrSig sig;
6786 vRecv >> sig;
6787
6788 {
6790 if (!pfrom.m_avalanche_pubkey.has_value() ||
6791 !(*pfrom.m_avalanche_pubkey)
6792 .VerifySchnorr(verifier.GetHash(), sig)) {
6793 Misbehaving(*peer, "invalid-ava-response-signature");
6794 return;
6795 }
6796 }
6797
6798 auto now = GetTime<std::chrono::seconds>();
6799
6800 std::vector<avalanche::VoteItemUpdate> updates;
6801 bool disconnect{false};
6802 std::string error;
6803 if (!m_avalanche->registerVotes(pfrom.GetId(), response, updates,
6804 disconnect, error)) {
6805 if (disconnect) {
6806 Misbehaving(*peer, error);
6807 return;
6808 }
6809
6810 // Otherwise the node may have had a network issue. Increase the
6811 // fault counter instead and only ban if we reached a threshold.
6812 // This allows for fault tolerance should there be a temporary
6813 // outage while still preventing DoS'ing behaviors, as the counter
6814 // is reset if no fault occurred over some time period.
6815 pfrom.m_avalanche_last_message_fault = now;
6816 pfrom.m_avalanche_message_fault_counter++;
6817
6818 // Allow up to 12 messages before increasing the ban score. Since
6819 // the queries are cleared after 10s, this is at least 2 minutes
6820 // of network outage tolerance over the 1h window.
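// Spelling out the arithmetic: 12 tolerated failures, with queries that
// live for roughly 10 seconds each, amounts to about 120 seconds, i.e. the
// ~2 minutes of continuous outage mentioned above, before the score starts
// increasing towards the misbehaving threshold.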
6821 if (pfrom.m_avalanche_message_fault_counter > 12) {
6822 LogPrint(
6823 BCLog::AVALANCHE,
6824 "Repeated failure to register votes from peer %d: %s\n",
6825 pfrom.GetId(), error);
6827 if (pfrom.m_avalanche_message_fault_score > 100) {
6828 Misbehaving(*peer, error);
6829 }
6830 return;
6831 }
6832 }
6833
6834 // If no fault occurred within the last hour, reset the fault counter
6835 if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
6836 pfrom.m_avalanche_message_fault_counter = 0;
6837 }
6838
6839 pfrom.invsVoted(response.GetVotes().size());
6840
6841 auto logVoteUpdate = [](const auto &voteUpdate,
6842 const std::string &voteItemTypeStr,
6843 const auto &voteItemId) {
6844 std::string voteOutcome;
6845 bool alwaysPrint = false;
6846 switch (voteUpdate.getStatus()) {
6847 case avalanche::VoteStatus::Invalid:
6848 voteOutcome = "invalidated";
6849 alwaysPrint = true;
6850 break;
6851 case avalanche::VoteStatus::Rejected:
6852 voteOutcome = "rejected";
6853 break;
6854 case avalanche::VoteStatus::Accepted:
6855 voteOutcome = "accepted";
6856 break;
6857 case avalanche::VoteStatus::Finalized:
6858 voteOutcome = "finalized";
6859 // Don't log tx finalization unconditionally as it can be
6860 // quite spammy.
6861 alwaysPrint = voteItemTypeStr != "tx";
6862 break;
6863 case avalanche::VoteStatus::Stalled:
6864 voteOutcome = "stalled";
6865 alwaysPrint = true;
6866 break;
6867
6868 // No default case, so the compiler can warn about missing
6869 // cases
6870 }
6871
6872 // Always log the stake contenders to the avalanche category
6873 alwaysPrint &= (voteItemTypeStr != "contender");
6874
6875 if (alwaysPrint) {
6876 LogPrintf("Avalanche %s %s %s\n", voteOutcome, voteItemTypeStr,
6877 voteItemId.ToString());
6878 } else {
6879 // Only print these messages if -debug=avalanche is set
6880 LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
6881 voteItemTypeStr, voteItemId.ToString());
6882 }
6883 };
6884
6885 bool shouldActivateBestChain = false;
6886
6887 bool fPreconsensus{false};
6888 bool fStakingPreconsensus{false};
6889 {
6890 LOCK(::cs_main);
6891 const CBlockIndex *tip = m_chainman.ActiveTip();
6892 fPreconsensus = m_avalanche->isPreconsensusActivated(tip);
6893 fStakingPreconsensus =
6894 m_avalanche->isStakingPreconsensusActivated(tip);
6895 }
6896
6897 for (const auto &u : updates) {
6898 const avalanche::AnyVoteItem &item = u.getVoteItem();
6899
6900 // Don't use a visitor here as we want to ignore unsupported item
6901 // types. This comes in handy when adding new types.
6902 if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
6903 avalanche::ProofRef proof = *pitem;
6904 const avalanche::ProofId &proofid = proof->getId();
6905
6906 logVoteUpdate(u, "proof", proofid);
6907
6908 auto rejectionMode =
6909 avalanche::PeerManager::RejectionMode::DEFAULT;
6910 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
6911 switch (u.getStatus()) {
6912 case avalanche::VoteStatus::Invalid:
6913 m_avalanche->withPeerManager(
6914 [&](avalanche::PeerManager &pm) {
6915 pm.setInvalid(proofid);
6916 });
6917 // Fallthrough
6919 // Invalidate mode removes the proof from all proof
6920 // pools
6921 rejectionMode =
6922 avalanche::PeerManager::RejectionMode::INVALIDATE;
6923 // Fallthrough
6925 if (!m_avalanche->withPeerManager(
6926 [&](avalanche::PeerManager &pm) {
6927 return pm.rejectProof(proofid,
6928 rejectionMode);
6929 })) {
6930 LogPrint(BCLog::AVALANCHE,
6931 "ERROR: Failed to reject proof: %s\n",
6932 proofid.GetHex());
6933 }
6934 break;
6935 case avalanche::VoteStatus::Finalized:
6936 m_avalanche->setRecentlyFinalized(proofid);
6937 nextCooldownTimePoint += std::chrono::seconds(
6938 m_opts.avalanche_peer_replacement_cooldown);
6939 case avalanche::VoteStatus::Accepted:
6940 if (!m_avalanche->withPeerManager(
6941 [&](avalanche::PeerManager &pm) {
6942 pm.registerProof(
6943 proof,
6944 avalanche::PeerManager::
6945 RegistrationMode::FORCE_ACCEPT);
6946 return pm.forPeer(
6947 proofid,
6948 [&](const avalanche::Peer &peer) {
6949 pm.updateNextPossibleConflictTime(
6950 peer.peerid,
6951 nextCooldownTimePoint);
6952 if (u.getStatus() ==
6953 avalanche::VoteStatus::
6954 Finalized) {
6955 pm.setFinalized(peer.peerid);
6956 }
6957 // Only fail if the peer was not
6958 // created
6959 return true;
6960 });
6961 })) {
6962 LogPrint(BCLog::AVALANCHE,
6963 "ERROR: Failed to accept proof: %s\n",
6964 proofid.GetHex());
6965 }
6966 break;
6967 }
6968 }
6969
6970 auto getBlockFromIndex = [this](const CBlockIndex *pindex) {
6971 // First check if the block is cached before reading
6972 // from disk.
6973 std::shared_ptr<const CBlock> pblock = WITH_LOCK(
6974 m_most_recent_block_mutex, return m_most_recent_block);
6975
6976 if (!pblock || pblock->GetHash() != pindex->GetBlockHash()) {
6977 std::shared_ptr<CBlock> pblockRead =
6978 std::make_shared<CBlock>();
6979 if (!m_chainman.m_blockman.ReadBlock(*pblockRead,
6980 *pindex)) {
6981 assert(!"cannot load block from disk");
6982 }
6983 pblock = pblockRead;
6984 }
6985 return pblock;
6986 };
6987
6988 if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
6989 CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
6990
6991 shouldActivateBestChain = true;
6992
6993 logVoteUpdate(u, "block", pindex->GetBlockHash());
6994
6995 switch (u.getStatus()) {
6996 case avalanche::VoteStatus::Rejected: {
6997 BlockValidationState state;
6998 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
6999 if (!state.IsValid()) {
7000 LogPrintf("ERROR: Database error: %s\n",
7001 state.GetRejectReason());
7002 return;
7003 }
7004 } break;
7005 case avalanche::VoteStatus::Invalid: {
7006 BlockValidationState state;
7007 m_chainman.ActiveChainstate().ParkBlock(state, pindex);
7008 if (!state.IsValid()) {
7009 LogPrintf("ERROR: Database error: %s\n",
7010 state.GetRejectReason());
7011 return;
7012 }
7013
7014 auto pblock = getBlockFromIndex(pindex);
7015 assert(pblock);
7016
7017 WITH_LOCK(cs_main, GetMainSignals().BlockInvalidated(
7018 pindex, pblock));
7019 } break;
7020 case avalanche::VoteStatus::Accepted: {
7021 LOCK(cs_main);
7022 m_chainman.ActiveChainstate().UnparkBlock(pindex);
7023 } break;
7024 case avalanche::VoteStatus::Finalized: {
7025 m_avalanche->setRecentlyFinalized(
7026 pindex->GetBlockHash());
7027
7028 m_avalanche->cleanupStakingRewards(pindex->nHeight);
7029
7030 std::unique_ptr<node::CBlockTemplate> blockTemplate;
7031 {
7032 LOCK(cs_main);
7033 auto &chainstate = m_chainman.ActiveChainstate();
7034 chainstate.UnparkBlock(pindex);
7035
7036 const bool newlyFinalized =
7037 !chainstate.IsBlockAvalancheFinalized(pindex) &&
7038 chainstate.AvalancheFinalizeBlock(pindex,
7039 *m_avalanche);
7040
7041 // Skip if the block is already finalized, aka an
7042 // ancestor of the finalized tip.
7043 if (fPreconsensus && newlyFinalized) {
7044 auto pblock = getBlockFromIndex(pindex);
7045 assert(pblock);
7046
7047 {
7048 // If the finalized block is not the tip, we
7049 // need to keep track of the transactions
7050 // from the non final blocks, so that we can
7051 // check if they were finalized by
7052 // pre-consensus. If these transactions were
7053 // pruned from the radix tree, their
7054 // finalization status could be lost in the
7055 // case the non final blocks are later
7056 // rejected.
7057 CBlockIndex *tip = m_chainman.ActiveTip();
7058 std::unordered_set<TxId, SaltedTxIdHasher>
7059 confirmedTxIdsInNonFinalizedBlocks;
7060 for (const CBlockIndex *block = tip;
7061 block != nullptr && block != pindex;
7062 block = block->pprev) {
7063 auto currentBlock =
7064 getBlockFromIndex(block);
7065 assert(currentBlock);
7066 for (const auto &tx :
7067 currentBlock->vtx) {
7068 confirmedTxIdsInNonFinalizedBlocks
7069 .insert(tx->GetId());
7070 }
7071 }
7072
7073 // Remove the transactions that are not
7074 // confirmed
7075 LOCK(m_mempool.cs);
7076 m_mempool.removeForFinalizedBlock(
7077 confirmedTxIdsInNonFinalizedBlocks);
7078
7079 // Now add mempool transactions to the poll.
7080 // To determine which transaction to add, we
7081 // leverage the legacy block template
7082 // construction method and build a template
7083 // with the most valuable txs in it. These
7084 // transactions are sorted topologically;
7085 // parents come before children, so we can
7086 // poll for children first and optimize the
7087 // number of polls.
7088 node::BlockAssembler blockAssembler(
7089 config, chainstate, &m_mempool,
7090 m_avalanche);
7091 blockAssembler.pblocktemplate.reset(
7092 new node::CBlockTemplate());
7093
7094 if (blockAssembler.pblocktemplate) {
7095 blockAssembler.addTxs(m_mempool);
7096 blockTemplate = std::move(
7097 blockAssembler.pblocktemplate);
7098 }
7099 }
7100 }
7101 } // release cs_main
7102
7103 if (blockTemplate) {
7104 // We could check if the tx is final already
7105 // but addToReconcile will skip the recently
7106 // finalized txs, so let's abuse this
7107 // feature and avoid a tree lookup for each
7108 // tx as an optimization.
7109 for (const auto &templateEntry :
7110 reverse_iterate(blockTemplate->entries)) {
7111 m_avalanche->addToReconcile(templateEntry.tx);
7112 }
7113 }
7114 } break;
7115 case avalanche::VoteStatus::Stalled:
7116 // Fall back on Nakamoto consensus in the absence of
7117 // Avalanche votes for other competing or descendant
7118 // blocks.
7119 break;
7120 }
7121 }
7122
7123 if (fStakingPreconsensus) {
7124 if (auto pitem =
7125 std::get_if<const avalanche::StakeContenderId>(&item)) {
7126 const avalanche::StakeContenderId contenderId = *pitem;
7127 logVoteUpdate(u, "contender", contenderId);
7128
7129 switch (u.getStatus()) {
7130 case avalanche::VoteStatus::Invalid:
7131 case avalanche::VoteStatus::Rejected: {
7132 m_avalanche->rejectStakeContender(contenderId);
7133 break;
7134 }
7135 case avalanche::VoteStatus::Finalized: {
7136 m_avalanche->setRecentlyFinalized(contenderId);
7137 m_avalanche->finalizeStakeContender(contenderId);
7138 break;
7139 }
7140 case avalanche::VoteStatus::Accepted: {
7141 m_avalanche->acceptStakeContender(contenderId);
7142 break;
7143 }
7144 case avalanche::VoteStatus::Stalled: {
7145 break;
7146 }
7147 }
7148 }
7149
7150 if (!fPreconsensus) {
7151 continue;
7152 }
7153
7154 if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
7155 const CTransactionRef tx = *pitem;
7156 assert(tx != nullptr);
7157
7158 const TxId &txid = tx->GetId();
7159 const auto status{u.getStatus()};
7160
7161 if (status != avalanche::VoteStatus::Finalized) {
7162 // Because we also want to log the parent txs of this
7163 // finalized tx, we log the finalization later.
7164 logVoteUpdate(u, "tx", txid);
7165 }
7166
7167 switch (status) {
7168 case avalanche::VoteStatus::Invalid: // Fallthrough
7169 case avalanche::VoteStatus::Rejected: {
7170 // Remove from the mempool and the finalized tree, as
7171 // well as all the children txs. Note that removal from
7172 // the finalized tree is only a safety net and should
7173 // never happen.
7174 LOCK2(cs_main, m_mempool.cs);
7175 if (m_mempool.exists(txid)) {
7176 m_mempool.removeRecursive(
7178
7179 std::vector<CTransactionRef> conflictingTxs =
7180 m_mempool.withConflicting(
7181 [&tx](const TxConflicting &conflicting) {
7182 return conflicting.GetConflictTxs(tx);
7183 });
7184
7185 if (conflictingTxs.size() > 0) {
7186 // Pull the first tx only, erase the others so
7187 // they can be re-downloaded if needed.
7188 auto result = m_chainman.ProcessTransaction(
7189 conflictingTxs[0]);
7190 if (!result.m_state.IsValid()) {
7191 LogPrint(
7192 BCLog::AVALANCHE,
7193 "Attempting to pull a now invalid "
7194 "conflicting tx %s to mempool\n",
7195 conflictingTxs[0]->GetId().ToString());
7196 }
7197 }
7198
7199 m_mempool.withConflicting(
7200 [&conflictingTxs,
7201 &tx](TxConflicting &conflicting) {
7202 for (const auto &conflictingTx :
7203 conflictingTxs) {
7204 conflicting.EraseTx(
7205 conflictingTx->GetId());
7206 }
7207
7208 // Note that we don't store the descendants,
7209 // which should be re-downloaded. This could
7210 // be optimized but we will have to manage
7211 // the topological ordering.
7212 conflicting.AddTx(tx, NO_NODE);
7213 });
7214 }
7215
7216 if (status == avalanche::VoteStatus::Invalid) {
7217 // Also remove from the conflicting pool. If it was
7218 // in the mempool (unlikely) we just moved it there.
7219 m_mempool.withConflicting(
7220 [&txid](TxConflicting &conflicting) {
7221 conflicting.EraseTx(txid);
7222 });
7223
7224 m_recent_rejects.insert(txid);
7225
7226 AddToCompactExtraTransactions(tx);
7227
7228 CCoinsViewMemPool coinViewMempool(
7229 &m_chainman.ActiveChainstate().CoinsTip(),
7230 m_mempool);
7231 CCoinsViewCache coinViewCache(&coinViewMempool);
7232 auto spentCoins =
7233 std::make_shared<const std::vector<Coin>>(
7234 GetSpentCoins(tx, coinViewCache));
7235
7237 spentCoins);
7238 }
7239
7240 break;
7241 }
7242 case avalanche::VoteStatus::Accepted:
7243 // fallthrough
7244 case avalanche::VoteStatus::Finalized:
7245 {
7246 LOCK2(cs_main, m_mempool.cs);
7247 if (m_mempool.withConflicting(
7248 [&txid](const TxConflicting &conflicting) {
7249 return conflicting.HaveTx(txid);
7250 })) {
7251 // Swap conflicting txs from/to the mempool
7252 std::vector<CTransactionRef>
7253 mempool_conflicting_txs;
7254 for (const auto &txin : tx->vin) {
7255 // Find the conflicting txs
7256 if (CTransactionRef conflict =
7257 m_mempool.GetConflictTx(
7258 txin.prevout)) {
7259 mempool_conflicting_txs.push_back(
7260 std::move(conflict));
7261 }
7262 }
7263 m_mempool.removeConflicts(*tx);
7264
7265 auto result = m_chainman.ProcessTransaction(tx);
7266 assert(result.m_state.IsValid());
7267
7268 m_mempool.withConflicting(
7269 [&txid, &mempool_conflicting_txs](
7270 TxConflicting &conflicting) {
7271 conflicting.EraseTx(txid);
7272 // Store the first tx only, the others
7273 // can be re-downloaded if needed.
7274 if (mempool_conflicting_txs.size() >
7275 0) {
7276 conflicting.AddTx(
7277 mempool_conflicting_txs[0],
7278 NO_NODE);
7279 }
7280 });
7281 }
7282 }
7283
7284 if (status == avalanche::VoteStatus::Finalized) {
7285 LOCK2(cs_main, m_mempool.cs);
7286 auto it = m_mempool.GetIter(txid);
7287 if (!it.has_value()) {
7288 LogPrint(
7289 BCLog::AVALANCHE,
7290 "Error: finalized tx (%s) is not in the "
7291 "mempool\n",
7292 txid.ToString());
7293 break;
7294 }
7295
7296 std::vector<TxId> finalizedTxIds;
7297 m_mempool.setAvalancheFinalized(
7298 **it, m_chainparams.GetConsensus(),
7299 *Assert(m_chainman.ActiveTip()),
7300 finalizedTxIds);
7301
7302 for (const auto &finalized_txid : finalizedTxIds) {
7303 m_avalanche->setRecentlyFinalized(
7304 finalized_txid);
7305 // Log the parent tx being implicitly finalized
7306 // as well
7307 logVoteUpdate(u, "tx", finalized_txid);
7308 }
7309
7310 // NO_THREAD_SAFETY_ANALYSIS because
7311 // m_recent_rejects requires cs_main in the lambda
7312 m_mempool.withConflicting(
7313 [&](TxConflicting &conflicting)
7314 NO_THREAD_SAFETY_ANALYSIS {
7315 std::vector<CTransactionRef>
7316 conflictingTxs =
7317 conflicting.GetConflictTxs(tx);
7318 for (const auto &conflictingTx :
7319 conflictingTxs) {
7320 m_recent_rejects.insert(
7321 conflictingTx->GetId());
7322 conflicting.EraseTx(
7323 conflictingTx->GetId());
7324 }
7325 });
7326 }
7327
7328 break;
7329 }
7330 case avalanche::VoteStatus::Stalled: {
7331 LOCK(cs_main);
7332
7333 // If the tx is stale, there is no point keeping it
7334 // around as it will not be mined. Let's remove it but
7335 // also forget we got it so it can eventually be
7336 // re-downloaded.
7337 {
7338 LOCK(m_mempool.cs);
7339 m_mempool.removeRecursive(
7341
7342 m_mempool.withConflicting(
7343 [&txid](TxConflicting &conflicting) {
7344 conflicting.EraseTx(txid);
7345 });
7346 }
7347
7348 // Make sure we can request this tx again
7349 m_txrequest.ForgetInvId(txid);
7350
7351 {
7352 // Save the stalled txids so that we can relay them
7353 // to our peers.
7354 LOCK(m_peer_mutex);
7355 for (auto &it : m_peer_map) {
7356 auto tx_relay = (*it.second).GetTxRelay();
7357 if (!tx_relay) {
7358 continue;
7359 }
7360
7361 LOCK(tx_relay->m_tx_inventory_mutex);
7362
7363 // We limit the size of the stalled txs set to
7364 // avoid unbounded memory growth. In practice,
7365 // this should not be an issue as stalled txs
7366 // should be few and far between. If we are at
7367 // the limit, remove the oldest entries.
7368 auto &stalled_by_time =
7369 tx_relay->m_avalanche_stalled_txids
7370 .get<by_time>();
7371 if (stalled_by_time.size() >=
7372 MAX_AVALANCHE_STALLED_TXIDS_PER_PEER) {
7373 stalled_by_time.erase(
7374 stalled_by_time.begin()->timeAdded);
7375 }
7376
7377 tx_relay->m_avalanche_stalled_txids.insert(
7378 {txid, now});
7379 }
7380 }
7381
7382 AddToCompactExtraTransactions(tx);
7383
7384 break;
7385 }
7386 }
7387 }
7388 }
7389
7390 if (shouldActivateBestChain) {
7392 if (!m_chainman.ActiveChainstate().ActivateBestChain(
7393 state, /*pblock=*/nullptr, m_avalanche)) {
7394 LogPrintf("failed to activate chain (%s)\n", state.ToString());
7395 }
7396 }
7397
7398 return;
7399 }
7400
7401 if (msg_type == NetMsgType::AVAPROOF) {
7402 if (!m_avalanche) {
7403 return;
7404 }
7405 auto proof = RCUPtr<avalanche::Proof>::make();
7406 vRecv >> *proof;
7407
7408 ReceivedAvalancheProof(pfrom, *peer, proof);
7409
7410 return;
7411 }
7412
7413 if (msg_type == NetMsgType::GETAVAPROOFS) {
7414 if (!m_avalanche) {
7415 return;
7416 }
7417 if (peer->m_proof_relay == nullptr) {
7418 return;
7419 }
7420
7421 peer->m_proof_relay->lastSharedProofsUpdate =
7422 GetTime<std::chrono::seconds>();
7423
7424 peer->m_proof_relay->sharedProofs =
7425 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7426 return pm.getShareableProofsSnapshot();
7427 });
7428
7429 avalanche::CompactProofs compactProofs(
7430 peer->m_proof_relay->sharedProofs);
7431 MakeAndPushMessage(pfrom, NetMsgType::AVAPROOFS, compactProofs);
7432
7433 return;
7434 }
7435
7436 if (msg_type == NetMsgType::AVAPROOFS) {
7437 if (!m_avalanche) {
7438 return;
7439 }
7440 if (peer->m_proof_relay == nullptr) {
7441 return;
7442 }
7443
7444 // Only process the compact proofs if we requested them
7445 if (!peer->m_proof_relay->compactproofs_requested) {
7446 LogPrint(BCLog::AVALANCHE, "Ignoring unsolicited avaproofs\n");
7447 return;
7448 }
7449 peer->m_proof_relay->compactproofs_requested = false;
7450
7451 avalanche::CompactProofs compactProofs;
7452 try {
7453 vRecv >> compactProofs;
7454 } catch (std::ios_base::failure &e) {
7455 // These compact proofs have non-contiguous or overflowing indexes
7456 Misbehaving(*peer, "avaproofs-bad-indexes");
7457 return;
7458 }
7459
7460 // If there are prefilled proofs, process them first
7461 for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
7462 if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
7463 // If we got an invalid proof, the peer is getting banned and we
7464 // can bail out.
7465 return;
7466 }
7467 }
7468
7469 // If there is no shortid, avoid parsing/responding/accounting for the
7470 // message.
7471 if (compactProofs.getShortIDs().size() == 0) {
7472 return;
7473 }
7474
7475 // To determine the chance that the number of entries in a bucket
7476 // exceeds N, we use the fact that the number of elements in a single
7477 // bucket is binomially distributed (with n = the number of shorttxids
7478 // S, and p = 1 / the number of buckets), that in the worst case the
7479 // number of buckets is equal to S (due to std::unordered_map having a
7480 // default load factor of 1.0), and that the chance for any bucket to
7481 // exceed N elements is at most buckets * (the chance that any given
7482 // bucket is above N elements). Thus:
7483 // P(max_elements_per_bucket > N) <=
7484 // S * (1 - cdf(binomial(n=S,p=1/S), N))
7485 // If we assume up to 21000000, allowing 15 elements per bucket should
7486 // only fail once per ~2.5 million avaproofs transfers (per peer and
7487 // connection).
7488 // TODO re-evaluate the bucket count to a more realistic value.
7489 // TODO: In the case of a shortid-collision, we should request all the
7490 // proofs which collided. For now, we only request one, which is not
7491 // that bad considering this event is expected to be very rare.
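// As a rough sanity check of the figure above (a back-of-the-envelope
// sketch, not a precise bound): for large S the per-bucket load is
// approximately Poisson(1), so
//     P(single bucket > 15) ~= 1 - cdf(Poisson(1), 15) ~= 1.9e-14
//     P(any bucket > 15)    <= 21000000 * 1.9e-14      ~= 4e-7
// which is roughly one failure per 2.5 million transfers.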
7492 auto shortIdProcessor =
7494 compactProofs.getShortIDs(), 15);
7495
7496 if (shortIdProcessor.hasOutOfBoundIndex()) {
7497 // This should be caught by deserialization, but catch it here as
7498 // well for good measure.
7499 Misbehaving(*peer, "avaproofs-bad-indexes");
7500 return;
7501 }
7502 if (!shortIdProcessor.isEvenlyDistributed()) {
7503 // This is suspicious, don't ban but bail out
7504 return;
7505 }
7506
7507 std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
7508 m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
7509 pm.forEachPeer([&](const avalanche::Peer &peer) {
7510 assert(peer.proof);
7511 uint64_t shortid = compactProofs.getShortID(peer.getProofId());
7512
7513 int added =
7514 shortIdProcessor.matchKnownItem(shortid, peer.proof);
7515
7516 // No collision
7517 if (added >= 0) {
7518 // Because we know the proof, we can determine if our peer
7519 // has it (added = 1) or not (added = 0) and update the
7520 // remote proof status accordingly.
7521 remoteProofsStatus.emplace_back(peer.getProofId(),
7522 added > 0);
7523 }
7524
7525 // In order to properly determine which proof is missing, we
7526 // need to keep scanning for all our proofs.
7527 return true;
7528 });
7529 });
7530
7531 avalanche::ProofsRequest req;
7532 for (size_t i = 0; i < compactProofs.size(); i++) {
7533 if (shortIdProcessor.getItem(i) == nullptr) {
7534 req.indices.push_back(i);
7535 }
7536 }
7537
7538 MakeAndPushMessage(pfrom, NetMsgType::AVAPROOFSREQ, req);
7539
7540 const NodeId nodeid = pfrom.GetId();
7541
7542 // We want to keep a count of how many nodes we successfully requested
7543 // avaproofs from as this is used to determine when we are confident our
7544 // quorum is close enough to the other participants.
7545 m_avalanche->avaproofsSent(nodeid);
7546
7547 // Only save remote proofs from stakers
7548 if (WITH_LOCK(pfrom.cs_avalanche_pubkey,
7549 return pfrom.m_avalanche_pubkey.has_value())) {
7550 m_avalanche->withPeerManager(
7551 [&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
7552 for (const auto &[proofid, present] : remoteProofsStatus) {
7553 pm.saveRemoteProof(proofid, nodeid, present);
7554 }
7555 });
7556 }
7557
7558 return;
7559 }
7560
7561 if (msg_type == NetMsgType::AVAPROOFSREQ) {
7562 if (peer->m_proof_relay == nullptr) {
7563 return;
7564 }
7565
7566 avalanche::ProofsRequest proofreq;
7567 vRecv >> proofreq;
7568
7569 auto requestedIndiceIt = proofreq.indices.begin();
7570 uint32_t treeIndice = 0;
7571 peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
7572 if (requestedIndiceIt == proofreq.indices.end()) {
7573 // No more indices to process
7574 return false;
7575 }
7576
7577 if (treeIndice++ == *requestedIndiceIt) {
7578 MakeAndPushMessage(pfrom, NetMsgType::AVAPROOF, *proof);
7579 requestedIndiceIt++;
7580 }
7581
7582 return true;
7583 });
7584
7585 peer->m_proof_relay->sharedProofs = {};
7586 return;
7587 }
7588
7589 if (msg_type == NetMsgType::GETADDR) {
7590 // This asymmetric behavior for inbound and outbound connections was
7591 // introduced to prevent a fingerprinting attack: an attacker can send
7592 // specific fake addresses to users' AddrMan and later request them by
7593 // sending getaddr messages. Making nodes which are behind NAT and can
7594 // only make outgoing connections ignore the getaddr message mitigates
7595 // the attack.
7596 if (!pfrom.IsInboundConn()) {
7597 LogPrint(BCLog::NET,
7598 "Ignoring \"getaddr\" from %s connection. peer=%d\n",
7599 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7600 return;
7601 }
7602
7603 // Since this must be an inbound connection, SetupAddressRelay will
7604 // never fail.
7605 Assume(SetupAddressRelay(pfrom, *peer));
7606
7607 // Only send one GetAddr response per connection to reduce resource
7608 // waste and discourage addr stamping of INV announcements.
7609 if (peer->m_getaddr_recvd) {
7610 LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
7611 pfrom.GetId());
7612 return;
7613 }
7614 peer->m_getaddr_recvd = true;
7615
7616 peer->m_addrs_to_send.clear();
7617 std::vector<CAddress> vAddr;
7618 const size_t maxAddrToSend = m_opts.max_addr_to_send;
7619 if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
7620 vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
7621 /* network */ std::nullopt);
7622 } else {
7623 vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
7624 MAX_PCT_ADDR_TO_SEND);
7625 }
7626 for (const CAddress &addr : vAddr) {
7627 PushAddress(*peer, addr);
7628 }
7629 return;
7630 }
7631
7632 if (msg_type == NetMsgType::GETAVAADDR) {
7633 auto now = GetTime<std::chrono::seconds>();
7634 if (now < pfrom.m_nextGetAvaAddr) {
7635 // Prevent a peer from exhausting our resources by spamming
7636 // getavaaddr messages.
7637 return;
7638 }
7639
7640 // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
7641 pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
7642
7643 if (!SetupAddressRelay(pfrom, *peer)) {
7645 "Ignoring getavaaddr message from %s peer=%d\n",
7646 pfrom.ConnectionTypeAsString(), pfrom.GetId());
7647 return;
7648 }
7649
7650 auto availabilityScoreComparator = [](const CNode *lhs,
7651 const CNode *rhs) {
7652 double scoreLhs = lhs->getAvailabilityScore();
7653 double scoreRhs = rhs->getAvailabilityScore();
7654
7655 if (scoreLhs != scoreRhs) {
7656 return scoreLhs > scoreRhs;
7657 }
7658
7659 return lhs < rhs;
7660 };
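// The final pointer comparison is only an arbitrary tie breaker: it keeps
// the comparator a strict weak ordering so that the std::set below can
// hold distinct nodes that happen to share the same availability score.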
7661
7662 // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
7663 // most active in the avalanche network. Account for 0 availability as
7664 // well so we can send addresses even if we did not start polling yet.
7665 std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
7666 availabilityScoreComparator);
7667 m_connman.ForEachNode([&](const CNode *pnode) {
7668 if (!pnode->m_avalanche_enabled ||
7669 pnode->getAvailabilityScore() < 0.) {
7670 return;
7671 }
7672
7673 avaNodes.insert(pnode);
7674 if (avaNodes.size() > m_opts.max_addr_to_send) {
7675 avaNodes.erase(std::prev(avaNodes.end()));
7676 }
7677 });
7678
7679 peer->m_addrs_to_send.clear();
7680 for (const CNode *pnode : avaNodes) {
7681 PushAddress(*peer, pnode->addr);
7682 }
7683
7684 return;
7685 }
7686
7687 if (msg_type == NetMsgType::MEMPOOL) {
7688 if (!(peer->m_our_services & NODE_BLOOM) &&
7689 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7690 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7691 LogPrint(BCLog::NET,
7692 "mempool request with bloom filters disabled, "
7693 "disconnect peer=%d\n",
7694 pfrom.GetId());
7695 pfrom.fDisconnect = true;
7696 }
7697 return;
7698 }
7699
7700 if (m_connman.OutboundTargetReached(false) &&
7701 !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
7702 if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
7703 LogPrint(BCLog::NET,
7704 "mempool request with bandwidth limit reached, "
7705 "disconnect peer=%d\n",
7706 pfrom.GetId());
7707 pfrom.fDisconnect = true;
7708 }
7709 return;
7710 }
7711
7712 if (auto tx_relay = peer->GetTxRelay()) {
7713 LOCK(tx_relay->m_tx_inventory_mutex);
7714 tx_relay->m_send_mempool = true;
7715 }
7716 return;
7717 }
7718
7719 if (msg_type == NetMsgType::PING) {
7720 if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
7721 uint64_t nonce = 0;
7722 vRecv >> nonce;
7723 // Echo the message back with the nonce. This allows for two useful
7724 // features:
7725 //
7726 // 1) A remote node can quickly check if the connection is
7727 // operational.
7728 // 2) Remote nodes can measure the latency of the network thread. If
7729 // this node is overloaded it won't respond to pings quickly and the
7730 // remote node can avoid sending us more work, like chain download
7731 // requests.
7732 //
7733 // The nonce stops the remote getting confused between different
7734 // pings: without it, if the remote node sends a ping once per
7735 // second and this node takes 5 seconds to respond to each, the 5th
7736 // ping the remote sends would appear to return very quickly.
7737 MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce);
7738 }
7739 return;
7740 }
7741
7742 if (msg_type == NetMsgType::PONG) {
7743 const auto ping_end = time_received;
7744 uint64_t nonce = 0;
7745 size_t nAvail = vRecv.in_avail();
7746 bool bPingFinished = false;
7747 std::string sProblem;
7748
7749 if (nAvail >= sizeof(nonce)) {
7750 vRecv >> nonce;
7751
7752 // Only process pong message if there is an outstanding ping (old
7753 // ping without nonce should never pong)
7754 if (peer->m_ping_nonce_sent != 0) {
7755 if (nonce == peer->m_ping_nonce_sent) {
7756 // Matching pong received, this ping is no longer
7757 // outstanding
7758 bPingFinished = true;
7759 const auto ping_time = ping_end - peer->m_ping_start.load();
7760 if (ping_time.count() >= 0) {
7761 // Let connman know about this successful ping-pong
7762 pfrom.PongReceived(ping_time);
7763 } else {
7764 // This should never happen
7765 sProblem = "Timing mishap";
7766 }
7767 } else {
7768 // Nonce mismatches are normal when pings are overlapping
7769 sProblem = "Nonce mismatch";
7770 if (nonce == 0) {
7771 // This is most likely a bug in another implementation
7772 // somewhere; cancel this ping
7773 bPingFinished = true;
7774 sProblem = "Nonce zero";
7775 }
7776 }
7777 } else {
7778 sProblem = "Unsolicited pong without ping";
7779 }
7780 } else {
7781 // This is most likely a bug in another implementation somewhere;
7782 // cancel this ping
7783 bPingFinished = true;
7784 sProblem = "Short payload";
7785 }
7786
7787 if (!(sProblem.empty())) {
7788            LogPrint(BCLog::NET,
7789                     "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
7790 pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
7791 nAvail);
7792 }
7793 if (bPingFinished) {
7794 peer->m_ping_nonce_sent = 0;
7795 }
7796 return;
7797 }
7798
7799 if (msg_type == NetMsgType::FILTERLOAD) {
7800 if (!(peer->m_our_services & NODE_BLOOM)) {
7801            LogPrint(BCLog::NET,
7802                     "filterload received despite not offering bloom services "
7803 "from peer=%d; disconnecting\n",
7804 pfrom.GetId());
7805 pfrom.fDisconnect = true;
7806 return;
7807 }
7808 CBloomFilter filter;
7809 vRecv >> filter;
7810
7811 if (!filter.IsWithinSizeConstraints()) {
7812 // There is no excuse for sending a too-large filter
7813 Misbehaving(*peer, "too-large bloom filter");
7814 } else if (auto tx_relay = peer->GetTxRelay()) {
7815 {
7816 LOCK(tx_relay->m_bloom_filter_mutex);
7817 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
7818 tx_relay->m_relay_txs = true;
7819 }
7820 pfrom.m_bloom_filter_loaded = true;
7821 }
7822 return;
7823 }
7824
7825 if (msg_type == NetMsgType::FILTERADD) {
7826 if (!(peer->m_our_services & NODE_BLOOM)) {
7827            LogPrint(BCLog::NET,
7828                     "filteradd received despite not offering bloom services "
7829 "from peer=%d; disconnecting\n",
7830 pfrom.GetId());
7831 pfrom.fDisconnect = true;
7832 return;
7833 }
7834 std::vector<uint8_t> vData;
7835 vRecv >> vData;
7836
7837 // Nodes must NEVER send a data item > 520 bytes (the max size for a
7838 // script data object, and thus, the maximum size any matched object can
7839 // have) in a filteradd message.
7840 bool bad = false;
7841 if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
7842 bad = true;
7843 } else if (auto tx_relay = peer->GetTxRelay()) {
7844 LOCK(tx_relay->m_bloom_filter_mutex);
7845 if (tx_relay->m_bloom_filter) {
7846 tx_relay->m_bloom_filter->insert(vData);
7847 } else {
7848 bad = true;
7849 }
7850 }
7851 if (bad) {
7852 // The structure of this code doesn't really allow for a good error
7853 // code. We'll go generic.
7854 Misbehaving(*peer, "bad filteradd message");
7855 }
7856 return;
7857 }
7858
7859 if (msg_type == NetMsgType::FILTERCLEAR) {
7860 if (!(peer->m_our_services & NODE_BLOOM)) {
7861            LogPrint(BCLog::NET,
7862                     "filterclear received despite not offering bloom services "
7863 "from peer=%d; disconnecting\n",
7864 pfrom.GetId());
7865 pfrom.fDisconnect = true;
7866 return;
7867 }
7868 auto tx_relay = peer->GetTxRelay();
7869 if (!tx_relay) {
7870 return;
7871 }
7872
7873 {
7874 LOCK(tx_relay->m_bloom_filter_mutex);
7875 tx_relay->m_bloom_filter = nullptr;
7876 tx_relay->m_relay_txs = true;
7877 }
7878 pfrom.m_bloom_filter_loaded = false;
7879 pfrom.m_relays_txs = true;
7880 return;
7881 }
7882
7883 if (msg_type == NetMsgType::FEEFILTER) {
7884 Amount newFeeFilter = Amount::zero();
7885 vRecv >> newFeeFilter;
7886 if (MoneyRange(newFeeFilter)) {
7887 if (auto tx_relay = peer->GetTxRelay()) {
7888 tx_relay->m_fee_filter_received = newFeeFilter;
7889 }
7890 LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
7891 CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
7892 }
7893 return;
7894 }
7895
7896 if (msg_type == NetMsgType::GETCFILTERS) {
7897 ProcessGetCFilters(pfrom, *peer, vRecv);
7898 return;
7899 }
7900
7901 if (msg_type == NetMsgType::GETCFHEADERS) {
7902 ProcessGetCFHeaders(pfrom, *peer, vRecv);
7903 return;
7904 }
7905
7906 if (msg_type == NetMsgType::GETCFCHECKPT) {
7907 ProcessGetCFCheckPt(pfrom, *peer, vRecv);
7908 return;
7909 }
7910
7911 if (msg_type == NetMsgType::NOTFOUND) {
7912 std::vector<CInv> vInv;
7913 vRecv >> vInv;
7914 // A peer might send up to 1 notfound per getdata request, but no more
7915        if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
7916                               TX_REQUEST_PARAMS.max_peer_announcements +
7917                               MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
7918 for (CInv &inv : vInv) {
7919 if (inv.IsMsgTx()) {
7920 // If we receive a NOTFOUND message for a tx we requested,
7921 // mark the announcement for it as completed in
7922 // InvRequestTracker.
7923 LOCK(::cs_main);
7924 m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
7925 continue;
7926 }
7927 if (inv.IsMsgProof()) {
7928 if (!m_avalanche) {
7929 continue;
7930 }
7931 LOCK(cs_proofrequest);
7932 m_proofrequest.ReceivedResponse(
7933 pfrom.GetId(), avalanche::ProofId(inv.hash));
7934 }
7935 }
7936 }
7937 return;
7938 }
7939
7940 // Ignore unknown commands for extensibility
7941 LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
7942 SanitizeString(msg_type), pfrom.GetId());
7943 return;
7944}
7945
7946bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
7947 {
7948 LOCK(peer.m_misbehavior_mutex);
7949
7950 // There's nothing to do if the m_should_discourage flag isn't set
7951 if (!peer.m_should_discourage) {
7952 return false;
7953 }
7954
7955 peer.m_should_discourage = false;
7956 } // peer.m_misbehavior_mutex
7957
7958    if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
7959        // We never disconnect or discourage peers for bad behavior if they have
7960 // NetPermissionFlags::NoBan permission
7961 LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
7962 return false;
7963 }
7964
7965 if (pnode.IsManualConn()) {
7966 // We never disconnect or discourage manual peers for bad behavior
7967 LogPrintf("Warning: not punishing manually connected peer %d!\n",
7968 peer.m_id);
7969 return false;
7970 }
7971
7972 if (pnode.addr.IsLocal()) {
7973 // We disconnect local peers for bad behavior but don't discourage
7974 // (since that would discourage all peers on the same local address)
7975        LogPrint(BCLog::NET,
7976                 "Warning: disconnecting but not discouraging %s peer %d!\n",
7977 pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
7978 pnode.fDisconnect = true;
7979 return true;
7980 }
7981
7982 // Normal case: Disconnect the peer and discourage all nodes sharing the
7983 // address
7984 LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
7985 peer.m_id);
7986 if (m_banman) {
7987 m_banman->Discourage(pnode.addr);
7988 }
7989 m_connman.DisconnectNode(pnode.addr);
7990 return true;
7991}
7992
7993bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
7994 std::atomic<bool> &interruptMsgProc) {
7995 AssertLockHeld(g_msgproc_mutex);
7996
7997 //
7998 // Message format
7999 // (4) message start
8000 // (12) command
8001 // (4) size
8002 // (4) checksum
8003 // (x) data
8004 //
8005
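    // For reference, these fields form the standard 24-byte P2P message
    // header. A minimal layout sketch (the struct and field names below are
    // illustrative assumptions; the actual parsing is done by the transport
    // code, not in this file):
    //
    //   struct MessageHeaderSketch {
    //       uint8_t  start[4];     // (4)  message start / network magic
    //       char     command[12];  // (12) NUL-padded command name
    //       uint32_t size;         // (4)  payload length, little-endian
    //       uint8_t  checksum[4];  // (4)  first 4 bytes of SHA256d(payload)
    //   };
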
8006 PeerRef peer = GetPeerRef(pfrom->GetId());
8007 if (peer == nullptr) {
8008 return false;
8009 }
8010
8011 {
8012 LOCK(peer->m_getdata_requests_mutex);
8013 if (!peer->m_getdata_requests.empty()) {
8014 ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
8015 }
8016 }
8017
8018 const bool processed_orphan = ProcessOrphanTx(config, *peer);
8019
8020 if (pfrom->fDisconnect) {
8021 return false;
8022 }
8023
8024 if (processed_orphan) {
8025 return true;
8026 }
8027
8028    // This maintains the order of responses and prevents m_getdata_requests
8029    // from growing unbounded.
8030 {
8031 LOCK(peer->m_getdata_requests_mutex);
8032 if (!peer->m_getdata_requests.empty()) {
8033 return true;
8034 }
8035 }
8036
8037 // Don't bother if send buffer is too full to respond anyway
8038 if (pfrom->fPauseSend) {
8039 return false;
8040 }
8041
8042 auto poll_result{pfrom->PollMessage()};
8043 if (!poll_result) {
8044 // No message to process
8045 return false;
8046 }
8047
8048 CNetMessage &msg{poll_result->first};
8049 bool fMoreWork = poll_result->second;
8050
8051 TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
8052 pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
8053 msg.m_recv.size(), msg.m_recv.data());
8054
8055 if (m_opts.capture_messages) {
8056 CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
8057 /*is_incoming=*/true);
8058 }
8059
8060 // Check network magic
8061 if (!msg.m_valid_netmagic) {
8062        LogPrint(BCLog::NET,
8063                 "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
8064 SanitizeString(msg.m_type), pfrom->GetId());
8065
8066        // Make sure we discourage where that came from for some time.
8067 if (m_banman) {
8068 m_banman->Discourage(pfrom->addr);
8069 }
8070 m_connman.DisconnectNode(pfrom->addr);
8071
8072 pfrom->fDisconnect = true;
8073 return false;
8074 }
8075
8076 // Check header
8077 if (!msg.m_valid_header) {
8078 LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
8079 SanitizeString(msg.m_type), pfrom->GetId());
8080 return fMoreWork;
8081 }
8082
8083 // Checksum
8084 DataStream &vRecv = msg.m_recv;
8085 if (!msg.m_valid_checksum) {
8086 LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
8087 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8088 pfrom->GetId());
8089 if (m_banman) {
8090 m_banman->Discourage(pfrom->addr);
8091 }
8092 m_connman.DisconnectNode(pfrom->addr);
8093 return fMoreWork;
8094 }
8095
8096 try {
8097 ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
8098 interruptMsgProc);
8099 if (interruptMsgProc) {
8100 return false;
8101 }
8102
8103 {
8104 LOCK(peer->m_getdata_requests_mutex);
8105 if (!peer->m_getdata_requests.empty()) {
8106 fMoreWork = true;
8107 }
8108 }
8109        // Does this peer have an orphan ready to reconsider?
8110 // (Note: we may have provided a parent for an orphan provided by
8111 // another peer that was already processed; in that case, the extra work
8112 // may not be noticed, possibly resulting in an unnecessary 100ms delay)
8113 if (m_mempool.withOrphanage([&peer](TxOrphanage &orphanage) {
8114 return orphanage.HaveTxToReconsider(peer->m_id);
8115 })) {
8116 fMoreWork = true;
8117 }
8118 } catch (const std::exception &e) {
8119 LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
8120 __func__, SanitizeString(msg.m_type), msg.m_message_size,
8121 e.what(), typeid(e).name());
8122 } catch (...) {
8123 LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
8124 __func__, SanitizeString(msg.m_type), msg.m_message_size);
8125 }
8126
8127 return fMoreWork;
8128}
8129
8130void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
8131 std::chrono::seconds time_in_seconds) {
8132    AssertLockHeld(cs_main);
8133 
8134 CNodeState &state = *State(pto.GetId());
8135
8136 if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
8137 state.fSyncStarted) {
8138 // This is an outbound peer subject to disconnection if they don't
8139 // announce a block with as much work as the current tip within
8140 // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
8141 // chain has more work than ours, we should sync to it, unless it's
8142 // invalid, in which case we should find that out and disconnect from
8143 // them elsewhere).
8144 if (state.pindexBestKnownBlock != nullptr &&
8145 state.pindexBestKnownBlock->nChainWork >=
8146 m_chainman.ActiveChain().Tip()->nChainWork) {
8147 if (state.m_chain_sync.m_timeout != 0s) {
8148 state.m_chain_sync.m_timeout = 0s;
8149 state.m_chain_sync.m_work_header = nullptr;
8150 state.m_chain_sync.m_sent_getheaders = false;
8151 }
8152 } else if (state.m_chain_sync.m_timeout == 0s ||
8153 (state.m_chain_sync.m_work_header != nullptr &&
8154 state.pindexBestKnownBlock != nullptr &&
8155 state.pindexBestKnownBlock->nChainWork >=
8156 state.m_chain_sync.m_work_header->nChainWork)) {
8157 // Our best block known by this peer is behind our tip, and we're
8158 // either noticing that for the first time, OR this peer was able to
8159 // catch up to some earlier point where we checked against our tip.
8160 // Either way, set a new timeout based on current tip.
8161 state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
8162 state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
8163 state.m_chain_sync.m_sent_getheaders = false;
8164 } else if (state.m_chain_sync.m_timeout > 0s &&
8165 time_in_seconds > state.m_chain_sync.m_timeout) {
8166 // No evidence yet that our peer has synced to a chain with work
8167 // equal to that of our tip, when we first detected it was behind.
8168 // Send a single getheaders message to give the peer a chance to
8169 // update us.
8170 if (state.m_chain_sync.m_sent_getheaders) {
8171 // They've run out of time to catch up!
8172 LogPrintf(
8173 "Disconnecting outbound peer %d for old chain, best known "
8174 "block = %s\n",
8175 pto.GetId(),
8176 state.pindexBestKnownBlock != nullptr
8177 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8178 : "<none>");
8179 pto.fDisconnect = true;
8180 } else {
8181 assert(state.m_chain_sync.m_work_header);
8182 // Here, we assume that the getheaders message goes out,
8183 // because it'll either go out or be skipped because of a
8184 // getheaders in-flight already, in which case the peer should
8185 // still respond to us with a sufficiently high work chain tip.
8186 MaybeSendGetHeaders(
8187 pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
8188 peer);
8189 LogPrint(
8190 BCLog::NET,
8191 "sending getheaders to outbound peer=%d to verify chain "
8192 "work (current best known block:%s, benchmark blockhash: "
8193 "%s)\n",
8194 pto.GetId(),
8195 state.pindexBestKnownBlock != nullptr
8196 ? state.pindexBestKnownBlock->GetBlockHash().ToString()
8197 : "<none>",
8198 state.m_chain_sync.m_work_header->GetBlockHash()
8199 .ToString());
8200 state.m_chain_sync.m_sent_getheaders = true;
8201 // Bump the timeout to allow a response, which could clear the
8202 // timeout (if the response shows the peer has synced), reset
8203 // the timeout (if the peer syncs to the required work but not
8204 // to our tip), or result in disconnect (if we advance to the
8205 // timeout and pindexBestKnownBlock has not sufficiently
8206 // progressed)
8207 state.m_chain_sync.m_timeout =
8208 time_in_seconds + HEADERS_RESPONSE_TIME;
8209 }
8210 }
8211 }
8212}
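
// A compact view of the ConsiderEviction() state machine above, assuming a
// peer that never announces enough work (t0 is when we first notice it is
// behind our tip):
//
//   t0                            m_timeout = t0 + CHAIN_SYNC_TIMEOUT
//   t0 + CHAIN_SYNC_TIMEOUT       getheaders sent, m_sent_getheaders = true,
//                                 m_timeout bumped by HEADERS_RESPONSE_TIME
//   t0 + CHAIN_SYNC_TIMEOUT
//      + HEADERS_RESPONSE_TIME    still behind -> pto.fDisconnect = true
//
// If the peer announces a chain with at least as much work as our tip, the
// timeout is cleared; if it merely catches up to the recorded m_work_header,
// the timeout is re-armed against the then-current tip.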
8213
8214void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
8215 // If we have any extra block-relay-only peers, disconnect the youngest
8216 // unless it's given us a block -- in which case, compare with the
8217 // second-youngest, and out of those two, disconnect the peer who least
8218 // recently gave us a block.
8219 // The youngest block-relay-only peer would be the extra peer we connected
8220 // to temporarily in order to sync our tip; see net.cpp.
8221 // Note that we use higher nodeid as a measure for most recent connection.
8222 if (m_connman.GetExtraBlockRelayCount() > 0) {
8223 std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
8224 next_youngest_peer{-1, 0};
8225
8226 m_connman.ForEachNode([&](CNode *pnode) {
8227 if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
8228 return;
8229 }
8230 if (pnode->GetId() > youngest_peer.first) {
8231 next_youngest_peer = youngest_peer;
8232 youngest_peer.first = pnode->GetId();
8233 youngest_peer.second = pnode->m_last_block_time;
8234 }
8235 });
8236
8237 NodeId to_disconnect = youngest_peer.first;
8238 if (youngest_peer.second > next_youngest_peer.second) {
8239 // Our newest block-relay-only peer gave us a block more recently;
8240 // disconnect our second youngest.
8241 to_disconnect = next_youngest_peer.first;
8242 }
8243
8244 m_connman.ForNode(
8245 to_disconnect,
8246            [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8247                AssertLockHeld(::cs_main);
8248                // Make sure we're not getting a block right now, and that we've
8249 // been connected long enough for this eviction to happen at
8250 // all. Note that we only request blocks from a peer if we learn
8251 // of a valid headers chain with at least as much work as our
8252 // tip.
8253 CNodeState *node_state = State(pnode->GetId());
8254 if (node_state == nullptr ||
8255 (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
8256 node_state->vBlocksInFlight.empty())) {
8257 pnode->fDisconnect = true;
8258                    LogPrint(BCLog::NET,
8259                             "disconnecting extra block-relay-only peer=%d "
8260 "(last block received at time %d)\n",
8261 pnode->GetId(),
8262                             count_seconds(pnode->m_last_block_time));
8263                    return true;
8264 } else {
8265 LogPrint(
8266 BCLog::NET,
8267 "keeping block-relay-only peer=%d chosen for eviction "
8268 "(connect time: %d, blocks_in_flight: %d)\n",
8269 pnode->GetId(), count_seconds(pnode->m_connected),
8270 node_state->vBlocksInFlight.size());
8271 }
8272 return false;
8273 });
8274 }
8275
8276 // Check whether we have too many OUTBOUND_FULL_RELAY peers
8277 if (m_connman.GetExtraFullOutboundCount() <= 0) {
8278 return;
8279 }
8280
8281 // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
8282 // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
8283 // block, with ties broken by choosing the more recent connection (higher
8284 // node id)
8285 NodeId worst_peer = -1;
8286 int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
8287
8288 m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
8289 ::cs_main) {
8290        AssertLockHeld(::cs_main);
8291 
8292 // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
8293 // for disconnection
8294 if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
8295 return;
8296 }
8297 CNodeState *state = State(pnode->GetId());
8298 if (state == nullptr) {
8299 // shouldn't be possible, but just in case
8300 return;
8301 }
8302 // Don't evict our protected peers
8303 if (state->m_chain_sync.m_protect) {
8304 return;
8305 }
8306 if (state->m_last_block_announcement < oldest_block_announcement ||
8307 (state->m_last_block_announcement == oldest_block_announcement &&
8308 pnode->GetId() > worst_peer)) {
8309 worst_peer = pnode->GetId();
8310 oldest_block_announcement = state->m_last_block_announcement;
8311 }
8312 });
8313
8314 if (worst_peer == -1) {
8315 return;
8316 }
8317
8318 bool disconnected = m_connman.ForNode(
8319 worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
8320            AssertLockHeld(::cs_main);
8321 
8322 // Only disconnect a peer that has been connected to us for some
8323 // reasonable fraction of our check-frequency, to give it time for
8324 // new information to have arrived. Also don't disconnect any peer
8325 // we're trying to download a block from.
8326 CNodeState &state = *State(pnode->GetId());
8327 if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
8328 state.vBlocksInFlight.empty()) {
8329                LogPrint(BCLog::NET,
8330                         "disconnecting extra outbound peer=%d (last block "
8331 "announcement received at time %d)\n",
8332 pnode->GetId(), oldest_block_announcement);
8333 pnode->fDisconnect = true;
8334 return true;
8335 } else {
8336                LogPrint(BCLog::NET,
8337                         "keeping outbound peer=%d chosen for eviction "
8338 "(connect time: %d, blocks_in_flight: %d)\n",
8339 pnode->GetId(), count_seconds(pnode->m_connected),
8340 state.vBlocksInFlight.size());
8341 return false;
8342 }
8343 });
8344
8345 if (disconnected) {
8346 // If we disconnected an extra peer, that means we successfully
8347 // connected to at least one peer after the last time we detected a
8348 // stale tip. Don't try any more extra peers until we next detect a
8349 // stale tip, to limit the load we put on the network from these extra
8350 // connections.
8351 m_connman.SetTryNewOutboundPeer(false);
8352 }
8353}
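
// Illustration of the block-relay-only rule above (hypothetical peers; a
// higher node id means a more recent connection): with A(id=7, last block
// 100s ago) and B(id=9, last block 10s ago), B is the youngest. Because B
// delivered a block more recently than A, the eviction candidate is A. The
// candidate is only actually disconnected once it has been connected for at
// least MINIMUM_CONNECT_TIME and has no blocks in flight.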
8354
8355void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
8356 LOCK(cs_main);
8357
8358 auto now{GetTime<std::chrono::seconds>()};
8359
8360 EvictExtraOutboundPeers(now);
8361
8362 if (now > m_stale_tip_check_time) {
8363 // Check whether our tip is stale, and if so, allow using an extra
8364 // outbound peer.
8365 if (!m_chainman.m_blockman.LoadingBlocks() &&
8366 m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
8367 TipMayBeStale()) {
8368 LogPrintf("Potential stale tip detected, will try using extra "
8369 "outbound peer (last tip update: %d seconds ago)\n",
8370 count_seconds(now - m_last_tip_update.load()));
8371 m_connman.SetTryNewOutboundPeer(true);
8372 } else if (m_connman.GetTryNewOutboundPeer()) {
8373 m_connman.SetTryNewOutboundPeer(false);
8374 }
8375 m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
8376 }
8377
8378 if (!m_initial_sync_finished && CanDirectFetch()) {
8379 m_connman.StartExtraBlockRelayPeers();
8380 m_initial_sync_finished = true;
8381 }
8382}
8383
8384void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
8385 std::chrono::microseconds now) {
8386 if (m_connman.ShouldRunInactivityChecks(
8387 node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
8388 peer.m_ping_nonce_sent &&
8389 now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
8390 // The ping timeout is using mocktime. To disable the check during
8391 // testing, increase -peertimeout.
8392 LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
8393 0.000001 * count_microseconds(now - peer.m_ping_start.load()),
8394 peer.m_id);
8395 node_to.fDisconnect = true;
8396 return;
8397 }
8398
8399 bool pingSend = false;
8400
8401 if (peer.m_ping_queued) {
8402 // RPC ping request by user
8403 pingSend = true;
8404 }
8405
8406 if (peer.m_ping_nonce_sent == 0 &&
8407 now > peer.m_ping_start.load() + PING_INTERVAL) {
8408 // Ping automatically sent as a latency probe & keepalive.
8409 pingSend = true;
8410 }
8411
8412 if (pingSend) {
8413 uint64_t nonce;
8414 do {
8415 nonce = FastRandomContext().rand64();
8416 } while (nonce == 0);
8417 peer.m_ping_queued = false;
8418 peer.m_ping_start = now;
8419 if (node_to.GetCommonVersion() > BIP0031_VERSION) {
8420 peer.m_ping_nonce_sent = nonce;
8421 MakeAndPushMessage(node_to, NetMsgType::PING, nonce);
8422 } else {
8423 // Peer is too old to support ping command with nonce, pong will
8424 // never arrive.
8425 peer.m_ping_nonce_sent = 0;
8426 MakeAndPushMessage(node_to, NetMsgType::PING);
8427 }
8428 }
8429}
8430
8431void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
8432 std::chrono::microseconds current_time) {
8433 // Nothing to do for non-address-relay peers
8434 if (!peer.m_addr_relay_enabled) {
8435 return;
8436 }
8437
8438 LOCK(peer.m_addr_send_times_mutex);
8439 if (fListen && !m_chainman.IsInitialBlockDownload() &&
8440 peer.m_next_local_addr_send < current_time) {
8441 // If we've sent before, clear the bloom filter for the peer, so
8442 // that our self-announcement will actually go out. This might
8443 // be unnecessary if the bloom filter has already rolled over
8444 // since our last self-announcement, but there is only a small
8445 // bandwidth cost that we can incur by doing this (which happens
8446 // once a day on average).
8447 if (peer.m_next_local_addr_send != 0us) {
8448 peer.m_addr_known->reset();
8449 }
8450 if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
8451 CAddress local_addr{*local_service, peer.m_our_services,
8452 Now<NodeSeconds>()};
8453 PushAddress(peer, local_addr);
8454 }
8455 peer.m_next_local_addr_send =
8456 current_time +
8457 m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
8458 }
8459
8460 // We sent an `addr` message to this peer recently. Nothing more to do.
8461 if (current_time <= peer.m_next_addr_send) {
8462 return;
8463 }
8464
8465 peer.m_next_addr_send =
8466 current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL);
8467
8468 const size_t max_addr_to_send = m_opts.max_addr_to_send;
8469 if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
8470 // Should be impossible since we always check size before adding to
8471 // m_addrs_to_send. Recover by trimming the vector.
8472 peer.m_addrs_to_send.resize(max_addr_to_send);
8473 }
8474
8475 // Remove addr records that the peer already knows about, and add new
8476 // addrs to the m_addr_known filter on the same pass.
8477 auto addr_already_known =
8478 [&peer](const CAddress &addr)
8479 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
8480 bool ret = peer.m_addr_known->contains(addr.GetKey());
8481 if (!ret) {
8482 peer.m_addr_known->insert(addr.GetKey());
8483 }
8484 return ret;
8485 };
8486 peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
8487 peer.m_addrs_to_send.end(),
8488 addr_already_known),
8489 peer.m_addrs_to_send.end());
8490
8491 // No addr messages to send
8492 if (peer.m_addrs_to_send.empty()) {
8493 return;
8494 }
8495
8496 const char *msg_type;
8497 CNetAddr::Encoding ser_enc;
8498 if (peer.m_wants_addrv2) {
8499 msg_type = NetMsgType::ADDRV2;
8500 ser_enc = CNetAddr::Encoding::V2;
8501 } else {
8502 msg_type = NetMsgType::ADDR;
8503 ser_enc = CNetAddr::Encoding::V1;
8504 }
8505 MakeAndPushMessage(
8506 node, msg_type,
8508 peer.m_addrs_to_send));
8509 peer.m_addrs_to_send.clear();
8510
8511 // we only send the big addr message once
8512 if (peer.m_addrs_to_send.capacity() > 40) {
8513 peer.m_addrs_to_send.shrink_to_fit();
8514 }
8515}
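
// Note on the scheduling above: both next-send times are drawn with
// m_rng.rand_exp_duration(), so the delay between addr pushes is
// exponentially distributed around the configured average interval.
// Successive sends therefore approximate a memoryless (Poisson-like)
// process with that interval as its mean, avoiding predictable, lock-step
// broadcast times while keeping the long-run message rate unchanged.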
8516
8517void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
8518 // Delay sending SENDHEADERS (BIP 130) until we're done with an
8519 // initial-headers-sync with this peer. Receiving headers announcements for
8520 // new blocks while trying to sync their headers chain is problematic,
8521 // because of the state tracking done.
8522 if (!peer.m_sent_sendheaders &&
8523 node.GetCommonVersion() >= SENDHEADERS_VERSION) {
8524 LOCK(cs_main);
8525 CNodeState &state = *State(node.GetId());
8526 if (state.pindexBestKnownBlock != nullptr &&
8527 state.pindexBestKnownBlock->nChainWork >
8528 m_chainman.MinimumChainWork()) {
8529 // Tell our peer we prefer to receive headers rather than inv's
8530 // We send this to non-NODE NETWORK peers as well, because even
8531 // non-NODE NETWORK peers can announce blocks (such as pruning
8532 // nodes)
8533 MakeAndPushMessage(node, NetMsgType::SENDHEADERS);
8534 peer.m_sent_sendheaders = true;
8535 }
8536 }
8537}
8538
8539void PeerManagerImpl::MaybeSendFeefilter(
8540 CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
8541 if (m_opts.ignore_incoming_txs) {
8542 return;
8543 }
8544 if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
8545 return;
8546 }
8547 // peers with the forcerelay permission should not filter txs to us
8548    if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
8549        return;
8550 }
8551 // Don't send feefilter messages to outbound block-relay-only peers since
8552 // they should never announce transactions to us, regardless of feefilter
8553 // state.
8554 if (pto.IsBlockOnlyConn()) {
8555 return;
8556 }
8557
8558 Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
8559
8560 if (m_chainman.IsInitialBlockDownload()) {
8561 // Received tx-inv messages are discarded when the active
8562 // chainstate is in IBD, so tell the peer to not send them.
8563 currentFilter = MAX_MONEY;
8564 } else {
8565 static const Amount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)};
8566 if (peer.m_fee_filter_sent == MAX_FILTER) {
8567 // Send the current filter if we sent MAX_FILTER previously
8568 // and made it out of IBD.
8569 peer.m_next_send_feefilter = 0us;
8570 }
8571 }
8572 if (current_time > peer.m_next_send_feefilter) {
8573 Amount filterToSend = m_fee_filter_rounder.round(currentFilter);
8574 // We always have a fee filter of at least the min relay fee
8575 filterToSend =
8576 std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
8577 if (filterToSend != peer.m_fee_filter_sent) {
8578 MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
8579 peer.m_fee_filter_sent = filterToSend;
8580 }
8581 peer.m_next_send_feefilter =
8582 current_time +
8583 m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL);
8584 }
8585 // If the fee filter has changed substantially and it's still more than
8586 // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
8587 // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
8588 else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
8589 peer.m_next_send_feefilter &&
8590 (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
8591 currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
8592 peer.m_next_send_feefilter =
8593 current_time +
8594 FastRandomContext().randrange<std::chrono::microseconds>(
8595                    MAX_FEEFILTER_CHANGE_DELAY);
8596    }
8597}
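
// Worked example of the early-rebroadcast band above (illustrative numbers
// only): if the last feefilter sent was 1000 sat/kB, no early update is
// triggered while the current filter stays within (3/4 * 1000, 4/3 * 1000),
// i.e. roughly (750, 1333) sat/kB. A drop to 700 or a rise to 1500 sat/kB
// pulls the next broadcast forward to within MAX_FEEFILTER_CHANGE_DELAY,
// whereas 800 or 1200 sat/kB simply waits for the regular randomized
// schedule.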
8598
8599namespace {
8600class CompareInvMempoolOrder {
8601 CTxMemPool *mp;
8602
8603public:
8604 explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
8605
8606 bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
8611 return mp->CompareTopologically(*b, *a);
8612 }
8613};
8614} // namespace
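
// Note on the comparator above: std::make_heap/std::pop_heap build a
// max-heap with respect to the comparator, so comparing (*b, *a) in reversed
// order makes the element that CompareTopologically() orders first (the one
// admitted to the mempool earliest, hence any ancestor before its
// descendants) surface at the top of the heap. The transaction INV loop in
// SendMessages() below relies on this to announce parents before children
// without fully sorting the candidate set.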
8615
8616bool PeerManagerImpl::RejectIncomingTxs(const CNode &peer) const {
8617 // block-relay-only peers may never send txs to us
8618 if (peer.IsBlockOnlyConn()) {
8619 return true;
8620 }
8621 if (peer.IsFeelerConn()) {
8622 return true;
8623 }
8624 // In -blocksonly mode, peers need the 'relay' permission to send txs to us
8625 if (m_opts.ignore_incoming_txs &&
8626        !peer.HasPermission(NetPermissionFlags::Relay)) {
8627        return true;
8628 }
8629 return false;
8630}
8631
8632bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
8633 // We don't participate in addr relay with outbound block-relay-only
8634 // connections to prevent providing adversaries with the additional
8635 // information of addr traffic to infer the link.
8636 if (node.IsBlockOnlyConn()) {
8637 return false;
8638 }
8639
8640 if (!peer.m_addr_relay_enabled.exchange(true)) {
8641 // During version message processing (non-block-relay-only outbound
8642 // peers) or on first addr-related message we have received (inbound
8643 // peers), initialize m_addr_known.
8644 peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
8645 }
8646
8647 return true;
8648}
8649
8650bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
8651 AssertLockHeld(g_msgproc_mutex);
8652
8653 PeerRef peer = GetPeerRef(pto->GetId());
8654 if (!peer) {
8655 return false;
8656 }
8657 const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
8658
8659 // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
8660 // disconnect misbehaving peers even before the version handshake is
8661 // complete.
8662 if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
8663 return true;
8664 }
8665
8666 // Don't send anything until the version handshake is complete
8667 if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
8668 return true;
8669 }
8670
8671 const auto current_time{GetTime<std::chrono::microseconds>()};
8672
8673 if (pto->IsAddrFetchConn() &&
8674 current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
8675        LogPrint(BCLog::NET,
8676                 "addrfetch connection timeout; disconnecting peer=%d\n",
8677 pto->GetId());
8678 pto->fDisconnect = true;
8679 return true;
8680 }
8681
8682 MaybeSendPing(*pto, *peer, current_time);
8683
8684 // MaybeSendPing may have marked peer for disconnection
8685 if (pto->fDisconnect) {
8686 return true;
8687 }
8688
8689 bool sync_blocks_and_headers_from_peer = false;
8690
8691 MaybeSendAddr(*pto, *peer, current_time);
8692
8693 MaybeSendSendHeaders(*pto, *peer);
8694
8695 {
8696 LOCK(cs_main);
8697
8698 CNodeState &state = *State(pto->GetId());
8699
8700 // Start block sync
8701 if (m_chainman.m_best_header == nullptr) {
8702 m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
8703 }
8704
8705 // Determine whether we might try initial headers sync or parallel
8706 // block download from this peer -- this mostly affects behavior while
8707 // in IBD (once out of IBD, we sync from all peers).
8708 if (state.fPreferredDownload) {
8709 sync_blocks_and_headers_from_peer = true;
8710 } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
8711 // Typically this is an inbound peer. If we don't have any outbound
8712 // peers, or if we aren't downloading any blocks from such peers,
8713 // then allow block downloads from this peer, too.
8714 // We prefer downloading blocks from outbound peers to avoid
8715 // putting undue load on (say) some home user who is just making
8716 // outbound connections to the network, but if our only source of
8717 // the latest blocks is from an inbound peer, we have to be sure to
8718 // eventually download it (and not just wait indefinitely for an
8719 // outbound peer to have it).
8720 if (m_num_preferred_download_peers == 0 ||
8721 mapBlocksInFlight.empty()) {
8722 sync_blocks_and_headers_from_peer = true;
8723 }
8724 }
8725
8726 if (!state.fSyncStarted && CanServeBlocks(*peer) &&
8727 !m_chainman.m_blockman.LoadingBlocks()) {
8728 // Only actively request headers from a single peer, unless we're
8729 // close to today.
8730 if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
8731 m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
8732 const CBlockIndex *pindexStart = m_chainman.m_best_header;
8733                // If possible, start at the block preceding the currently
8734                // best known header. This ensures that we always get a
8735                // non-empty list of headers back as long as the peer is
8736                // up-to-date. With a non-empty response, we can initialise
8737                // the peer's known best block. This wouldn't be possible if
8738                // we requested starting at m_chainman.m_best_header and got
8739                // back an empty response.
8740 
8741                if (pindexStart->pprev) {
8742 pindexStart = pindexStart->pprev;
8743 }
8744 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
8745 LogPrint(
8746 BCLog::NET,
8747 "initial getheaders (%d) to peer=%d (startheight:%d)\n",
8748 pindexStart->nHeight, pto->GetId(),
8749 peer->m_starting_height);
8750
8751 state.fSyncStarted = true;
8752 peer->m_headers_sync_timeout =
8753 current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
8754 (
8755 // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
8756 // microseconds before scaling to maintain precision
8757 std::chrono::microseconds{
8758                            HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
8759                        Ticks<std::chrono::seconds>(
8760 GetAdjustedTime() -
8761 m_chainman.m_best_header->Time()) /
8762 consensusParams.nPowTargetSpacing);
8763 nSyncStarted++;
8764 }
8765 }
8766 }
8767
8768 //
8769 // Try sending block announcements via headers
8770 //
8771 {
8772 // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
8773 // hashes we're relaying, and our peer wants headers announcements,
8774 // then find the first header not yet known to our peer but would
8775 // connect, and send. If no header would connect, or if we have too
8776 // many blocks, or if the peer doesn't want headers, just add all to
8777 // the inv queue.
8778 LOCK(peer->m_block_inv_mutex);
8779 std::vector<CBlock> vHeaders;
8780 bool fRevertToInv =
8781 ((!peer->m_prefers_headers &&
8782 (!state.m_requested_hb_cmpctblocks ||
8783 peer->m_blocks_for_headers_relay.size() > 1)) ||
8784 peer->m_blocks_for_headers_relay.size() >
8785                    MAX_BLOCKS_TO_ANNOUNCE);
8786            // last header queued for delivery
8787 const CBlockIndex *pBestIndex = nullptr;
8788 // ensure pindexBestKnownBlock is up-to-date
8789 ProcessBlockAvailability(pto->GetId());
8790
8791 if (!fRevertToInv) {
8792 bool fFoundStartingHeader = false;
8793 // Try to find first header that our peer doesn't have, and then
8794            // send all headers past that one. If we come across any headers
8795            // that aren't on m_chainman.ActiveChain(), give up.
8796 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
8797 const CBlockIndex *pindex =
8798 m_chainman.m_blockman.LookupBlockIndex(hash);
8799 assert(pindex);
8800 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8801 // Bail out if we reorged away from this block
8802 fRevertToInv = true;
8803 break;
8804 }
8805 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
8806                    // This means that the list of blocks to announce doesn't
8807 // connect to each other. This shouldn't really be
8808 // possible to hit during regular operation (because
8809 // reorgs should take us to a chain that has some block
8810 // not on the prior chain, which should be caught by the
8811 // prior check), but one way this could happen is by
8812 // using invalidateblock / reconsiderblock repeatedly on
8813 // the tip, causing it to be added multiple times to
8814 // m_blocks_for_headers_relay. Robustly deal with this
8815 // rare situation by reverting to an inv.
8816 fRevertToInv = true;
8817 break;
8818 }
8819 pBestIndex = pindex;
8820 if (fFoundStartingHeader) {
8821 // add this to the headers message
8822 vHeaders.push_back(pindex->GetBlockHeader());
8823 } else if (PeerHasHeader(&state, pindex)) {
8824 // Keep looking for the first new block.
8825 continue;
8826 } else if (pindex->pprev == nullptr ||
8827 PeerHasHeader(&state, pindex->pprev)) {
8828 // Peer doesn't have this header but they do have the
8829 // prior one. Start sending headers.
8830 fFoundStartingHeader = true;
8831 vHeaders.push_back(pindex->GetBlockHeader());
8832 } else {
8833 // Peer doesn't have this header or the prior one --
8834 // nothing will connect, so bail out.
8835 fRevertToInv = true;
8836 break;
8837 }
8838 }
8839 }
8840 if (!fRevertToInv && !vHeaders.empty()) {
8841 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
8842 // We only send up to 1 block as header-and-ids, as
8843                // otherwise it probably means we're doing an initial-ish
8844                // sync or they're slow.
8845                LogPrint(BCLog::NET,
8846                         "%s sending header-and-ids %s to peer=%d\n",
8847 __func__, vHeaders.front().GetHash().ToString(),
8848 pto->GetId());
8849
8850 std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
8851 {
8852 LOCK(m_most_recent_block_mutex);
8853 if (m_most_recent_block_hash ==
8854 pBestIndex->GetBlockHash()) {
8855 cached_cmpctblock_msg =
8857 *m_most_recent_compact_block);
8858 }
8859 }
8860 if (cached_cmpctblock_msg.has_value()) {
8861 PushMessage(*pto,
8862 std::move(cached_cmpctblock_msg.value()));
8863 } else {
8864 CBlock block;
8865 const bool ret{m_chainman.m_blockman.ReadBlock(
8866 block, *pBestIndex)};
8867 assert(ret);
8868 CBlockHeaderAndShortTxIDs cmpctblock(
8869 block, FastRandomContext().rand64());
8870 MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK,
8871 cmpctblock);
8872 }
8873 state.pindexBestHeaderSent = pBestIndex;
8874 } else if (peer->m_prefers_headers) {
8875 if (vHeaders.size() > 1) {
8876                    LogPrint(BCLog::NET,
8877                             "%s: %u headers, range (%s, %s), to peer=%d\n",
8878 __func__, vHeaders.size(),
8879 vHeaders.front().GetHash().ToString(),
8880 vHeaders.back().GetHash().ToString(),
8881 pto->GetId());
8882 } else {
8883                    LogPrint(BCLog::NET,
8884                             "%s: sending header %s to peer=%d\n", __func__,
8885 vHeaders.front().GetHash().ToString(),
8886 pto->GetId());
8887 }
8888 MakeAndPushMessage(*pto, NetMsgType::HEADERS, vHeaders);
8889 state.pindexBestHeaderSent = pBestIndex;
8890 } else {
8891 fRevertToInv = true;
8892 }
8893 }
8894 if (fRevertToInv) {
8895 // If falling back to using an inv, just try to inv the tip. The
8896 // last entry in m_blocks_for_headers_relay was our tip at some
8897 // point in the past.
8898 if (!peer->m_blocks_for_headers_relay.empty()) {
8899 const BlockHash &hashToAnnounce =
8900 peer->m_blocks_for_headers_relay.back();
8901 const CBlockIndex *pindex =
8902 m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
8903 assert(pindex);
8904
8905 // Warn if we're announcing a block that is not on the main
8906 // chain. This should be very rare and could be optimized
8907 // out. Just log for now.
8908 if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
8909 LogPrint(
8910 BCLog::NET,
8911 "Announcing block %s not on main chain (tip=%s)\n",
8912 hashToAnnounce.ToString(),
8913 m_chainman.ActiveChain()
8914 .Tip()
8915 ->GetBlockHash()
8916 .ToString());
8917 }
8918
8919 // If the peer's chain has this block, don't inv it back.
8920 if (!PeerHasHeader(&state, pindex)) {
8921 peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
8922                    LogPrint(BCLog::NET,
8923                             "%s: sending inv peer=%d hash=%s\n", __func__,
8924 pto->GetId(), hashToAnnounce.ToString());
8925 }
8926 }
8927 }
8928 peer->m_blocks_for_headers_relay.clear();
8929 }
8930 } // release cs_main
8931
8932 //
8933 // Message: inventory
8934 //
8935 std::vector<CInv> vInv;
8936 auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
8937 vInv.emplace_back(type, hash);
8938 if (vInv.size() == MAX_INV_SZ) {
8939 MakeAndPushMessage(*pto, NetMsgType::INV, std::move(vInv));
8940 vInv.clear();
8941 }
8942 };
8943
8944 {
8945 LOCK(cs_main);
8946
8947 {
8948 LOCK(peer->m_block_inv_mutex);
8949
8950 vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
8951                                          INVENTORY_BROADCAST_MAX_PER_MB *
8952                                              config.GetMaxBlockSize() /
8953 1000000));
8954
8955 // Add blocks
8956 for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
8957 addInvAndMaybeFlush(MSG_BLOCK, hash);
8958 }
8959 peer->m_blocks_for_inv_relay.clear();
8960 }
8961
8962 auto computeNextInvSendTime =
8963 [&](std::chrono::microseconds &next)
8964 EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) -> bool {
8965 bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
8966
8967 if (next < current_time) {
8968 fSendTrickle = true;
8969 if (pto->IsInboundConn()) {
8970 next = NextInvToInbounds(
8971                    current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
8972            } else {
8973 // Skip delay for outbound peers, as there is less privacy
8974 // concern for them.
8975 next = current_time;
8976 }
8977 }
8978
8979 return fSendTrickle;
8980 };
8981
8982 // Add proofs to inventory
8983 if (peer->m_proof_relay != nullptr) {
8984 LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
8985
8986 if (computeNextInvSendTime(
8987 peer->m_proof_relay->m_next_inv_send_time)) {
8988 auto it =
8989 peer->m_proof_relay->m_proof_inventory_to_send.begin();
8990 while (it !=
8991 peer->m_proof_relay->m_proof_inventory_to_send.end()) {
8992 const avalanche::ProofId proofid = *it;
8993
8994 it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
8995 it);
8996
8997 if (peer->m_proof_relay->m_proof_inventory_known_filter
8998 .contains(proofid)) {
8999 continue;
9000 }
9001
9002 peer->m_proof_relay->m_proof_inventory_known_filter.insert(
9003 proofid);
9004 addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
9005 peer->m_proof_relay->m_recently_announced_proofs.insert(
9006 proofid);
9007 }
9008 }
9009 }
9010
9011 if (auto tx_relay = peer->GetTxRelay()) {
9012 LOCK(tx_relay->m_tx_inventory_mutex);
9013 // Check whether periodic sends should happen
9014 const bool fSendTrickle =
9015 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
9016
9017 // Time to send but the peer has requested we not relay
9018 // transactions.
9019 if (fSendTrickle) {
9020 LOCK(tx_relay->m_bloom_filter_mutex);
9021 if (!tx_relay->m_relay_txs) {
9022 tx_relay->m_tx_inventory_to_send.clear();
9023 }
9024 }
9025
9026 // Respond to BIP35 mempool requests
9027 if (fSendTrickle && tx_relay->m_send_mempool) {
9028 auto vtxinfo = m_mempool.infoAll();
9029 tx_relay->m_send_mempool = false;
9030 const CFeeRate filterrate{
9031 tx_relay->m_fee_filter_received.load()};
9032
9033 LOCK(tx_relay->m_bloom_filter_mutex);
9034
9035 for (const auto &txinfo : vtxinfo) {
9036 const TxId &txid = txinfo.tx->GetId();
9037 tx_relay->m_tx_inventory_to_send.erase(txid);
9038 // Don't send transactions that peers will not put into
9039 // their mempool
9040 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9041 continue;
9042 }
9043 if (tx_relay->m_bloom_filter &&
9044 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9045 *txinfo.tx)) {
9046 continue;
9047 }
9048 tx_relay->m_tx_inventory_known_filter.insert(txid);
9049 // Responses to MEMPOOL requests bypass the
9050 // m_recently_announced_invs filter.
9051 addInvAndMaybeFlush(MSG_TX, txid);
9052 }
9053 tx_relay->m_last_mempool_req =
9054 std::chrono::duration_cast<std::chrono::seconds>(
9055 current_time);
9056 }
9057
9058 // Determine transactions to relay
9059 if (fSendTrickle) {
9060 // Produce a vector with all candidates for sending
9061 std::vector<std::set<TxId>::iterator> vInvTx;
9062 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
9063 for (std::set<TxId>::iterator it =
9064 tx_relay->m_tx_inventory_to_send.begin();
9065 it != tx_relay->m_tx_inventory_to_send.end(); it++) {
9066 vInvTx.push_back(it);
9067 }
9068 const CFeeRate filterrate{
9069 tx_relay->m_fee_filter_received.load()};
9070 // Send out the inventory in the order of admission to our
9071 // mempool, which is guaranteed to be a topological sort order.
9072 // A heap is used so that not all items need sorting if only a
9073 // few are being sent.
9074 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
9075 std::make_heap(vInvTx.begin(), vInvTx.end(),
9076 compareInvMempoolOrder);
9077 // No reason to drain out at many times the network's
9078 // capacity, especially since we have many peers and some
9079 // will draw much shorter delays.
9080 unsigned int nRelayedTransactions = 0;
9081 LOCK(tx_relay->m_bloom_filter_mutex);
9082 while (!vInvTx.empty() &&
9083 nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
9084 config.GetMaxBlockSize() /
9085 1000000) {
9086 // Fetch the top element from the heap
9087 std::pop_heap(vInvTx.begin(), vInvTx.end(),
9088 compareInvMempoolOrder);
9089 std::set<TxId>::iterator it = vInvTx.back();
9090 vInvTx.pop_back();
9091 const TxId txid = *it;
9092 // Remove it from the to-be-sent set
9093 tx_relay->m_tx_inventory_to_send.erase(it);
9094 // Check if not in the filter already
9095 if (tx_relay->m_tx_inventory_known_filter.contains(txid) &&
9096 tx_relay->m_avalanche_stalled_txids.count(txid) == 0) {
9097 continue;
9098 }
9099 // Not in the mempool anymore? don't bother sending it.
9100 auto txinfo = m_mempool.info(txid);
9101 if (!txinfo.tx) {
9102 continue;
9103 }
9104 // Peer told you to not send transactions at that
9105 // feerate? Don't bother sending it.
9106 if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
9107 continue;
9108 }
9109 if (tx_relay->m_bloom_filter &&
9110 !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
9111 *txinfo.tx)) {
9112 continue;
9113 }
9114 // Send
9115 tx_relay->m_recently_announced_invs.insert(txid);
9116 addInvAndMaybeFlush(MSG_TX, txid);
9117 nRelayedTransactions++;
9118 tx_relay->m_tx_inventory_known_filter.insert(txid);
9119 tx_relay->m_avalanche_stalled_txids.erase(txid);
9120 }
9121 }
9122 }
9123 } // release cs_main
9124
9125 if (!vInv.empty()) {
9126 MakeAndPushMessage(*pto, NetMsgType::INV, vInv);
9127 }
9128
9129 {
9130 LOCK(cs_main);
9131
9132 CNodeState &state = *State(pto->GetId());
9133
9134 // Detect whether we're stalling
9135 auto stalling_timeout = m_block_stalling_timeout.load();
9136 if (state.m_stalling_since.count() &&
9137 state.m_stalling_since < current_time - stalling_timeout) {
9138 // Stalling only triggers when the block download window cannot
9139 // move. During normal steady state, the download window should be
9140 // much larger than the to-be-downloaded set of blocks, so
9141 // disconnection should only happen during initial block download.
9142 LogPrintf("Peer=%d is stalling block download, disconnecting\n",
9143 pto->GetId());
9144 pto->fDisconnect = true;
9145 // Increase timeout for the next peer so that we don't disconnect
9146 // multiple peers if our own bandwidth is insufficient.
9147 const auto new_timeout =
9148 std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
9149 if (stalling_timeout != new_timeout &&
9150 m_block_stalling_timeout.compare_exchange_strong(
9151 stalling_timeout, new_timeout)) {
9152 LogPrint(
9153 BCLog::NET,
9154 "Increased stalling timeout temporarily to %d seconds\n",
9155 count_seconds(new_timeout));
9156 }
9157 return true;
9158 }
9159 // In case there is a block that has been in flight from this peer for
9160 // block_interval * (1 + 0.5 * N) (with N the number of peers from which
9161 // we're downloading validated blocks), disconnect due to timeout.
9162 // We compensate for other peers to prevent killing off peers due to our
9163 // own downstream link being saturated. We only count validated
9164 // in-flight blocks so peers can't advertise non-existing block hashes
9165 // to unreasonably increase our timeout.
9166 if (state.vBlocksInFlight.size() > 0) {
9167 QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
9168 int nOtherPeersWithValidatedDownloads =
9169 m_peers_downloading_from - 1;
9170 if (current_time >
9171 state.m_downloading_since +
9172 std::chrono::seconds{consensusParams.nPowTargetSpacing} *
9173                    (BLOCK_DOWNLOAD_TIMEOUT_BASE +
9174                     BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
9175                         nOtherPeersWithValidatedDownloads)) {
9176 LogPrintf("Timeout downloading block %s from peer=%d, "
9177 "disconnecting\n",
9178 queuedBlock.pindex->GetBlockHash().ToString(),
9179 pto->GetId());
9180 pto->fDisconnect = true;
9181 return true;
9182 }
9183 }
9184
9185 // Check for headers sync timeouts
9186 if (state.fSyncStarted &&
9187 peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
9188 // Detect whether this is a stalling initial-headers-sync peer
9189 if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
9190 if (current_time > peer->m_headers_sync_timeout &&
9191 nSyncStarted == 1 &&
9192 (m_num_preferred_download_peers -
9193 state.fPreferredDownload >=
9194 1)) {
9195 // Disconnect a peer (without NetPermissionFlags::NoBan
9196 // permission) if it is our only sync peer, and we have
9197 // others we could be using instead. Note: If all our peers
9198 // are inbound, then we won't disconnect our sync peer for
9199 // stalling; we have bigger problems if we can't get any
9200 // outbound peers.
9201                    if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
9202                        LogPrintf("Timeout downloading headers from peer=%d, "
9203 "disconnecting\n",
9204 pto->GetId());
9205 pto->fDisconnect = true;
9206 return true;
9207 } else {
9208 LogPrintf("Timeout downloading headers from noban "
9209 "peer=%d, not disconnecting\n",
9210 pto->GetId());
9211 // Reset the headers sync state so that we have a chance
9212 // to try downloading from a different peer. Note: this
9213 // will also result in at least one more getheaders
9214 // message to be sent to this peer (eventually).
9215 state.fSyncStarted = false;
9216 nSyncStarted--;
9217 peer->m_headers_sync_timeout = 0us;
9218 }
9219 }
9220 } else {
9221 // After we've caught up once, reset the timeout so we can't
9222 // trigger disconnect later.
9223 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
9224 }
9225 }
9226
9227        // Check that outbound peers have reasonable chains. GetTime() is used
9228        // by this anti-DoS logic so we can test it using mocktime.
9229 ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
9230 } // release cs_main
9231
9232 std::vector<CInv> vGetData;
9233
9234 //
9235 // Message: getdata (blocks)
9236 //
9237 {
9238 LOCK(cs_main);
9239
9240 CNodeState &state = *State(pto->GetId());
9241
9242 if (CanServeBlocks(*peer) &&
9243 ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
9244 !m_chainman.IsInitialBlockDownload()) &&
9245 state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
9246 std::vector<const CBlockIndex *> vToDownload;
9247 NodeId staller = -1;
9248 auto get_inflight_budget = [&state]() {
9249 return std::max(
9250                    0, MAX_BLOCKS_IN_TRANSIT_PER_PEER -
9251                           static_cast<int>(state.vBlocksInFlight.size()));
9252 };
9253
9254 // If a snapshot chainstate is in use, we want to find its next
9255 // blocks before the background chainstate to prioritize getting to
9256 // network tip.
9257 FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload,
9258 staller);
9259 if (m_chainman.BackgroundSyncInProgress() &&
9260 !IsLimitedPeer(*peer)) {
9261 // If the background tip is not an ancestor of the snapshot
9262 // block, we need to start requesting blocks from their last
9263 // common ancestor.
9264 const CBlockIndex *from_tip =
9265                    LastCommonAncestor(m_chainman.GetBackgroundSyncTip(),
9266                                       m_chainman.GetSnapshotBaseBlock());
9267
9268 TryDownloadingHistoricalBlocks(
9269 *peer, get_inflight_budget(), vToDownload, from_tip,
9270 Assert(m_chainman.GetSnapshotBaseBlock()));
9271 }
9272 for (const CBlockIndex *pindex : vToDownload) {
9273 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
9274 BlockRequested(config, pto->GetId(), *pindex);
9275 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
9276 pindex->GetBlockHash().ToString(), pindex->nHeight,
9277 pto->GetId());
9278 }
9279 if (state.vBlocksInFlight.empty() && staller != -1) {
9280 if (State(staller)->m_stalling_since == 0us) {
9281 State(staller)->m_stalling_since = current_time;
9282 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
9283 }
9284 }
9285 }
9286 } // release cs_main
9287
9288 auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
9289 CInv inv(type, hash);
9290 LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
9291 pto->GetId());
9292 vGetData.push_back(std::move(inv));
9293 if (vGetData.size() >= MAX_GETDATA_SZ) {
9294 MakeAndPushMessage(*pto, NetMsgType::GETDATA, std::move(vGetData));
9295 vGetData.clear();
9296 }
9297 };
9298
9299 //
9300 // Message: getdata (proof)
9301 //
9302 if (m_avalanche) {
9303 LOCK(cs_proofrequest);
9304 std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
9305 auto requestable =
9306 m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
9307 for (const auto &entry : expired) {
9308            LogPrint(BCLog::AVALANCHE,
9309                     "timeout of inflight proof %s from peer=%d\n",
9310 entry.second.ToString(), entry.first);
9311 }
9312 for (const auto &proofid : requestable) {
9313 if (!AlreadyHaveProof(proofid)) {
9314 addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
9315 m_proofrequest.RequestedData(
9316 pto->GetId(), proofid,
9317 current_time + PROOF_REQUEST_PARAMS.getdata_interval);
9318 } else {
9319 // We have already seen this proof, no need to download.
9320 // This is just a belt-and-suspenders, as this should
9321 // already be called whenever a proof becomes
9322 // AlreadyHaveProof().
9323 m_proofrequest.ForgetInvId(proofid);
9324 }
9325 }
9326 }
9327
9328 //
9329 // Message: getdata (transactions)
9330 //
9331 {
9332 LOCK(cs_main);
9333 std::vector<std::pair<NodeId, TxId>> expired;
9334 auto requestable =
9335 m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
9336 for (const auto &entry : expired) {
9337 LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
9338 entry.second.ToString(), entry.first);
9339 }
9340 for (const TxId &txid : requestable) {
9341 // Exclude m_recent_rejects_package_reconsiderable: we may be
9342 // requesting a missing parent that was previously rejected for
9343 // being too low feerate.
9344 if (!AlreadyHaveTx(txid, /*include_reconsiderable=*/false)) {
9345 addGetDataAndMaybeFlush(MSG_TX, txid);
9346 m_txrequest.RequestedData(
9347 pto->GetId(), txid,
9348 current_time + TX_REQUEST_PARAMS.getdata_interval);
9349 } else {
9350 // We have already seen this transaction, no need to download.
9351 // This is just a belt-and-suspenders, as this should already be
9352 // called whenever a transaction becomes AlreadyHaveTx().
9353 m_txrequest.ForgetInvId(txid);
9354 }
9355 }
9356
9357 if (!vGetData.empty()) {
9358 MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData);
9359 }
9360
9361 } // release cs_main
9362 MaybeSendFeefilter(*pto, *peer, current_time);
9363 return true;
9364}
9365
9366bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
9367 const avalanche::ProofRef &proof) {
9368 assert(proof != nullptr);
9369
9370 const avalanche::ProofId &proofid = proof->getId();
9371
9372 AddKnownProof(peer, proofid);
9373
9374 if (m_chainman.IsInitialBlockDownload()) {
9375 // We cannot reliably verify proofs during IBD, so bail out early and
9376 // keep the inventory as pending so it can be requested when the node
9377 // has synced.
9378 return true;
9379 }
9380
9381 const NodeId nodeid = node.GetId();
9382
9383 const bool isStaker = WITH_LOCK(node.cs_avalanche_pubkey,
9384 return node.m_avalanche_pubkey.has_value());
9385 auto saveProofIfStaker = [this, isStaker](const CNode &node,
9386 const avalanche::ProofId &proofid,
9387 const NodeId nodeid) -> bool {
9388 if (isStaker) {
9389 return m_avalanche->withPeerManager(
9390 [&](avalanche::PeerManager &pm) {
9391 return pm.saveRemoteProof(proofid, nodeid, true);
9392 });
9393 }
9394
9395 return false;
9396 };
9397
9398 {
9399 LOCK(cs_proofrequest);
9400 m_proofrequest.ReceivedResponse(nodeid, proofid);
9401
9402 if (AlreadyHaveProof(proofid)) {
9403 m_proofrequest.ForgetInvId(proofid);
9404 saveProofIfStaker(node, proofid, nodeid);
9405 return true;
9406 }
9407 }
9408
9409 // registerProof should not be called while cs_proofrequest because it
9410 // holds cs_main and that creates a potential deadlock during shutdown
9411
9412    avalanche::ProofRegistrationState state;
9413    if (m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
9414 return pm.registerProof(proof, state);
9415 })) {
9416 WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
9417 RelayProof(proofid);
9418
9419 node.m_last_proof_time = GetTime<std::chrono::seconds>();
9420
9421 LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
9422 nodeid, proofid.ToString());
9423 }
9424
9425 if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
9426 m_avalanche->withPeerManager(
9427 [&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
9428 Misbehaving(peer, state.GetRejectReason());
9429 return false;
9430 }
9431
9432 if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
9433 // It is possible that a proof contains a utxo we don't know yet, so
9434 // don't ban for this.
9435 return false;
9436 }
9437
9438 // Unlike other rejection reasons, we can expect lots of peers to send a
9439 // proof that we have dangling. In this case we don't want to print a lot
9440 // of useless debug messages; the proof will be polled as soon as it's considered again.
9441 if (!m_avalanche->reconcileOrFinalize(proof) &&
9442 state.GetResult() != avalanche::ProofRegistrationResult::DANGLING) {
9443 LogPrint(BCLog::AVALANCHE,
9444 "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
9445 state.IsValid() ? "not-worth-polling"
9446 : state.GetRejectReason(),
9447 nodeid, proofid.ToString());
9448 }
9449
9450 saveProofIfStaker(node, proofid, nodeid);
9451 return true;
9452}
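// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of net_processing.cpp): a minimal model,
// under assumed names, of the lock-scoping pattern used by
// ReceivedAvalancheProof() above. The request tracker is updated inside a
// narrow block that owns cs_proofrequest, and proof registration (which takes
// cs_main internally) only runs after that block ends, so the two mutexes are
// never held at the same time. All identifiers below (toy_cs_proofrequest,
// ToyRegisterProof, ...) are hypothetical stand-ins, not the real APIs.
#include <mutex>

std::mutex toy_cs_proofrequest;
std::mutex toy_cs_main;

void ToyMarkResponseReceived() { /* would update the request tracker */ }
bool ToyAlreadyHaveProof() { return false; }

bool ToyRegisterProof() {
    // The real registration path acquires cs_main internally.
    std::lock_guard<std::mutex> lock(toy_cs_main);
    return true;
}

bool ToyReceivedProof() {
    {
        // Scope cs_proofrequest tightly: only the tracker update needs it.
        std::lock_guard<std::mutex> lock(toy_cs_proofrequest);
        ToyMarkResponseReceived();
        if (ToyAlreadyHaveProof()) {
            return true;
        }
    } // cs_proofrequest is released here, before cs_main can be taken.

    // Safe: registration may lock cs_main without cs_proofrequest being held.
    return ToyRegisterProof();
}
// ---------------------------------------------------------------------------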
bool MoneyRange(const Amount nValue)
Definition: amount.h:171
static constexpr Amount MAX_MONEY
No amount larger than this (in satoshi) is valid.
Definition: amount.h:170
@ READ_STATUS_OK
@ READ_STATUS_INVALID
@ READ_STATUS_FAILED
enum ReadStatus_t ReadStatus
const std::string & BlockFilterTypeName(BlockFilterType filter_type)
Get the human-readable name for a filter type.
BlockFilterType
Definition: blockfilter.h:88
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int CFCHECKPT_INTERVAL
Interval between compact filter checkpoints.
@ CHAIN
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends,...
@ TRANSACTIONS
Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid,...
@ SCRIPTS
Scripts & signatures ok.
@ TREE
All parent headers found, difficulty matches, timestamp >= median previous, checkpoint.
arith_uint256 GetBlockProof(const CBlockIndex &block)
Definition: chain.cpp:74
CBlockLocator GetLocator(const CBlockIndex *index)
Get a locator for a block index entry.
Definition: chain.cpp:41
int64_t GetBlockProofEquivalentTime(const CBlockIndex &to, const CBlockIndex &from, const CBlockIndex &tip, const Consensus::Params &params)
Return the time it would take to redo the work difference between from and to, assuming the current h...
Definition: chain.cpp:89
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:112
#define Assert(val)
Identity function.
Definition: check.h:84
#define Assume(val)
Assume is the identity function.
Definition: check.h:97
Stochastic address manager.
Definition: addrman.h:68
void Connected(const CService &addr, NodeSeconds time=Now< NodeSeconds >())
We have successfully connected to this peer.
Definition: addrman.cpp:1321
void Good(const CService &addr, bool test_before_evict=true, NodeSeconds time=Now< NodeSeconds >())
Mark an entry as accessible, possibly moving it from "new" to "tried".
Definition: addrman.cpp:1294
bool Add(const std::vector< CAddress > &vAddr, const CNetAddr &source, std::chrono::seconds time_penalty=0s)
Attempt to add one or more addresses to addrman's new table.
Definition: addrman.cpp:1289
void SetServices(const CService &addr, ServiceFlags nServices)
Update an entry's service bits.
Definition: addrman.cpp:1325
Definition: banman.h:59
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:116
bool IsBanned(const CNetAddr &net_addr)
Return whether net_addr is banned.
Definition: banman.cpp:83
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:78
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool LookupFilterRange(int start_height, const CBlockIndex *stop_index, std::vector< BlockFilter > &filters_out) const
Get a range of filters between two heights on a chain.
bool LookupFilterHashRange(int start_height, const CBlockIndex *stop_index, std::vector< uint256 > &hashes_out) const
Get a range of filter hashes between two heights on a chain.
bool LookupFilterHeader(const CBlockIndex *block_index, uint256 &header_out) EXCLUSIVE_LOCKS_REQUIRED(!m_cs_headers_cache)
Get a single filter header by block.
std::vector< CTransactionRef > txn
std::vector< uint32_t > indices
A CService with information about it as peer.
Definition: protocol.h:443
ServiceFlags nServices
Serialized as uint64_t in V1, and as CompactSize in V2.
Definition: protocol.h:555
static constexpr SerParams V1_NETWORK
Definition: protocol.h:496
NodeSeconds nTime
Always included in serialization, except in the network format on INIT_PROTO_VERSION.
Definition: protocol.h:553
static constexpr SerParams V2_NETWORK
Definition: protocol.h:498
Nodes collect new transactions into a block, hash them into a hash tree, and scan through nonce value...
Definition: block.h:23
BlockHash GetHash() const
Definition: block.cpp:11
uint32_t nTime
Definition: block.h:29
BlockHash hashPrevBlock
Definition: block.h:27
Definition: block.h:60
std::vector< CTransactionRef > vtx
Definition: block.h:63
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: blockindex.h:25
bool IsValid(enum BlockValidity nUpTo=BlockValidity::TRANSACTIONS) const EXCLUSIVE_LOCKS_REQUIRED(
Check whether this block index entry is valid up to the passed validity level.
Definition: blockindex.h:191
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: blockindex.h:32
CBlockHeader GetBlockHeader() const
Definition: blockindex.h:117
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: blockindex.h:51
bool HaveNumChainTxs() const
Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot ...
Definition: blockindex.h:154
int64_t GetBlockTime() const
Definition: blockindex.h:160
unsigned int nTx
Number of transactions in this block.
Definition: blockindex.h:55
NodeSeconds Time() const
Definition: blockindex.h:156
CBlockIndex * GetAncestor(int height)
Efficiently find an ancestor of this block.
Definition: blockindex.cpp:62
BlockHash GetBlockHash() const
Definition: blockindex.h:130
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: blockindex.h:38
FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(
Definition: blockindex.h:97
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:44
bool IsWithinSizeConstraints() const
True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS (c...
Definition: bloom.cpp:93
An in-memory indexed chain of blocks.
Definition: chain.h:138
CBlockIndex * Tip() const
Returns the index entry for the tip of this chain, or nullptr if none.
Definition: chain.h:154
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...
Definition: chain.h:178
int Height() const
Return the maximal height in the chain.
Definition: chain.h:190
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:170
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system.
Definition: chainparams.h:86
const CBlock & GenesisBlock() const
Definition: chainparams.h:112
const Consensus::Params & GetConsensus() const
Definition: chainparams.h:98
CCoinsView that adds a memory cache for transactions to another CCoinsView.
Definition: coins.h:363
CCoinsView that brings transactions from a mempool into view.
Definition: txmempool.h:652
Definition: net.h:830
void ForEachNode(const NodeFn &func)
Definition: net.h:936
bool OutboundTargetReached(bool historicalBlockServingLimit) const
check if the outbound target is reached.
Definition: net.cpp:2903
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3098
bool GetNetworkActive() const
Definition: net.h:922
bool GetTryNewOutboundPeer() const
Definition: net.cpp:1623
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1627
int GetExtraBlockRelayCount() const
Definition: net.cpp:1655
void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc)
Definition: net.cpp:1452
void StartExtraBlockRelayPeers()
Definition: net.h:981
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2814
CSipHasher GetDeterministicRandomizer(uint64_t id) const
Get a unique deterministic randomizer.
Definition: net.cpp:3110
int GetExtraFullOutboundCount() const
Definition: net.cpp:1639
std::vector< CAddress > GetAddresses(size_t max_addresses, size_t max_pct, std::optional< Network > network) const
Return all or many randomly selected addresses, optionally by network.
Definition: net.cpp:2682
bool CheckIncomingNonce(uint64_t nonce)
Definition: net.cpp:399
bool ShouldRunInactivityChecks(const CNode &node, std::chrono::seconds now) const
Return true if we should disconnect the peer for failing an inactivity check.
Definition: net.cpp:1233
bool GetUseAddrmanOutgoing() const
Definition: net.h:923
Fee rate in satoshis per kilobyte: Amount / kB.
Definition: feerate.h:21
Amount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:54
Inv(ventory) message data.
Definition: protocol.h:590
bool IsMsgCmpctBlk() const
Definition: protocol.h:629
bool IsMsgBlk() const
Definition: protocol.h:621
std::string ToString() const
Definition: protocol.cpp:242
uint32_t type
Definition: protocol.h:592
bool IsMsgTx() const
Definition: protocol.h:609
bool IsMsgStakeContender() const
Definition: protocol.h:617
bool IsMsgFilteredBlk() const
Definition: protocol.h:625
uint256 hash
Definition: protocol.h:593
bool IsMsgProof() const
Definition: protocol.h:613
bool IsGenBlkMsg() const
Definition: protocol.h:634
void TransactionInvalidated(const CTransactionRef &tx, std::shared_ptr< const std::vector< Coin > > spent_coins)
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
Definition: merkleblock.h:147
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:159
bool IsRelayable() const
Whether this address should be relayed to other peers even if we can't reach it ourselves.
Definition: netaddress.h:245
bool IsRoutable() const
Definition: netaddress.cpp:516
static constexpr SerParams V1
Definition: netaddress.h:255
bool IsValid() const
Definition: netaddress.cpp:477
bool IsLocal() const
Definition: netaddress.cpp:451
@ V2
BIP155 encoding.
bool IsAddrV1Compatible() const
Check if the current object can be serialized in pre-ADDRv2/BIP155 format.
Definition: netaddress.cpp:532
Transport protocol agnostic message container.
Definition: net.h:259
Information about a peer.
Definition: net.h:389
Mutex cs_avalanche_pubkey
Definition: net.h:581
bool IsFeelerConn() const
Definition: net.h:512
const std::chrono::seconds m_connected
Unix epoch time at peer connection.
Definition: net.h:423
bool ExpectServicesFromConn() const
Definition: net.h:526
std::atomic< int > nVersion
Definition: net.h:433
std::atomic_bool m_has_all_wanted_services
Whether this peer provides all services that we want.
Definition: net.h:564
bool IsInboundConn() const
Definition: net.h:518
bool HasPermission(NetPermissionFlags permission) const
Definition: net.h:446
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:485
NodeId GetId() const
Definition: net.h:681
bool IsManualConn() const
Definition: net.h:506
std::atomic< int64_t > nTimeOffset
Definition: net.h:424
const std::string m_addr_name
Definition: net.h:429
std::string ConnectionTypeAsString() const
Definition: net.h:727
void SetCommonVersion(int greatest_common_version)
Definition: net.h:703
std::atomic< bool > m_bip152_highbandwidth_to
Definition: net.h:556
std::atomic_bool m_relays_txs
Whether we should relay transactions to this peer.
Definition: net.h:570
std::atomic< bool > m_bip152_highbandwidth_from
Definition: net.h:558
void PongReceived(std::chrono::microseconds ping_time)
A ping-pong round trip has completed successfully.
Definition: net.h:676
std::atomic_bool fSuccessfullyConnected
Definition: net.h:449
bool IsAddrFetchConn() const
Definition: net.h:514
uint64_t GetLocalNonce() const
Definition: net.h:683
const CAddress addr
Definition: net.h:426
void SetAddrLocal(const CService &addrLocalIn) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_local_mutex)
May not be called more than once.
Definition: net.cpp:631
bool IsBlockOnlyConn() const
Definition: net.h:508
int GetCommonVersion() const
Definition: net.h:707
bool IsFullOutboundConn() const
Definition: net.h:501
uint64_t nRemoteHostNonce
Definition: net.h:435
Mutex m_subver_mutex
cleanSubVer is a sanitized string of the user agent byte array we read from the wire.
Definition: net.h:442
std::atomic_bool fPauseSend
Definition: net.h:458
std::chrono::seconds m_nextGetAvaAddr
Definition: net.h:611
uint64_t nRemoteExtraEntropy
Definition: net.h:437
std::optional< std::pair< CNetMessage, bool > > PollMessage() EXCLUSIVE_LOCKS_REQUIRED(!m_msg_process_queue_mutex)
Poll the next message from the processing queue of this connection.
Definition: net.cpp:3032
uint64_t GetLocalExtraEntropy() const
Definition: net.h:684
SteadyMilliseconds m_last_poll
Definition: net.h:627
double getAvailabilityScore() const
Definition: net.cpp:2973
std::atomic_bool m_bloom_filter_loaded
Whether this peer has loaded a bloom filter.
Definition: net.h:576
void updateAvailabilityScore(double decayFactor)
The availability score is calculated using an exponentially weighted average.
Definition: net.cpp:2958
std::atomic< std::chrono::seconds > m_avalanche_last_message_fault
Definition: net.h:614
const bool m_inbound_onion
Whether this peer is an inbound onion, i.e.
Definition: net.h:432
std::atomic< int > m_avalanche_message_fault_counter
How many faulty messages this node has accumulated.
Definition: net.h:619
std::atomic< bool > m_avalanche_enabled
Definition: net.h:579
std::atomic< std::chrono::seconds > m_last_block_time
UNIX epoch time of the last block received from this peer that we had not yet seen (e....
Definition: net.h:636
std::atomic_bool fDisconnect
Definition: net.h:452
std::atomic< int > m_avalanche_message_fault_score
This score is incremented for every new faulty message received when m_avalanche_message_fault_counte...
Definition: net.h:625
std::atomic< std::chrono::seconds > m_last_tx_time
UNIX epoch time of the last transaction received from this peer that we had not yet seen (e....
Definition: net.h:644
void invsVoted(uint32_t count)
The node voted for count invs.
Definition: net.cpp:2954
bool IsAvalancheOutboundConnection() const
Definition: net.h:522
An encapsulated public key.
Definition: pubkey.h:31
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
Definition: bloom.h:115
Simple class for background tasks that should be run periodically or once "after a while".
Definition: scheduler.h:41
void scheduleEvery(Predicate p, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Repeat p until it returns false.
Definition: scheduler.cpp:114
void scheduleFromNow(Function f, std::chrono::milliseconds delta) EXCLUSIVE_LOCKS_REQUIRED(!newTaskMutex)
Call f once after the delta has passed.
Definition: scheduler.h:56
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:573
std::vector< uint8_t > GetKey() const
std::string ToStringAddrPort() const
SipHash-2-4.
Definition: siphash.h:14
uint64_t Finalize() const
Compute the 64-bit SipHash-2-4 of the data written so far.
Definition: siphash.cpp:83
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
Definition: siphash.cpp:36
std::set< std::reference_wrapper< const CTxMemPoolEntryRef >, CompareIteratorById > Parents
Definition: mempool_entry.h:70
CTxMemPool stores valid-according-to-the-current-best-chain transactions that may be included in the ...
Definition: txmempool.h:221
void removeConflicts(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.cpp:300
void RemoveUnbroadcastTx(const TxId &txid, const bool unchecked=false)
Removes a transaction from the unbroadcast set.
Definition: txmempool.cpp:825
CFeeRate GetMinFee() const
The minimum fee to get into the mempool, which may itself not be enough for larger-sized transactions...
Definition: txmempool.h:463
RecursiveMutex cs
This mutex needs to be locked when accessing mapTx or other members that are guarded by it.
Definition: txmempool.h:317
void removeRecursive(const CTransaction &tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.cpp:269
bool CompareTopologically(const TxId &txida, const TxId &txidb) const
Definition: txmempool.cpp:503
TxMempoolInfo info(const TxId &txid) const
Definition: txmempool.cpp:686
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:814
bool setAvalancheFinalized(const CTxMemPoolEntryRef &tx, const Consensus::Params &params, const CBlockIndex &active_chain_tip, std::vector< TxId > &finalizedTxIds) EXCLUSIVE_LOCKS_REQUIRED(bool isAvalancheFinalizedPreConsensus(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.h:546
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:535
CTransactionRef GetConflictTx(const COutPoint &prevout) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Get the transaction in the pool that spends the same prevout.
Definition: txmempool.cpp:739
bool exists(const TxId &txid) const
Definition: txmempool.h:535
std::set< TxId > GetUnbroadcastTxs() const
Returns transactions in unbroadcast set.
Definition: txmempool.h:574
auto withOrphanage(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_orphanage)
Definition: txmempool.h:595
const CFeeRate m_min_relay_feerate
Definition: txmempool.h:356
auto withConflicting(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_conflicting)
Definition: txmempool.h:603
void removeForFinalizedBlock(const std::unordered_set< TxId, SaltedTxIdHasher > &confirmedTxIdsInNonFinalizedBlocks) EXCLUSIVE_LOCKS_REQUIRED(cs)
Definition: txmempool.cpp:328
unsigned long size() const
Definition: txmempool.h:500
std::optional< txiter > GetIter(const TxId &txid) const EXCLUSIVE_LOCKS_REQUIRED(cs)
Returns an iterator to the given txid, if found.
Definition: txmempool.cpp:744
virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr< const CBlock > &block)
Notifies listeners that a block which builds directly on our current tip has been received and connec...
virtual void BlockConnected(ChainstateRole role, const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being connected.
virtual void BlockChecked(const CBlock &, const BlockValidationState &)
Notifies listeners of a block validation result.
virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload)
Notifies listeners when the block chain tip advances.
virtual void BlockDisconnected(const std::shared_ptr< const CBlock > &block, const CBlockIndex *pindex)
Notifies listeners of a block being disconnected.
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:1185
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(const CBlockIndex *GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(Chainstate ActiveChainstate)() const
Once the background validation chainstate has reached the height which is the base of the UTXO snapsh...
Definition: validation.h:1436
const CBlockIndex * GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The tip of the background sync chain.
Definition: validation.h:1456
MempoolAcceptResult ProcessTransaction(const CTransactionRef &tx, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Try to add a transaction to the memory pool.
bool IsInitialBlockDownload() const
Check whether we are doing an initial block download (synchronizing from disk or network)
bool ProcessNewBlock(const std::shared_ptr< const CBlock > &block, bool force_processing, bool min_pow_checked, bool *new_block, avalanche::Processor *const avalanche=nullptr) LOCKS_EXCLUDED(cs_main)
Process an incoming block.
RecursiveMutex & GetMutex() const LOCK_RETURNED(
Alias for cs_main.
Definition: validation.h:1317
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1443
bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
The state of a background sync (for net processing)
Definition: validation.h:1450
bool ProcessNewBlockHeaders(const std::vector< CBlockHeader > &block, bool min_pow_checked, BlockValidationState &state, const CBlockIndex **ppindex=nullptr, const std::optional< CCheckpointData > &test_checkpoints=std::nullopt) LOCKS_EXCLUDED(cs_main)
Process incoming block headers.
const arith_uint256 & MinimumChainWork() const
Definition: validation.h:1287
CChain & ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1437
void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(void ReportHeadersPresync(const arith_uint256 &work, int64_t height, int64_t timestamp)
Check to see if caches are out of balance and if so, call ResizeCoinsCaches() as needed.
node::BlockManager m_blockman
A single BlockManager instance is shared across each constructed chainstate to avoid duplicating bloc...
Definition: validation.h:1326
Definition: config.h:19
virtual uint64_t GetMaxBlockSize() const =0
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:118
bool empty() const
Definition: streams.h:152
size_type size() const
Definition: streams.h:151
void ignore(size_t num_ignore)
Definition: streams.h:276
int in_avail() const
Definition: streams.h:255
Fast randomness source.
Definition: random.h:411
uint64_t rand64() noexcept
Generate a random 64-bit integer.
Definition: random.h:432
Reads data from an underlying stream, while hashing the read data.
Definition: hash.h:150
A writer stream (for serialization) that computes a 256-bit hash.
Definition: hash.h:99
HeadersSyncState:
Definition: headerssync.h:98
@ FINAL
We're done syncing with this peer and can discard any remaining state.
@ PRESYNC
PRESYNC means the peer has not yet demonstrated their chain has sufficient work and we're only buildi...
size_t Count(NodeId peer) const
Count how many announcements a peer has (REQUESTED, CANDIDATE, and COMPLETED combined).
Definition: invrequest.h:309
size_t CountInFlight(NodeId peer) const
Count how many REQUESTED announcements a peer has.
Definition: invrequest.h:296
Interface for message handling.
Definition: net.h:779
static Mutex g_msgproc_mutex
Mutex for anything that is only accessed via the msg processing thread.
Definition: net.h:784
virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic< bool > &interrupt) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process protocol messages received from a given node.
virtual bool SendMessages(const Config &config, CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Send queued protocol messages to a given node.
virtual void InitializeNode(const Config &config, CNode &node, ServiceFlags our_services)=0
Initialize a peer (setup state, queue any initial messages)
virtual void FinalizeNode(const Config &config, const CNode &node)=0
Handle removal of a peer (clear state)
static bool HasFlag(NetPermissionFlags flags, NetPermissionFlags f)
ReadStatus InitData(const CBlockHeaderAndShortTxIDs &cmpctblock, const std::vector< CTransactionRef > &extra_txn)
bool IsTxAvailable(size_t index) const
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
virtual std::optional< std::string > FetchBlock(const Config &config, NodeId peer_id, const CBlockIndex &block_index)=0
Attempt to manually fetch block from a given peer.
virtual void SendPings()=0
Send ping message to all peers.
static std::unique_ptr< PeerManager > make(CConnman &connman, AddrMan &addrman, BanMan *banman, ChainstateManager &chainman, CTxMemPool &pool, avalanche::Processor *const avalanche, Options opts)
virtual void StartScheduledTasks(CScheduler &scheduler)=0
Begin running background tasks, should only be called once.
virtual bool IgnoresIncomingTxs()=0
Whether this node ignores txs received over p2p.
virtual void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, DataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic< bool > &interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex)=0
Process a single message from a peer.
virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const =0
Get statistics from node state.
virtual void UnitTestMisbehaving(const NodeId peer_id)=0
Public for unit testing.
virtual void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)=0
This function is used for testing the stale tip eviction logic, see denialofservice_tests....
virtual void CheckForStaleTipAndEvictPeers()=0
Evict extra outbound peers.
static RCUPtr make(Args &&...args)
Construct a new object that is owned by the pointer.
Definition: rcu.h:112
I randrange(I range) noexcept
Generate a random integer in the range [0..range), with range > 0.
Definition: random.h:266
A Span is an object that can refer to a contiguous sequence of objects.
Definition: span.h:94
int EraseTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase a tx by txid.
Definition: txpool.cpp:50
void EraseForPeer(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs announced by a peer (eg, after that peer disconnects)
Definition: txpool.cpp:94
std::vector< CTransactionRef > GetChildrenFromSamePeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx and were received from nodeid.
Definition: txpool.cpp:281
bool AddTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add a new transaction to the pool.
Definition: txpool.cpp:15
unsigned int LimitTxs(unsigned int max_txs, FastRandomContext &rng) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Limit the txs to the given maximum.
Definition: txpool.cpp:115
void EraseForBlock(const CBlock &block) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Erase all txs included in or invalidated by a new block.
Definition: txpool.cpp:239
std::vector< CTransactionRef > GetConflictTxs(const CTransactionRef &tx) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Definition: txpool.cpp:191
void AddChildrenToWorkSet(const CTransaction &tx) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Add any tx that lists a particular tx as a parent into the sending peer's work set.
Definition: txpool.cpp:151
std::vector< std::pair< CTransactionRef, NodeId > > GetChildrenFromDifferentPeer(const CTransactionRef &parent, NodeId nodeid) const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
Get all children that spend from this tx but were not received from nodeid.
Definition: txpool.cpp:326
bool IsValid() const
Definition: validation.h:119
std::string GetRejectReason() const
Definition: validation.h:123
Result GetResult() const
Definition: validation.h:122
std::string ToString() const
Definition: validation.h:125
bool IsInvalid() const
Definition: validation.h:120
256-bit unsigned big integer.
const std::vector< PrefilledProof > & getPrefilledProofs() const
Definition: compactproofs.h:76
uint64_t getShortID(const ProofId &proofid) const
const std::vector< uint64_t > & getShortIDs() const
Definition: compactproofs.h:79
ProofId getProofId() const
Definition: delegation.cpp:56
bool verify(DelegationState &state, CPubKey &auth) const
Definition: delegation.cpp:73
const DelegationId & getId() const
Definition: delegation.h:60
const LimitedProofId & getLimitedProofId() const
Definition: delegation.h:61
bool addNode(NodeId nodeid, const ProofId &proofid, size_t max_elements)
Node API.
Definition: peermanager.cpp:33
bool shouldRequestMoreNodes()
Returns true if we encountered a lack of nodes since the last call.
Definition: peermanager.h:340
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
Definition: peermanager.h:415
bool forPeer(const ProofId &proofid, Callable &&func) const
Definition: peermanager.h:423
void removeUnbroadcastProof(const ProofId &proofid)
const ProofRadixTree & getShareableProofsSnapshot() const
Definition: peermanager.h:530
bool isBoundToPeer(const ProofId &proofid) const
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
void forEachPeer(Callable &&func) const
Definition: peermanager.h:429
void setInvalid(const ProofId &proofid)
bool isInvalid(const ProofId &proofid) const
bool isImmature(const ProofId &proofid) const
auto getUnbroadcastProofs() const
Definition: peermanager.h:445
bool isInConflictingPool(const ProofId &proofid) const
void sendResponse(CNode *pfrom, Response response) const
Definition: processor.cpp:559
bool addToReconcile(const AnyVoteItem &item) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
Definition: processor.cpp:442
bool isStakingPreconsensusActivated(const CBlockIndex *pprev) const
Definition: processor.cpp:1544
int64_t getAvaproofsNodeCounter() const
Definition: processor.h:358
bool sendHello(CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
Send an avahello message.
Definition: processor.cpp:751
void setRecentlyFinalized(const uint256 &itemId) EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems)
Definition: processor.cpp:521
size_t getMaxElementPoll() const
Definition: processor.h:422
bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Definition: processor.cpp:838
void cleanupStakingRewards(const int minHeight) EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards
Definition: processor.cpp:983
ProofRef getLocalProof() const
Definition: processor.cpp:773
void acceptStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
Definition: processor.cpp:1101
bool reconcileOrFinalize(const ProofRef &proof) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Wrapper around the addToReconcile for proofs that adds back the finalization flag to the peer if it i...
Definition: processor.cpp:460
int getStakeContenderStatus(const StakeContenderId &contenderId) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Track votes on stake contenders.
Definition: processor.cpp:1078
void sendDelayedAvahello() EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds)
Definition: processor.cpp:756
void finalizeStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Definition: processor.cpp:1106
bool isPreconsensusActivated(const CBlockIndex *pprev) const
Definition: processor.cpp:1540
auto withPeerManager(Callable &&func) const EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
Definition: processor.h:320
bool registerVotes(NodeId nodeid, const Response &response, std::vector< VoteItemUpdate > &updates, bool &disconnect, std::string &error) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager
Definition: processor.cpp:565
void rejectStakeContender(const StakeContenderId &contenderId) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
Definition: processor.cpp:1128
void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager)
Definition: processor.cpp:817
std::vector< uint32_t > indices
std::string ToString() const
Definition: uint256.h:80
bool IsNull() const
Definition: uint256.h:32
std::string GetHex() const
Definition: uint256.cpp:16
Generate a new block, without valid proof-of-work.
Definition: miner.h:55
bool ReadRawBlock(std::vector< uint8_t > &block, const FlatFilePos &pos) const
CBlockIndex * LookupBlockIndex(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool LoadingBlocks() const
Definition: blockstorage.h:359
bool IsPruneMode() const
Whether running in -prune mode.
Definition: blockstorage.h:350
bool ReadBlock(CBlock &block, const FlatFilePos &pos) const
Functions for disk access for blocks.
256-bit opaque blob.
Definition: uint256.h:129
static const uint256 ZERO
Definition: uint256.h:134
@ BLOCK_CHECKPOINT
the block failed to meet one of our checkpoints
@ BLOCK_HEADER_LOW_WORK
the block header may be on a too-little-work chain
@ BLOCK_INVALID_HEADER
invalid proof of work or time too old
@ BLOCK_CACHED_INVALID
this block was cached as being invalid and we didn't store the reason why
@ BLOCK_CONSENSUS
invalid by consensus rules (excluding any below reasons)
@ BLOCK_MISSING_PREV
We don't have the previous block the checked one is built on.
@ BLOCK_INVALID_PREV
A block this one builds on is invalid.
@ BLOCK_MUTATED
the block's data didn't match the data committed to by the PoW
@ BLOCK_TIME_FUTURE
block timestamp was > 2 hours in the future (or our clock is bad)
@ BLOCK_RESULT_UNSET
initial value. Block has not yet been rejected
@ TX_MISSING_INPUTS
transaction was missing some of its inputs
@ TX_CHILD_BEFORE_PARENT
This tx's outputs are already spent in the mempool.
@ TX_MEMPOOL_POLICY
violated mempool's fee/size/descendant/etc limits
@ TX_PACKAGE_RECONSIDERABLE
fails some policy, but might be acceptable if submitted in a (different) package
@ TX_UNKNOWN
transaction was not validated because package failed
@ TX_PREMATURE_SPEND
transaction spends a coinbase too early, or violates locktime/sequence locks
@ TX_DUPLICATE
Tx already in mempool or in the chain.
@ TX_INPUTS_NOT_STANDARD
inputs failed policy rules
@ TX_CONFLICT
Tx conflicts with a finalized tx, i.e.
@ TX_NOT_STANDARD
otherwise didn't meet our local policy rules
@ TX_AVALANCHE_RECONSIDERABLE
fails some policy, but might be reconsidered by avalanche voting
@ TX_NO_MEMPOOL
this node does not have a mempool so can't validate the transaction
@ TX_RESULT_UNSET
initial value. Tx has not yet been rejected
@ TX_CONSENSUS
invalid by consensus rules
static size_t RecursiveDynamicUsage(const CScript &script)
Definition: core_memusage.h:12
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:7
int64_t NodeId
Definition: eviction.h:16
ChainstateRole
This enum describes the various roles a specific Chainstate instance can take.
Definition: chain.h:14
std::array< uint8_t, CPubKey::SCHNORR_SIZE > SchnorrSig
a Schnorr signature
Definition: key.h:25
bool fLogIPs
Definition: logging.cpp:24
#define LogPrintLevel(category, level,...)
Definition: logging.h:437
#define LogPrint(category,...)
Definition: logging.h:452
#define LogInfo(...)
Definition: logging.h:413
#define LogError(...)
Definition: logging.h:419
#define LogDebug(category,...)
Definition: logging.h:446
#define LogPrintf(...)
Definition: logging.h:424
@ AVALANCHE
Definition: logging.h:91
@ TXPACKAGES
Definition: logging.h:99
@ NETDEBUG
Definition: logging.h:98
@ MEMPOOLREJ
Definition: logging.h:85
@ MEMPOOL
Definition: logging.h:71
@ NET
Definition: logging.h:69
CSerializedNetMsg Make(std::string msg_type, Args &&...args)
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:36
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:48
const char * AVAPROOFSREQ
Request for missing avalanche proofs after an avaproofs message has been processed.
Definition: protocol.cpp:58
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:46
const char * BLOCK
The block message transmits a single serialized block.
Definition: protocol.cpp:30
const char * FILTERCLEAR
The filterclear message tells the receiving peer to remove a previously-set bloom filter.
Definition: protocol.cpp:38
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:29
const char * ADDRV2
The addrv2 message relays connection information for peers on the network just like the addr message,...
Definition: protocol.cpp:21
const char * SENDHEADERS
Indicates that a node prefers to receive new block announcements via a "headers" message rather than ...
Definition: protocol.cpp:39
const char * AVAPROOFS
The avaproofs message contains the proof short ids of all the valid proofs that we know.
Definition: protocol.cpp:57
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:34
const char * GETAVAPROOFS
The getavaproofs message requests an avaproofs message that provides the proof short ids of all the v...
Definition: protocol.cpp:56
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:41
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:31
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:49
const char * NOTFOUND
The notfound message is a reply to a getdata message which requested an object the receiving node doe...
Definition: protocol.cpp:35
const char * GETAVAADDR
The getavaaddr message requests an addr message from the receiving node, containing IP addresses of t...
Definition: protocol.cpp:55
const char * CMPCTBLOCK
Contains a CBlockHeaderAndShortTxIDs object - providing a header and list of "short txids".
Definition: protocol.cpp:42
const char * MEMPOOL
The mempool message requests the TXIDs of transactions that the receiving node has verified as valid ...
Definition: protocol.cpp:32
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:45
const char * TX
The tx message transmits a single transaction.
Definition: protocol.cpp:28
const char * AVAHELLO
Contains a delegation and a signature.
Definition: protocol.cpp:51
const char * FILTERADD
The filteradd message tells the receiving peer to add a single element to a previously-set bloom filt...
Definition: protocol.cpp:37
const char * ADDR
The addr (IP address) message relays connection information for peers on the network.
Definition: protocol.cpp:20
const char * VERSION
The version message provides information about the transmitting node to the receiving node at the beg...
Definition: protocol.cpp:18
const char * GETBLOCKS
The getblocks message requests an inv message that provides block header hashes starting from a parti...
Definition: protocol.cpp:26
const char * FEEFILTER
The feefilter message tells the receiving peer not to inv us any txs which do not meet the specified ...
Definition: protocol.cpp:40
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:27
const char * AVARESPONSE
Contains an avalanche::Response.
Definition: protocol.cpp:53
const char * GETDATA
The getdata message requests one or more data objects from another node.
Definition: protocol.cpp:24
const char * VERACK
The verack message acknowledges a previously-received version message, informing the connecting node ...
Definition: protocol.cpp:19
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:44
const char * GETCFHEADERS
getcfheaders requests a compact filter header and the filter hashes for a range of blocks,...
Definition: protocol.cpp:47
const char * SENDADDRV2
The sendaddrv2 message signals support for receiving ADDRV2 messages (BIP155).
Definition: protocol.cpp:22
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected.
Definition: protocol.cpp:33
const char * AVAPOLL
Contains an avalanche::Poll.
Definition: protocol.cpp:52
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:25
const char * AVAPROOF
Contains an avalanche::Proof.
Definition: protocol.cpp:54
const char * CFCHECKPT
cfcheckpt is a response to a getcfcheckpt request containing a vector of evenly spaced filter headers...
Definition: protocol.cpp:50
const char * GETBLOCKTXN
Contains a BlockTransactionsRequest Peer should respond with "blocktxn" message.
Definition: protocol.cpp:43
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:23
ShortIdProcessor< PrefilledProof, ShortIdProcessorPrefilledProofAdapter, ProofRefCompare > ProofShortIdProcessor
Definition: compactproofs.h:52
std::variant< const ProofRef, const CBlockIndex *, const StakeContenderId, const CTransactionRef > AnyVoteItem
Definition: processor.h:104
RCUPtr< const Proof > ProofRef
Definition: proof.h:186
Definition: messages.h:12
Implement std::hash so RCUPtr can be used as a key for maps or sets.
Definition: rcu.h:259
bool fListen
Definition: net.cpp:129
std::optional< CService > GetLocalAddrForPeer(CNode &node)
Returns a local address that we should advertise to this peer.
Definition: net.cpp:246
std::function< void(const CAddress &addr, const std::string &msg_type, Span< const uint8_t > data, bool is_incoming)> CaptureMessage
Defaults to CaptureMessageToFile(), but can be overridden by unit tests.
Definition: net.cpp:3202
std::string userAgent(const Config &config)
Definition: net.cpp:3150
bool IsReachable(enum Network net)
Definition: net.cpp:328
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:338
static const unsigned int MAX_SUBVERSION_LENGTH
Maximum length of the user agent string in version message.
Definition: net.h:71
static constexpr std::chrono::minutes TIMEOUT_INTERVAL
Time after which to disconnect, after waiting for a ping response (or inactivity).
Definition: net.h:65
NetPermissionFlags
static constexpr auto HEADERS_RESPONSE_TIME
How long to wait for a peer to respond to a getheaders request.
static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET
The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND based inc...
static constexpr size_t MAX_AVALANCHE_STALLED_TXIDS_PER_PEER
Maximum number of stalled avalanche txids to store per peer.
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT
Default time during which a peer must stall block download progress before being disconnected.
static constexpr auto GETAVAADDR_INTERVAL
Minimum time between 2 successive getavaaddr messages from the same peer.
static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL
Average delay between feefilter broadcasts.
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
static constexpr auto EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect.
static const unsigned int BLOCK_DOWNLOAD_WINDOW
Size of the "block download window": how far ahead of our current height do we fetch?...
static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche, const avalanche::ProofId &id)
Decide a response for an Avalanche poll about the given proof.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
static constexpr int HISTORICAL_BLOCK_AGE
Age after which a block is considered historical for purposes of rate limiting block relay.
static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL
Delay between rotating the peers we relay a particular address to.
static constexpr auto MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict.
static constexpr auto CHAIN_SYNC_TIMEOUT
Timeout for (unprotected) outbound peers to sync to our chainwork.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL
Average delay between local address broadcasts.
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
static constexpr uint64_t CMPCTBLOCKS_VERSION
The compactblocks version we support.
bool IsAvalancheMessageType(const std::string &msg_type)
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain.
static std::chrono::microseconds ComputeRequestTime(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams, std::chrono::microseconds current_time, bool preferred)
Compute the request time for this announcement, current time plus delays for:
static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL
Average delay between trickled inventory transmissions for inbound peers.
static constexpr DataRequestParameters TX_REQUEST_PARAMS
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout.
static const unsigned int MAX_GETDATA_SZ
Limit to avoid sending big packets.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE
Block download timeout base, expressed in multiples of the block interval (i.e.
static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT
If no proof was requested from a compact proof message after this timeout expired,...
static constexpr auto STALE_CHECK_INTERVAL
How frequently to check for stale tips.
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY
The number of most recently announced transactions a peer can request.
static constexpr auto UNCONDITIONAL_RELAY_DELAY
How long a transaction has to be in the mempool before it can unconditionally be relayed.
static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
static const unsigned int MAX_LOCATOR_SZ
The maximum number of entries in a locator.
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER
Additional block download timeout per parallel downloading peer (i.e.
static constexpr double MAX_ADDR_RATE_PER_SECOND
The maximum rate of address records we're willing to process on average.
static constexpr auto PING_INTERVAL
Time between pings automatically sent out for latency probing and keepalive.
static const int MAX_CMPCTBLOCK_DEPTH
Maximum depth of blocks we're willing to serve as compact blocks to peers when requested.
static constexpr DataRequestParameters PROOF_REQUEST_PARAMS
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE
Maximum number of headers to announce when relaying blocks with headers message.
static bool TooManyAnnouncements(const CNode &node, const InvRequestTracker< InvId > &requestTracker, const DataRequestParameters &requestParams)
static constexpr uint32_t MAX_GETCFHEADERS_SIZE
Maximum number of cf hashes that may be requested with one getcfheaders.
static constexpr auto BLOCK_STALLING_TIMEOUT_MAX
Maximum timeout for stalling block download.
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY
SHA256("main address relay")[0:8].
static constexpr size_t MAX_PCT_ADDR_TO_SEND
the maximum percentage of addresses from our addrman to return in response to a getaddr message.
static const unsigned int MAX_INV_SZ
The maximum number of entries in an 'inv' protocol message.
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND
Maximum rate of inventory items to send per second.
static constexpr size_t MAX_ADDR_TO_SEND
The maximum number of address records permitted in an ADDR message.
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK
Maximum number of outstanding CMPCTBLOCK requests for the same block.
static const unsigned int MAX_HEADERS_RESULTS
Number of headers sent in one getheaders result.
bool IsProxy(const CNetAddr &addr)
Definition: netbase.cpp:842
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
Definition: nodeid.h:15
uint256 GetPackageHash(const Package &package)
Definition: packages.cpp:129
std::vector< CTransactionRef > Package
A package is an ordered list of transactions.
Definition: packages.h:40
static constexpr Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:315
Response response
Definition: processor.cpp:536
SchnorrSig sig
Definition: processor.cpp:537
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL_LEGACY
Legacy maximum element poll.
Definition: processor.h:63
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:215
ServiceFlags GetDesirableServiceFlags(ServiceFlags services)
Gets the set of service flags which are "desirable" for a given peer.
Definition: protocol.cpp:207
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
Definition: protocol.h:25
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services),...
Definition: protocol.h:428
@ MSG_TX
Definition: protocol.h:574
@ MSG_AVA_STAKE_CONTENDER
Definition: protocol.h:582
@ MSG_AVA_PROOF
Definition: protocol.h:581
@ MSG_BLOCK
Definition: protocol.h:575
@ MSG_CMPCT_BLOCK
Defined in BIP152.
Definition: protocol.h:580
ServiceFlags
nServices flags.
Definition: protocol.h:336
@ NODE_NONE
Definition: protocol.h:339
@ NODE_NETWORK_LIMITED
Definition: protocol.h:366
@ NODE_BLOOM
Definition: protocol.h:353
@ NODE_NETWORK
Definition: protocol.h:343
@ NODE_COMPACT_FILTERS
Definition: protocol.h:361
@ NODE_AVALANCHE
Definition: protocol.h:381
static bool MayHaveUsefulAddressDB(ServiceFlags services)
Checks if a peer with the given service flags may be capable of having a robust address-storage DB.
Definition: protocol.h:436
static const int SHORT_IDS_BLOCKS_VERSION
short-id-based block download starts with this version
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version
static const int PROTOCOL_VERSION
network protocol versioning
static const int FEEFILTER_VERSION
"feefilter" tells peers to filter invs to you by fee starts with this version
static const int MIN_PEER_PROTO_VERSION
disconnect from peers older than this proto version
static const int INVALID_CB_NO_BAN_VERSION
not banning for invalid compact blocks starts with this version
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
static const int AVALANCHE_MAX_ELEMENT_BUMP_VERSION
Avalanche can poll up to 1024 items per message starting with this version.
void Shuffle(I first, I last, R &&rng)
More efficient than using std::shuffle on a FastRandomContext.
Definition: random.h:512
reverse_range< T > reverse_iterate(T &x)
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:25
static std::string ToString(const CService &ip)
Definition: db.h:36
void Unserialize(Stream &, V)=delete
#define LIMITED_STRING(obj, n)
Definition: serialize.h:637
static auto WithParams(const Params &params, T &&t)
Return a wrapper around t that (de)serializes it with specified parameter params.
Definition: serialize.h:1329
uint64_t ReadCompactSize(Stream &is, bool range_check=true)
Decode a CompactSize-encoded variable-length integer.
Definition: serialize.h:469
constexpr auto MakeUCharSpan(V &&v) -> decltype(UCharSpanCast(Span{std::forward< V >(v)}))
Like the Span constructor, but for (const) uint8_t member types only.
Definition: span.h:350
static const double AVALANCHE_STATISTICS_DECAY_FACTOR
Pre-computed decay factor for the avalanche statistics computation.
Definition: statistics.h:18
static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD
Refresh period for the avalanche statistics computation.
Definition: statistics.h:11
Definition: amount.h:21
static constexpr Amount zero() noexcept
Definition: amount.h:34
A BlockHash is a unique identifier for a block.
Definition: blockhash.h:13
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:108
std::vector< BlockHash > vHave
Definition: block.h:120
bool IsNull() const
Definition: block.h:135
std::chrono::microseconds m_ping_wait
Amount m_fee_filter_received
std::vector< int > vHeightInFlight
uint64_t m_addr_rate_limited
uint64_t m_addr_processed
int64_t presync_height
ServiceFlags their_services
Parameters that influence chain consensus.
Definition: params.h:34
int64_t nPowTargetSpacing
Definition: params.h:85
std::chrono::seconds PowTargetSpacing() const
Definition: params.h:87
const std::chrono::seconds overloaded_peer_delay
How long to delay requesting data from overloaded peers (see max_peer_request_in_flight).
const size_t max_peer_announcements
Maximum number of inventories to consider for requesting, per peer.
const std::chrono::seconds nonpref_peer_delay
How long to delay requesting data from non-preferred peers.
const NetPermissionFlags bypass_request_limits_permissions
Permission flags a peer requires to bypass the request limits tracking limits and delay penalty.
const std::chrono::microseconds getdata_interval
How long to wait (in microseconds) before a data request from an additional peer.
const size_t max_peer_request_in_flight
Maximum number of in-flight data requests from a peer.
Validation result for a transaction evaluated by MemPoolAccept (single or package).
Definition: validation.h:212
const ResultType m_result_type
Result type.
Definition: validation.h:223
const TxValidationState m_state
Contains information about why the transaction failed.
Definition: validation.h:226
@ MEMPOOL_ENTRY
Valid, transaction was already in the mempool.
@ VALID
Fully validated, valid.
static time_point now() noexcept
Return current system time or mocked time, if set.
Definition: time.cpp:29
std::chrono::time_point< NodeClock > time_point
Definition: time.h:21
Validation result for package mempool acceptance.
Definition: validation.h:315
PackageValidationState m_state
Definition: validation.h:316
std::map< TxId, MempoolAcceptResult > m_tx_results
Map from txid to finished MempoolAcceptResults.
Definition: validation.h:324
This is a radix tree storing values identified by a unique key.
Definition: radix.h:40
A TxId is the identifier of a transaction.
Definition: txid.h:14
std::chrono::seconds registration_time
Definition: peermanager.h:95
const ProofId & getProofId() const
Definition: peermanager.h:110
ProofRef proof
Definition: peermanager.h:91
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
#define AssertLockNotHeld(cs)
Definition: sync.h:163
#define LOCK2(cs1, cs2)
Definition: sync.h:309
#define LOCK(cs)
Definition: sync.h:306
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:357
static int count
#define EXCLUSIVE_LOCKS_REQUIRED(...)
Definition: threadsafety.h:56
#define GUARDED_BY(x)
Definition: threadsafety.h:45
#define LOCKS_EXCLUDED(...)
Definition: threadsafety.h:55
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:58
#define PT_GUARDED_BY(x)
Definition: threadsafety.h:46
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition: time.cpp:80
constexpr int64_t count_microseconds(std::chrono::microseconds t)
Definition: time.h:91
constexpr int64_t count_seconds(std::chrono::seconds t)
Definition: time.h:85
std::chrono::time_point< NodeClock, std::chrono::seconds > NodeSeconds
Definition: time.h:27
double CountSecondsDouble(SecondsDouble t)
Helper to count the seconds in any std::chrono::duration type.
Definition: time.h:104
NodeClock::time_point GetAdjustedTime()
Definition: timedata.cpp:35
void AddTimeData(const CNetAddr &ip, int64_t nOffsetSample)
Definition: timedata.cpp:45
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1202
#define TRACE6(context, event, a, b, c, d, e, f)
Definition: trace.h:45
@ AVALANCHE
Removed by avalanche vote.
std::string SanitizeString(std::string_view str, int rule)
Remove unsafe chars.
arith_uint256 CalculateHeadersWork(const std::vector< CBlockHeader > &headers)
Return the sum of the work on a given set of headers.
bool HasValidProofOfWork(const std::vector< CBlockHeader > &headers, const Consensus::Params &consensusParams)
Check whether the proof of work on each block header matches the value in nBits.
PackageMempoolAcceptResult ProcessNewPackage(Chainstate &active_chainstate, CTxMemPool &pool, const Package &package, bool test_accept)
Validate (and maybe submit) a package to the mempool.
bool IsBlockMutated(const CBlock &block)
Check if a block has been mutated (with respect to its merkle root).
std::vector< Coin > GetSpentCoins(const CTransactionRef &ptx, const CCoinsViewCache &coins_view)
Get the coins spent by ptx from the coins_view.
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ActiveChain().Tip() will not be pr...
Definition: validation.h:99
CMainSignals & GetMainSignals()