Bitcoin ABC  0.22.12
P2P Digital Currency
net_processing.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2016 The Bitcoin Core developers
3 // Distributed under the MIT software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 
6 #include <net_processing.h>
7 
8 #include <addrman.h>
9 #include <avalanche/processor.h>
10 #include <avalanche/proof.h>
11 #include <avalanche/validation.h>
12 #include <banman.h>
13 #include <blockdb.h>
14 #include <blockencodings.h>
15 #include <blockfilter.h>
16 #include <blockvalidity.h>
17 #include <chain.h>
18 #include <chainparams.h>
19 #include <config.h>
20 #include <consensus/validation.h>
21 #include <hash.h>
22 #include <index/blockfilterindex.h>
23 #include <merkleblock.h>
24 #include <netbase.h>
25 #include <netmessagemaker.h>
26 #include <policy/fees.h>
27 #include <policy/policy.h>
28 #include <primitives/block.h>
29 #include <primitives/transaction.h>
30 #include <random.h>
31 #include <reverse_iterator.h>
32 #include <scheduler.h>
33 #include <tinyformat.h>
34 #include <txmempool.h>
35 #include <util/check.h> // For NDEBUG compile time check
36 #include <util/strencodings.h>
37 #include <util/system.h>
38 #include <validation.h>
39 
40 #include <memory>
41 #include <typeinfo>
42 
// How long (seconds) an orphan transaction may live before it expires.
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
// Minimum interval (seconds) between sweeps for expired orphans.
static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
// How long to keep relayed transactions cached for re-serving.
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60};
// 15 minutes
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000;
// 1ms/header
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000;
// Number of outbound peers protected from chain-sync-based disconnect.
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
// 20 minutes
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;
// 10 minutes
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60;
// Interval (seconds) between checks for extra peers (see usage upstream).
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
// Minimum connection lifetime (seconds) before a peer is eligible for
// eviction — TODO confirm against the caller; not visible in this extract.
static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
// SipHash randomizer key for address-relay bucketing.
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
// Age (seconds, ~30 days) past which a block is considered "stale" for relay.
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
// Age (seconds, ~1 week) past which a block is considered "historical".
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
// Ping peers that have been quiet for this many seconds.
static constexpr int PING_INTERVAL = 2 * 60;
// Maximum number of entries accepted in a block locator.
static const unsigned int MAX_LOCATOR_SZ = 101;
// Maximum number of entries accepted in a single INV message.
static const unsigned int MAX_INV_SZ = 50000;
static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
              "Max protocol message length must be greater than largest "
              "possible INV message");
// Maximum number of in-flight transaction requests per peer.
static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
// Maximum number of queued transaction announcements per peer.
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
// Extra delay before requesting a transaction announced by an inbound peer,
// so outbound peers get the first chance (see CNodeState comment below).
static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{
    std::chrono::seconds{2}};
// How long to wait before re-requesting a transaction from another peer.
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{
    std::chrono::seconds{60}};
// Upper bound on the random delay added to GETDATA requests.
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{
    std::chrono::seconds{2}};
// Requests older than this are considered expired.
static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{
    GETDATA_TX_INTERVAL * 10};
// NOTE(review): the opening of this static_assert (which, per its message,
// compares MAX_GETDATA_RANDOM_DELAY against the inbound peer delay) is
// missing from this extract — restore from upstream before compiling.
    "To preserve security, MAX_GETDATA_RANDOM_DELAY should not "
    "exceed INBOUND_PEER_DELAY");
// Maximum number of entries accepted in a single GETDATA message.
static const unsigned int MAX_GETDATA_SZ = 1000;
// Maximum number of blocks requested from a single peer at once.
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
// Timeout (seconds) after which a stalling block download is abandoned.
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
// Maximum number of headers returned in a single HEADERS message.
static const unsigned int MAX_HEADERS_RESULTS = 2000;
// Maximum depth from the tip at which compact blocks are served.
static const int MAX_CMPCTBLOCK_DEPTH = 5;
// Maximum depth from the tip at which BLOCKTXN responses are served.
static const int MAX_BLOCKTXN_DEPTH = 10;
// Size of the moving window of blocks being downloaded ahead of validation.
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
// Base timeout for a block download, in microseconds.
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
// Additional timeout per parallel downloading peer, in microseconds.
static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
// Maximum number of headers announced unsolicited in one message.
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
// Maximum unconnecting headers before escalating (see nUnconnectingHeaders).
static const int MAX_UNCONNECTING_HEADERS = 10;
// Minimum recent blocks a NODE_NETWORK_LIMITED peer is expected to serve.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
// Average interval between broadcasts of our own local address.
static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
// Average interval between relays of learned addresses.
static const std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
// Average interval (seconds) between inventory broadcasts.
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
// NOTE(review): the initializer of INVENTORY_BROADCAST_MAX_PER_MB is missing
// from this extract — restore from upstream before compiling.
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
// Average interval (seconds) between FEEFILTER broadcasts.
static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
// Maximum delay (seconds) before an urgent feefilter change is broadcast.
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
// Maximum number of filters served per GETCFILTERS request.
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
// Maximum number of filter headers served per GETCFHEADERS request.
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;

// Maximum number of non-standard orphan transactions kept per node.
static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE = 5;
// An orphan transaction: one received before all of its inputs are known.
// NOTE(review): two members are missing from this extract — the transaction
// itself and the originating peer id, per the aggregate initializer
// `COrphanTx{tx, peer, ...}` in AddOrphanTx below. Restore from upstream.
struct COrphanTx {
    // When modifying, adapt the copy of this definition in tests/DoS_tests.
    // Absolute time (GetTime()-based seconds) after which this entry may be
    // erased (set to GetTime() + ORPHAN_TX_EXPIRE_TIME in AddOrphanTx).
    int64_t nTimeExpire;
    // Position of this entry in g_orphan_list, enabling O(1) removal.
    size_t list_pos;
};
234 
// All known orphan transactions, keyed by txid. Guarded by g_cs_orphans.
std::map<TxId, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);

// Erase all orphan transactions announced by the given peer (defined below).
void EraseOrphansFor(NodeId peer);

// Internal stuff
namespace {
// Number of peers with fSyncStarted set (decremented in FinalizeNode).
int nSyncStarted GUARDED_BY(cs_main) = 0;

// For each block hash, the peer it was received from. The meaning of the
// bool is not visible in this extract — presumably whether the peer may be
// punished if the block proves invalid; TODO confirm against upstream.
std::map<BlockHash, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);

// Rolling bloom filter of recently rejected transactions.
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
// Chain tip at which recentRejects was last valid — presumably used to reset
// the filter on tip changes; verify against the code that reads it.
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

// Protects g_recent_confirmed_transactions.
RecursiveMutex g_cs_recent_confirmed_transactions;
// Rolling bloom filter of transactions recently confirmed in blocks.
std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions
    GUARDED_BY(g_cs_recent_confirmed_transactions);
// A block that we have requested from a peer and not yet received.
struct QueuedBlock {
    // Hash of the requested block.
    BlockHash hash;
    // Block index entry; may be null if the header was unknown at request
    // time (see MarkBlockAsInFlight, which passes pindex != nullptr through
    // to fValidatedHeaders).
    const CBlockIndex *pindex;
    // Whether the block's header had been validated when it was requested.
    bool fValidatedHeaders;
    // Partially-reconstructed compact block, allocated only when the caller
    // of MarkBlockAsInFlight asked for an iterator (pit != nullptr).
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};
// For every block currently in flight: the requesting peer and the position
// of its QueuedBlock in that peer's vBlocksInFlight list.
std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
    mapBlocksInFlight GUARDED_BY(cs_main);
297 
299 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
300 
302 int nPreferredDownload GUARDED_BY(cs_main) = 0;
303 
305 int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
306 
308 int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
309 
311 std::atomic<int64_t> g_last_tip_update(0);
312 
314 typedef std::map<uint256, CTransactionRef> MapRelay;
315 MapRelay mapRelay GUARDED_BY(cs_main);
320 std::deque<std::pair<int64_t, MapRelay::iterator>>
321  vRelayExpiration GUARDED_BY(cs_main);
322 
// Orders iterators by the address of the element they point to. This gives a
// stable, cheap strict weak ordering for iterators into node-based containers
// (used below to keep sets of map iterators).
struct IteratorComparator {
    template <typename I> bool operator()(const I &a, const I &b) const {
        // std::addressof avoids surprises from any overloaded operator&.
        return std::addressof(*a) < std::addressof(*b);
    }
};
// Index of orphans by the outpoints they spend, so that when a parent
// arrives the dependent orphans can be found quickly.
std::map<COutPoint,
         std::set<std::map<TxId, COrphanTx>::iterator, IteratorComparator>>
    mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);

// Dense vector of orphan-map iterators; COrphanTx::list_pos indexes into
// this, allowing random eviction and O(1) erasure.
std::vector<std::map<TxId, COrphanTx>::iterator>
    g_orphan_list GUARDED_BY(g_cs_orphans);

// Circular buffer of extra transactions kept for compact block
// reconstruction (see AddToCompactExtraTransactions below).
static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
static std::vector<std::pair<TxHash, CTransactionRef>>
    vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
} // namespace
340 
341 namespace {
// Validation-specific state for each peer. All fields are protected by
// cs_main (entries live in mapNodeState, which is GUARDED_BY(cs_main)).
struct CNodeState {
    // The peer's address.
    const CService address;
    // Whether we have a fully established (VERACK'd) connection — presumably
    // set on VERACK; the setter is not visible in this extract.
    bool fCurrentlyConnected;
    // The best block with enough work that we know this peer has.
    const CBlockIndex *pindexBestKnownBlock;
    // Hash of the last unknown block this peer announced
    // (resolved lazily in ProcessBlockAvailability).
    BlockHash hashLastUnknownBlock;
    // The last full block we both have (updated in FindNextBlocksToDownload).
    const CBlockIndex *pindexLastCommonBlock;
    // The best header we have sent to this peer (see PeerHasHeader).
    const CBlockIndex *pindexBestHeaderSent;
    // Length of the current run of unconnecting headers from this peer.
    int nUnconnectingHeaders;
    // Whether we've started header sync with this peer (counted in
    // nSyncStarted).
    bool fSyncStarted;
    // When to potentially disconnect this peer for stalling header download.
    int64_t nHeadersSyncTimeout;
    // Since when this peer has been stalling the block download window
    // (0 = not stalling).
    int64_t nStallingSince;
    // Blocks currently being downloaded from this peer.
    std::list<QueuedBlock> vBlocksInFlight;
    // When the first in-flight block was requested (microseconds).
    int64_t nDownloadingSince;
    int nBlocksInFlight;
    // How many of the in-flight blocks had validated headers.
    int nBlocksInFlightValidHeaders;
    // Whether we consider this a preferred download peer
    // (see UpdatePreferredDownload).
    bool fPreferredDownload;
    // Whether this peer asked for block announcements via headers.
    bool fPreferHeaders;
    // Whether this peer asked for announcements via compact blocks.
    bool fPreferHeaderAndIDs;
    // Whether this peer can serve compact blocks.
    bool fProvidesHeaderAndIDs;
    // Whether the peer's compact-block version matches what we want.
    bool fSupportsDesiredCmpctVersion;

    // State for the outbound chain-sync disconnection logic
    // (see CHAIN_SYNC_TIMEOUT).
    struct ChainSyncTimeoutState {
        // Deadline for this peer to prove it has sufficient work.
        int64_t m_timeout;
        // Header our chain had when the timeout was set.
        const CBlockIndex *m_work_header;
        // Whether we've already sent a getheaders while waiting.
        bool m_sent_getheaders;
        // Whether this peer is protected from chain-sync disconnection
        // (counted in g_outbound_peers_with_protect_from_disconnect).
        bool m_protect;
    };

    ChainSyncTimeoutState m_chain_sync;

    // Time of this peer's last block announcement (see
    // UpdateLastBlockAnnounceTime).
    int64_t m_last_block_announcement;

    /*
     * State associated with transaction download.
     *
     * Tx download algorithm:
     *
     * When inv comes in, queue up (process_time, txid) inside the peer's
     * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
     * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
     *
     * The process_time for a transaction is set to nNow for outbound peers,
     * nNow + 2 seconds for inbound peers. This is the time at which we'll
     * consider trying to request the transaction from the peer in
     * SendMessages(). The delay for inbound peers is to allow outbound peers
     * a chance to announce before we request from inbound peers, to prevent
     * an adversary from using inbound connections to blind us to a
     * transaction (InvBlock).
     *
     * When we call SendMessages() for a given peer,
     * we will loop over the transactions in m_tx_process_time, looking
     * at the transactions whose process_time <= nNow. We'll request each
     * such transaction that we don't have already and that hasn't been
     * requested from another peer recently, up until we hit the
     * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
     * g_already_asked_for for each requested txid, storing the time of the
     * GETDATA request. We use g_already_asked_for to coordinate transaction
     * requests amongst our peers.
     *
     * For transactions that we still need but we have already recently
     * requested from some other peer, we'll reinsert (process_time, txid)
     * back into the peer's m_tx_process_time at the point in the future at
     * which the most recent GETDATA request would time out (ie
     * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
     * We add an additional delay for inbound peers, again to prefer
     * attempting download from outbound peers first.
     * We also add an extra small random delay up to 2 seconds
     * to avoid biasing some peers over others. (e.g., due to fixed ordering
     * of peer processing in ThreadMessageHandler).
     *
     * When we receive a transaction from a peer, we remove the txid from the
     * peer's m_tx_in_flight set and from their recently announced set
     * (m_tx_announced). We also clear g_already_asked_for for that entry, so
     * that if somehow the transaction is not accepted but also not added to
     * the reject filter, then we will eventually redownload from other
     * peers.
     */
    struct TxDownloadState {
        // Announced txids, ordered by when we should consider requesting
        // them (see RequestTx / CalculateTxGetDataTime).
        std::multimap<std::chrono::microseconds, TxId> m_tx_process_time;

        // All txids this peer has announced to us.
        std::set<TxId> m_tx_announced;

        // Txids we have requested from this peer, with request time.
        std::map<TxId, std::chrono::microseconds> m_tx_in_flight;

        // Next time to sweep m_tx_in_flight for expired requests.
        std::chrono::microseconds m_check_expiry_timer{0};
    };

    TxDownloadState m_tx_download;

    // Avalanche-related per-peer state.
    struct AvalancheState {
        // When we last polled this peer.
        std::chrono::time_point<std::chrono::steady_clock> last_poll;
    };

    AvalancheState m_avalanche_state;

    // Whether this peer connected to us (inbound).
    bool m_is_inbound;

    // Whether this peer was connected via -addnode / addnode RPC.
    bool m_is_manual_connection;

    CNodeState(CAddress addrIn, bool is_inbound, bool is_manual)
        : address(addrIn), m_is_inbound(is_inbound),
          m_is_manual_connection(is_manual) {
        fCurrentlyConnected = false;
        pindexBestKnownBlock = nullptr;
        hashLastUnknownBlock = BlockHash();
        pindexLastCommonBlock = nullptr;
        pindexBestHeaderSent = nullptr;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nHeadersSyncTimeout = 0;
        nStallingSince = 0;
        nDownloadingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fSupportsDesiredCmpctVersion = false;
        m_chain_sync = {0, nullptr, false, false};
        m_last_block_announcement = 0;
    }
};
530 
// Keeps track of the time (in microseconds) when transactions were requested
// last time
// NOTE(review): the declared type of g_already_asked_for is missing from this
// extract. Usage below (find/insert/update/erase, constructed with a
// MAX_INV_SZ capacity) suggests a size-bounded map from TxId to
// std::chrono::microseconds — restore the type from upstream.
 g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);

// Per-peer validation state, keyed by node id (see State() accessor below).
static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
539 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
540  std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
541  if (it == mapNodeState.end()) {
542  return nullptr;
543  }
544 
545  return &it->second;
546 }
547 
// Application-level peer state that does not require cs_main; each field is
// protected by its own mutex.
struct Peer {
    // This peer's node id.
    const NodeId m_id{0};

    // Protects the misbehavior fields below.
    Mutex m_misbehavior_mutex;
    // Accumulated misbehavior score for this peer.
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    // Whether this peer should be discouraged on disconnect.
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    Peer(NodeId id) : m_id(id) {}
};

using PeerRef = std::shared_ptr<Peer>;

// Protects g_peer_map.
Mutex g_peer_mutex;
// All Peer objects, keyed by node id; populated in InitializeNode and
// erased in FinalizeNode.
static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);
584 
589 static PeerRef GetPeerRef(NodeId id) {
590  LOCK(g_peer_mutex);
591  auto it = g_peer_map.find(id);
592  return it != g_peer_map.end() ? it->second : nullptr;
593 }
594 
// Recompute state->fPreferredDownload for this peer and keep the global
// nPreferredDownload counter consistent.
// NOTE(review): the line between the signature and the first statement
// (presumably the cs_main lock annotation and opening brace) is missing
// from this extract — restore from upstream before compiling.
static void UpdatePreferredDownload(const CNode &node, CNodeState *state)
    // Subtract the old value first so the counter stays balanced.
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload =
        (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) &&
        !node.IsAddrFetchConn() && !node.fClient;

    nPreferredDownload += state->fPreferredDownload;
}
606 
// Send our VERSION message to a newly connected peer.
static void PushNodeVersion(const Config &config, CNode &pnode,
                            CConnman &connman, int64_t nTime) {
    // Note that pnode.GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    int nNodeStartingHeight = pnode.GetMyStartingHeight();
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;
    uint64_t extraEntropy = pnode.GetLocalExtraEntropy();

    // Only echo the peer's address back if it is routable and not proxied.
    CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr)
                            ? addr
                            : CAddress(CService(), addr.nServices));
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    // NOTE(review): the message-maker lines of this PushMessage call
    // (presumably CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION,
    // PROTOCOL_VERSION, ...) are missing from this extract.
    connman.PushMessage(
        &pnode,
        uint64_t(nLocalNodeServices), nTime, addrYou, addrMe, nonce,
        userAgent(config), nNodeStartingHeight,
        ::g_relay_txes && pnode.m_tx_relay != nullptr, extraEntropy));

    if (fLogIPs) {
        // NOTE(review): the LogPrint(BCLog::NET, opener of this call is
        // missing from this extract.
        "send version message: version %d, blocks=%d, us=%s, them=%s, "
        "peer=%d\n",
        PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
        addrYou.ToString(), nodeid);
    } else {
        LogPrint(
            BCLog::NET,
            "send version message: version %d, blocks=%d, us=%s, peer=%d\n",
            PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
    }
    // NOTE(review): this log line looks out of place here — the same message
    // is logged at the end of FinalizeNode, and nothing is cleared in this
    // function. Verify against upstream; possibly an extraction artifact.
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
646 
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another
// peer.
// NOTE(review): the line between the signature and the first statement
// (presumably EXCLUSIVE_LOCKS_REQUIRED(cs_main) and the opening brace) is
// missing from this extract — restore from upstream before compiling.
static bool MarkBlockAsReceived(const BlockHash &hash)
    std::map<BlockHash,
             std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
        itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end()) {
        CNodeState *state = State(itInFlight->second.first);
        assert(state != nullptr);
        state->nBlocksInFlightValidHeaders -=
            itInFlight->second.second->fValidatedHeaders;
        if (state->nBlocksInFlightValidHeaders == 0 &&
            itInFlight->second.second->fValidatedHeaders) {
            // Last validated block on the queue was received.
            nPeersWithValidatedDownloads--;
        }
        if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
            // First block on the queue was received, update the start download
            // time for the next one
            state->nDownloadingSince =
                std::max(state->nDownloadingSince, GetTimeMicros());
        }
        state->vBlocksInFlight.erase(itInFlight->second.second);
        state->nBlocksInFlight--;
        // Receiving a block clears any stalling state for this peer.
        state->nStallingSince = 0;
        mapBlocksInFlight.erase(itInFlight);
        return true;
    }

    return false;
}
680 
// returns false, still setting pit, if the block was already in flight from the
// same peer
// pit will only be valid as long as the same cs_main lock is being held.
// NOTE(review): the line between the signature and the first statement
// (presumably EXCLUSIVE_LOCKS_REQUIRED(cs_main) and the opening brace) is
// missing from this extract — restore from upstream before compiling.
static bool
MarkBlockAsInFlight(const Config &config, CTxMemPool &mempool, NodeId nodeid,
                    const BlockHash &hash,
                    const Consensus::Params &consensusParams,
                    const CBlockIndex *pindex = nullptr,
                    std::list<QueuedBlock>::iterator **pit = nullptr)
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node.
    std::map<BlockHash,
             std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
        itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() &&
        itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    MarkBlockAsReceived(hash);

    // A PartiallyDownloadedBlock is allocated only when the caller wants an
    // iterator back (compact block reconstruction path).
    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
        state->vBlocksInFlight.end(),
        {hash, pindex, pindex != nullptr,
         std::unique_ptr<PartiallyDownloadedBlock>(
             pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)});
    state->nBlocksInFlight++;
    state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->nDownloadingSince = GetTimeMicros();
    }

    // First validated-header download from this peer: count it globally.
    if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
        nPeersWithValidatedDownloads++;
    }

    itInFlight = mapBlocksInFlight
                     .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                     .first;

    if (pit) {
        *pit = &itInFlight->second.second;
    }

    return true;
}
735 
737 static void ProcessBlockAvailability(NodeId nodeid)
738  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
739  CNodeState *state = State(nodeid);
740  assert(state != nullptr);
741 
742  if (!state->hashLastUnknownBlock.IsNull()) {
743  const CBlockIndex *pindex =
744  LookupBlockIndex(state->hashLastUnknownBlock);
745  if (pindex && pindex->nChainWork > 0) {
746  if (state->pindexBestKnownBlock == nullptr ||
747  pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
748  state->pindexBestKnownBlock = pindex;
749  }
750  state->hashLastUnknownBlock.SetNull();
751  }
752  }
753 }
754 
756 static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
757  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
758  CNodeState *state = State(nodeid);
759  assert(state != nullptr);
760 
761  ProcessBlockAvailability(nodeid);
762 
763  const CBlockIndex *pindex = LookupBlockIndex(hash);
764  if (pindex && pindex->nChainWork > 0) {
765  // An actually better block was announced.
766  if (state->pindexBestKnownBlock == nullptr ||
767  pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
768  state->pindexBestKnownBlock = pindex;
769  }
770  } else {
771  // An unknown block was announced; just assume that the latest one is
772  // the best one.
773  state->hashLastUnknownBlock = hash;
774  }
775 }
776 
// If this peer can serve compact blocks, ask it to announce new blocks via
// CMPCTBLOCK, keeping at most 3 such peers (per BIP152). If the peer is
// already in the announcing list, it is moved to the back (most recent).
static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid,
                                                 CConnman &connman)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);
    CNodeState *nodestate = State(nodeid);
    if (!nodestate) {
        LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
        return;
    }
    if (!nodestate->fProvidesHeaderAndIDs) {
        return;
    }
    // Already announcing? Refresh its position to the back of the list.
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
         it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
    }
    connman.ForNode(nodeid, [&connman](CNode *pfrom) {
        AssertLockHeld(cs_main);
        uint64_t nCMPCTBLOCKVersion = 1;
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            // Tell the oldest announcing peer to stop before adding this one.
            connman.ForNode(
                lNodesAnnouncingHeaderAndIDs.front(),
                [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) {
                    AssertLockHeld(cs_main);
                    connman.PushMessage(
                        pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion())
                                       .Make(NetMsgType::SENDCMPCT,
                                             /*fAnnounceUsingCMPCTBLOCK=*/false,
                                             nCMPCTBLOCKVersion));
                    return true;
                });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion())
                                       .Make(NetMsgType::SENDCMPCT,
                                             /*fAnnounceUsingCMPCTBLOCK=*/true,
                                             nCMPCTBLOCKVersion));
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
830 
// Return true if our tip has not advanced for more than three expected block
// intervals and no blocks are currently in flight (i.e. the tip may be stale).
// Initializes g_last_tip_update on first call so we don't flag immediately
// after startup.
static bool TipMayBeStale(const Consensus::Params &consensusParams)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    AssertLockHeld(cs_main);
    if (g_last_tip_update == 0) {
        g_last_tip_update = GetTime();
    }
    return g_last_tip_update <
               GetTime() - consensusParams.nPowTargetSpacing * 3 &&
           mapBlocksInFlight.empty();
}
841 
// Whether our tip is recent enough (within 20 expected block intervals) to
// fetch announced blocks directly instead of via headers-first sync.
// NOTE(review): the opening of the return expression (presumably
// `return ::ChainActive().Tip()->GetBlockTime() >`) is missing from this
// extract — restore from upstream before compiling.
static bool CanDirectFetch(const Consensus::Params &consensusParams)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
           GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
847 
848 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
849  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
850  if (state->pindexBestKnownBlock &&
851  pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
852  return true;
853  }
854  if (state->pindexBestHeaderSent &&
855  pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
856  return true;
857  }
858  return false;
859 }
860 
// Select up to `count` blocks to request from this peer, appended to vBlocks.
// If the download window is exhausted because another peer has the next
// needed block in flight, that peer's id is reported via nodeStaller.
// Requires cs_main.
static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                     std::vector<const CBlockIndex *> &vBlocks,
                                     NodeId &nodeStaller,
                                     const Consensus::Params &consensusParams)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    if (count == 0) {
        return;
    }

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->nChainWork <
            ::ChainActive().Tip()->nChainWork ||
        state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the forking
        // point. Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock = ::ChainActive()[std::min(
            state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be an
    // ancestor of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(
        state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
        return;
    }

    std::vector<const CBlockIndex *> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more
    // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
    // common with this peer. The +1 is so we can detect stalling, namely if we
    // would be able to download that next block if the window were 1 larger.
    int nWindowEnd =
        state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight =
        std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed)
        // successors of pindexWalk (towards pindexBestKnownBlock) into
        // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
        // expensive as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        // Walk forward via GetAncestor, then fill vToFetch backwards through
        // the pprev links.
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(
            pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding
        // the ones that are not yet downloaded and not in flight to vBlocks. In
        // the meantime, update pindexLastCommonBlock as long as all ancestors
        // are already downloaded, or if it's already part of our chain (and
        // therefore don't need it even if pruned).
        for (const CBlockIndex *pindex : vToFetch) {
            if (!pindex->IsValid(BlockValidity::TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded()) {
                    state->pindexLastCommonBlock = pindex;
                }
            } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if
                        // the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}
966 
// Forget the last GETDATA request time for a txid, allowing it to be
// re-requested immediately from any peer. Requires cs_main.
void EraseTxRequest(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    g_already_asked_for.erase(txid);
}
970 
971 std::chrono::microseconds GetTxRequestTime(const TxId &txid)
972  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
973  auto it = g_already_asked_for.find(txid);
974  if (it != g_already_asked_for.end()) {
975  return it->second;
976  }
977  return {};
978 }
979 
// Record the time at which we (last) requested this txid via GETDATA,
// inserting or updating the g_already_asked_for entry. Requires cs_main.
void UpdateTxRequestTime(const TxId &txid,
                         std::chrono::microseconds request_time)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    auto it = g_already_asked_for.find(txid);
    if (it == g_already_asked_for.end()) {
        g_already_asked_for.insert(std::make_pair(txid, request_time));
    } else {
        g_already_asked_for.update(it, request_time);
    }
}
990 
// Compute the earliest time at which we should consider requesting this txid
// from a peer: immediately if never requested, otherwise after the previous
// request would have timed out; inbound peers get an extra delay.
// Requires cs_main.
std::chrono::microseconds
CalculateTxGetDataTime(const TxId &txid, std::chrono::microseconds current_time,
                       bool use_inbound_delay)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::chrono::microseconds process_time;
    const auto last_request_time = GetTxRequestTime(txid);
    // First time requesting this tx
    if (last_request_time.count() == 0) {
        process_time = current_time;
    } else {
        // Randomize the delay to avoid biasing some peers over others (such as
        // due to fixed ordering of peer processing in ThreadMessageHandler)
        // NOTE(review): the final term of this expression (presumably a
        // random delay bounded by MAX_GETDATA_RANDOM_DELAY) is missing from
        // this extract — restore from upstream before compiling.
        process_time = last_request_time + GETDATA_TX_INTERVAL +
    }

    // We delay processing announcements from inbound peers
    if (use_inbound_delay) {
        process_time += INBOUND_PEER_TX_DELAY;
    }

    return process_time;
}
1014 
// Queue a transaction announcement from this peer for later download,
// unless the peer's announcement queues are full or the txid was already
// announced by it. Requires cs_main.
void RequestTx(CNodeState *state, const TxId &txid,
               std::chrono::microseconds current_time)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    CNodeState::TxDownloadState &peer_download_state = state->m_tx_download;
    if (peer_download_state.m_tx_announced.size() >=
            MAX_PEER_TX_ANNOUNCEMENTS ||
        peer_download_state.m_tx_process_time.size() >=
            MAX_PEER_TX_ANNOUNCEMENTS ||
        peer_download_state.m_tx_announced.count(txid)) {
        // Too many queued announcements from this peer, or we already have
        // this announcement
        return;
    }
    peer_download_state.m_tx_announced.insert(txid);

    // Calculate the time to try requesting this transaction. Use
    // fPreferredDownload as a proxy for outbound peers.
    const auto process_time =
        CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload);

    peer_download_state.m_tx_process_time.emplace(process_time, txid);
}
1037 
1038 } // namespace
1039 
1040 // This function is used for testing the stale tip eviction logic, see
1041 // denialofservice_tests.cpp
1042 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) {
1043  LOCK(cs_main);
1044  CNodeState *state = State(node);
1045  if (state) {
1046  state->m_last_block_announcement = time_in_seconds;
1047  }
1048 }
1049 
// Set up per-peer state when a new connection is established: create the
// CNodeState and Peer entries, and send our VERSION message to outbound
// peers (inbound peers send theirs first).
void PeerLogicValidation::InitializeNode(const Config &config, CNode *pnode) {
    CAddress addr = pnode->addr;
    std::string addrName = pnode->GetAddrName();
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct,
                                  std::forward_as_tuple(nodeid),
                                  std::forward_as_tuple(addr,
                                                        pnode->IsInboundConn(),
                                                        pnode->IsManualConn()));
    }
    {
        PeerRef peer = std::make_shared<Peer>(nodeid);
        LOCK(g_peer_mutex);
        g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
    }
    if (!pnode->IsInboundConn()) {
        PushNodeVersion(config, *pnode, m_connman, GetTime());
    }
}
1071 
// NOTE(review): the first line of this definition (presumably
// `void PeerLogicValidation::FinalizeNode(const Config&, NodeId nodeid,`) is
// missing from this extract — restore from upstream before compiling.
// Tears down all per-peer state on disconnect; fUpdateConnectionTime is set
// to true when the peer was well-behaved and fully connected.
                                       bool &fUpdateConnectionTime) {
    fUpdateConnectionTime = false;
    LOCK(cs_main);
    int misbehavior{0};
    {
        PeerRef peer = GetPeerRef(nodeid);
        assert(peer != nullptr);
        misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
                                return peer->m_misbehavior_score);
        LOCK(g_peer_mutex);
        g_peer_map.erase(nodeid);
    }
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (state->fSyncStarted) {
        nSyncStarted--;
    }

    // Only update the address's connection time for clean, fully-connected
    // peers with no accumulated misbehavior.
    if (misbehavior == 0 && state->fCurrentlyConnected) {
        fUpdateConnectionTime = true;
    }

    // Drop any blocks still in flight from this peer so they can be
    // re-requested elsewhere.
    for (const QueuedBlock &entry : state->vBlocksInFlight) {
        mapBlocksInFlight.erase(entry.hash);
    }
    EraseOrphansFor(nodeid);
    nPreferredDownload -= state->fPreferredDownload;
    nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
    assert(nPeersWithValidatedDownloads >= 0);
    g_outbound_peers_with_protect_from_disconnect -=
        state->m_chain_sync.m_protect;
    assert(g_outbound_peers_with_protect_from_disconnect >= 0);

    mapNodeState.erase(nodeid);

    if (mapNodeState.empty()) {
        // Do a consistency check after the last peer is removed.
        assert(mapBlocksInFlight.empty());
        assert(nPreferredDownload == 0);
        assert(nPeersWithValidatedDownloads == 0);
        assert(g_outbound_peers_with_protect_from_disconnect == 0);
    }
    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
1118 
// NOTE(review): the signature line of this function (presumably
// `bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {`) is
// missing from this extract — restore from upstream before compiling.
// Fills `stats` from the peer's CNodeState and Peer objects; returns false
// if the peer is unknown.
    {
        LOCK(cs_main);
        CNodeState *state = State(nodeid);
        if (state == nullptr) {
            return false;
        }
        // -1 signals "unknown" for heights we have no information about.
        stats.nSyncHeight = state->pindexBestKnownBlock
                                ? state->pindexBestKnownBlock->nHeight
                                : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock
                                  ? state->pindexLastCommonBlock->nHeight
                                  : -1;
        for (const QueuedBlock &queue : state->vBlocksInFlight) {
            if (queue.pindex) {
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
            }
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) {
        return false;
    }
    stats.m_misbehavior_score =
        WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);

    return true;
}
1148 
1150 //
1151 // mapOrphanTransactions
1152 //
1153 
// NOTE(review): the signature line (presumably
// `static void AddToCompactExtraTransactions(const CTransactionRef &tx)`)
// was lost in this rendered dump — confirm against the original source.
// Remembers `tx` in a fixed-size ring buffer of "extra" transactions used for
// compact block reconstruction.
1155  EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
1156  size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn",
// NOTE(review): line 1157 (the default value for the -blockreconstructionextratxn
// argument) is missing from this dump — presumably a DEFAULT_* constant; verify.
1158  if (max_extra_txn <= 0) {
// Feature disabled via configuration: keep nothing.
1159  return;
1160  }
1161 
// Lazily size the ring buffer on first use.
1162  if (!vExtraTxnForCompact.size()) {
1163  vExtraTxnForCompact.resize(max_extra_txn);
1164  }
1165 
// Overwrite the oldest slot and advance the write cursor (ring buffer).
1166  vExtraTxnForCompact[vExtraTxnForCompactIt] =
1167  std::make_pair(tx->GetHash(), tx);
1168  vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
1169 }
1170 
// NOTE(review): the signature line (presumably
// `bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)`) was lost in this
// rendered dump — confirm against the original source.
// Adds `tx` to the orphan pool, indexed by txid and by each input's prevout.
// Returns false if the tx is already present or too large to bother keeping.
1172  EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
1173  const TxId &txid = tx->GetId();
1174  if (mapOrphanTransactions.count(txid)) {
// Already stored; nothing to do.
1175  return false;
1176  }
1177 
1178  // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
1179  // attack. If a peer has a legitimate large transaction with a missing
1180  // parent then we assume it will rebroadcast it later, after the parent
1181  // transaction(s) have been mined or received.
1182  // 100 orphans, each of which is at most 100,000 bytes big is at most 10
1183  // megabytes of orphans and somewhat more by prev index (in the worst case):
1184  unsigned int sz = tx->GetTotalSize();
1185  if (sz > MAX_STANDARD_TX_SIZE) {
// NOTE(review): line 1186 (the `LogPrint(BCLog::MEMPOOL,` opener for this log
// statement) is missing from this dump.
1187  "ignoring large orphan tx (size: %u, hash: %s)\n", sz,
1188  txid.ToString());
1189  return false;
1190  }
1191 
// Insert with an expiry timestamp and its position in g_orphan_list (the
// flat vector used for O(1) random eviction).
1192  auto ret = mapOrphanTransactions.emplace(
1193  txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME,
1194  g_orphan_list.size()});
1195  assert(ret.second);
1196  g_orphan_list.push_back(ret.first);
// Index the new entry under each outpoint it spends, so arriving parents can
// find dependent orphans quickly.
1197  for (const CTxIn &txin : tx->vin) {
1198  mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
1199  }
1200 
// NOTE(review): line 1201 is missing from this dump — presumably the call
// `AddToCompactExtraTransactions(tx);`; verify against the original source.
1202 
1203  LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n",
1204  txid.ToString(), mapOrphanTransactions.size(),
1205  mapOrphanTransactionsByPrev.size());
1206  return true;
1207 }
1208 
1209 static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
1210  const auto it = mapOrphanTransactions.find(id);
1211  if (it == mapOrphanTransactions.end()) {
1212  return 0;
1213  }
1214  for (const CTxIn &txin : it->second.tx->vin) {
1215  const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1216  if (itPrev == mapOrphanTransactionsByPrev.end()) {
1217  continue;
1218  }
1219  itPrev->second.erase(it);
1220  if (itPrev->second.empty()) {
1221  mapOrphanTransactionsByPrev.erase(itPrev);
1222  }
1223  }
1224 
1225  size_t old_pos = it->second.list_pos;
1226  assert(g_orphan_list[old_pos] == it);
1227  if (old_pos + 1 != g_orphan_list.size()) {
1228  // Unless we're deleting the last entry in g_orphan_list, move the last
1229  // entry to the position we're deleting.
1230  auto it_last = g_orphan_list.back();
1231  g_orphan_list[old_pos] = it_last;
1232  it_last->second.list_pos = old_pos;
1233  }
1234  g_orphan_list.pop_back();
1235 
1236  mapOrphanTransactions.erase(it);
1237  return 1;
1238 }
1239 
// NOTE(review): the signature line (presumably `void EraseOrphansFor(NodeId peer)`,
// original line 1240) was lost in this rendered dump — confirm against source.
// Erases every orphan-pool entry that was received from the given peer.
1241  LOCK(g_cs_orphans);
1242  int nErased = 0;
1243  auto iter = mapOrphanTransactions.begin();
1244  while (iter != mapOrphanTransactions.end()) {
1245  // Increment to avoid iterator becoming invalid.
1246  const auto maybeErase = iter++;
1247  if (maybeErase->second.fromPeer == peer) {
// EraseOrphanTx returns 1 on success, so this counts erased entries.
1248  nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
1249  }
1250  }
1251  if (nErased > 0) {
1252  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased,
1253  peer);
1254  }
1255 }
1256 
// Trims the orphan pool down to at most nMaxOrphans entries.
// First sweeps out expired entries (at most once per sweep interval), then
// evicts random entries until the size limit holds.
// Returns the number of randomly evicted (not expired) entries.
1257 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) {
1258  LOCK(g_cs_orphans);
1259 
1260  unsigned int nEvicted = 0;
// Persisted across calls so the linear expiry scan is batched.
1261  static int64_t nNextSweep;
1262  int64_t nNow = GetTime();
1263  if (nNextSweep <= nNow) {
1264  // Sweep out expired orphan pool entries:
1265  int nErased = 0;
1266  int64_t nMinExpTime =
// NOTE(review): line 1267 (the initializer for nMinExpTime, presumably
// `nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL` or similar) is
// missing from this rendered dump — verify against the original source.
1268  auto iter = mapOrphanTransactions.begin();
1269  while (iter != mapOrphanTransactions.end()) {
// Advance before possibly erasing, to keep the iterator valid.
1270  const auto maybeErase = iter++;
1271  if (maybeErase->second.nTimeExpire <= nNow) {
1272  nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
1273  } else {
// Track the earliest future expiry among surviving entries.
1274  nMinExpTime =
1275  std::min(maybeErase->second.nTimeExpire, nMinExpTime);
1276  }
1277  }
1278  // Sweep again 5 minutes after the next entry that expires in order to
1279  // batch the linear scan.
1280  nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
1281  if (nErased > 0) {
1282  LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n",
1283  nErased);
1284  }
1285  }
1286  FastRandomContext rng;
// Random eviction keeps the pool bounded without biasing against any peer.
1287  while (mapOrphanTransactions.size() > nMaxOrphans) {
1288  // Evict a random orphan:
1289  size_t randompos = rng.randrange(g_orphan_list.size());
1290  EraseOrphanTx(g_orphan_list[randompos]->first);
1291  ++nEvicted;
1292  }
1293  return nEvicted;
1294 }
1295 
// Increments a peer's misbehavior score by `howmuch` (must be positive) and
// marks the peer for discouragement the moment the score first crosses
// DISCOURAGEMENT_THRESHOLD. `message` is an optional reason appended to logs.
1301 void Misbehaving(const NodeId pnode, const int howmuch,
1302  const std::string &message) {
1303  assert(howmuch > 0);
1304 
1305  PeerRef peer = GetPeerRef(pnode);
1306  if (peer == nullptr) {
// Peer already removed; nothing to score.
1307  return;
1308  }
1309 
1310  LOCK(peer->m_misbehavior_mutex);
1311 
1312  peer->m_misbehavior_score += howmuch;
1313  const std::string message_prefixed =
1314  message.empty() ? "" : (": " + message);
// The second clause makes the discouragement trigger exactly once: only on
// the call that crosses the threshold, not on every call above it.
1315  if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD &&
1316  peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
// NOTE(review): line 1317 (the `LogPrintf(`/`LogPrint(BCLog::NET,` opener for
// this log statement) is missing from this rendered dump.
1318  "Misbehaving: peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n",
1319  pnode, peer->m_misbehavior_score - howmuch,
1320  peer->m_misbehavior_score, message_prefixed);
1321  peer->m_should_discourage = true;
1322  } else {
1323  LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode,
1324  peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score,
1325  message_prefixed);
1326  }
1327 }
1328 
1329 // overloaded variant of above to operate on CNode*s
1330 static void Misbehaving(const CNode &node, int howmuch,
1331  const std::string &message) {
1332  Misbehaving(node.GetId(), howmuch, message);
1333 }
1334 
// NOTE(review): this is only the tail of a helper whose signature (original
// lines ~1335-1340) was lost in this rendered dump — presumably a predicate on
// a TxValidationState deciding whether the failure is a consensus violation;
// confirm against the original source before relying on this reading.
1341  return state.GetResult() == TxValidationResult::TX_CONSENSUS;
1342 }
1343 
// Potentially punishes a peer for sending us an invalid block, based on the
// BlockValidationResult carried by `state`. Returns true if the peer was
// punished (Misbehaving was called).
// NOTE(review): every `case BlockValidationResult::...:` label in this switch
// was lost in the rendered dump (they were hyperlinks); the branch bodies below
// are intact but their triggering result codes must be confirmed against the
// original source.
1354 static bool MaybePunishNodeForBlock(NodeId nodeid,
1355  const BlockValidationState &state,
1356  bool via_compact_block,
1357  const std::string &message = "") {
1358  switch (state.GetResult()) {
// (missing case label) — result considered non-punishable, fall through to
// the logging at the bottom.
1360  break;
1361  // The node is providing invalid data:
// (missing case label(s))
1364  if (!via_compact_block) {
// Compact-block relay is exempt because reconstruction errors are possible
// without peer fault.
1365  Misbehaving(nodeid, 100, message);
1366  return true;
1367  }
1368  break;
// (missing case label) — branch with its own scope so LOCK(cs_main) is valid.
1370  LOCK(cs_main);
1371  CNodeState *node_state = State(nodeid);
1372  if (node_state == nullptr) {
1373  break;
1374  }
1375 
1376  // Ban outbound (but not inbound) peers if on an invalid chain.
1377  // Exempt HB compact block peers and manual connections.
1378  if (!via_compact_block && !node_state->m_is_inbound &&
1379  !node_state->m_is_manual_connection) {
1380  Misbehaving(nodeid, 100, message);
1381  return true;
1382  }
1383  break;
1384  }
// (missing case label(s)) — unconditionally punishable results.
1388  Misbehaving(nodeid, 100, message);
1389  return true;
// (missing case label)
1391  // TODO: Use the state object to report this is probably not the
1392  // best idea. This is effectively unreachable, unless there is a bug
1393  // somewhere.
1394  Misbehaving(nodeid, 20, message);
1395  return true;
1396  // Conflicting (but not necessarily invalid) data or different policy:
// (missing case label)
1398  // TODO: Handle this much more gracefully (10 DoS points is super
1399  // arbitrary)
1400  Misbehaving(nodeid, 10, message);
1401  return true;
// (missing case label(s)) — explicitly non-punishable results.
1404  break;
1405  }
// No punishment issued; still log the reason when one was supplied.
1406  if (message != "") {
1407  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1408  }
1409  return false;
1410 }
1411 
// Potentially punishes a peer for sending us an invalid transaction, based on
// the TxValidationResult in `state`. Returns true if the peer was punished.
// NOTE(review): the `case TxValidationResult::...:` labels were lost in this
// rendered dump; confirm the branch-to-result mapping against the original.
1419 static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
1420  const std::string &message = "") {
1421  switch (state.GetResult()) {
// (missing case label) — non-punishable result.
1423  break;
1424  // The node is providing invalid data:
// (missing case label) — consensus-invalid transaction: maximum score.
1426  Misbehaving(nodeid, 100, message);
1427  return true;
1428  // Conflicting (but not necessarily invalid) data or different policy:
// (missing case labels, original lines 1429-1434) — policy/conflict results
// deliberately left unpunished.
1435  break;
1436  }
1437  if (message != "") {
1438  LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
1439  }
1440  return false;
1441 }
1442 
1444 //
1445 // blockchain -> download logic notification
1446 //
1447 
1448 // To prevent fingerprinting attacks, only send blocks/headers outside of the
1449 // active chain if they are no more than a month older (both in time, and in
1450 // best equivalent proof of work) than the best header chain we know about and
1451 // we fully-validated them at some point.
// Decides whether a peer's request for the block at `pindex` may be served.
// Blocks on the active chain are always allowed; off-chain blocks are allowed
// only if fully validated and recent enough (see fingerprinting comment above).
1452 static bool BlockRequestAllowed(const CBlockIndex *pindex,
1453  const Consensus::Params &consensusParams)
1454  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1455  AssertLockHeld(cs_main);
1456  if (::ChainActive().Contains(pindex)) {
1457  return true;
1458  }
// Off-chain: require script-level validation plus a recency bound in both
// wall-clock time and equivalent proof-of-work.
1459  return pindex->IsValid(BlockValidity::SCRIPTS) &&
1460  (pindexBestHeader != nullptr) &&
1461  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() <
// NOTE(review): original lines 1462-1463 and 1465 (the age-limit constant and
// the GetBlockProofEquivalentTime(...) call opening) are missing from this
// rendered dump — confirm the exact bounds against the original source.
1464  *pindexBestHeader, consensusParams) <
1466 }
1467 
// NOTE(review): the first line of this constructor's signature (original line
// 1468, presumably `PeerLogicValidation::PeerLogicValidation(CConnman *connman,
// BanMan *banman,`) was lost in this rendered dump.
// Wires up the net-processing layer: initializes the rolling bloom filters and
// schedules the periodic stale-tip / extra-peer check.
1469  CScheduler &scheduler,
1470  ChainstateManager &chainman,
1471  CTxMemPool &pool)
1472  : m_connman(connman), m_banman(banman), m_chainman(chainman),
1473  m_mempool(pool), m_stale_tip_check_time(0) {
1474  // Initialize global variables that cannot be constructed at startup.
1475  recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
1476 
1477  // Blocks don't typically have more than 4000 transactions, so this should
1478  // be at least six blocks (~1 hr) worth of transactions that we can store.
1479  // If the number of transactions appearing in a block goes up, or if we are
1480  // seeing getdata requests more than an hour after initial announcement, we
1481  // can increase this number.
1482  // The false positive rate of 1/1M should come out to less than 1
1483  // transaction per day that would be inadvertently ignored (which is the
1484  // same probability that we have in the reject filter).
1485  g_recent_confirmed_transactions.reset(
1486  new CRollingBloomFilter(24000, 0.000001));
1487 
1488  const Consensus::Params &consensusParams = Params().GetConsensus();
1489  // Stale tip checking and peer eviction are on two different timers, but we
1490  // don't want them to get out of sync due to drift in the scheduler, so we
1491  // combine them in one function and schedule at the quicker (peer-eviction)
1492  // timer.
1493  static_assert(
// NOTE(review): original line 1494 (the static_assert condition, presumably
// `EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,`) is missing from this
// rendered dump.
1495  "peer eviction timer should be less than stale tip check timer");
1496  scheduler.scheduleEvery(
1497  [this, &consensusParams]() {
1498  this->CheckForStaleTipAndEvictPeers(consensusParams);
1499  return true;
1500  },
1501  std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
1502 }
1503 
// NOTE(review): the opening of this method's signature (original lines
// 1504-1508, presumably `void PeerLogicValidation::BlockConnected(`) was lost
// in this rendered dump.
// Called when a block is connected to the active chain: evicts orphan pool
// entries spending outputs conflicted/consumed by the block, refreshes the
// last-tip-update timestamp, and records the block's txids in the
// recently-confirmed filter.
1509  const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
1510  {
1511  LOCK(g_cs_orphans);
1512 
1513  std::vector<TxId> vOrphanErase;
1514 
1515  for (const CTransactionRef &ptx : pblock->vtx) {
1516  const CTransaction &tx = *ptx;
1517 
1518  // Which orphan pool entries must we evict?
1519  for (const auto &txin : tx.vin) {
1520  auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
1521  if (itByPrev == mapOrphanTransactionsByPrev.end()) {
1522  continue;
1523  }
1524 
// Collect ids first; erasing inside this loop would invalidate iterators.
1525  for (auto mi = itByPrev->second.begin();
1526  mi != itByPrev->second.end(); ++mi) {
1527  const CTransaction &orphanTx = *(*mi)->second.tx;
1528  const TxId &orphanId = orphanTx.GetId();
1529  vOrphanErase.push_back(orphanId);
1530  }
1531  }
1532  }
1533 
1534  // Erase orphan transactions included or precluded by this block
1535  if (vOrphanErase.size()) {
1536  int nErased = 0;
1537  for (const auto &orphanId : vOrphanErase) {
1538  nErased += EraseOrphanTx(orphanId);
1539  }
// NOTE(review): original line 1540 (the `LogPrint(BCLog::MEMPOOL,` opener for
// this log statement) is missing from this rendered dump.
1541  "Erased %d orphan tx included or conflicted by block\n",
1542  nErased);
1543  }
1544 
// Used by stale-tip detection elsewhere in this file.
1545  g_last_tip_update = GetTime();
1546  }
1547  {
1548  LOCK(g_cs_recent_confirmed_transactions);
1549  for (const CTransactionRef &ptx : pblock->vtx) {
1550  g_recent_confirmed_transactions->insert(ptx->GetId());
1551  }
1552  }
1553 }
1554 
// NOTE(review): the opening of this method's signature (original line 1555,
// presumably `void PeerLogicValidation::BlockDisconnected(`) was lost in this
// rendered dump.
1556  const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
1557  // To avoid relay problems with transactions that were previously
1558  // confirmed, clear our filter of recently confirmed transactions whenever
1559  // there's a reorg.
1560  // This means that in a 1-block reorg (where 1 block is disconnected and
1561  // then another block reconnected), our filter will drop to having only one
1562  // block's worth of transactions in it, but that should be fine, since
1563  // presumably the most common case of relaying a confirmed transaction
1564  // should be just after a new block containing it is found.
1565  LOCK(g_cs_recent_confirmed_transactions);
1566  g_recent_confirmed_transactions->reset();
1567 }
1568 
1569 // All of the following cache a recent block, and are protected by
1570 // cs_most_recent_block
// NOTE(review): original line 1571 (presumably the declaration
// `static RecursiveMutex cs_most_recent_block;`) is missing from this rendered
// dump — the GUARDED_BY annotations below reference it.
1572 static std::shared_ptr<const CBlock>
1573  most_recent_block GUARDED_BY(cs_most_recent_block);
1574 static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
1575  most_recent_compact_block GUARDED_BY(cs_most_recent_block);
1576 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
1577 
// NOTE(review): the opening of this method's signature (original lines
// 1578-1582, presumably `void PeerLogicValidation::NewPoWValidBlock(`) was
// lost in this rendered dump.
// Fast-announce a newly PoW-valid block: cache it and push a CMPCTBLOCK to
// peers that prefer header-and-ids announcements and already have the parent.
1583  const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
1584  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
1585  std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
1586  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
1587 
1588  LOCK(cs_main);
1589 
// Only fast-announce blocks that advance our highest announced height; this
// static persists across calls.
1590  static int nHighestFastAnnounce = 0;
1591  if (pindex->nHeight <= nHighestFastAnnounce) {
1592  return;
1593  }
1594  nHighestFastAnnounce = pindex->nHeight;
1595 
1596  uint256 hashBlock(pblock->GetHash());
1597 
1598  {
// Publish to the recent-block cache consulted by getdata handling.
1599  LOCK(cs_most_recent_block);
1600  most_recent_block_hash = hashBlock;
1601  most_recent_block = pblock;
1602  most_recent_compact_block = pcmpctblock;
1603  }
1604 
1605  m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker,
1606  &hashBlock](CNode *pnode) {
1607  AssertLockHeld(cs_main);
1608 
1609  // TODO: Avoid the repeated-serialization here
1610  if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) {
1611  return;
1612  }
1613  ProcessBlockAvailability(pnode->GetId());
1614  CNodeState &state = *State(pnode->GetId());
1615  // If the peer has, or we announced to them the previous block already,
1616  // but we don't think they have this one, go ahead and announce it.
1617  if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
1618  PeerHasHeader(&state, pindex->pprev)) {
1619  LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n",
1620  "PeerLogicValidation::NewPoWValidBlock",
1621  hashBlock.ToString(), pnode->GetId());
// NOTE(review): original line 1622 (presumably `m_connman.PushMessage(`) is
// missing from this rendered dump.
1623  pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
1624  state.pindexBestHeaderSent = pindex;
1625  }
1626  });
1627 }
1628 
// NOTE(review): the opening of this method's signature (original lines
// 1629-1633, presumably `void PeerLogicValidation::UpdatedBlockTip(
// const CBlockIndex *pindexNew,`) was lost in this rendered dump.
// Called whenever the active chain tip changes: updates the best height and,
// outside of IBD, queues the new block hashes for relay to suitable peers.
1634  const CBlockIndex *pindexFork,
1635  bool fInitialDownload) {
1636  const int nNewHeight = pindexNew->nHeight;
1637  m_connman.SetBestHeight(nNewHeight);
1638 
1639  SetServiceFlagsIBDCache(!fInitialDownload);
1640  if (!fInitialDownload) {
1641  // Find the hashes of all blocks that weren't previously in the best
1642  // chain.
1643  std::vector<BlockHash> vHashes;
1644  const CBlockIndex *pindexToAnnounce = pindexNew;
1645  while (pindexToAnnounce != pindexFork) {
1646  vHashes.push_back(pindexToAnnounce->GetBlockHash());
1647  pindexToAnnounce = pindexToAnnounce->pprev;
1648  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
1649  // Limit announcements in case of a huge reorganization. Rely on
1650  // the peer's synchronization mechanism in that case.
1651  break;
1652  }
1653  }
1654  // Relay inventory, but don't relay old inventory during initial block
1655  // download.
1656  m_connman.ForEachNode([nNewHeight, &vHashes](CNode *pnode) {
// Skip peers whose starting height suggests they are far behind (>2000
// blocks) — they'll catch up via their own sync instead.
1657  if (nNewHeight > (pnode->nStartingHeight != -1
1658  ? pnode->nStartingHeight - 2000
1659  : 0)) {
// Announce oldest-first so peers receive hashes in chain order.
1660  for (const BlockHash &hash : reverse_iterate(vHashes)) {
1661  pnode->PushBlockHash(hash);
1662  }
1663  }
1664  });
// NOTE(review): original line 1665 (presumably
// `m_connman.WakeMessageHandler();`) is missing from this rendered dump.
1666  }
1667 }
1668 
// NOTE(review): the opening of this method's signature (original lines
// 1669-1673, presumably `void PeerLogicValidation::BlockChecked(
// const CBlock &block,`) was lost in this rendered dump.
// Handles a block validation verdict: possibly punishes the sourcing peer for
// an invalid block, or promotes them to high-bandwidth compact block relay on
// a clean, best-known block; finally forgets the block's source mapping.
1674  const BlockValidationState &state) {
1675  LOCK(cs_main);
1676 
1677  const BlockHash hash = block.GetHash();
1678  std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
1679  mapBlockSource.find(hash);
1680 
1681  // If the block failed validation, we know where it came from and we're
1682  // still connected to that peer, maybe punish.
1683  if (state.IsInvalid() && it != mapBlockSource.end() &&
1684  State(it->second.first)) {
1685  MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
1686  /*via_compact_block=*/!it->second.second);
1687  }
1688  // Check that:
1689  // 1. The block is valid
1690  // 2. We're not in initial block download
1691  // 3. This is currently the best block we're aware of. We haven't updated
1692  // the tip yet so we have no way to check this directly here. Instead we
1693  // just check that there are currently no other blocks in flight.
1694  else if (state.IsValid() &&
// NOTE(review): original line 1695 (presumably
// `!::ChainstateActive().IsInitialBlockDownload() &&`) is missing from this
// rendered dump.
1696  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
1697  if (it != mapBlockSource.end()) {
1698  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman);
1699  }
1700  }
1701 
1702  if (it != mapBlockSource.end()) {
1703  mapBlockSource.erase(it);
1704  }
1705 }
1706 
1708 //
1709 // Messages
1710 //
1711 
// Returns true if we already have (or have permanently rejected) the object
// described by `inv`, so it need not be requested again.
1712 static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool)
1713  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1714  switch (inv.type) {
1715  case MSG_TX: {
1716  assert(recentRejects);
1717  if (::ChainActive().Tip()->GetBlockHash() !=
1718  hashRecentRejectsChainTip) {
1719  // If the chain tip has changed previously rejected transactions
1720  // might be now valid, e.g. due to a nLockTime'd tx becoming
1721  // valid, or a double-spend. Reset the rejects filter and give
1722  // those txs a second chance.
1723  hashRecentRejectsChainTip =
// NOTE(review): original line 1724 (presumably
// `::ChainActive().Tip()->GetBlockHash();`) is missing from this rendered dump.
1725  recentRejects->reset();
1726  }
1727 
1728  const TxId txid(inv.hash);
1729  {
// Orphans count as "have": we hold the tx even though it isn't accepted yet.
1730  LOCK(g_cs_orphans);
1731  if (mapOrphanTransactions.count(txid)) {
1732  return true;
1733  }
1734  }
1735 
1736  {
1737  LOCK(g_cs_recent_confirmed_transactions);
1738  if (g_recent_confirmed_transactions->contains(txid)) {
1739  return true;
1740  }
1741  }
1742 
1743  return recentRejects->contains(txid) || mempool.exists(txid);
1744  }
1745  case MSG_BLOCK:
1746  return LookupBlockIndex(BlockHash(inv.hash)) != nullptr;
1747  }
1748  // Don't know what it is, just say we already got one
1749  return true;
1750 }
1751 
1752 void RelayTransaction(const TxId &txid, const CConnman &connman) {
1753  CInv inv(MSG_TX, txid);
1754  connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); });
1755 }
1756 
// Relays an address to a small, deterministically chosen subset of peers
// (2 if the address is reachable by us, 1 otherwise), rotating the chosen
// peers every 24 hours so their m_addr_knowns suppress repeats.
1757 static void RelayAddress(const CAddress &addr, bool fReachable,
1758  const CConnman &connman) {
1759  // Limited relaying of addresses outside our network(s)
1760  unsigned int nRelayNodes = fReachable ? 2 : 1;
1761 
1762  // Relay to a limited number of other nodes.
1763  // Use deterministic randomness to send to the same nodes for 24 hours at a
1764  // time so the m_addr_knowns of the chosen nodes prevent repeats
1765  uint64_t hashAddr = addr.GetHash();
1766  const CSipHasher hasher =
// NOTE(review): original line 1767 (presumably
// `connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)`) is
// missing from this rendered dump.
1768  .Write(hashAddr << 32)
// The day-granularity term rotates the peer selection every 24 hours.
1769  .Write((GetTime() + hashAddr) / (24 * 60 * 60));
1770  FastRandomContext insecure_rand;
1771 
// `best` keeps the top-nRelayNodes (hashKey, node) pairs, highest first.
1772  std::array<std::pair<uint64_t, CNode *>, 2> best{
1773  {{0, nullptr}, {0, nullptr}}};
1774  assert(nRelayNodes <= best.size());
1775 
1776  auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) {
1777  if (pnode->IsAddrRelayPeer()) {
1778  uint64_t hashKey =
1779  CSipHasher(hasher).Write(pnode->GetId()).Finalize();
// Insertion-sort the candidate into the fixed-size top list.
1780  for (unsigned int i = 0; i < nRelayNodes; i++) {
1781  if (hashKey > best[i].first) {
1782  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
1783  best.begin() + i + 1);
1784  best[i] = std::make_pair(hashKey, pnode);
1785  break;
1786  }
1787  }
1788  }
1789  };
1790 
1791  auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
// first == 0 marks an unfilled slot (fewer eligible peers than nRelayNodes).
1792  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
1793  best[i].second->PushAddress(addr, insecure_rand);
1794  }
1795  };
1796 
1797  connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
1798 }
1799 
// Serves a single block-type getdata item (MSG_BLOCK / MSG_FILTERED_BLOCK /
// MSG_CMPCT_BLOCK): applies anti-fingerprinting, bandwidth and prune-leak
// policies, then sends the block in the requested form.
1800 static void ProcessGetBlockData(const Config &config, CNode &pfrom,
1801  const CInv &inv, CConnman &connman,
1802  const std::atomic<bool> &interruptMsgProc) {
1803  const Consensus::Params &consensusParams =
1804  config.GetChainParams().GetConsensus();
1805 
1806  const BlockHash hash(inv.hash);
1807 
1808  bool send = false;
1809  std::shared_ptr<const CBlock> a_recent_block;
1810  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
1811  {
// NOTE(review): original line 1812 (presumably `LOCK(cs_most_recent_block);`)
// is missing from this rendered dump; the cache reads below depend on it.
1813  a_recent_block = most_recent_block;
1814  a_recent_compact_block = most_recent_compact_block;
1815  }
1816 
1817  bool need_activate_chain = false;
1818  {
1819  LOCK(cs_main);
1820  const CBlockIndex *pindex = LookupBlockIndex(hash);
1821  if (pindex) {
1822  if (pindex->HaveTxsDownloaded() &&
1823  !pindex->IsValid(BlockValidity::SCRIPTS) &&
1824  pindex->IsValid(BlockValidity::TREE)) {
1825  // If we have the block and all of its parents, but have not yet
1826  // validated it, we might be in the middle of connecting it (ie
1827  // in the unlock of cs_main before ActivateBestChain but after
1828  // AcceptBlock). In this case, we need to run ActivateBestChain
1829  // prior to checking the relay conditions below.
1830  need_activate_chain = true;
1831  }
1832  }
1833  } // release cs_main before calling ActivateBestChain
1834  if (need_activate_chain) {
1835  BlockValidationState state;
1836  if (!ActivateBestChain(config, state, a_recent_block)) {
1837  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
1838  state.ToString());
1839  }
1840  }
1841 
1842  LOCK(cs_main);
// Re-look-up after the potential ActivateBestChain above.
1843  const CBlockIndex *pindex = LookupBlockIndex(hash);
1844  if (pindex) {
// Anti-fingerprinting policy (see BlockRequestAllowed).
1845  send = BlockRequestAllowed(pindex, consensusParams);
1846  if (!send) {
// NOTE(review): original line 1847 (the `LogPrint(BCLog::NET,` opener for this
// log statement) is missing from this rendered dump.
1848  "%s: ignoring request from peer=%i for old "
1849  "block that isn't in the main chain\n",
1850  __func__, pfrom.GetId());
1851  }
1852  }
1853  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1854  // Disconnect node in case we have reached the outbound limit for serving
1855  // historical blocks.
1856  // Never disconnect whitelisted nodes.
1857  if (send && connman.OutboundTargetReached(true) &&
1858  (((pindexBestHeader != nullptr) &&
1859  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() >
// NOTE(review): original line 1860 (the historical-block age constant closing
// this comparison, plus the `||`) is missing from this rendered dump.
1861  inv.type == MSG_FILTERED_BLOCK) &&
1862  !pfrom.HasPermission(PF_NOBAN)) {
// NOTE(review): original line 1863 (the `LogPrint(BCLog::NET,` opener) is
// missing from this rendered dump.
1864  "historical block serving limit reached, disconnect peer=%d\n",
1865  pfrom.GetId());
1866 
1867  // disconnect node
1868  pfrom.fDisconnect = true;
1869  send = false;
1870  }
1871  // Avoid leaking prune-height by never sending blocks below the
1872  // NODE_NETWORK_LIMITED threshold.
1873  // Add two blocks buffer extension for possible races
1874  if (send && !pfrom.HasPermission(PF_NOBAN) &&
1875  ((((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) ==
// NOTE(review): original line 1876 (presumably `NODE_NETWORK_LIMITED) &&`) is
// missing from this rendered dump.
1877  ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
1878  (::ChainActive().Tip()->nHeight - pindex->nHeight >
1879  (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
// NOTE(review): original line 1880 (the `LogPrint(BCLog::NET,` opener) is
// missing from this rendered dump.
1881  "Ignore block request below NODE_NETWORK_LIMITED "
1882  "threshold from peer=%d\n",
1883  pfrom.GetId());
1884 
1885  // disconnect node and prevent it from stalling (would otherwise wait
1886  // for the missing block)
1887  pfrom.fDisconnect = true;
1888  send = false;
1889  }
1890  // Pruned nodes may have deleted the block, so check whether it's available
1891  // before trying to send.
1892  if (send && pindex->nStatus.hasData()) {
1893  std::shared_ptr<const CBlock> pblock;
// Prefer the in-memory most-recent-block cache over a disk read.
1894  if (a_recent_block &&
1895  a_recent_block->GetHash() == pindex->GetBlockHash()) {
1896  pblock = a_recent_block;
1897  } else {
1898  // Send block from disk
1899  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
1900  if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) {
1901  assert(!"cannot load block from disk");
1902  }
1903  pblock = pblockRead;
1904  }
1905  if (inv.type == MSG_BLOCK) {
1906  connman.PushMessage(&pfrom,
1907  msgMaker.Make(NetMsgType::BLOCK, *pblock));
1908  } else if (inv.type == MSG_FILTERED_BLOCK) {
1909  bool sendMerkleBlock = false;
1910  CMerkleBlock merkleBlock;
1911  if (pfrom.m_tx_relay != nullptr) {
1912  LOCK(pfrom.m_tx_relay->cs_filter);
1913  if (pfrom.m_tx_relay->pfilter) {
1914  sendMerkleBlock = true;
1915  merkleBlock =
1916  CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
1917  }
1918  }
1919  if (sendMerkleBlock) {
1920  connman.PushMessage(
1921  &pfrom,
1922  msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
1923  // CMerkleBlock just contains hashes, so also push any
1924  // transactions in the block the client did not see. This avoids
1925  // hurting performance by pointlessly requiring a round-trip.
1926  // Note that there is currently no way for a node to request any
1927  // single transactions we didn't send here - they must either
1928  // disconnect and retry or request the full block. Thus, the
1929  // protocol spec specified allows for us to provide duplicate
1930  // txn here, however we MUST always provide at least what the
1931  // remote peer needs.
1932  typedef std::pair<size_t, uint256> PairType;
1933  for (PairType &pair : merkleBlock.vMatchedTxn) {
1934  connman.PushMessage(
1935  &pfrom, msgMaker.Make(NetMsgType::TX,
1936  *pblock->vtx[pair.first]));
1937  }
1938  }
1939  // else
1940  // no response
1941  } else if (inv.type == MSG_CMPCT_BLOCK) {
1942  // If a peer is asking for old blocks, we're almost guaranteed they
1943  // won't have a useful mempool to match against a compact block, and
1944  // we don't feel like constructing the object for them, so instead
1945  // we respond with the full, non-compact block.
1946  int nSendFlags = 0;
1947  if (CanDirectFetch(consensusParams) &&
1948  pindex->nHeight >=
// NOTE(review): original line 1949 (the depth bound, presumably
// `::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {`) is missing from this
// rendered dump.
1950  CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
1951  connman.PushMessage(
1952  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
1953  cmpctblock));
1954  } else {
1955  connman.PushMessage(
1956  &pfrom,
1957  msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
1958  }
1959  }
1960 
1961  // Trigger the peer node to send a getblocks request for the next batch
1962  // of inventory.
1963  if (hash == pfrom.hashContinue) {
1964  // Bypass PushInventory, this must send even if redundant, and we
1965  // want it right after the last block so they don't wait for other
1966  // stuff first.
1967  std::vector<CInv> vInv;
1968  vInv.push_back(
1969  CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
1970  connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
1971  pfrom.hashContinue = BlockHash();
1972  }
1973  }
1974 }
1975 
// Drains the peer's queued getdata requests: answers all leading MSG_TX items
// (from mapRelay or, with privacy restrictions, the mempool), then at most one
// block-type item per call. Unanswered tx requests are reported via NOTFOUND.
1976 static void ProcessGetData(const Config &config, CNode &pfrom,
1977  CConnman &connman, const CTxMemPool &mempool,
1978  const std::atomic<bool> &interruptMsgProc)
1979  LOCKS_EXCLUDED(cs_main) {
1980  AssertLockNotHeld(cs_main);
1982  std::deque<CInv>::iterator it = pfrom.vRecvGetData.begin();
1983  std::vector<CInv> vNotFound;
1984  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
1985 
1986  // Note that if we receive a getdata for a MSG_TX from a block-relay-only
1987  // outbound peer, we will stop processing further getdata messages from this
1988  // peer (likely resulting in our peer eventually disconnecting us).
1989  if (pfrom.m_tx_relay != nullptr) {
1990  // mempool entries added before this time have likely expired from
1991  // mapRelay
1992  const std::chrono::seconds longlived_mempool_time =
1993  GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME;
1994  const std::chrono::seconds mempool_req =
1995  pfrom.m_tx_relay->m_last_mempool_req.load();
1996 
1997  LOCK(cs_main);
1998 
// Process the contiguous run of MSG_TX requests at the front of the queue.
1999  while (it != pfrom.vRecvGetData.end() && it->type == MSG_TX) {
2000  if (interruptMsgProc) {
2001  return;
2002  }
2003  // Don't bother if send buffer is too full to respond anyway.
2004  if (pfrom.fPauseSend) {
2005  break;
2006  }
2007 
2008  const CInv &inv = *it;
2009  it++;
2010 
2011  // Send stream from relay memory
2012  bool push = false;
2013  auto mi = mapRelay.find(inv.hash);
2014  int nSendFlags = 0;
2015  if (mi != mapRelay.end()) {
2016  connman.PushMessage(
2017  &pfrom,
2018  msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
2019  push = true;
2020  } else {
2021  auto txinfo = mempool.info(TxId(inv.hash));
2022  // To protect privacy, do not answer getdata using the mempool
2023  // when that TX couldn't have been INVed in reply to a MEMPOOL
2024  // request, or when it's too recent to have expired from
2025  // mapRelay.
2026  if (txinfo.tx &&
2027  ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
2028  (txinfo.m_time <= longlived_mempool_time))) {
2029  connman.PushMessage(
2030  &pfrom,
2031  msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
2032  push = true;
2033  }
2034  }
2035  if (!push) {
2036  vNotFound.push_back(inv);
2037  }
2038  }
2039  } // release cs_main
2040 
// Serve at most one block-type item per call; the rest stay queued for the
// next invocation (or remain if unknown type — see note below).
2041  if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) {
2042  const CInv &inv = *it;
2043  if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
2044  inv.type == MSG_CMPCT_BLOCK) {
2045  it++;
2046  ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc);
2047  }
2048  }
2049 
2050  // Unknown types in the GetData stay in vRecvGetData and block any future
2051  // message from this peer, see vRecvGetData check in ProcessMessages().
2052  // Depending on future p2p changes, we might either drop unknown getdata on
2053  // the floor or disconnect the peer.
2054 
// Erase everything that was consumed above (half-open range up to `it`).
2055  pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it);
2056 
2057  if (!vNotFound.empty()) {
2058  // Let the peer know that we didn't find what it asked for, so it
2059  // doesn't have to wait around forever. SPV clients care about this
2060  // message: it's needed when they are recursively walking the
2061  // dependencies of relevant unconfirmed transactions. SPV clients want
2062  // to do that because they want to know about (and store and rebroadcast
2063  // and risk analyze) the dependencies of transactions relevant to them,
2064  // without having to download the entire memory pool. Also, other nodes
2065  // can use these messages to automatically request a transaction from
2066  // some other peer that annnounced it, and stop waiting for us to
2067  // respond. In normal operation, we often send NOTFOUND messages for
2068  // parents of transactions that we relay; if a peer is missing a parent,
2069  // they may assume we have them and request the parents from us.
2070  connman.PushMessage(&pfrom,
2071  msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
2072  }
2073 }
2074 
2075 inline static void SendBlockTransactions(const CBlock &block,
2076  const BlockTransactionsRequest &req,
2077  CNode &pfrom, CConnman &connman) {
2078  BlockTransactions resp(req);
2079  for (size_t i = 0; i < req.indices.size(); i++) {
2080  if (req.indices[i] >= block.vtx.size()) {
2081  Misbehaving(pfrom, 100,
2082  "getblocktxn with out-of-bounds tx indices");
2083  return;
2084  }
2085  resp.txn[i] = block.vtx[req.indices[i]];
2086  }
2087  LOCK(cs_main);
2088  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
2089  int nSendFlags = 0;
2090  connman.PushMessage(&pfrom,
2091  msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
2092 }
2093 
2094 static void ProcessHeadersMessage(const Config &config, CNode &pfrom,
2095  CConnman &connman, CTxMemPool &mempool,
2096  ChainstateManager &chainman,
2097  const std::vector<CBlockHeader> &headers,
2098  bool via_compact_block) {
2099  const CChainParams &chainparams = config.GetChainParams();
2100  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
2101  size_t nCount = headers.size();
2102 
2103  if (nCount == 0) {
2104  // Nothing interesting. Stop asking this peers for more headers.
2105  return;
2106  }
2107 
2108  bool received_new_header = false;
2109  const CBlockIndex *pindexLast = nullptr;
2110  {
2111  LOCK(cs_main);
2112  CNodeState *nodestate = State(pfrom.GetId());
2113 
2114  // If this looks like it could be a block announcement (nCount <
2115  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
2116  // don't connect:
2117  // - Send a getheaders message in response to try to connect the chain.
2118  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
2119  // don't connect before giving DoS points
2120  // - Once a headers message is received that is valid and does connect,
2121  // nUnconnectingHeaders gets reset back to 0.
2122  if (!LookupBlockIndex(headers[0].hashPrevBlock) &&
2123  nCount < MAX_BLOCKS_TO_ANNOUNCE) {
2124  nodestate->nUnconnectingHeaders++;
2125  connman.PushMessage(
2126  &pfrom,
2127  msgMaker.Make(NetMsgType::GETHEADERS,
2128  ::ChainActive().GetLocator(pindexBestHeader),
2129  uint256()));
2130  LogPrint(
2131  BCLog::NET,
2132  "received header %s: missing prev block %s, sending getheaders "
2133  "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
2134  headers[0].GetHash().ToString(),
2135  headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight,
2136  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2137  // Set hashLastUnknownBlock for this peer, so that if we eventually
2138  // get the headers - even from a different peer - we can use this
2139  // peer to download.
2140  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
2141 
2142  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
2143  0) {
2144  // The peer is sending us many headers we can't connect.
2145  Misbehaving(pfrom, 20,
2146  strprintf("%d non-connecting headers",
2147  nodestate->nUnconnectingHeaders));
2148  }
2149  return;
2150  }
2151 
2152  BlockHash hashLastBlock;
2153  for (const CBlockHeader &header : headers) {
2154  if (!hashLastBlock.IsNull() &&
2155  header.hashPrevBlock != hashLastBlock) {
2156  Misbehaving(pfrom, 20, "non-continuous headers sequence");
2157  return;
2158  }
2159  hashLastBlock = header.GetHash();
2160  }
2161 
2162  // If we don't have the last header, then they'll have given us
2163  // something new (if these headers are valid).
2164  if (!LookupBlockIndex(hashLastBlock)) {
2165  received_new_header = true;
2166  }
2167  }
2168 
2169  BlockValidationState state;
2170  if (!chainman.ProcessNewBlockHeaders(config, headers, state, &pindexLast)) {
2171  if (state.IsInvalid()) {
2172  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
2173  "invalid header received");
2174  return;
2175  }
2176  }
2177 
2178  {
2179  LOCK(cs_main);
2180  CNodeState *nodestate = State(pfrom.GetId());
2181  if (nodestate->nUnconnectingHeaders > 0) {
2183  "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
2184  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2185  }
2186  nodestate->nUnconnectingHeaders = 0;
2187 
2188  assert(pindexLast);
2189  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
2190 
2191  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
2192  // because it is set in UpdateBlockAvailability. Some nullptr checks are
2193  // still present, however, as belt-and-suspenders.
2194 
2195  if (received_new_header &&
2196  pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
2197  nodestate->m_last_block_announcement = GetTime();
2198  }
2199 
2200  if (nCount == MAX_HEADERS_RESULTS) {
2201  // Headers message had its maximum size; the peer may have more
2202  // headers.
2203  // TODO: optimize: if pindexLast is an ancestor of
2204  // ::ChainActive().Tip or pindexBestHeader, continue from there
2205  // instead.
2206  LogPrint(
2207  BCLog::NET,
2208  "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
2209  pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight);
2210  connman.PushMessage(
2211  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
2212  ::ChainActive().GetLocator(pindexLast),
2213  uint256()));
2214  }
2215 
2216  bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
2217  // If this set of headers is valid and ends in a block with at least as
2218  // much work as our tip, download as much as possible.
2219  if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) &&
2220  ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
2221  std::vector<const CBlockIndex *> vToFetch;
2222  const CBlockIndex *pindexWalk = pindexLast;
2223  // Calculate all the blocks we'd need to switch to pindexLast, up to
2224  // a limit.
2225  while (pindexWalk && !::ChainActive().Contains(pindexWalk) &&
2226  vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2227  if (!pindexWalk->nStatus.hasData() &&
2228  !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
2229  // We don't have this block, and it's not yet in flight.
2230  vToFetch.push_back(pindexWalk);
2231  }
2232  pindexWalk = pindexWalk->pprev;
2233  }
2234  // If pindexWalk still isn't on our main chain, we're looking at a
2235  // very large reorg at a time we think we're close to caught up to
2236  // the main chain -- this shouldn't really happen. Bail out on the
2237  // direct fetch and rely on parallel download instead.
2238  if (!::ChainActive().Contains(pindexWalk)) {
2239  LogPrint(
2240  BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
2241  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
2242  } else {
2243  std::vector<CInv> vGetData;
2244  // Download as much as possible, from earliest to latest.
2245  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
2246  if (nodestate->nBlocksInFlight >=
2247  MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
2248  // Can't download any more from this peer
2249  break;
2250  }
2251  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
2252  MarkBlockAsInFlight(config, mempool, pfrom.GetId(),
2253  pindex->GetBlockHash(),
2254  chainparams.GetConsensus(), pindex);
2255  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
2256  pindex->GetBlockHash().ToString(), pfrom.GetId());
2257  }
2258  if (vGetData.size() > 1) {
2260  "Downloading blocks toward %s (%d) via headers "
2261  "direct fetch\n",
2262  pindexLast->GetBlockHash().ToString(),
2263  pindexLast->nHeight);
2264  }
2265  if (vGetData.size() > 0) {
2266  if (nodestate->fSupportsDesiredCmpctVersion &&
2267  vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
2268  pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
2269  // In any case, we want to download using a compact
2270  // block, not a regular one.
2271  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
2272  }
2273  connman.PushMessage(
2274  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
2275  }
2276  }
2277  }
2278  // If we're in IBD, we want outbound peers that will serve us a useful
2279  // chain. Disconnect peers that are on chains with insufficient work.
2281  nCount != MAX_HEADERS_RESULTS) {
2282  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
2283  // headers to fetch from this peer.
2284  if (nodestate->pindexBestKnownBlock &&
2285  nodestate->pindexBestKnownBlock->nChainWork <
2287  // This peer has too little work on their headers chain to help
2288  // us sync -- disconnect if using an outbound slot (unless
2289  // whitelisted or addnode).
2290  // Note: We compare their tip to nMinimumChainWork (rather than
2291  // ::ChainActive().Tip()) because we won't start block download
2292  // until we have a headers chain that has at least
2293  // nMinimumChainWork, even if a peer has a chain past our tip,
2294  // as an anti-DoS measure.
2295  if (pfrom.IsOutboundOrBlockRelayConn()) {
2296  LogPrintf("Disconnecting outbound peer %d -- headers "
2297  "chain has insufficient work\n",
2298  pfrom.GetId());
2299  pfrom.fDisconnect = true;
2300  }
2301  }
2302  }
2303 
2304  if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() &&
2305  nodestate->pindexBestKnownBlock != nullptr &&
2306  pfrom.m_tx_relay != nullptr) {
2307  // If this is an outbound full-relay peer, check to see if we should
2308  // protect it from the bad/lagging chain logic. Note that
2309  // block-relay-only peers are already implicitly protected, so we
2310  // only consider setting m_protect for the full-relay peers.
2311  if (g_outbound_peers_with_protect_from_disconnect <
2313  nodestate->pindexBestKnownBlock->nChainWork >=
2314  ::ChainActive().Tip()->nChainWork &&
2315  !nodestate->m_chain_sync.m_protect) {
2317  "Protecting outbound peer=%d from eviction\n",
2318  pfrom.GetId());
2319  nodestate->m_chain_sync.m_protect = true;
2320  ++g_outbound_peers_with_protect_from_disconnect;
2321  }
2322  }
2323  }
2324 }
2325 
2326 void static ProcessOrphanTx(const Config &config, CConnman &connman,
2327  CTxMemPool &mempool,
2328  std::set<TxId> &orphan_work_set)
2329  EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) {
2330  AssertLockHeld(cs_main);
2331  AssertLockHeld(g_cs_orphans);
2332  std::unordered_map<NodeId, uint32_t> rejectCountPerNode;
2333  bool done = false;
2334  while (!done && !orphan_work_set.empty()) {
2335  const TxId orphanTxId = *orphan_work_set.begin();
2336  orphan_work_set.erase(orphan_work_set.begin());
2337 
2338  auto orphan_it = mapOrphanTransactions.find(orphanTxId);
2339  if (orphan_it == mapOrphanTransactions.end()) {
2340  continue;
2341  }
2342 
2343  const CTransactionRef porphanTx = orphan_it->second.tx;
2344  const CTransaction &orphanTx = *porphanTx;
2345  NodeId fromPeer = orphan_it->second.fromPeer;
2346  // Use a new TxValidationState because orphans come from different peers
2347  // (and we call MaybePunishNodeForTx based on the source peer from the
2348  // orphan map, not based on the peer that relayed the previous
2349  // transaction).
2350  TxValidationState orphan_state;
2351 
2352  auto it = rejectCountPerNode.find(fromPeer);
2353  if (it != rejectCountPerNode.end() &&
2354  it->second > MAX_NON_STANDARD_ORPHAN_PER_NODE) {
2355  continue;
2356  }
2357 
2358  if (AcceptToMemoryPool(config, mempool, orphan_state, porphanTx,
2359  false /* bypass_limits */,
2360  Amount::zero() /* nAbsurdFee */)) {
2361  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n",
2362  orphanTxId.ToString());
2363  RelayTransaction(orphanTxId, connman);
2364  for (size_t i = 0; i < orphanTx.vout.size(); i++) {
2365  auto it_by_prev =
2366  mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i));
2367  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
2368  for (const auto &elem : it_by_prev->second) {
2369  orphan_work_set.insert(elem->first);
2370  }
2371  }
2372  }
2373  EraseOrphanTx(orphanTxId);
2374  done = true;
2375  } else if (orphan_state.GetResult() !=
2377  if (orphan_state.IsInvalid()) {
2378  // Punish peer that gave us an invalid orphan tx
2379  MaybePunishNodeForTx(fromPeer, orphan_state);
2380  LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n",
2381  orphanTxId.ToString());
2382  }
2383  // Has inputs but not accepted to mempool
2384  // Probably non-standard or insufficient fee
2385  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n",
2386  orphanTxId.ToString());
2387 
2388  assert(recentRejects);
2389  recentRejects->insert(orphanTxId);
2390 
2391  EraseOrphanTx(orphanTxId);
2392  done = true;
2393  }
2394  mempool.check(&::ChainstateActive().CoinsTip());
2395  }
2396 }
2397 
2418  CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type,
2419  uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff,
2420  const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
2421  const bool supported_filter_type =
2422  (filter_type == BlockFilterType::BASIC &&
2424  if (!supported_filter_type) {
2426  "peer %d requested unsupported block filter type: %d\n",
2427  peer.GetId(), static_cast<uint8_t>(filter_type));
2428  peer.fDisconnect = true;
2429  return false;
2430  }
2431 
2432  {
2433  LOCK(cs_main);
2434  stop_index = LookupBlockIndex(stop_hash);
2435 
2436  // Check that the stop block exists and the peer would be allowed to
2437  // fetch it.
2438  if (!stop_index ||
2439  !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
2440  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
2441  peer.GetId(), stop_hash.ToString());
2442  peer.fDisconnect = true;
2443  return false;
2444  }
2445  }
2446 
2447  uint32_t stop_height = stop_index->nHeight;
2448  if (start_height > stop_height) {
2449  LogPrint(
2450  BCLog::NET,
2451  "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
2452  */
2453  "start height %d and stop height %d\n",
2454  peer.GetId(), start_height, stop_height);
2455  peer.fDisconnect = true;
2456  return false;
2457  }
2458  if (stop_height - start_height >= max_height_diff) {
2460  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
2461  peer.GetId(), stop_height - start_height + 1, max_height_diff);
2462  peer.fDisconnect = true;
2463  return false;
2464  }
2465 
2466  filter_index = GetBlockFilterIndex(filter_type);
2467  if (!filter_index) {
2468  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
2469  BlockFilterTypeName(filter_type));
2470  return false;
2471  }
2472 
2473  return true;
2474 }
2475 
2486 static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv,
2487  const CChainParams &chain_params,
2488  CConnman &connman) {
2489  uint8_t filter_type_ser;
2490  uint32_t start_height;
2491  BlockHash stop_hash;
2492 
2493  vRecv >> filter_type_ser >> start_height >> stop_hash;
2494 
2495  const BlockFilterType filter_type =
2496  static_cast<BlockFilterType>(filter_type_ser);
2497 
2498  const CBlockIndex *stop_index;
2499  BlockFilterIndex *filter_index;
2501  peer, chain_params, filter_type, start_height, stop_hash,
2502  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
2503  return;
2504  }
2505 
2506  std::vector<BlockFilter> filters;
2507  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
2509  "Failed to find block filter in index: filter_type=%s, "
2510  "start_height=%d, stop_hash=%s\n",
2511  BlockFilterTypeName(filter_type), start_height,
2512  stop_hash.ToString());
2513  return;
2514  }
2515 
2516  for (const auto &filter : filters) {
2518  .Make(NetMsgType::CFILTER, filter);
2519  connman.PushMessage(&peer, std::move(msg));
2520  }
2521 }
2522 
2533 static void ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv,
2534  const CChainParams &chain_params,
2535  CConnman &connman) {
2536  uint8_t filter_type_ser;
2537  uint32_t start_height;
2538  BlockHash stop_hash;
2539 
2540  vRecv >> filter_type_ser >> start_height >> stop_hash;
2541 
2542  const BlockFilterType filter_type =
2543  static_cast<BlockFilterType>(filter_type_ser);
2544 
2545  const CBlockIndex *stop_index;
2546  BlockFilterIndex *filter_index;
2548  peer, chain_params, filter_type, start_height, stop_hash,
2549  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
2550  return;
2551  }
2552 
2553  uint256 prev_header;
2554  if (start_height > 0) {
2555  const CBlockIndex *const prev_block =
2556  stop_index->GetAncestor(static_cast<int>(start_height - 1));
2557  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
2559  "Failed to find block filter header in index: "
2560  "filter_type=%s, block_hash=%s\n",
2561  BlockFilterTypeName(filter_type),
2562  prev_block->GetBlockHash().ToString());
2563  return;
2564  }
2565  }
2566 
2567  std::vector<uint256> filter_hashes;
2568  if (!filter_index->LookupFilterHashRange(start_height, stop_index,
2569  filter_hashes)) {
2571  "Failed to find block filter hashes in index: filter_type=%s, "
2572  "start_height=%d, stop_hash=%s\n",
2573  BlockFilterTypeName(filter_type), start_height,
2574  stop_hash.ToString());
2575  return;
2576  }
2577 
2578  CSerializedNetMsg msg =
2580  .Make(NetMsgType::CFHEADERS, filter_type_ser,
2581  stop_index->GetBlockHash(), prev_header, filter_hashes);
2582  connman.PushMessage(&peer, std::move(msg));
2583 }
2584 
2595 static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv,
2596  const CChainParams &chain_params,
2597  CConnman &connman) {
2598  uint8_t filter_type_ser;
2599  BlockHash stop_hash;
2600 
2601  vRecv >> filter_type_ser >> stop_hash;
2602 
2603  const BlockFilterType filter_type =
2604  static_cast<BlockFilterType>(filter_type_ser);
2605 
2606  const CBlockIndex *stop_index;
2607  BlockFilterIndex *filter_index;
2609  peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
2610  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
2611  stop_index, filter_index)) {
2612  return;
2613  }
2614 
2615  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
2616 
2617  // Populate headers.
2618  const CBlockIndex *block_index = stop_index;
2619  for (int i = headers.size() - 1; i >= 0; i--) {
2620  int height = (i + 1) * CFCHECKPT_INTERVAL;
2621  block_index = block_index->GetAncestor(height);
2622 
2623  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
2625  "Failed to find block filter header in index: "
2626  "filter_type=%s, block_hash=%s\n",
2627  BlockFilterTypeName(filter_type),
2628  block_index->GetBlockHash().ToString());
2629  return;
2630  }
2631  }
2632 
2634  .Make(NetMsgType::CFCHECKPT, filter_type_ser,
2635  stop_index->GetBlockHash(), headers);
2636  connman.PushMessage(&peer, std::move(msg));
2637 }
2638 
2640  const Config &config, CNode &pfrom, const std::string &msg_type,
2641  CDataStream &vRecv, int64_t nTimeReceived,
2642  const std::atomic<bool> &interruptMsgProc) {
2643  const CChainParams &chainparams = config.GetChainParams();
2644  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
2645  SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
2646  if (gArgs.IsArgSet("-dropmessagestest") &&
2647  GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) {
2648  LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
2649  return;
2650  }
2651 
2652  if (!(pfrom.GetLocalServices() & NODE_BLOOM) &&
2653  (msg_type == NetMsgType::FILTERLOAD ||
2654  msg_type == NetMsgType::FILTERADD)) {
2655  if (pfrom.nVersion >= NO_BLOOM_VERSION) {
2656  Misbehaving(pfrom, 100, "no-bloom-version");
2657  } else {
2658  pfrom.fDisconnect = true;
2659  }
2660  return;
2661  }
2662 
2663  if (msg_type == NetMsgType::VERSION) {
2664  // Each connection can only send one version message
2665  if (pfrom.nVersion != 0) {
2666  Misbehaving(pfrom, 1, "redundant version message");
2667  return;
2668  }
2669 
2670  int64_t nTime;
2671  CAddress addrMe;
2672  CAddress addrFrom;
2673  uint64_t nNonce = 1;
2674  uint64_t nServiceInt;
2675  ServiceFlags nServices;
2676  int nVersion;
2677  int nSendVersion;
2678  std::string cleanSubVer;
2679  int nStartingHeight = -1;
2680  bool fRelay = true;
2681  uint64_t nExtraEntropy = 1;
2682 
2683  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
2684  nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
2685  nServices = ServiceFlags(nServiceInt);
2686  if (!pfrom.IsInboundConn()) {
2687  m_connman.SetServices(pfrom.addr, nServices);
2688  }
2689  if (pfrom.ExpectServicesFromConn() &&
2690  !HasAllDesirableServiceFlags(nServices)) {
2692  "peer=%d does not offer the expected services "
2693  "(%08x offered, %08x expected); disconnecting\n",
2694  pfrom.GetId(), nServices,
2695  GetDesirableServiceFlags(nServices));
2696  pfrom.fDisconnect = true;
2697  return;
2698  }
2699 
2700  if (nVersion < MIN_PEER_PROTO_VERSION) {
2701  // disconnect from peers older than this proto version
2703  "peer=%d using obsolete version %i; disconnecting\n",
2704  pfrom.GetId(), nVersion);
2705  pfrom.fDisconnect = true;
2706  return;
2707  }
2708 
2709  if (!vRecv.empty()) {
2710  vRecv >> addrFrom >> nNonce;
2711  }
2712  if (!vRecv.empty()) {
2713  std::string strSubVer;
2714  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
2715  cleanSubVer = SanitizeString(strSubVer);
2716  }
2717  if (!vRecv.empty()) {
2718  vRecv >> nStartingHeight;
2719  }
2720  if (!vRecv.empty()) {
2721  vRecv >> fRelay;
2722  }
2723  if (!vRecv.empty()) {
2724  vRecv >> nExtraEntropy;
2725  }
2726  // Disconnect if we connected to ourself
2727  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
2728  LogPrintf("connected to self at %s, disconnecting\n",
2729  pfrom.addr.ToString());
2730  pfrom.fDisconnect = true;
2731  return;
2732  }
2733 
2734  if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
2735  SeenLocal(addrMe);
2736  }
2737 
2738  // Be shy and don't send version until we hear
2739  if (pfrom.IsInboundConn()) {
2740  PushNodeVersion(config, pfrom, m_connman, GetAdjustedTime());
2741  }
2742 
2745 
2746  pfrom.nServices = nServices;
2747  pfrom.SetAddrLocal(addrMe);
2748  {
2749  LOCK(pfrom.cs_SubVer);
2750  pfrom.cleanSubVer = cleanSubVer;
2751  }
2752  pfrom.nStartingHeight = nStartingHeight;
2753 
2754  // set nodes not relaying blocks and tx and not serving (parts) of the
2755  // historical blockchain as "clients"
2756  pfrom.fClient = (!(nServices & NODE_NETWORK) &&
2757  !(nServices & NODE_NETWORK_LIMITED));
2758 
2759  // set nodes not capable of serving the complete blockchain history as
2760  // "limited nodes"
2761  pfrom.m_limited_node =
2762  (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
2763 
2764  if (pfrom.m_tx_relay != nullptr) {
2765  LOCK(pfrom.m_tx_relay->cs_filter);
2766  // set to true after we get the first filter* message
2767  pfrom.m_tx_relay->fRelayTxes = fRelay;
2768  }
2769 
2770  // Change version
2771  pfrom.SetSendVersion(nSendVersion);
2772  pfrom.nVersion = nVersion;
2773  pfrom.nRemoteHostNonce = nNonce;
2774  pfrom.nRemoteExtraEntropy = nExtraEntropy;
2775 
2776  // Potentially mark this peer as a preferred download peer.
2777  {
2778  LOCK(cs_main);
2779  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
2780  }
2781 
2782  if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer()) {
2783  // Advertise our address
2785  CAddress addr =
2786  GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
2787  FastRandomContext insecure_rand;
2788  if (addr.IsRoutable()) {
2790  "ProcessMessages: advertising address %s\n",
2791  addr.ToString());
2792  pfrom.PushAddress(addr, insecure_rand);
2793  } else if (IsPeerAddrLocalGood(&pfrom)) {
2794  addr.SetIP(addrMe);
2796  "ProcessMessages: advertising address %s\n",
2797  addr.ToString());
2798  pfrom.PushAddress(addr, insecure_rand);
2799  }
2800  }
2801 
2802  // Get recent addresses
2804  &pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
2805  pfrom.fGetAddr = true;
2807  }
2808 
2809  std::string remoteAddr;
2810  if (fLogIPs) {
2811  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
2812  }
2813 
2815  "receive version message: [%s] %s: version %d, blocks=%d, "
2816  "us=%s, peer=%d%s\n",
2817  pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
2818  pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(),
2819  remoteAddr);
2820 
2821  // Ignore time offsets that are improbable (before the Genesis block)
2822  // and may underflow the nTimeOffset calculation.
2823  int64_t currentTime = GetTime();
2824  if (nTime >= int64_t(chainparams.GenesisBlock().nTime)) {
2825  int64_t nTimeOffset = nTime - currentTime;
2826  pfrom.nTimeOffset = nTimeOffset;
2827  AddTimeData(pfrom.addr, nTimeOffset);
2828  } else {
2829  Misbehaving(pfrom, 20,
2830  "Ignoring invalid timestamp in version message");
2831  }
2832 
2833  // Feeler connections exist only to verify if address is online.
2834  if (pfrom.IsFeelerConn()) {
2835  pfrom.fDisconnect = true;
2836  }
2837  return;
2838  }
2839 
2840  if (pfrom.nVersion == 0) {
2841  // Must have a version message before anything else
2842  Misbehaving(pfrom, 10, "non-version message before version handshake");
2843  return;
2844  }
2845 
2846  // At this point, the outgoing message serialization version can't change.
2847  const CNetMsgMaker msgMaker(pfrom.GetSendVersion());
2848 
2849  if (msg_type == NetMsgType::VERACK) {
2850  pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION));
2851 
2852  if (!pfrom.IsInboundConn()) {
2853  // Mark this node as currently connected, so we update its timestamp
2854  // later.
2855  LOCK(cs_main);
2856  State(pfrom.GetId())->fCurrentlyConnected = true;
2857  LogPrintf(
2858  "New outbound peer connected: version: %d, blocks=%d, "
2859  "peer=%d%s (%s)\n",
2860  pfrom.nVersion.load(), pfrom.nStartingHeight, pfrom.GetId(),
2861  (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
2862  : ""),
2863  pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
2864  }
2865 
2866  if (pfrom.nVersion >= SENDHEADERS_VERSION) {
2867  // Tell our peer we prefer to receive headers rather than inv's
2868  // We send this to non-NODE NETWORK peers as well, because even
2869  // non-NODE NETWORK peers can announce blocks (such as pruning
2870  // nodes)
2871  m_connman.PushMessage(&pfrom,
2872  msgMaker.Make(NetMsgType::SENDHEADERS));
2873  }
2874 
2875  if (pfrom.nVersion >= SHORT_IDS_BLOCKS_VERSION) {
2876  // Tell our peer we are willing to provide version 1 or 2
2877  // cmpctblocks. However, we do not request new block announcements
2878  // using cmpctblock messages. We send this to non-NODE NETWORK peers
2879  // as well, because they may wish to request compact blocks from us.
2880  bool fAnnounceUsingCMPCTBLOCK = false;
2881  uint64_t nCMPCTBLOCKVersion = 1;
2882  m_connman.PushMessage(&pfrom,
2883  msgMaker.Make(NetMsgType::SENDCMPCT,
2884  fAnnounceUsingCMPCTBLOCK,
2885  nCMPCTBLOCKVersion));
2886  }
2887 
2888  if ((pfrom.nServices & NODE_AVALANCHE) && g_avalanche &&
2889  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
2890  if (g_avalanche->sendHello(&pfrom)) {
2891  LogPrint(BCLog::NET, "Send avahello to peer %d\n",
2892  pfrom.GetId());
2893  }
2894  }
2895 
2896  pfrom.fSuccessfullyConnected = true;
2897  return;
2898  }
2899 
2900  if (!pfrom.fSuccessfullyConnected) {
2901  // Must have a verack message before anything else
2902  Misbehaving(pfrom, 10, "non-verack message before version handshake");
2903  return;
2904  }
2905 
2906  if (msg_type == NetMsgType::ADDR) {
2907  std::vector<CAddress> vAddr;
2908  vRecv >> vAddr;
2909 
2910  if (!pfrom.IsAddrRelayPeer()) {
2911  return;
2912  }
2913  if (vAddr.size() > 1000) {
2914  Misbehaving(pfrom, 20,
2915  strprintf("oversized-addr: message addr size() = %u",
2916  vAddr.size()));
2917  return;
2918  }
2919 
2920  // Store the new addresses
2921  std::vector<CAddress> vAddrOk;
2922  int64_t nNow = GetAdjustedTime();
2923  int64_t nSince = nNow - 10 * 60;
2924  for (CAddress &addr : vAddr) {
2925  if (interruptMsgProc) {
2926  return;
2927  }
2928 
2929  // We only bother storing full nodes, though this may include things
2930  // which we would not make an outbound connection to, in part
2931  // because we may make feeler connections to them.
2932  if (!MayHaveUsefulAddressDB(addr.nServices) &&
2934  continue;
2935  }
2936 
2937  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
2938  addr.nTime = nNow - 5 * 24 * 60 * 60;
2939  }
2940  pfrom.AddAddressKnown(addr);
2941  if (m_banman &&
2942  (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
2943  // Do not process banned/discouraged addresses beyond
2944  // remembering we received them
2945  continue;
2946  }
2947  bool fReachable = IsReachable(addr);
2948  if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 &&
2949  addr.IsRoutable()) {
2950  // Relay to a limited number of other nodes
2951  RelayAddress(addr, fReachable, m_connman);
2952  }
2953  // Do not store addresses outside our network
2954  if (fReachable) {
2955  vAddrOk.push_back(addr);
2956  }
2957  }
2958 
2959  m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
2960  if (vAddr.size() < 1000) {
2961  pfrom.fGetAddr = false;
2962  }
2963  if (pfrom.IsAddrFetchConn()) {
2964  pfrom.fDisconnect = true;
2965  }
2966  return;
2967  }
2968 
2969  if (msg_type == NetMsgType::SENDHEADERS) {
2970  LOCK(cs_main);
2971  State(pfrom.GetId())->fPreferHeaders = true;
2972  return;
2973  }
2974 
2975  if (msg_type == NetMsgType::SENDCMPCT) {
2976  bool fAnnounceUsingCMPCTBLOCK = false;
2977  uint64_t nCMPCTBLOCKVersion = 0;
2978  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
2979  if (nCMPCTBLOCKVersion == 1) {
2980  LOCK(cs_main);
2981  // fProvidesHeaderAndIDs is used to "lock in" version of compact
2982  // blocks we send.
2983  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
2984  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
2985  }
2986 
2987  State(pfrom.GetId())->fPreferHeaderAndIDs =
2988  fAnnounceUsingCMPCTBLOCK;
2989  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
2990  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = true;
2991  }
2992  }
2993  return;
2994  }
2995 
2996  if (msg_type == NetMsgType::INV) {
2997  std::vector<CInv> vInv;
2998  vRecv >> vInv;
2999  if (vInv.size() > MAX_INV_SZ) {
3000  Misbehaving(pfrom, 20,
3001  strprintf("oversized-inv: message inv size() = %u",
3002  vInv.size()));
3003  return;
3004  }
3005 
3006  // We won't accept tx inv's if we're in blocks-only mode, or this is a
3007  // block-relay-only peer
3008  bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
3009 
3010  // Allow whitelisted peers to send data other than blocks in blocks only
3011  // mode if whitelistrelay is true
3012  if (pfrom.HasPermission(PF_RELAY)) {
3013  fBlocksOnly = false;
3014  }
3015 
3016  LOCK(cs_main);
3017 
3018  const auto current_time = GetTime<std::chrono::microseconds>();
3019 
3020  for (CInv &inv : vInv) {
3021  if (interruptMsgProc) {
3022  return;
3023  }
3024 
3025  bool fAlreadyHave = AlreadyHave(inv, m_mempool);
3026  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
3027  fAlreadyHave ? "have" : "new", pfrom.GetId());
3028 
3029  if (inv.type == MSG_BLOCK) {
3030  const BlockHash hash(inv.hash);
3031  UpdateBlockAvailability(pfrom.GetId(), hash);
3032  if (!fAlreadyHave && !fImporting && !fReindex &&
3033  !mapBlocksInFlight.count(hash)) {
3034  // We used to request the full block here, but since
3035  // headers-announcements are now the primary method of
3036  // announcement on the network, and since, in the case that
3037  // a node fell back to inv we probably have a reorg which we
3038  // should get the headers for first, we now only provide a
3039  // getheaders response here. When we receive the headers, we
3040  // will then ask for the blocks we need.
3042  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
3043  ::ChainActive().GetLocator(
3045  hash));
3046  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
3048  pfrom.GetId());
3049  }
3050  } else {
3051  const TxId txid(inv.hash);
3052  pfrom.AddKnownTx(txid);
3053  if (fBlocksOnly) {
3055  "transaction (%s) inv sent in violation of "
3056  "protocol, disconnecting peer=%d\n",
3057  txid.ToString(), pfrom.GetId());
3058  pfrom.fDisconnect = true;
3059  return;
3060  } else if (!fAlreadyHave && !fImporting && !fReindex &&
3062  RequestTx(State(pfrom.GetId()), txid, current_time);
3063  }
3064  }
3065  }
3066  return;
3067  }
3068 
3069  if (msg_type == NetMsgType::GETDATA) {
3070  std::vector<CInv> vInv;
3071  vRecv >> vInv;
3072  if (vInv.size() > MAX_INV_SZ) {
3073  Misbehaving(pfrom, 20,
3074  strprintf("too-many-inv: message getdata size() = %u",
3075  vInv.size()));
3076  return;
3077  }
3078 
3079  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
3080  vInv.size(), pfrom.GetId());
3081 
3082  if (vInv.size() > 0) {
3083  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
3084  vInv[0].ToString(), pfrom.GetId());
3085  }
3086 
3087  pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(),
3088  vInv.end());
3089  ProcessGetData(config, pfrom, m_connman, m_mempool, interruptMsgProc);
3090  return;
3091  }
3092 
3093  if (msg_type == NetMsgType::GETBLOCKS) {
3094  CBlockLocator locator;
3095  uint256 hashStop;
3096  vRecv >> locator >> hashStop;
3097 
3098  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3100  "getblocks locator size %lld > %d, disconnect peer=%d\n",
3101  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3102  pfrom.fDisconnect = true;
3103  return;
3104  }
3105 
3106  // We might have announced the currently-being-connected tip using a
3107  // compact block, which resulted in the peer sending a getblocks
3108  // request, which we would otherwise respond to without the new block.
3109  // To avoid this situation we simply verify that we are on our best
3110  // known chain now. This is super overkill, but we handle it better
3111  // for getheaders requests, and there are no known nodes which support
3112  // compact blocks but still use getblocks to request blocks.
3113  {
3114  std::shared_ptr<const CBlock> a_recent_block;
3115  {
3117  a_recent_block = most_recent_block;
3118  }
3119  BlockValidationState state;
3120  if (!ActivateBestChain(config, state, a_recent_block)) {
3121  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
3122  state.ToString());
3123  }
3124  }
3125 
3126  LOCK(cs_main);
3127 
3128  // Find the last block the caller has in the main chain
3129  const CBlockIndex *pindex =
3130  FindForkInGlobalIndex(::ChainActive(), locator);
3131 
3132  // Send the rest of the chain
3133  if (pindex) {
3134  pindex = ::ChainActive().Next(pindex);
3135  }
3136  int nLimit = 500;
3137  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
3138  (pindex ? pindex->nHeight : -1),
3139  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
3140  pfrom.GetId());
3141  for (; pindex; pindex = ::ChainActive().Next(pindex)) {
3142  if (pindex->GetBlockHash() == hashStop) {
3143  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
3144  pindex->nHeight, pindex->GetBlockHash().ToString());
3145  break;
3146  }
3147  // If pruning, don't inv blocks unless we have on disk and are
3148  // likely to still have for some reasonable time window (1 hour)
3149  // that block relay might require.
3150  const int nPrunedBlocksLikelyToHave =
3152  3600 / chainparams.GetConsensus().nPowTargetSpacing;
3153  if (fPruneMode &&
3154  (!pindex->nStatus.hasData() ||
3155  pindex->nHeight <= ::ChainActive().Tip()->nHeight -
3156  nPrunedBlocksLikelyToHave)) {
3157  LogPrint(
3158  BCLog::NET,
3159  " getblocks stopping, pruned or too old block at %d %s\n",
3160  pindex->nHeight, pindex->GetBlockHash().ToString());
3161  break;
3162  }
3163  pfrom.PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
3164  if (--nLimit <= 0) {
3165  // When this block is requested, we'll send an inv that'll
3166  // trigger the peer to getblocks the next batch of inventory.
3167  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
3168  pindex->nHeight, pindex->GetBlockHash().ToString());
3169  pfrom.hashContinue = pindex->GetBlockHash();
3170  break;
3171  }
3172  }
3173  return;
3174  }
3175 
3176  if (msg_type == NetMsgType::GETBLOCKTXN) {
3178  vRecv >> req;
3179 
3180  std::shared_ptr<const CBlock> recent_block;
3181  {
3183  if (most_recent_block_hash == req.blockhash) {
3184  recent_block = most_recent_block;
3185  }
3186  // Unlock cs_most_recent_block to avoid cs_main lock inversion
3187  }
3188  if (recent_block) {
3189  SendBlockTransactions(*recent_block, req, pfrom, m_connman);
3190  return;
3191  }
3192 
3193  LOCK(cs_main);
3194 
3195  const CBlockIndex *pindex = LookupBlockIndex(req.blockhash);
3196  if (!pindex || !pindex->nStatus.hasData()) {
3197  LogPrint(
3198  BCLog::NET,
3199  "Peer %d sent us a getblocktxn for a block we don't have\n",
3200  pfrom.GetId());
3201  return;
3202  }
3203 
3204  if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
3205  // If an older block is requested (should never happen in practice,
3206  // but can happen in tests) send a block response instead of a
3207  // blocktxn response. Sending a full block response instead of a
3208  // small blocktxn response is preferable in the case where a peer
3209  // might maliciously send lots of getblocktxn requests to trigger
3210  // expensive disk reads, because it will require the peer to
3211  // actually receive all the data read from disk over the network.
3213  "Peer %d sent us a getblocktxn for a block > %i deep\n",
3214  pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
3215  CInv inv;
3216  inv.type = MSG_BLOCK;
3217  inv.hash = req.blockhash;
3218  pfrom.vRecvGetData.push_back(inv);
3219  // The message processing loop will go around again (without
3220  // pausing) and we'll respond then (without cs_main)
3221  return;
3222  }
3223 
3224  CBlock block;
3225  bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
3226  assert(ret);
3227 
3228  SendBlockTransactions(block, req, pfrom, m_connman);
3229  return;
3230  }
3231 
3232  if (msg_type == NetMsgType::GETHEADERS) {
3233  CBlockLocator locator;
3234  BlockHash hashStop;
3235  vRecv >> locator >> hashStop;
3236 
3237  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
3239  "getheaders locator size %lld > %d, disconnect peer=%d\n",
3240  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
3241  pfrom.fDisconnect = true;
3242  return;
3243  }
3244 
3245  LOCK(cs_main);
3246  if (::ChainstateActive().IsInitialBlockDownload() &&
3247  !pfrom.HasPermission(PF_NOBAN)) {
3249  "Ignoring getheaders from peer=%d because node is in "
3250  "initial block download\n",
3251  pfrom.GetId());
3252  return;
3253  }
3254 
3255  CNodeState *nodestate = State(pfrom.GetId());
3256  const CBlockIndex *pindex = nullptr;
3257  if (locator.IsNull()) {
3258  // If locator is null, return the hashStop block
3259  pindex = LookupBlockIndex(hashStop);
3260  if (!pindex) {
3261  return;
3262  }
3263 
3264  if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
3266  "%s: ignoring request from peer=%i for old block "
3267  "header that isn't in the main chain\n",
3268  __func__, pfrom.GetId());
3269  return;
3270  }
3271  } else {
3272  // Find the last block the caller has in the main chain
3273  pindex = FindForkInGlobalIndex(::ChainActive(), locator);
3274  if (pindex) {
3275  pindex = ::ChainActive().Next(pindex);
3276  }
3277  }
3278 
3279  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
3280  // count at the end
3281  std::vector<CBlock> vHeaders;
3282  int nLimit = MAX_HEADERS_RESULTS;
3283  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
3284  (pindex ? pindex->nHeight : -1),
3285  hashStop.IsNull() ? "end" : hashStop.ToString(),
3286  pfrom.GetId());
3287  for (; pindex; pindex = ::ChainActive().Next(pindex)) {
3288  vHeaders.push_back(pindex->GetBlockHeader());
3289  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
3290  break;
3291  }
3292  }
3293  // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
3294  // if our peer has ::ChainActive().Tip() (and thus we are sending an
3295  // empty headers message). In both cases it's safe to update
3296  // pindexBestHeaderSent to be our tip.
3297  //
3298  // It is important that we simply reset the BestHeaderSent value here,
3299  // and not max(BestHeaderSent, newHeaderSent). We might have announced
3300  // the currently-being-connected tip using a compact block, which
3301  // resulted in the peer sending a headers request, which we respond to
3302  // without the new block. By resetting the BestHeaderSent, we ensure we
3303  // will re-announce the new block via headers (or compact blocks again)
3304  // in the SendMessages logic.
3305  nodestate->pindexBestHeaderSent =
3306  pindex ? pindex : ::ChainActive().Tip();
3307  m_connman.PushMessage(&pfrom,
3308  msgMaker.Make(NetMsgType::HEADERS, vHeaders));
3309  return;
3310  }
3311 
3312  if (msg_type == NetMsgType::TX) {
3313  // Stop processing the transaction early if
3314  // We are in blocks only mode and peer is either not whitelisted or
3315  // whitelistrelay is off or if this peer is supposed to be a
3316  // block-relay-only peer
3317  if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) ||
3318  (pfrom.m_tx_relay == nullptr)) {
3320  "transaction sent in violation of protocol peer=%d\n",
3321  pfrom.GetId());
3322  pfrom.fDisconnect = true;
3323  return;
3324  }
3325 
3326  CTransactionRef ptx;
3327  vRecv >> ptx;
3328  const CTransaction &tx = *ptx;
3329  const TxId &txid = tx.GetId();
3330  pfrom.AddKnownTx(txid);
3331 
3332  LOCK2(cs_main, g_cs_orphans);
3333 
3334  TxValidationState state;
3335 
3336  CNodeState *nodestate = State(pfrom.GetId());
3337  nodestate->m_tx_download.m_tx_announced.erase(txid);
3338  nodestate->m_tx_download.m_tx_in_flight.erase(txid);
3339  EraseTxRequest(txid);
3340 
3341  if (!AlreadyHave(CInv(MSG_TX, txid), m_mempool) &&
3342  AcceptToMemoryPool(config, m_mempool, state, ptx,
3343  false /* bypass_limits */,
3344  Amount::zero() /* nAbsurdFee */)) {
3345  m_mempool.check(&::ChainstateActive().CoinsTip());
3347  for (size_t i = 0; i < tx.vout.size(); i++) {
3348  auto it_by_prev =
3349  mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
3350  if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
3351  for (const auto &elem : it_by_prev->second) {
3352  pfrom.orphan_work_set.insert(elem->first);
3353  }
3354  }
3355  }
3356 
3357  pfrom.nLastTXTime = GetTime();
3358 
3360  "AcceptToMemoryPool: peer=%d: accepted %s "
3361  "(poolsz %u txn, %u kB)\n",
3362  pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(),
3363  m_mempool.DynamicMemoryUsage() / 1000);
3364 
3365  // Recursively process any orphan transactions that depended on this
3366  // one
3368  pfrom.orphan_work_set);
3369  } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
3370  // It may be the case that the orphans parents have all been
3371  // rejected.
3372  bool fRejectedParents = false;
3373  for (const CTxIn &txin : tx.vin) {
3374  if (recentRejects->contains(txin.prevout.GetTxId())) {
3375  fRejectedParents = true;
3376  break;
3377  }
3378  }
3379  if (!fRejectedParents) {
3380  const auto current_time = GetTime<std::chrono::microseconds>();
3381 
3382  for (const CTxIn &txin : tx.vin) {
3383  // FIXME: MSG_TX should use a TxHash, not a TxId.
3384  const TxId _txid = txin.prevout.GetTxId();
3385  pfrom.AddKnownTx(_txid);
3386  if (!AlreadyHave(CInv(MSG_TX, _txid), m_mempool)) {
3387  RequestTx(State(pfrom.GetId()), _txid, current_time);
3388  }
3389  }
3390  AddOrphanTx(ptx, pfrom.GetId());
3391 
3392  // DoS prevention: do not allow mapOrphanTransactions to grow
3393  // unbounded (see CVE-2012-3789)
3394  unsigned int nMaxOrphanTx = (unsigned int)std::max(
3395  int64_t(0), gArgs.GetArg("-maxorphantx",
3397  unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
3398  if (nEvicted > 0) {
3400  "mapOrphan overflow, removed %u tx\n", nEvicted);
3401  }
3402  } else {
3404  "not keeping orphan with rejected parents %s\n",
3405  tx.GetId().ToString());
3406  // We will continue to reject this tx since it has rejected
3407  // parents so avoid re-requesting it from other peers.
3408  recentRejects->insert(tx.GetId());
3409  }
3410  } else {
3411  assert(recentRejects);
3412  recentRejects->insert(tx.GetId());
3413 
3414  if (RecursiveDynamicUsage(*ptx) < 100000) {
3416  }
3417 
3418  if (pfrom.HasPermission(PF_FORCERELAY)) {
3419  // Always relay transactions received from whitelisted peers,
3420  // even if they were already in the mempool or rejected from it
3421  // due to policy, allowing the node to function as a gateway for
3422  // nodes hidden behind it.
3423  //
3424  // Never relay transactions that might result in being
3425  // disconnected (or banned).
3426  if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
3427  LogPrintf("Not relaying invalid transaction %s from "
3428  "whitelisted peer=%d (%s)\n",
3429  tx.GetId().ToString(), pfrom.GetId(),
3430  state.ToString());
3431  } else {
3432  LogPrintf("Force relaying tx %s from whitelisted peer=%d\n",
3433  tx.GetId().ToString(), pfrom.GetId());
3435  }
3436  }
3437  }
3438 
3439  // If a tx has been detected by recentRejects, we will have reached
3440  // this point and the tx will have been ignored. Because we haven't run
3441  // the tx through AcceptToMemoryPool, we won't have computed a DoS
3442  // score for it or determined exactly why we consider it invalid.
3443  //
3444  // This means we won't penalize any peer subsequently relaying a DoSy
3445  // tx (even if we penalized the first peer who gave it to us) because
3446  // we have to account for recentRejects showing false positives. In
3447  // other words, we shouldn't penalize a peer if we aren't *sure* they
3448  // submitted a DoSy tx.
3449  //
3450  // Note that recentRejects doesn't just record DoSy or invalid
3451  // transactions, but any tx not accepted by the mempool, which may be
3452  // due to node policy (vs. consensus). So we can't blanket penalize a
3453  // peer simply for relaying a tx that our recentRejects has caught,
3454  // regardless of false positives.
3455 
3456  if (state.IsInvalid()) {
3458  "%s from peer=%d was not accepted: %s\n",
3459  tx.GetHash().ToString(), pfrom.GetId(), state.ToString());
3460  MaybePunishNodeForTx(pfrom.GetId(), state);
3461  }
3462  return;
3463  }
3464 
3465  if (msg_type == NetMsgType::CMPCTBLOCK) {
3466  // Ignore cmpctblock received while importing
3467  if (fImporting || fReindex) {
3469  "Unexpected cmpctblock message received from peer %d\n",
3470  pfrom.GetId());
3471  return;
3472  }
3473 
3474  CBlockHeaderAndShortTxIDs cmpctblock;
3475  vRecv >> cmpctblock;
3476 
3477  bool received_new_header = false;
3478 
3479  {
3480  LOCK(cs_main);
3481 
3482  if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
3483  // Doesn't connect (or is genesis), instead of DoSing in
3484  // AcceptBlockHeader, request deeper headers
3485  if (!::ChainstateActive().IsInitialBlockDownload()) {
3487  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
3488  ::ChainActive().GetLocator(
3490  uint256()));
3491  }
3492  return;
3493  }
3494 
3495  if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
3496  received_new_header = true;
3497  }
3498  }
3499 
3500  const CBlockIndex *pindex = nullptr;
3501  BlockValidationState state;
3502  if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header},
3503  state, &pindex)) {
3504  if (state.IsInvalid()) {
3505  MaybePunishNodeForBlock(pfrom.GetId(), state,
3506  /*via_compact_block*/ true,
3507  "invalid header via cmpctblock");
3508  return;
3509  }
3510  }
3511 
3512  // When we succeed in decoding a block's txids from a cmpctblock
3513  // message we typically jump to the BLOCKTXN handling code, with a
3514  // dummy (empty) BLOCKTXN message, to re-use the logic there in
3515  // completing processing of the putative block (without cs_main).
3516  bool fProcessBLOCKTXN = false;
3518 
3519  // If we end up treating this as a plain headers message, call that as
3520  // well
3521  // without cs_main.
3522  bool fRevertToHeaderProcessing = false;
3523 
3524  // Keep a CBlock for "optimistic" compactblock reconstructions (see
3525  // below)
3526  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3527  bool fBlockReconstructed = false;
3528 
3529  {
3530  LOCK2(cs_main, g_cs_orphans);
3531  // If AcceptBlockHeader returned true, it set pindex
3532  assert(pindex);
3533  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
3534 
3535  CNodeState *nodestate = State(pfrom.GetId());
3536 
3537  // If this was a new header with more work than our tip, update the
3538  // peer's last block announcement time
3539  if (received_new_header &&
3540  pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
3541  nodestate->m_last_block_announcement = GetTime();
3542  }
3543 
3544  std::map<BlockHash,
3545  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
3546  iterator blockInFlightIt =
3547  mapBlocksInFlight.find(pindex->GetBlockHash());
3548  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
3549 
3550  if (pindex->nStatus.hasData()) {
3551  // Nothing to do here
3552  return;
3553  }
3554 
3555  if (pindex->nChainWork <=
3556  ::ChainActive()
3557  .Tip()
3558  ->nChainWork || // We know something better
3559  pindex->nTx != 0) {
3560  // We had this block at some point, but pruned it
3561  if (fAlreadyInFlight) {
3562  // We requested this block for some reason, but our mempool
3563  // will probably be useless so we just grab the block via
3564  // normal getdata.
3565  std::vector<CInv> vInv(1);
3566  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3568  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3569  }
3570  return;
3571  }
3572 
3573  // If we're not close to tip yet, give up and let parallel block
3574  // fetch work its magic.
3575  if (!fAlreadyInFlight &&
3576  !CanDirectFetch(chainparams.GetConsensus())) {
3577  return;
3578  }
3579 
3580  // We want to be a bit conservative just to be extra careful about
3581  // DoS possibilities in compact block processing...
3582  if (pindex->nHeight <= ::ChainActive().Height() + 2) {
3583  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
3584  MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
3585  (fAlreadyInFlight &&
3586  blockInFlightIt->second.first == pfrom.GetId())) {
3587  std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
3588  if (!MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(),
3589  pindex->GetBlockHash(),
3590  chainparams.GetConsensus(), pindex,
3591  &queuedBlockIt)) {
3592  if (!(*queuedBlockIt)->partialBlock) {
3593  (*queuedBlockIt)
3594  ->partialBlock.reset(
3595  new PartiallyDownloadedBlock(config,
3596  &m_mempool));
3597  } else {
3598  // The block was already in flight using compact
3599  // blocks from the same peer.
3600  LogPrint(BCLog::NET, "Peer sent us compact block "
3601  "we were already syncing!\n");
3602  return;
3603  }
3604  }
3605 
3606  PartiallyDownloadedBlock &partialBlock =
3607  *(*queuedBlockIt)->partialBlock;
3608  ReadStatus status =
3609  partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
3610  if (status == READ_STATUS_INVALID) {
3611  // Reset in-flight state in case of whitelist
3612  MarkBlockAsReceived(pindex->GetBlockHash());
3613  Misbehaving(pfrom, 100, "invalid compact block");
3614  return;
3615  } else if (status == READ_STATUS_FAILED) {
3616  // Duplicate txindices, the block is now in-flight, so
3617  // just request it.
3618  std::vector<CInv> vInv(1);
3619  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3621  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3622  return;
3623  }
3624 
3626  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
3627  if (!partialBlock.IsTxAvailable(i)) {
3628  req.indices.push_back(i);
3629  }
3630  }
3631  if (req.indices.empty()) {
3632  // Dirty hack to jump to BLOCKTXN code (TODO: move
3633  // message handling into their own functions)
3634  BlockTransactions txn;
3635  txn.blockhash = cmpctblock.header.GetHash();
3636  blockTxnMsg << txn;
3637  fProcessBLOCKTXN = true;
3638  } else {
3639  req.blockhash = pindex->GetBlockHash();
3641  &pfrom,
3642  msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
3643  }
3644  } else {
3645  // This block is either already in flight from a different
3646  // peer, or this peer has too many blocks outstanding to
3647  // download from. Optimistically try to reconstruct anyway
3648  // since we might be able to without any round trips.
3649  PartiallyDownloadedBlock tempBlock(config, &m_mempool);
3650  ReadStatus status =
3651  tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
3652  if (status != READ_STATUS_OK) {
3653  // TODO: don't ignore failures
3654  return;
3655  }
3656  std::vector<CTransactionRef> dummy;
3657  status = tempBlock.FillBlock(*pblock, dummy);
3658  if (status == READ_STATUS_OK) {
3659  fBlockReconstructed = true;
3660  }
3661  }
3662  } else {
3663  if (fAlreadyInFlight) {
3664  // We requested this block, but its far into the future, so
3665  // our mempool will probably be useless - request the block
3666  // normally.
3667  std::vector<CInv> vInv(1);
3668  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
3670  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
3671  return;
3672  } else {
3673  // If this was an announce-cmpctblock, we want the same
3674  // treatment as a header message.
3675  fRevertToHeaderProcessing = true;
3676  }
3677  }
3678  } // cs_main
3679 
3680  if (fProcessBLOCKTXN) {
3681  return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
3682  blockTxnMsg, nTimeReceived, interruptMsgProc);
3683  }
3684 
3685  if (fRevertToHeaderProcessing) {
3686  // Headers received from HB compact block peers are permitted to be
3687  // relayed before full validation (see BIP 152), so we don't want to
3688  // disconnect the peer if the header turns out to be for an invalid
3689  // block. Note that if a peer tries to build on an invalid chain,
3690  // that will be detected and the peer will be banned.
3691  return ProcessHeadersMessage(config, pfrom, m_connman, m_mempool,
3692  m_chainman, {cmpctblock.header},
3693  /*via_compact_block=*/true);
3694  }
3695 
3696  if (fBlockReconstructed) {
3697  // If we got here, we were able to optimistically reconstruct a
3698  // block that is in flight from some other peer.
3699  {
3700  LOCK(cs_main);
3701  mapBlockSource.emplace(pblock->GetHash(),
3702  std::make_pair(pfrom.GetId(), false));
3703  }
3704  bool fNewBlock = false;
3705  // Setting fForceProcessing to true means that we bypass some of
3706  // our anti-DoS protections in AcceptBlock, which filters
3707  // unrequested blocks that might be trying to waste our resources
3708  // (eg disk space). Because we only try to reconstruct blocks when
3709  // we're close to caught up (via the CanDirectFetch() requirement
3710  // above, combined with the behavior of not requesting blocks until
3711  // we have a chain with at least nMinimumChainWork), and we ignore
3712  // compact blocks with less work than our tip, it is safe to treat
3713  // reconstructed compact blocks as having been requested.
3714  m_chainman.ProcessNewBlock(config, pblock,
3715  /*fForceProcessing=*/true, &fNewBlock);
3716  if (fNewBlock) {
3717  pfrom.nLastBlockTime = GetTime();
3718  } else {
3719  LOCK(cs_main);
3720  mapBlockSource.erase(pblock->GetHash());
3721  }
3722 
3723  // hold cs_main for CBlockIndex::IsValid()
3724  LOCK(cs_main);
3725  if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
3726  // Clear download state for this block, which is in process from
3727  // some other peer. We do this after calling. ProcessNewBlock so
3728  // that a malleated cmpctblock announcement can't be used to
3729  // interfere with block relay.
3730  MarkBlockAsReceived(pblock->GetHash());
3731  }
3732  }
3733  return;
3734  }
3735 
3736  if (msg_type == NetMsgType::BLOCKTXN) {
3737  // Ignore blocktxn received while importing
3738  if (fImporting || fReindex) {
3740  "Unexpected blocktxn message received from peer %d\n",
3741  pfrom.GetId());
3742  return;
3743  }
3744 
3745  BlockTransactions resp;
3746  vRecv >> resp;
3747 
3748  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3749  bool fBlockRead = false;
3750  {
3751  LOCK(cs_main);
3752 
3753  std::map<BlockHash,
3754  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
3755  iterator it = mapBlocksInFlight.find(resp.blockhash);
3756  if (it == mapBlocksInFlight.end() ||
3757  !it->second.second->partialBlock ||
3758  it->second.first != pfrom.GetId()) {
3760  "Peer %d sent us block transactions for block "
3761  "we weren't expecting\n",
3762  pfrom.GetId());
3763  return;
3764  }
3765 
3766  PartiallyDownloadedBlock &partialBlock =
3767  *it->second.second->partialBlock;
3768  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
3769  if (status == READ_STATUS_INVALID) {
3770  // Reset in-flight state in case of whitelist.
3771  MarkBlockAsReceived(resp.blockhash);
3772  Misbehaving(
3773  pfrom, 100,
3774  "invalid compact block/non-matching block transactions");
3775  return;
3776  } else if (status == READ_STATUS_FAILED) {
3777  // Might have collided, fall back to getdata now :(
3778  std::vector<CInv> invs;
3779  invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
3780  m_connman.PushMessage(&pfrom,
3781  msgMaker.Make(NetMsgType::GETDATA, invs));
3782  } else {
3783  // Block is either okay, or possibly we received
3784  // READ_STATUS_CHECKBLOCK_FAILED.
3785  // Note that CheckBlock can only fail for one of a few reasons:
3786  // 1. bad-proof-of-work (impossible here, because we've already
3787  // accepted the header)
3788  // 2. merkleroot doesn't match the transactions given (already
3789  // caught in FillBlock with READ_STATUS_FAILED, so
3790  // impossible here)
3791  // 3. the block is otherwise invalid (eg invalid coinbase,
3792  // block is too big, too many legacy sigops, etc).
3793  // So if CheckBlock failed, #3 is the only possibility.
3794  // Under BIP 152, we don't DoS-ban unless proof of work is
3795  // invalid (we don't require all the stateless checks to have
3796  // been run). This is handled below, so just treat this as
3797  // though the block was successfully read, and rely on the
3798  // handling in ProcessNewBlock to ensure the block index is
3799  // updated, etc.
3800 
3801  // it is now an empty pointer
3802  MarkBlockAsReceived(resp.blockhash);
3803  fBlockRead = true;
3804  // mapBlockSource is used for potentially punishing peers and
3805  // updating which peers send us compact blocks, so the race
3806  // between here and cs_main in ProcessNewBlock is fine.
3807  // BIP 152 permits peers to relay compact blocks after
3808  // validating the header only; we should not punish peers
3809  // if the block turns out to be invalid.
3810  mapBlockSource.emplace(resp.blockhash,
3811  std::make_pair(pfrom.GetId(), false));
3812  }
3813  } // Don't hold cs_main when we call into ProcessNewBlock
3814  if (fBlockRead) {
3815  bool fNewBlock = false;
3816  // Since we requested this block (it was in mapBlocksInFlight),
3817  // force it to be processed, even if it would not be a candidate for
3818  // new tip (missing previous block, chain not long enough, etc)
3819  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
3820  // disk-space attacks), but this should be safe due to the
3821  // protections in the compact block handler -- see related comment
3822  // in compact block optimistic reconstruction handling.
3823  m_chainman.ProcessNewBlock(config, pblock,
3824  /*fForceProcessing=*/true, &fNewBlock);
3825  if (fNewBlock) {
3826  pfrom.nLastBlockTime = GetTime();
3827  } else {
3828  LOCK(cs_main);
3829  mapBlockSource.erase(pblock->GetHash());
3830  }
3831  }
3832  return;
3833  }
3834 
3835  if (msg_type == NetMsgType::HEADERS) {
3836  // Ignore headers received while importing
3837  if (fImporting || fReindex) {
3839  "Unexpected headers message received from peer %d\n",
3840  pfrom.GetId());
3841  return;
3842  }
3843 
3844  std::vector<CBlockHeader> headers;
3845 
3846  // Bypass the normal CBlock deserialization, as we don't want to risk
3847  // deserializing 2000 full blocks.
3848  unsigned int nCount = ReadCompactSize(vRecv);
3849  if (nCount > MAX_HEADERS_RESULTS) {
3850  Misbehaving(pfrom, 20,
3851  strprintf("too-many-headers: headers message size = %u",
3852  nCount));
3853  return;
3854  }
3855  headers.resize(nCount);
3856  for (unsigned int n = 0; n < nCount; n++) {
3857  vRecv >> headers[n];
3858  // Ignore tx count; assume it is 0.
3859  ReadCompactSize(vRecv);
3860  }
3861 
3862  return ProcessHeadersMessage(config, pfrom, m_connman, m_mempool,
3863  m_chainman, headers,
3864  /*via_compact_block=*/false);
3865  }
3866 
3867  if (msg_type == NetMsgType::BLOCK) {
3868  // Ignore block received while importing
3869  if (fImporting || fReindex) {
3871  "Unexpected block message received from peer %d\n",
3872  pfrom.GetId());
3873  return;
3874  }
3875 
3876  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
3877  vRecv >> *pblock;
3878 
3879  LogPrint(BCLog::NET, "received block %s peer=%d\n",
3880  pblock->GetHash().ToString(), pfrom.GetId());
3881 
3882  // Process all blocks from whitelisted peers, even if not requested,
3883  // unless we're still syncing with the network. Such an unrequested
3884  // block may still be processed, subject to the conditions in
3885  // AcceptBlock().
3886  bool forceProcessing = pfrom.HasPermission(PF_NOBAN) &&
3888  const BlockHash hash = pblock->GetHash();
3889  {
3890  LOCK(cs_main);
3891  // Also always process if we requested the block explicitly, as we
3892  // may need it even though it is not a candidate for a new best tip.
3893  forceProcessing |= MarkBlockAsReceived(hash);
3894  // mapBlockSource is only used for punishing peers and setting
3895  // which peers send us compact blocks, so the race between here and
3896  // cs_main in ProcessNewBlock is fine.
3897  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
3898  }
3899  bool fNewBlock = false;
3900  m_chainman.ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
3901  if (fNewBlock) {
3902  pfrom.nLastBlockTime = GetTime();
3903  } else {
3904  LOCK(cs_main);
3905  mapBlockSource.erase(hash);
3906  }
3907  return;
3908  }
3909 
3910  if (msg_type == NetMsgType::AVAHELLO && g_avalanche &&
3911  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
3912  if (!pfrom.m_avalanche_state) {
3913  pfrom.m_avalanche_state = std::make_unique<CNode::AvalancheState>();
3914  }
3915 
3916  CHashVerifier<CDataStream> verifier(&vRecv);
3917  avalanche::Delegation &delegation = pfrom.m_avalanche_state->delegation;
3918  verifier >> delegation;
3919 
3920  avalanche::Proof proof;
3921 
3923  CPubKey pubkey;
3924  if (!delegation.verify(state, proof, pubkey)) {
3925  Misbehaving(pfrom, 100, "invalid-delegation");
3926  return;
3927  }
3928 
3929  std::array<uint8_t, 64> sig;
3930  verifier >> sig;
3931  }
3932 
3933  // Ignore avalanche requests while importing
3934  if (msg_type == NetMsgType::AVAPOLL && !fImporting && !fReindex &&
3935  g_avalanche &&
3936  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
3937  auto now = std::chrono::steady_clock::now();
3938  int64_t cooldown =
3939  gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
3940 
3941  {
3942  LOCK(cs_main);
3943  auto &node_state = State(pfrom.GetId())->m_avalanche_state;
3944 
3945  if (now <
3946  node_state.last_poll + std::chrono::milliseconds(cooldown)) {
3947  Misbehaving(pfrom, 20, "avapool-cooldown");
3948  }
3949 
3950  node_state.last_poll = now;
3951  }
3952 
3953  uint64_t round;
3954  Unserialize(vRecv, round);
3955 
3956  unsigned int nCount = ReadCompactSize(vRecv);
3957  if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
3958  Misbehaving(
3959  pfrom, 20,
3960  strprintf("too-many-ava-poll: poll message size = %u", nCount));
3961  return;
3962  }
3963 
3964  std::vector<avalanche::Vote> votes;
3965  votes.reserve(nCount);
3966 
3967  LogPrint(BCLog::NET, "received avalanche poll from peer=%d\n",
3968  pfrom.GetId());
3969 
3970  {
3971  LOCK(cs_main);
3972 
3973  for (unsigned int n = 0; n < nCount; n++) {
3974  CInv inv;
3975  vRecv >> inv;
3976 
3977  const auto insertVote = [&](uint32_t e) {
3978  votes.emplace_back(e, inv.hash);
3979  };
3980 
3981  // Not a block.
3982  if (inv.type != MSG_BLOCK) {
3983  insertVote(-1);
3984  continue;
3985  }
3986 
3987  // We have a block.
3988  const CBlockIndex *pindex =
3989  LookupBlockIndex(BlockHash(inv.hash));
3990 
3991  // Unknown block.
3992  if (!pindex) {
3993  insertVote(-1);
3994  continue;
3995  }
3996 
3997  // Invalid block
3998  if (pindex->nStatus.isInvalid()) {
3999  insertVote(1);
4000  continue;
4001  }
4002 
4003  // Parked block
4004  if (pindex->nStatus.isOnParkedChain()) {
4005  insertVote(2);
4006  continue;
4007  }
4008 
4009  const CBlockIndex *pindexTip = ::ChainActive().Tip();
4010  const CBlockIndex *pindexFork =
4011  LastCommonAncestor(pindex, pindexTip);
4012 
4013  // Active block.
4014  if (pindex == pindexFork) {
4015  insertVote(0);
4016  continue;
4017  }
4018 
4019  // Fork block.
4020  if (pindexFork != pindexTip) {
4021  insertVote(3);
4022  continue;
4023  }
4024 
4025  // Missing block data.
4026  if (!pindex->nStatus.hasData()) {
4027  insertVote(-2);
4028  continue;
4029  }
4030 
4031  // This block is built on top of the tip, we have the data, it
4032  // is pending connection or rejection.
4033  insertVote(-3);
4034  }
4035  }
4036 
4037  // Send the query to the node.
4038  g_avalanche->sendResponse(
4039  &pfrom, avalanche::Response(round, cooldown, std::move(votes)));
4040  return;
4041  }
4042 
4043  // Ignore avalanche requests while importing
4044  if (msg_type == NetMsgType::AVARESPONSE && !fImporting && !fReindex &&
4045  g_avalanche &&
4046  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
4047  // As long as QUIC is not implemented, we need to sign response and
4048  // verify response's signatures in order to avoid any manipulation of
4049  // messages at the transport level.
4050  CHashVerifier<CDataStream> verifier(&vRecv);
4052  verifier >> response;
4053 
4054  if (!g_avalanche->forNode(pfrom.GetId(), [&](const avalanche::Node &n) {
4055  std::array<uint8_t, 64> sig;
4056  vRecv >> sig;
4057  return n.pubkey.VerifySchnorr(verifier.GetHash(), sig);
4058  })) {
4059  Misbehaving(pfrom, 100, "invalid-ava-response-signature");
4060  return;
4061  }
4062 
4063  std::vector<avalanche::BlockUpdate> updates;
4064  if (!g_avalanche->registerVotes(pfrom.GetId(), response, updates)) {
4065  return;
4066  }
4067 
4068  if (updates.size()) {
4069  for (avalanche::BlockUpdate &u : updates) {
4070  CBlockIndex *pindex = u.getBlockIndex();
4071  switch (u.getStatus()) {
4072  case avalanche::BlockUpdate::Status::Invalid:
4073  case avalanche::BlockUpdate::Status::Rejected: {
4074  LogPrintf("Avalanche rejected %s, parking\n",
4075  pindex->GetBlockHash().GetHex());
4076  BlockValidationState state;
4077  ::ChainstateActive().ParkBlock(config, state, pindex);
4078  if (!state.IsValid()) {
4079  LogPrintf("ERROR: Database error: %s\n",
4080  state.GetRejectReason());
4081  return;
4082  }
4083  } break;
4084  case avalanche::BlockUpdate::Status::Accepted:
4085  case avalanche::BlockUpdate::Status::Finalized: {
4086  LogPrintf("Avalanche accepted %s\n",
4087  pindex->GetBlockHash().GetHex());
4088  LOCK(cs_main);
4089  UnparkBlock(pindex);
4090  } break;
4091  }
4092  }
4093 
4094  BlockValidationState state;
4095  if (!ActivateBestChain(config, state)) {
4096  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
4097  state.ToString());
4098  }
4099  }
4100 
4101  return;
4102  }
4103 
4104  if (msg_type == NetMsgType::GETADDR) {
4105  // This asymmetric behavior for inbound and outbound connections was
4106  // introduced to prevent a fingerprinting attack: an attacker can send
4107  // specific fake addresses to users' AddrMan and later request them by
4108  // sending getaddr messages. Making nodes which are behind NAT and can
4109  // only make outgoing connections ignore the getaddr message mitigates
4110  // the attack.
4111  if (!pfrom.IsInboundConn()) {
4113  "Ignoring \"getaddr\" from outbound connection. peer=%d\n",
4114  pfrom.GetId());
4115  return;
4116  }
4117  if (!pfrom.IsAddrRelayPeer()) {
4119  "Ignoring \"getaddr\" from block-relay-only connection. "
4120  "peer=%d\n",
4121  pfrom.GetId());
4122  return;
4123  }
4124 
4125  // Only send one GetAddr response per connection to reduce resource
4126  // waste and discourage addr stamping of INV announcements.
4127  if (pfrom.fSentAddr) {
4128  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
4129  pfrom.GetId());
4130  return;
4131  }
4132  pfrom.fSentAddr = true;
4133 
4134  pfrom.vAddrToSend.clear();
4135  std::vector<CAddress> vAddr = m_connman.GetAddresses();
4136  FastRandomContext insecure_rand;
4137  for (const CAddress &addr : vAddr) {
4138  bool banned_or_discouraged =
4139  m_banman &&
4140  (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr));
4141  if (!banned_or_discouraged) {
4142  pfrom.PushAddress(addr, insecure_rand);
4143  }
4144  }
4145  return;
4146  }
4147 
4148  if (msg_type == NetMsgType::MEMPOOL) {
4149  if (!(pfrom.GetLocalServices() & NODE_BLOOM) &&
4150  !pfrom.HasPermission(PF_MEMPOOL)) {
4151  if (!pfrom.HasPermission(PF_NOBAN)) {
4153  "mempool request with bloom filters disabled, "
4154  "disconnect peer=%d\n",
4155  pfrom.GetId());
4156  pfrom.fDisconnect = true;
4157  }
4158  return;
4159  }
4160 
4161  if (m_connman.OutboundTargetReached(false) &&
4162  !pfrom.HasPermission(PF_MEMPOOL)) {
4163  if (!pfrom.HasPermission(PF_NOBAN)) {
4165  "mempool request with bandwidth limit reached, "
4166  "disconnect peer=%d\n",
4167  pfrom.GetId());
4168  pfrom.fDisconnect = true;
4169  }
4170  return;
4171  }
4172 
4173  if (pfrom.m_tx_relay != nullptr) {
4174  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
4175  pfrom.m_tx_relay->fSendMempool = true;
4176  }
4177  return;
4178  }
4179 
4180  if (msg_type == NetMsgType::PING) {
4181  if (pfrom.nVersion > BIP0031_VERSION) {
4182  uint64_t nonce = 0;
4183  vRecv >> nonce;
4184  // Echo the message back with the nonce. This allows for two useful
4185  // features:
4186  //
4187  // 1) A remote node can quickly check if the connection is
4188  // operational.
4189  // 2) Remote nodes can measure the latency of the network thread. If
4190  // this node is overloaded it won't respond to pings quickly and the
4191  // remote node can avoid sending us more work, like chain download
4192  // requests.
4193  //
4194  // The nonce stops the remote getting confused between different
4195  // pings: without it, if the remote node sends a ping once per
4196  // second and this node takes 5 seconds to respond to each, the 5th
4197  // ping the remote sends would appear to return very quickly.
4198  m_connman.PushMessage(&pfrom,
4199  msgMaker.Make(NetMsgType::PONG, nonce));
4200  }
4201  return;
4202  }
4203 
4204  if (msg_type == NetMsgType::PONG) {
4205  int64_t pingUsecEnd = nTimeReceived;
4206  uint64_t nonce = 0;
4207  size_t nAvail = vRecv.in_avail();
4208  bool bPingFinished = false;
4209  std::string sProblem;
4210 
4211  if (nAvail >= sizeof(nonce)) {
4212  vRecv >> nonce;
4213 
4214  // Only process pong message if there is an outstanding ping (old
4215  // ping without nonce should never pong)
4216  if (pfrom.nPingNonceSent != 0) {
4217  if (nonce == pfrom.nPingNonceSent) {
4218  // Matching pong received, this ping is no longer
4219  // outstanding
4220  bPingFinished = true;
4221  int64_t pingUsecTime = pingUsecEnd - pfrom.nPingUsecStart;
4222  if (pingUsecTime > 0) {
4223  // Successful ping time measurement, replace previous
4224  pfrom.nPingUsecTime = pingUsecTime;
4225  pfrom.nMinPingUsecTime = std::min(
4226  pfrom.nMinPingUsecTime.load(), pingUsecTime);
4227  } else {
4228  // This should never happen
4229  sProblem = "Timing mishap";
4230  }
4231  } else {
4232  // Nonce mismatches are normal when pings are overlapping
4233  sProblem = "Nonce mismatch";
4234  if (nonce == 0) {
4235  // This is most likely a bug in another implementation
4236  // somewhere; cancel this ping
4237  bPingFinished = true;
4238  sProblem = "Nonce zero";
4239  }
4240  }
4241  } else {
4242  sProblem = "Unsolicited pong without ping";
4243  }
4244  } else {
4245  // This is most likely a bug in another implementation somewhere;
4246  // cancel this ping
4247  bPingFinished = true;
4248  sProblem = "Short payload";
4249  }
4250 
4251  if (!(sProblem.empty())) {
4253  "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
4254  pfrom.GetId(), sProblem, pfrom.nPingNonceSent, nonce,
4255  nAvail);
4256  }
4257  if (bPingFinished) {
4258  pfrom.nPingNonceSent = 0;
4259  }
4260  return;
4261  }
4262 
4263  if (msg_type == NetMsgType::FILTERLOAD) {
4264  CBloomFilter filter;
4265  vRecv >> filter;
4266 
4267  if (!filter.IsWithinSizeConstraints()) {
4268  // There is no excuse for sending a too-large filter
4269  Misbehaving(pfrom, 100, "too-large bloom filter");
4270  } else if (pfrom.m_tx_relay != nullptr) {
4271  LOCK(pfrom.m_tx_relay->cs_filter);
4272  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
4273  pfrom.m_tx_relay->pfilter->UpdateEmptyFull();
4274  pfrom.m_tx_relay->fRelayTxes = true;
4275  }
4276  return;
4277  }
4278 
4279  if (msg_type == NetMsgType::FILTERADD) {
4280  std::vector<uint8_t> vData;
4281  vRecv >> vData;
4282 
4283  // Nodes must NEVER send a data item > 520 bytes (the max size for a
4284  // script data object, and thus, the maximum size any matched object can
4285  // have) in a filteradd message.
4286  bool bad = false;
4287  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
4288  bad = true;
4289  } else if (pfrom.m_tx_relay != nullptr) {
4290  LOCK(pfrom.m_tx_relay->cs_filter);
4291  if (pfrom.m_tx_relay->pfilter) {
4292  pfrom.m_tx_relay->pfilter->insert(vData);
4293  } else {
4294  bad = true;
4295  }
4296  }
4297  if (bad) {
4298  // The structure of this code doesn't really allow for a good error
4299  // code. We'll go generic.
4300  Misbehaving(pfrom, 100, "bad filteradd message");
4301  }
4302  return;
4303  }
4304 
4305  if (msg_type == NetMsgType::FILTERCLEAR) {
4306  if (pfrom.m_tx_relay == nullptr) {
4307  return;
4308  }
4309  LOCK(pfrom.m_tx_relay->cs_filter);
4310  if (pfrom.GetLocalServices() & NODE_BLOOM) {
4311  pfrom.m_tx_relay->pfilter = nullptr;
4312  }
4313  pfrom.m_tx_relay->fRelayTxes = true;
4314  return;
4315  }
4316 
4317  if (msg_type == NetMsgType::FEEFILTER) {
4318  Amount newFeeFilter = Amount::zero();
4319  vRecv >> newFeeFilter;
4320  if (MoneyRange(newFeeFilter)) {
4321  if (pfrom.m_tx_relay != nullptr) {
4322  LOCK(pfrom.m_tx_relay->cs_feeFilter);
4323  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
4324  }
4325  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
4326  CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
4327  }
4328  return;
4329  }
4330 
4331  if (msg_type == NetMsgType::GETCFILTERS) {
4332  ProcessGetCFilters(pfrom, vRecv, chainparams, m_connman);
4333  return;
4334  }
4335 
4336  if (msg_type == NetMsgType::GETCFHEADERS) {
4337  ProcessGetCFHeaders(pfrom, vRecv, chainparams, m_connman);
4338  return;
4339  }
4340 
4341  if (msg_type == NetMsgType::GETCFCHECKPT) {
4342  ProcessGetCFCheckPt(pfrom, vRecv, chainparams, m_connman);
4343  return;
4344  }
4345 
4346  if (msg_type == NetMsgType::NOTFOUND) {
4347  // Remove the NOTFOUND transactions from the peer
4348  LOCK(cs_main);
4349  CNodeState *state = State(pfrom.GetId());
4350  std::vector<CInv> vInv;
4351  vRecv >> vInv;
4352  if (vInv.size() <=
4353  MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
4354  for (CInv &inv : vInv) {
4355  if (inv.type == MSG_TX) {
4356  const TxId txid(inv.hash);
4357  // If we receive a NOTFOUND message for a txid we requested,
4358  // erase it from our data structures for this peer.
4359  auto in_flight_it =
4360  state->m_tx_download.m_tx_in_flight.find(txid);
4361  if (in_flight_it ==
4362  state->m_tx_download.m_tx_in_flight.end()) {
4363  // Skip any further work if this is a spurious NOTFOUND
4364  // message.
4365  continue;
4366  }
4367  state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
4368  state->m_tx_download.m_tx_announced.erase(txid);
4369  }
4370  }
4371  }
4372  return;
4373  }
4374 
4375  // Ignore unknown commands for extensibility
4376  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
4377  SanitizeString(msg_type), pfrom.GetId());
4378  return;
4379 }
4380 
4382  const NodeId peer_id{pnode.GetId()};
4383  PeerRef peer = GetPeerRef(peer_id);
4384  if (peer == nullptr) {
4385  return false;
4386  }
4387  {
4388  LOCK(peer->m_misbehavior_mutex);
4389 
4390  // There's nothing to do if the m_should_discourage flag isn't set
4391  if (!peer->m_should_discourage) {
4392  return false;
4393  }
4394 
4395  peer->m_should_discourage = false;
4396  } // peer.m_misbehavior_mutex
4397 
4398  if (pnode.HasPermission(PF_NOBAN)) {
4399  // We never disconnect or discourage peers for bad behavior if they have
4400  // the NOBAN permission flag
4401  LogPrintf("Warning: not punishing noban peer %d!\n", peer_id);
4402  return false;
4403  }
4404 
4405  if (pnode.IsManualConn()) {
4406  // We never disconnect or discourage manual peers for bad behavior
4407  LogPrintf("Warning: not punishing manually connected peer %d!\n",
4408  peer_id);
4409  return false;
4410  }
4411 
4412  if (pnode.addr.IsLocal()) {
4413  // We disconnect local peers for bad behavior but don't discourage
4414  // (since that would discourage all peers on the same local address)
4415  LogPrintf(
4416  "Warning: disconnecting but not discouraging local peer %d!\n",
4417  peer_id);
4418  pnode.fDisconnect = true;
4419  return true;
4420  }
4421 
4422  // Normal case: Disconnect the peer and discourage all nodes sharing the
4423  // address
4424  LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id);
4425  if (m_banman) {
4426  m_banman->Discourage(pnode.addr);
4427  }
4428  m_connman.DisconnectNode(pnode.addr);
4429  return true;
4430 }
4431 
4433  std::atomic<bool> &interruptMsgProc) {
4434  //
4435  // Message format
4436  // (4) message start
4437  // (12) command
4438  // (4) size
4439  // (4) checksum
4440  // (x) data
4441  //
4442  bool fMoreWork = false;
4443 
4444  if (!pfrom->vRecvGetData.empty()) {
4445  ProcessGetData(config, *pfrom, m_connman, m_mempool, interruptMsgProc);
4446  }
4447 
4448  if (!pfrom->orphan_work_set.empty()) {
4449  LOCK2(cs_main, g_cs_orphans);
4451  }
4452 
4453  if (pfrom->fDisconnect) {
4454  return false;
4455  }
4456 
4457  // this maintains the order of responses and prevents vRecvGetData from
4458  // growing unbounded
4459  if (!pfrom->vRecvGetData.empty()) {
4460  return true;
4461  }
4462  if (!pfrom->orphan_work_set.empty()) {
4463  return true;
4464  }
4465 
4466  // Don't bother if send buffer is too full to respond anyway
4467  if (pfrom->fPauseSend) {
4468  return false;
4469  }
4470 
4471  std::list<CNetMessage> msgs;
4472  {
4473  LOCK(pfrom->cs_vProcessMsg);
4474  if (pfrom->vProcessMsg.empty()) {
4475  return false;
4476  }
4477  // Just take one message
4478  msgs.splice(msgs.begin(), pfrom->vProcessMsg,
4479  pfrom->vProcessMsg.begin());
4480  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
4481  pfrom->fPauseRecv =
4483  fMoreWork = !pfrom->vProcessMsg.empty();
4484  }
4485  CNetMessage &msg(msgs.front());
4486 
4487  msg.SetVersion(pfrom->GetRecvVersion());
4488 
4489  // Check network magic
4490  if (!msg.m_valid_netmagic) {
4492  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
4493  SanitizeString(msg.m_command), pfrom->GetId());
4494 
4495  // Make sure we discourage where that come from for some time.
4496  if (m_banman) {
4497  m_banman->Discourage(pfrom->addr);
4498  }
4499  m_connman.DisconnectNode(pfrom->addr);
4500 
4501  pfrom->fDisconnect = true;
4502  return false;
4503  }
4504 
4505  // Check header
4506  if (!msg.m_valid_header) {
4507  LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
4508  SanitizeString(msg.m_command), pfrom->GetId());
4509  return fMoreWork;
4510  }
4511  const std::string &msg_type = msg.m_command;
4512 
4513  // Message size
4514  unsigned int nMessageSize = msg.m_message_size;
4515 
4516  // Checksum
4517  CDataStream &vRecv = msg.m_recv;
4518  if (!msg.m_valid_checksum) {
4519  LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
4520  __func__, SanitizeString(msg_type), nMessageSize,
4521  pfrom->GetId());
4522  if (m_banman) {
4523  m_banman->Discourage(pfrom->addr);
4524  }
4525  m_connman.DisconnectNode(pfrom->addr);
4526  return fMoreWork;
4527  }
4528 
4529  try {
4530  ProcessMessage(config, *pfrom, msg_type, vRecv, msg.m_time,
4531  interruptMsgProc);
4532  if (interruptMsgProc) {
4533  return false;
4534  }
4535 
4536  if (!pfrom->vRecvGetData.empty()) {
4537  fMoreWork = true;
4538  }
4539  } catch (const std::exception &e) {
4540  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
4541  __func__, SanitizeString(msg_type), nMessageSize, e.what(),
4542  typeid(e).name());
4543  } catch (...) {
4544  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
4545  __func__, SanitizeString(msg_type), nMessageSize);
4546  }
4547 
4548  return fMoreWork;
4549 }
4550 
4552  int64_t time_in_seconds) {
4553  AssertLockHeld(cs_main);
4554 
4555  CNodeState &state = *State(pto.GetId());
4556  const CNetMsgMaker msgMaker(pto.GetSendVersion());
4557 
4558  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
4559  state.fSyncStarted) {
4560  // This is an outbound peer subject to disconnection if they don't
4561  // announce a block with as much work as the current tip within
4562  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
4563  // chain has more work than ours, we should sync to it, unless it's
4564  // invalid, in which case we should find that out and disconnect from
4565  // them elsewhere).
4566  if (state.pindexBestKnownBlock != nullptr &&
4567  state.pindexBestKnownBlock->nChainWork >=
4568  ::ChainActive().Tip()->nChainWork) {
4569  if (state.m_chain_sync.m_timeout != 0) {
4570  state.m_chain_sync.m_timeout = 0;
4571  state.m_chain_sync.m_work_header = nullptr;
4572  state.m_chain_sync.m_sent_getheaders = false;
4573  }
4574  } else if (state.m_chain_sync.m_timeout == 0 ||
4575  (state.m_chain_sync.m_work_header != nullptr &&
4576  state.pindexBestKnownBlock != nullptr &&
4577  state.pindexBestKnownBlock->nChainWork >=
4578  state.m_chain_sync.m_work_header->nChainWork)) {
4579  // Our best block known by this peer is behind our tip, and we're
4580  // either noticing that for the first time, OR this peer was able to
4581  // catch up to some earlier point where we checked against our tip.
4582  // Either way, set a new timeout based on current tip.
4583  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
4584  state.m_chain_sync.m_work_header = ::ChainActive().Tip();
4585  state.m_chain_sync.m_sent_getheaders = false;
4586  } else if (state.m_chain_sync.m_timeout > 0 &&
4587  time_in_seconds > state.m_chain_sync.m_timeout) {
4588  // No evidence yet that our peer has synced to a chain with work
4589  // equal to that of our tip, when we first detected it was behind.
4590  // Send a single getheaders message to give the peer a chance to
4591  // update us.
4592  if (state.m_chain_sync.m_sent_getheaders) {
4593  // They've run out of time to catch up!
4594  LogPrintf(
4595  "Disconnecting outbound peer %d for old chain, best known "
4596  "block = %s\n",
4597  pto.GetId(),
4598  state.pindexBestKnownBlock != nullptr
4599  ? state.pindexBestKnownBlock->GetBlockHash().ToString()
4600  : "<none>");
4601  pto.fDisconnect = true;
4602  } else {
4603  assert(state.m_chain_sync.m_work_header);
4604  LogPrint(
4605  BCLog::NET,
4606  "sending getheaders to outbound peer=%d to verify chain "
4607  "work (current best known block:%s, benchmark blockhash: "
4608  "%s)\n",
4609  pto.GetId(),
4610  state.pindexBestKnownBlock != nullptr
4611  ? state.pindexBestKnownBlock->GetBlockHash().ToString()
4612  : "<none>",
4613  state.m_chain_sync.m_work_header->GetBlockHash()
4614  .ToString());
4616  &pto,
4617  msgMaker.Make(NetMsgType::GETHEADERS,
4618  ::ChainActive().GetLocator(
4619  state.m_chain_sync.m_work_header->pprev),
4620  uint256()));
4621  state.m_chain_sync.m_sent_getheaders = true;
4622  // 2 minutes
4623  constexpr int64_t HEADERS_RESPONSE_TIME = 120;
4624  // Bump the timeout to allow a response, which could clear the
4625  // timeout (if the response shows the peer has synced), reset
4626  // the timeout (if the peer syncs to the required work but not
4627  // to our tip), or result in disconnect (if we advance to the
4628  // timeout and pindexBestKnownBlock has not sufficiently
4629  // progressed)
4630  state.m_chain_sync.m_timeout =
4631  time_in_seconds + HEADERS_RESPONSE_TIME;
4632  }
4633  }
4634  }
4635 }
4636 
4637 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) {
4638  // Check whether we have too many outbound peers
4639  int extra_peers = m_connman.GetExtraOutboundCount();
4640  if (extra_peers <= 0) {
4641  return;
4642  }
4643 
4644  // If we have more outbound peers than we target, disconnect one.
4645  // Pick the outbound peer that least recently announced us a new block, with
4646  // ties broken by choosing the more recent connection (higher node id)
4647  NodeId worst_peer = -1;
4648  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
4649 
4650  m_connman.ForEachNode([&](CNode *pnode) {
4651  AssertLockHeld(cs_main);
4652 
4653  // Ignore non-outbound peers, or nodes marked for disconnect already
4654  if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) {
4655  return;
4656  }
4657  CNodeState *state = State(pnode->GetId());
4658  if (state == nullptr) {
4659  // shouldn't be possible, but just in case
4660  return;
4661  }
4662  // Don't evict our protected peers
4663  if (state->m_chain_sync.m_protect) {
4664  return;
4665  }
4666  // Don't evict our block-relay-only peers.
4667  if (pnode->m_tx_relay == nullptr) {
4668  return;
4669  }
4670 
4671  if (state->m_last_block_announcement < oldest_block_announcement ||
4672  (state->m_last_block_announcement == oldest_block_announcement &&
4673  pnode->GetId() > worst_peer)) {
4674  worst_peer = pnode->GetId();
4675  oldest_block_announcement = state->m_last_block_announcement;
4676  }
4677  });
4678 
4679  if (worst_peer == -1) {
4680  return;
4681  }
4682 
4683  bool disconnected = m_connman.ForNode(worst_peer, [&](CNode *pnode) {
4684  AssertLockHeld(cs_main);
4685 
4686  // Only disconnect a peer that has been connected to us for some
4687  // reasonable fraction of our check-frequency, to give it time for new
4688  // information to have arrived.
4689  // Also don't disconnect any peer we're trying to download a block from.
4690  CNodeState &state = *State(pnode->GetId());
4691  if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME &&
4692  state.nBlocksInFlight == 0) {
4694  "disconnecting extra outbound peer=%d (last block "
4695  "announcement received at time %d)\n",
4696  pnode->GetId(), oldest_block_announcement);
4697  pnode->fDisconnect = true;
4698  return true;
4699  } else {
4701  "keeping outbound peer=%d chosen for eviction "
4702  "(connect time: %d, blocks_in_flight: %d)\n",
4703  pnode->GetId(), pnode->nTimeConnected,
4704  state.nBlocksInFlight);
4705  return false;
4706  }
4707  });
4708 
4709  if (disconnected) {
4710  // If we disconnected an extra peer, that means we successfully
4711  // connected to at least one peer after the last time we detected a
4712  // stale tip. Don't try any more extra peers until we next detect a
4713  // stale tip, to limit the load we put on the network from these extra
4714  // connections.
4716  }
4717 }
4718 
4720  const Consensus::Params &consensusParams) {
4721  LOCK(cs_main);
4722 
4723  int64_t time_in_seconds = GetTime();
4724 
4725  EvictExtraOutboundPeers(time_in_seconds);
4726 
4727  if (time_in_seconds <= m_stale_tip_check_time) {
4728  return;
4729  }
4730 
4731  // Check whether our tip is stale, and if so, allow using an extra outbound
4732  // peer.
4734  m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
4735  LogPrintf("Potential stale tip detected, will try using extra outbound "
4736  "peer (last tip update: %d seconds ago)\n",
4737  time_in_seconds - g_last_tip_update);
4739  } else if (m_connman.GetTryNewOutboundPeer()) {
4741  }
4742  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
4743 }
4744 
4745 namespace {
4746 class CompareInvMempoolOrder {
4747  CTxMemPool *mp;
4748 
4749 public:
4750  explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; }
4751 
4752  bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
4757  return mp->CompareDepthAndScore(*b, *a);
4758  }
4759 };
4760 } // namespace
4761 
4763  std::atomic<bool> &interruptMsgProc) {
4764  const Consensus::Params &consensusParams =
4765  config.GetChainParams().GetConsensus();
4766 
4767  // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
4768  // disconnect misbehaving peers even before the version handshake is
4769  // complete.
4770  if (MaybeDiscourageAndDisconnect(*pto)) {
4771  return true;
4772  }
4773 
4774  // Don't send anything until the version handshake is complete
4775  if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
4776  return true;
4777  }
4778 
4779  // If we get here, the outgoing message serialization version is set and
4780  // can't change.
4781  const CNetMsgMaker msgMaker(pto->GetSendVersion());
4782 
4783  //
4784  // Message: ping
4785  //
4786  bool pingSend = false;
4787  if (pto->fPingQueued) {
4788  // RPC ping request by user
4789  pingSend = true;
4790  }
4791  if (pto->nPingNonceSent == 0 &&
4792  pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
4793  // Ping automatically sent as a latency probe & keepalive.
4794  pingSend = true;
4795  }
4796  if (pingSend) {
4797  uint64_t nonce = 0;
4798  while (nonce == 0) {
4799  GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
4800  }
4801  pto->fPingQueued = false;
4802  pto->nPingUsecStart = GetTimeMicros();
4803  if (pto->nVersion > BIP0031_VERSION) {
4804  pto->nPingNonceSent = nonce;
4805  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
4806  } else {
4807  // Peer is too old to support ping command with nonce, pong will
4808  // never arrive.
4809  pto->nPingNonceSent = 0;
4810  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
4811  }
4812  }
4813 
4814  {
4815  LOCK(cs_main);
4816 
4817  CNodeState &state = *State(pto->GetId());
4818 
4819  // Address refresh broadcast
4820  int64_t nNow = GetTimeMicros();
4821  auto current_time = GetTime<std::chrono::microseconds>();
4822 
4823  if (pto->IsAddrRelayPeer() &&
4825  pto->m_next_local_addr_send < current_time) {
4826  AdvertiseLocal(pto);
4827  pto->m_next_local_addr_send = PoissonNextSend(
4828  current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
4829  }
4830 
4831  //
4832  // Message: addr
4833  //
4834  if (pto->IsAddrRelayPeer() && pto->m_next_addr_send < current_time) {
4835  pto->m_next_addr_send =
4837  std::vector<CAddress> vAddr;
4838  vAddr.reserve(pto->vAddrToSend.size());
4839  assert(pto->m_addr_known);
4840  for (const CAddress &addr : pto->vAddrToSend) {
4841  if (!pto->m_addr_known->contains(addr.GetKey())) {
4842  pto->m_addr_known->insert(addr.GetKey());
4843  vAddr.push_back(addr);
4844  // receiver rejects addr messages larger than 1000
4845  if (vAddr.size() >= 1000) {
4847  pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
4848  vAddr.clear();
4849  }
4850  }
4851  }
4852  pto->vAddrToSend.clear();
4853  if (!vAddr.empty()) {
4854  m_connman.PushMessage(pto,
4855  msgMaker.Make(NetMsgType::ADDR, vAddr));
4856  }
4857 
4858  // we only send the big addr message once
4859  if (pto->vAddrToSend.capacity() > 40) {
4860  pto->vAddrToSend.shrink_to_fit();
4861  }
4862  }
4863 
4864  // Start block sync
4865  if (pindexBestHeader == nullptr) {
4867  }
4868 
4869  // Download if this is a nice peer, or we have no nice peers and this
4870  // one might do.
4871  bool fFetch = state.fPreferredDownload ||
4872  (nPreferredDownload == 0 && !pto->fClient &&
4873  !pto->IsAddrFetchConn());
4874 
4875  if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
4876  // Only actively request headers from a single peer, unless we're
4877  // close to today.
4878  if ((nSyncStarted == 0 && fFetch) ||
4880  GetAdjustedTime() - 24 * 60 * 60) {
4881  state.fSyncStarted = true;
4882  state.nHeadersSyncTimeout =
4886  (consensusParams.nPowTargetSpacing);
4887  nSyncStarted++;
4888  const CBlockIndex *pindexStart = pindexBestHeader;
4897  if (pindexStart->pprev) {
4898  pindexStart = pindexStart->pprev;
4899  }
4900 
4901  LogPrint(
4902  BCLog::NET,
4903  "initial getheaders (%d) to peer=%d (startheight:%d)\n",
4904  pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
4906  pto, msgMaker.Make(NetMsgType::GETHEADERS,
4907  ::ChainActive().GetLocator(pindexStart),
4908  uint256()));
4909  }
4910  }
4911 
4912  //
4913  // Try sending block announcements via headers
4914  //
4915  {
4916  // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
4917  // hashes we're relaying, and our peer wants headers announcements,
4918  // then find the first header not yet known to our peer but would
4919  // connect, and send. If no header would connect, or if we have too
4920  // many blocks, or if the peer doesn't want headers, just add all to
4921  // the inv queue.
4922  LOCK(pto->cs_inventory);
4923  std::vector<CBlock> vHeaders;
4924  bool fRevertToInv =
4925  ((!state.fPreferHeaders &&
4926  (!state.fPreferHeaderAndIDs ||
4927  pto->vBlockHashesToAnnounce.size() > 1)) ||
4928  pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
4929  // last header queued for delivery
4930  const CBlockIndex *pBestIndex = nullptr;
4931  // ensure pindexBestKnownBlock is up-to-date
4932  ProcessBlockAvailability(pto->GetId());
4933 
4934  if (!fRevertToInv) {
4935  bool fFoundStartingHeader = false;
4936  // Try to find first header that our peer doesn't have, and then
4937  // send all headers past that one. If we come across an headers
4938  // that aren't on ::ChainActive(), give up.
4939  for (const BlockHash &hash : pto->vBlockHashesToAnnounce) {
4940  const CBlockIndex *pindex = LookupBlockIndex(hash);
4941  assert(pindex);
4942  if (::ChainActive()[pindex->nHeight] != pindex) {
4943  // Bail out if we reorged away from this block
4944  fRevertToInv = true;
4945  break;
4946  }
4947  if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
4948  // This means that the list of blocks to announce don't
4949  // connect to each other. This shouldn't really be
4950  // possible to hit during regular operation (because
4951  // reorgs should take us to a chain that has some block
4952  // not on the prior chain, which should be caught by the
4953  // prior check), but one way this could happen is by
4954  // using invalidateblock / reconsiderblock repeatedly on
4955  // the tip, causing it to be added multiple times to
4956  // vBlockHashesToAnnounce. Robustly deal with this rare
4957  // situation by reverting to an inv.
4958  fRevertToInv = true;
4959  break;
4960  }
4961  pBestIndex = pindex;
4962  if (fFoundStartingHeader) {
4963  // add this to the headers message
4964  vHeaders.push_back(pindex->GetBlockHeader());
4965  } else if (PeerHasHeader(&state, pindex)) {
4966  // Keep looking for the first new block.
4967  continue;
4968  } else if (pindex->pprev == nullptr ||
4969  PeerHasHeader(&state, pindex->pprev)) {
4970  // Peer doesn't have this header but they do have the
4971  // prior one. Start sending headers.
4972  fFoundStartingHeader = true;
4973  vHeaders.push_back(pindex->GetBlockHeader());
4974  } else {
4975  // Peer doesn't have this header or the prior one --
4976  // nothing will connect, so bail out.
4977  fRevertToInv = true;
4978  break;
4979  }
4980  }
4981  }
4982  if (!fRevertToInv && !vHeaders.empty()) {
4983  if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
4984  // We only send up to 1 block as header-and-ids, as
4985  // otherwise probably means we're doing an initial-ish-sync
4986  // or they're slow.
4988  "%s sending header-and-ids %s to peer=%d\n",
4989  __func__, vHeaders.front().GetHash().ToString(),
4990  pto->GetId());
4991 
4992  int nSendFlags = 0;
4993 
4994  bool fGotBlockFromCache = false;
4995  {
4997  if (most_recent_block_hash ==
4998  pBestIndex->GetBlockHash()) {
4999  CBlockHeaderAndShortTxIDs cmpctblock(
5000  *most_recent_block);
5002  pto, msgMaker.Make(nSendFlags,
5004  cmpctblock));
5005  fGotBlockFromCache = true;
5006  }
5007  }
5008  if (!fGotBlockFromCache) {
5009  CBlock block;
5010  bool ret = ReadBlockFromDisk(block, pBestIndex,
5011  consensusParams);
5012  assert(ret);
5013  CBlockHeaderAndShortTxIDs cmpctblock(block);
5015  pto,
5016  msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
5017  cmpctblock));
5018  }
5019  state.pindexBestHeaderSent = pBestIndex;
5020  } else if (state.fPreferHeaders) {
5021  if (vHeaders.size() > 1) {
5023  "%s: %u headers, range (%s, %s), to peer=%d\n",
5024  __func__, vHeaders.size(),
5025  vHeaders.front().GetHash().ToString(),
5026  vHeaders.back().GetHash().ToString(),
5027  pto->GetId());
5028  } else {
5030  "%s: sending header %s to peer=%d\n", __func__,
5031  vHeaders.front().GetHash().ToString(),
5032  pto->GetId());
5033  }
5035  pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
5036  state.pindexBestHeaderSent = pBestIndex;
5037  } else {
5038  fRevertToInv = true;
5039  }
5040  }
5041  if (fRevertToInv) {
5042  // If falling back to using an inv, just try to inv the tip. The
5043  // last entry in vBlockHashesToAnnounce was our tip at some
5044  // point in the past.
5045  if (!pto->vBlockHashesToAnnounce.empty()) {
5046  const BlockHash &hashToAnnounce =
5047  pto->vBlockHashesToAnnounce.back();
5048  const CBlockIndex *pindex =
5049  LookupBlockIndex(hashToAnnounce);
5050  assert(pindex);
5051 
5052  // Warn if we're announcing a block that is not on the main
5053  // chain. This should be very rare and could be optimized
5054  // out. Just log for now.
5055  if (::ChainActive()[pindex->nHeight] != pindex) {
5056  LogPrint(
5057  BCLog::NET,
5058  "Announcing block %s not on main chain (tip=%s)\n",
5059  hashToAnnounce.ToString(),
5061  }
5062 
5063  // If the peer's chain has this block, don't inv it back.
5064  if (!PeerHasHeader(&state, pindex)) {
5065  pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
5067  "%s: sending inv peer=%d hash=%s\n", __func__,
5068  pto->GetId(), hashToAnnounce.ToString());
5069  }
5070  }
5071  }
5072  pto->vBlockHashesToAnnounce.clear();
5073  }
5074 
5075  //
5076  // Message: inventory
5077  //
5078  std::vector<CInv> vInv;
5079  {
5080  LOCK(pto->cs_inventory);
5081  vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
5082  INVENTORY_BROADCAST_MAX_PER_MB *
5083  config.GetMaxBlockSize() /
5084  1000000));
5085 
5086  // Add blocks
5087  for (const BlockHash &hash : pto->vInventoryBlockToSend) {
5088  vInv.push_back(CInv(MSG_BLOCK, hash));
5089  if (vInv.size() == MAX_INV_SZ) {
5090  m_connman.PushMessage(pto,
5091  msgMaker.Make(NetMsgType::INV, vInv));
5092  vInv.clear();
5093  }
5094  }
5095  pto->vInventoryBlockToSend.clear();
5096 
5097  if (pto->m_tx_relay != nullptr) {
5098  LOCK(pto->m_tx_relay->cs_tx_inventory);
5099  // Check whether periodic sends should happen
5100  bool fSendTrickle = pto->HasPermission(PF_NOBAN);
5101  if (pto->m_tx_relay->nNextInvSend < current_time) {
5102  fSendTrickle = true;
5103  if (pto->IsInboundConn()) {
5104  pto->m_tx_relay->nNextInvSend =
5105  std::chrono::microseconds{
5107  nNow, INVENTORY_BROADCAST_INTERVAL)};
5108  } else {
5109  // Skip delay for outbound peers, as there is less
5110  // privacy concern for them.
5111  pto->m_tx_relay->nNextInvSend = current_time;
5112  }
5113  }
5114 
5115  // Time to send but the peer has requested we not relay
5116  // transactions.
5117  if (fSendTrickle) {
5118  LOCK(pto->m_tx_relay->cs_filter);
5119  if (!pto->m_tx_relay->fRelayTxes) {
5120  pto->m_tx_relay->setInventoryTxToSend.clear();
5121  }
5122  }
5123 
5124  // Respond to BIP35 mempool requests
5125  if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
5126  auto vtxinfo = m_mempool.infoAll();
5127  pto->m_tx_relay->fSendMempool = false;
5128  CFeeRate filterrate;
5129  {
5130  LOCK(pto->m_tx_relay->cs_feeFilter);
5131  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
5132  }
5133 
5134  LOCK(pto->m_tx_relay->cs_filter);
5135 
5136  for (const auto &txinfo : vtxinfo) {
5137  const TxId &txid = txinfo.tx->GetId();
5138  CInv inv(MSG_TX, txid);
5139  pto->m_tx_relay->setInventoryTxToSend.erase(txid);
5140  // Don't send transactions that peers will not put into
5141  // their mempool
5142  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5143  continue;
5144  }
5145  if (pto->m_tx_relay->pfilter &&
5146  !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
5147  *txinfo.tx)) {
5148  continue;
5149  }
5150  pto->m_tx_relay->filterInventoryKnown.insert(txid);
5151  vInv.push_back(inv);
5152  if (vInv.size() == MAX_INV_SZ) {
5154  pto, msgMaker.Make(NetMsgType::INV, vInv));
5155  vInv.clear();
5156  }
5157  }
5158  pto->m_tx_relay->m_last_mempool_req =
5159  GetTime<std::chrono::seconds>();
5160  }
5161 
5162  // Determine transactions to relay
5163  if (fSendTrickle) {
5164  // Produce a vector with all candidates for sending
5165  std::vector<std::set<TxId>::iterator> vInvTx;
5166  vInvTx.reserve(
5167  pto->m_tx_relay->setInventoryTxToSend.size());
5168  for (std::set<TxId>::iterator it =
5169  pto->m_tx_relay->setInventoryTxToSend.begin();
5170  it != pto->m_tx_relay->setInventoryTxToSend.end();
5171  it++) {
5172  vInvTx.push_back(it);
5173  }
5174  CFeeRate filterrate;
5175  {
5176  LOCK(pto->m_tx_relay->cs_feeFilter);
5177  filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter);
5178  }
5179  // Topologically and fee-rate sort the inventory we send for
5180  // privacy and priority reasons. A heap is used so that not
5181  // all items need sorting if only a few are being sent.
5182  CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
5183  std::make_heap(vInvTx.begin(), vInvTx.end(),
5184  compareInvMempoolOrder);
5185  // No reason to drain out at many times the network's
5186  // capacity, especially since we have many peers and some
5187  // will draw much shorter delays.
5188  unsigned int nRelayedTransactions = 0;
5189  LOCK(pto->m_tx_relay->cs_filter);
5190  while (!vInvTx.empty() &&
5191  nRelayedTransactions <
5192  INVENTORY_BROADCAST_MAX_PER_MB *
5193  config.GetMaxBlockSize() / 1000000) {
5194  // Fetch the top element from the heap
5195  std::pop_heap(vInvTx.begin(), vInvTx.end(),
5196  compareInvMempoolOrder);
5197  std::set<TxId>::iterator it = vInvTx.back();
5198  vInvTx.pop_back();
5199  const TxId txid = *it;
5200  // Remove it from the to-be-sent set
5201  pto->m_tx_relay->setInventoryTxToSend.erase(it);
5202  // Check if not in the filter already
5203  if (pto->m_tx_relay->filterInventoryKnown.contains(
5204  txid)) {
5205  continue;
5206  }
5207  // Not in the mempool anymore? don't bother sending it.
5208  auto txinfo = m_mempool.info(txid);
5209  if (!txinfo.tx) {
5210  continue;
5211  }
5212  // Peer told you to not send transactions at that
5213  // feerate? Don't bother sending it.
5214  if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
5215  continue;
5216  }
5217  if (pto->m_tx_relay->pfilter &&
5218  !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
5219  *txinfo.tx)) {
5220  continue;
5221  }
5222  // Send
5223  vInv.push_back(CInv(MSG_TX, txid));
5224  nRelayedTransactions++;
5225  {
5226  // Expire old relay messages
5227  while (!vRelayExpiration.empty() &&
5228  vRelayExpiration.front().first < nNow) {
5229  mapRelay.erase(vRelayExpiration.front().second);
5230  vRelayExpiration.pop_front();
5231  }
5232 
5233  auto ret = mapRelay.insert(
5234  std::make_pair(txid, std::move(txinfo.tx)));
5235  if (ret.second) {
5236  vRelayExpiration.push_back(std::make_pair(
5237  nNow +
5238  std::chrono::microseconds{
5240  .count(),
5241  ret.first));
5242  }
5243  }
5244  if (vInv.size() == MAX_INV_SZ) {
5246  pto, msgMaker.Make(NetMsgType::INV, vInv));
5247  vInv.clear();
5248  }
5249  pto->m_tx_relay->filterInventoryKnown.insert(txid);
5250  }
5251  }
5252  }
5253  }
5254  if (!vInv.empty()) {
5255  m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
5256  }
5257 
5258  // Detect whether we're stalling
5259  current_time = GetTime<std::chrono::microseconds>();
5260  // nNow is the current system time (GetTimeMicros is not mockable) and
5261  // should be replaced by the mockable current_time eventually
5262  nNow = GetTimeMicros();
5263  if (state.nStallingSince &&
5264  state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
5265  // Stalling only triggers when the block download window cannot
5266  // move. During normal steady state, the download window should be
5267  // much larger than the to-be-downloaded set of blocks, so
5268  // disconnection should only happen during initial block download.
5269  LogPrintf("Peer=%d is stalling block download, disconnecting\n",
5270  pto->GetId());
5271  pto->fDisconnect = true;
5272  return true;
5273  }
5274  // In case there is a block that has been in flight from this peer for 2
5275  // + 0.5 * N times the block interval (with N the number of peers from
5276  // which we're downloading validated blocks), disconnect due to timeout.
5277  // We compensate for other peers to prevent killing off peers due to our
5278  // own downstream link being saturated. We only count validated
5279  // in-flight blocks so peers can't advertise non-existing block hashes
5280  // to unreasonably increase our timeout.
5281  if (state.vBlocksInFlight.size() > 0) {
5282  QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
5283  int nOtherPeersWithValidatedDownloads =
5284  nPeersWithValidatedDownloads -
5285  (state.nBlocksInFlightValidHeaders > 0);
5286  if (nNow > state.nDownloadingSince +
5287  consensusParams.nPowTargetSpacing *
5288  (BLOCK_DOWNLOAD_TIMEOUT_BASE +
5289  BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
5290  nOtherPeersWithValidatedDownloads)) {
5291  LogPrintf("Timeout downloading block %s from peer=%d, "
5292  "disconnecting\n",
5293  queuedBlock.hash.ToString(), pto->GetId());
5294  pto->fDisconnect = true;
5295  return true;
5296  }
5297  }
5298 
5299  // Check for headers sync timeouts
5300  if (state.fSyncStarted &&
5301  state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
5302  // Detect whether this is a stalling initial-headers-sync peer
5303  if (pindexBestHeader->GetBlockTime() <=
5304  GetAdjustedTime() - 24 * 60 * 60) {
5305  if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 &&
5306  (nPreferredDownload - state.fPreferredDownload >= 1)) {
5307  // Disconnect a (non-whitelisted) peer if it is our only
5308  // sync peer, and we have others we could be using instead.
5309  // Note: If all our peers are inbound, then we won't
5310  // disconnect our sync peer for stalling; we have bigger
5311  // problems if we can't get any outbound peers.
5312  if (!pto->HasPermission(PF_NOBAN)) {
5313  LogPrintf("Timeout downloading headers from peer=%d, "
5314  "disconnecting\n",
5315  pto->GetId());
5316  pto->fDisconnect = true;
5317  return true;
5318  } else {
5319  LogPrintf(
5320  "Timeout downloading headers from whitelisted "
5321  "peer=%d, not disconnecting\n",
5322  pto->GetId());
5323  // Reset the headers sync state so that we have a chance
5324  // to try downloading from a different peer. Note: this
5325  // will also result in at least one more getheaders
5326  // message to be sent to this peer (eventually).
5327  state.fSyncStarted = false;
5328  nSyncStarted--;
5329  state.nHeadersSyncTimeout = 0;
5330  }
5331  }
5332  } else {
5333  // After we've caught up once, reset the timeout so we can't
5334  // trigger disconnect later.
5335  state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
5336  }
5337  }
5338 
5339  // Check that outbound peers have reasonable chains GetTime() is used by
5340  // this anti-DoS logic so we can test this using mocktime.
5341  ConsiderEviction(*pto, GetTime());
5342 
5343  //
5344  // Message: getdata (blocks)
5345  //
5346  std::vector<CInv> vGetData;
5347  if (!pto->fClient &&
5348  ((fFetch && !pto->m_limited_node) ||
5349  !::ChainstateActive().IsInitialBlockDownload()) &&
5350  state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
5351  std::vector<const CBlockIndex *> vToDownload;
5352  NodeId staller = -1;
5353  FindNextBlocksToDownload(pto->GetId(),
5354  MAX_BLOCKS_IN_TRANSIT_PER_PEER -
5355  state.nBlocksInFlight,
5356  vToDownload, staller, consensusParams);
5357  for (const CBlockIndex *pindex : vToDownload) {
5358  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
5359  MarkBlockAsInFlight(config, m_mempool, pto->GetId(),
5360  pindex->GetBlockHash(), consensusParams,
5361  pindex);
5362  LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
5363  pindex->GetBlockHash().ToString(), pindex->nHeight,
5364  pto->GetId());
5365  }
5366  if (state.nBlocksInFlight == 0 && staller != -1) {
5367  if (State(staller)->nStallingSince == 0) {
5368  State(staller)->nStallingSince = nNow;
5369  LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
5370  }
5371  }
5372  }
5373 
5374  //
5375  // Message: getdata (transactions)
5376  //
5377 
5378  // For robustness, expire old requests after a long timeout, so that we
5379  // can resume downloading transactions from a peer even if they were
5380  // unresponsive in the past. Eventually we should consider disconnecting
5381  // peers, but this is conservative.
5382  if (state.m_tx_download.m_check_expiry_timer <= current_time) {
5383  for (auto it = state.m_tx_download.m_tx_in_flight.begin();
5384  it != state.m_tx_download.m_tx_in_flight.end();) {
5385  if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
5387  "timeout of inflight tx %s from peer=%d\n",
5388  it->first.ToString(), pto->GetId());
5389  state.m_tx_download.m_tx_announced.erase(it->first);
5390  state.m_tx_download.m_tx_in_flight.erase(it++);
5391  } else {
5392  ++it;
5393  }
5394  }
5395  // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
5396  // so that we're not doing this for all peers at the same time.
5397  state.m_tx_download.m_check_expiry_timer =
5398  current_time + TX_EXPIRY_INTERVAL / 2 +
5400  }
5401 
5402  auto &tx_process_time = state.m_tx_download.m_tx_process_time;
5403  while (!tx_process_time.empty() &&
5404  tx_process_time.begin()->first <= current_time &&
5405  state.m_tx_download.m_tx_in_flight.size() <
5407  const TxId txid = tx_process_time.begin()->second;
5408  // Erase this entry from tx_process_time (it may be added back for
5409  // processing at a later time, see below)
5410  tx_process_time.erase(tx_process_time.begin());
5411  CInv inv(MSG_TX, txid);
5412  if (!AlreadyHave(inv, m_mempool)) {
5413  // If this transaction was last requested more than 1 minute
5414  // ago, then request.
5415  const auto last_request_time = GetTxRequestTime(txid);
5416  if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
5417  LogPrint(BCLog::NET, "Requesting %s peer=%d\n",
5418  inv.ToString(), pto->GetId());
5419  vGetData.push_back(inv);
5420  if (vGetData.size() >= MAX_GETDATA_SZ) {
5422  pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
5423  vGetData.clear();
5424  }
5425  UpdateTxRequestTime(txid, current_time);
5426  state.m_tx_download.m_tx_in_flight.emplace(txid,
5427  current_time);
5428  } else {
5429  // This transaction is in flight from someone else; queue
5430  // up processing to happen after the download times out
5431  // (with a slight delay for inbound peers, to prefer
5432  // requests to outbound peers).
5433  const auto next_process_time = CalculateTxGetDataTime(
5434  txid, current_time, !state.fPreferredDownload);
5435  tx_process_time.emplace(next_process_time, txid);
5436  }
5437  } else {
5438  // We have already seen this transaction, no need to download.
5439  state.m_tx_download.m_tx_announced.erase(txid);
5440  state.m_tx_download.m_tx_in_flight.erase(txid);
5441  }
5442  }
5443 
5444  if (!vGetData.empty()) {
5445  m_connman.PushMessage(pto,
5446  msgMaker.Make(NetMsgType::GETDATA, vGetData));
5447  }
5448 
5449  //
5450  // Message: feefilter
5451  //
5452  // We don't want white listed peers to filter txs to us if we have
5453  // -whitelistforcerelay
5454  if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION &&
5455  gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
5456  !pto->HasPermission(PF_FORCERELAY)) {
5457  Amount currentFilter =
5458  m_mempool
5459  .GetMinFee(
5460  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
5461  1000000)
5462  .GetFeePerK();
5463  int64_t timeNow = GetTimeMicros();
5464  if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
5465  static CFeeRate default_feerate =
5467  static FeeFilterRounder filterRounder(default_feerate);
5468  Amount filterToSend = filterRounder.round(currentFilter);
5469  filterToSend =
5470  std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
5471 
5472  if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
5474  pto,
5475  msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
5476  pto->m_tx_relay->lastSentFeeFilter = filterToSend;
5477  }
5478  pto->m_tx_relay->nextSendTimeFeeFilter =
5479  PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
5480  }
5481  // If the fee filter has changed substantially and it's still more
5482  // than MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then
5483  // move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
5484  else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
5485  pto->m_tx_relay->nextSendTimeFeeFilter &&
5486  (currentFilter <
5487  3 * pto->m_tx_relay->lastSentFeeFilter / 4 ||
5488  currentFilter >
5489  4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
5490  pto->m_tx_relay->nextSendTimeFeeFilter =
5491  timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
5492  }
5493  }
5494  } // release cs_main
5495  return true;
5496 }
5497 
5499 public:
5502  // orphan transactions
5503  mapOrphanTransactions.clear();
5504  mapOrphanTransactionsByPrev.clear();
5505  }
5506 };
std::shared_ptr< const CTransaction > CTransactionRef
Definition: transaction.h:338
const char * GETCFILTERS
getcfilters requests compact filters for a range of blocks.
Definition: protocol.cpp:46
arith_uint256 nChainWork
(memory only) Total amount of work (expected number of hashes) in the chain up to and including this ...
Definition: blockindex.h:49
static constexpr int64_t MINIMUM_CONNECT_TIME
Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict...
static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL
How long to wait (in microseconds) before downloading a transaction from an additional peer...
std::string SanitizeString(const std::string &str, int rule)
Remove unsafe chars.
enum ReadStatus_t ReadStatus
const char * PING
The ping message is sent periodically to help confirm that the receiving peer is still connected...
Definition: protocol.cpp:34
static constexpr bool AVALANCHE_DEFAULT_ENABLED
Is avalanche enabled by default.
Definition: processor.h:36
uint64_t nRemoteHostNonce
Definition: net.h:847
bool CompareDepthAndScore(const TxId &txida, const TxId &txidb)
Definition: txmempool.cpp:838
std::atomic< uint64_t > nPingNonceSent
Definition: net.h:1007
BlockFilterIndex is used to store and retrieve block filters, hashes, and headers for a range of bloc...
bool IsArgSet(const std::string &strArg) const
Return true if the given argument has been manually set.
Definition: system.cpp:390
const char * FILTERLOAD
The filterload message tells the receiving peer to filter all relayed transactions and requested merk...
Definition: protocol.cpp:37
const char * MERKLEBLOCK
The merkleblock message is a reply to a getdata message which requested a block using the inventory t...
Definition: protocol.cpp:26
std::atomic_bool fPauseSend
Definition: net.h:878
uint64_t GetRand(uint64_t nMax) noexcept
Definition: random.cpp:641
static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a getcfcheckpt request.
invalid by consensus rules
int GetSendVersion() const
Definition: net.cpp:664
std::array< uint8_t, 64 > sig
Definition: processor.cpp:248
const char * BLOCKTXN
Contains a BlockTransactions.
Definition: protocol.cpp:45
bool fPruneMode
True if we're running in -prune mode.
Definition: validation.cpp:104
std::map< TxId, COrphanTx > mapOrphanTransactions GUARDED_BY(g_cs_orphans)
static constexpr Amount zero()
Definition: amount.h:35
bool ReadBlockFromDisk(CBlock &block, const FlatFilePos &pos, const Consensus::Params &params)
Functions for disk access for blocks.
Definition: blockdb.cpp:33
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER
Number of blocks that can be requested at any given time from a single peer.
static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH
Maximum length of incoming protocol messages (Currently 2MB).
Definition: protocol.h:28
static const int MAX_BLOCKTXN_DEPTH
Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for.
void Discourage(const CNetAddr &net_addr)
Definition: banman.cpp:122
Definition: banman.h:58
ReadStatus FillBlock(CBlock &block, const std::vector< CTransactionRef > &vtx_missing)
void AddKnownTx(const TxId &txid)
Definition: net.h:1116
ServiceFlags
nServices flags.
Definition: protocol.h:320
bool IsLocal() const
Definition: netaddress.cpp:218
ChainstateManager & m_chainman
#define LogPrint(category,...)
Definition: logging.h:189
void InitializeNode(const Config &config, CNode *pnode) override
Initialize a peer by adding it to mapNodeState and pushing a message requesting its version...
int64_t GetBlockTime() const
Definition: blockindex.h:160
Describes a place in the block chain to another node such that if the other node doesn't have the sam...
Definition: block.h:110
bool ActivateBestChain(const Config &config, BlockValidationState &state, std::shared_ptr< const CBlock > pblock)
Find the best known block, and make it the tip of the block chain.
static void ProcessGetData(const Config &config, CNode &pfrom, CConnman &connman, const CTxMemPool &mempool, const std::atomic< bool > &interruptMsgProc) LOCKS_EXCLUDED(cs_main)
CBlockIndex * pprev
pointer to the index of the predecessor of this block
Definition: blockindex.h:30
std::vector< TxMempoolInfo > infoAll() const
Definition: txmempool.cpp:905
CSipHasher & Write(uint64_t data)
Hash a 64-bit integer worth of data.
Definition: siphash.cpp:36
STL-like map container that only keeps the N elements with the highest value.
Definition: limitedmap.h:14
void SetIP(const CNetAddr &ip)
Definition: netaddress.cpp:28
void WakeMessageHandler()
Definition: net.cpp:1627
static void ProcessGetBlockData(const Config &config, CNode &pfrom, const CInv &inv, CConnman &connman, const std::atomic< bool > &interruptMsgProc)
void SetServices(const CService &addr, ServiceFlags nServices)
Definition: net.cpp:2786
std::string ToString() const
Definition: protocol.cpp:247
bool exists(const TxId &txid) const
Definition: txmempool.h:767
static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman)
Handle a cfilters request.
Definition: block.h:62
uint64_t ReadCompactSize(Stream &is)
Definition: serialize.h:442
We don't have the previous block the checked one is built on.
CChain & ChainActive()
Definition: validation.cpp:78
const char * GETADDR
The getaddr message requests an addr message from the receiving node, preferably one with lots of IP ...
Definition: protocol.cpp:32
void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override
Overridden from CValidationInterface.
int64_t nTimeExpire
static const unsigned int DEFAULT_MAX_MEMPOOL_SIZE
Default for -maxmempool, maximum megabytes of mempool memory usage.
Definition: policy.h:48
Provides an interface for creating and interacting with one or two chainstates: an IBD chainstate gen...
Definition: validation.h:1023
Defined in BIP152.
Definition: protocol.h:485
int GetRecvVersion() const
Definition: net.h:1081
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1201
void CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
Evict extra outbound peers.
bool IsOutboundOrBlockRelayConn() const
Definition: net.h:880
const char * AVAHELLO
Contains a delegation and a signature.
Definition: protocol.cpp:52
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
Get statistics from node state.
size_t DynamicMemoryUsage() const
Definition: txmempool.cpp:1055
reverse_range< T > reverse_iterate(T &x)
Inv(ventory) message data.
Definition: protocol.h:493
invalid proof of work or time too old
RecursiveMutex cs_inventory
Definition: net.h:952
const char * SENDCMPCT
Contains a 1-byte bool and 8-byte LE version number.
Definition: protocol.cpp:42
BlockFilterIndex * GetBlockFilterIndex(BlockFilterType filter_type)
Get a block filter index by type.
static constexpr int STALE_RELAY_AGE_LIMIT
Age after which a stale block will no longer be served if requested as protection against fingerprint...
BlockStatus nStatus
Verification status of this block. See enum BlockStatus.
Definition: blockindex.h:76
static const unsigned int MIN_BLOCKS_TO_KEEP
Block files containing a block-height within MIN_BLOCKS_TO_KEEP of ChainActive().Tip() will not be pr...
Definition: validation.h:109
const CBlockIndex * LastCommonAncestor(const CBlockIndex *pa, const CBlockIndex *pb)
Find the last common ancestor two blocks have.
Definition: chain.cpp:118
transaction was missing some of its inputs
CTxMemPool & m_mempool
bool IsFeelerConn() const
Definition: net.h:905
bool hasData() const
Definition: blockstatus.h:54
CBlockHeader GetBlockHeader() const
Definition: blockindex.h:120
int Height() const
Return the maximal height in the chain.
Definition: chain.h:210
BloomFilter is a probabilistic filter which SPV clients provide so that we can filter the transaction...
Definition: bloom.h:45
static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
bool GetTryNewOutboundPeer()
Definition: net.cpp:1907
Definition: amount.h:17
CTransactionRef tx
unsigned long size() const
Definition: txmempool.h:757
block finalization problems
const char * CFHEADERS
cfheaders is a response to a getcfheaders request containing a filter header and a vector of filter h...
Definition: protocol.cpp:49
std::set< TxId > orphan_work_set
Definition: net.h:1017
static const int BIP0031_VERSION
BIP 0031, pong message, is enabled for all versions AFTER this one.
Definition: version.h:20
void PushMessage(CNode *pnode, CSerializedNetMsg &&msg)
Definition: net.cpp:3055
CSerializedNetMsg Make(int nFlags, std::string msg_type, Args &&... args) const
std::vector< BlockHash > vHave
Definition: block.h:111
RecursiveMutex cs_vProcessMsg
Definition: net.h:827
arith_uint256 nMinimumChainWork
Minimum work we will assume exists on some valid chain.
Definition: validation.cpp:113
void SetVersion(int nVersionIn)
Definition: net.h:700
static void LogPrintf(const char *fmt, const Args &... args)
Definition: logging.h:171
const TxId & GetTxId() const
Definition: transaction.h:43
void SetServiceFlagsIBDCache(bool state)
Set the current IBD status in order to figure out the desirable service flags.
Definition: protocol.cpp:212
RollingBloomFilter is a probabilistic "keep track of most recently inserted" set. ...
Definition: bloom.h:131
std::atomic< int64_t > nPingUsecStart
Definition: net.h:1009
const TxHash GetHash() const
Definition: transaction.h:262
static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends...
bool ProcessMessages(const Config &config, CNode *pfrom, std::atomic< bool > &interrupt) override
Process protocol messages received from a given node.
CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
Definition: net.cpp:173
const CBlock & GenesisBlock() const
Definition: chainparams.h:64
static const int SENDHEADERS_VERSION
"sendheaders" command and announcing blocks with headers starts with this version ...
Definition: version.h:28
CChainParams defines various tweakable parameters of a given instance of the Bitcoin system...
Definition: chainparams.h:47
violated mempool's fee/size/descendant/etc limits
bool IsNull() const
Definition: block.h:131
Double ended buffer combining vector and stream-like interfaces.
Definition: streams.h:196
bool empty() const
Definition: streams.h:280
bool GetBoolArg(const std::string &strArg, bool fDefault) const
Return boolean argument or default value.
Definition: system.cpp:498
void SetTryNewOutboundPeer(bool flag)
Definition: net.cpp:1911
uint64_t GetLocalNonce() const
Definition: net.h:1067
bool SeenLocal(const CService &addr)
vote for a local address
Definition: net.cpp:285
std::vector< CAddress > vAddrToSend
Definition: net.h:939
transaction spends a coinbase too early, or violates locktime/sequence locks
std::atomic< int > nStartingHeight
Definition: net.h:936
const char * CFILTER
cfilter is a response to a getcfilters request containing a single compact filter.
Definition: protocol.cpp:47
static const unsigned int MAX_SCRIPT_ELEMENT_SIZE
Definition: script.h:23
void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand)
Definition: net.h:1101
void SetRecvVersion(int nVersionIn)
Definition: net.h:1080
const char * PONG
The pong message replies to a ping message, proving to the pinging node that the ponging node is stil...
Definition: protocol.cpp:35
static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME
How long to cache transactions in mapRelay for normal relay.
initial value. Tx has not yet been rejected
Reads data from an underlying stream, while hashing the read data.
Definition: hash.h:164
static constexpr uint32_t MAX_GETCFILTERS_SIZE
Maximum number of compact filters that may be requested with one getcfilters.
bool IsNull() const
Definition: uint256.h:26
const char * HEADERS
The headers message sends one or more block headers to a node which previously requested certain head...
Definition: protocol.cpp:30
virtual const CChainParams & GetChainParams() const =0
uint32_t nTime
Definition: block.h:28
void PushInventory(const CInv &inv)
Definition: net.h:1123
const char * GETCFCHECKPT
getcfcheckpt requests evenly spaced compact filter headers, enabling parallelized download and valida...
Definition: protocol.cpp:50
std::atomic< ServiceFlags > nServices
Definition: net.h:815
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT
Protect at least this many outbound peers from disconnection due to slow/behind headers chain...
const std::vector< CTxIn > vin
Definition: transaction.h:227
void SetAddrLocal(const CService &addrLocalIn)
May not be called more than once.
Definition: net.cpp:527
std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept
Definition: random.cpp:646
std::deque< CInv > vRecvGetData
Definition: net.h:833
const char * INV
The inv message (inventory message) transmits one or more inventories of objects known to the transmi...
Definition: protocol.cpp:24
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL
Maximum item that can be polled at once.
Definition: processor.h:46
bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
bool ForNode(NodeId id, std::function< bool(CNode *pnode)> func)
Definition: net.cpp:3092
CChainState & ChainstateActive()
Definition: validation.cpp:72
void GetRandBytes(uint8_t *buf, int num) noexcept
Overall design of the RNG and entropy sources.
Definition: random.cpp:625
static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY
Maximum delay (in microseconds) for transaction requests to avoid biasing some peers over others...
void check(const CCoinsViewCache *pcoins) const
If sanity-checking is turned on, check makes sure the pool is consistent (does not contain two transa...
Definition: txmempool.cpp:702
static constexpr int32_t MAX_PEER_TX_IN_FLIGHT
Maximum number of in-flight transactions from a peer.
bool DisconnectNode(const std::string &node)
Definition: net.cpp:2855
static const Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 *SATOSHI)
Default for -minrelaytxfee, minimum relay fee for transactions.
bool IsAddrRelayPeer() const
Definition: net.h:946
bool MoneyRange(const Amount nValue)
Definition: amount.h:166
uint64_t nRemoteExtraEntropy
Definition: net.h:849
bool done
bool IsDiscouraged(const CNetAddr &net_addr)
Return whether net_addr is discouraged.
Definition: banman.cpp:84
bool fSentAddr
Definition: net.h:872
BlockHash GetBlockHash() const
Definition: blockindex.h:133
bool IsValid() const
Definition: validation.h:116
Response response
Definition: processor.cpp:247
std::atomic< int64_t > nPingUsecTime
Definition: net.h:1011
BlockFilterType
Definition: blockfilter.h:88
std::string ToString(const T &t)
Locale-independent version of std::to_string.
Definition: string.h:67
static bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message="")
Potentially ban a node based on the contents of a BlockValidationState object.
std::atomic< int64_t > nMinPingUsecTime
Definition: net.h:1013
int GetMyStartingHeight() const
Definition: net.h:1070
#define LOCK2(cs1, cs2)
Definition: sync.h:233
initial value. Block has not yet been rejected
void ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, int64_t nTimeReceived, const std::atomic< bool > &interruptMsgProc)
Process a single message from a peer.
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL
How frequently to check for extra outbound peers and disconnect, in seconds.
ServiceFlags GetLocalServices() const
Definition: net.h:1146
static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE
How many non-standard orphans we consider from a node before ignoring it.
Amount GetFeePerK() const
Return the fee in satoshis for a size of 1000 bytes.
Definition: feerate.h:54
bool fClient
Definition: net.h:865
static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB
Maximum number of inventory items to send per transmission.
Used to create a Merkle proof (usually from a subset of transactions), which consists of a block head...
Definition: merkleblock.h:159
const char * GETHEADERS
The getheaders message requests a headers message that provides block headers starting from a particu...
Definition: protocol.cpp:28
std::vector< std::pair< size_t, uint256 > > vMatchedTxn
Public only for unit testing and relay testing (not relayed).
Definition: merkleblock.h:171
BlockHash hashContinue
Definition: net.h:935
Amount GetFee(size_t nBytes) const
Return the fee in satoshis for the given size in bytes.
Definition: feerate.cpp:51
BanMan *const m_banman
Pointer to this node's banman.
virtual uint64_t GetMaxBlockSize() const =0
Definition: config.h:19
size_type size() const
Definition: streams.h:279
Invalid by a recent change to consensus rules.
std::unique_ptr< CRollingBloomFilter > m_addr_known
Definition: net.h:940
static constexpr int64_t ORPHAN_TX_EXPIRE_TIME
Expiration time for orphan transactions in seconds.
size_t nProcessQueueSize
Definition: net.h:829
static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
std::unordered_map< NodeId, uint32_t > rejectCountPerNode
void FinalizeNode(const Config &config, NodeId nodeid, bool &fUpdateConnectionTime) override
Handle removal of a peer by updating various state and removing it from mapNodeState.
CFeeRate minRelayTxFee
A fee rate smaller than this is considered zero fee (for relaying, mining and transaction creation) ...
Definition: validation.cpp:115
CBlockIndex * pindexBestHeader
Best header we've seen so far (used for getheaders queries' starting points).
Definition: validation.cpp:97
std::vector< CTransactionRef > txn
static RecursiveMutex cs_most_recent_block
this block was cached as being invalid and we didn't store the reason why
An input of a transaction.
Definition: transaction.h:67
static bool PrepareBlockFilterRequest(CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type, uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff, const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index)
Validation logic for compact filters request handling.
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS
Minimum blocks required to signal NODE_NETWORK_LIMITED.
static bool HasAllDesirableServiceFlags(ServiceFlags services)
A shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services)...
Definition: protocol.h:412
bool MaybeDiscourageAndDisconnect(CNode &pnode)
Maybe disconnect a peer and discourage future connections from its address.
#define LOCK(cs)
Definition: sync.h:230
const char * name
Definition: rest.cpp:43
std::string ToString() const
Definition: validation.h:122
static const std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL
Average delay between peer address broadcasts.
the block failed to meet one of our checkpoints
bool IsPeerAddrLocalGood(CNode *pnode)
Definition: net.cpp:193
static const int INIT_PROTO_VERSION
initial proto version, to be increased after version/verack negotiation
Definition: version.h:14
bool Contains(const CBlockIndex *pindex) const
Efficiently check whether a block is present in this chain.
Definition: chain.h:190
A combination of a network address (CNetAddr) and a (TCP) port.
Definition: netaddress.h:179
Fast randomness source.
Definition: random.h:111
Transport protocol agnostic message container.
Definition: net.h:683
bool g_relay_txes
Definition: net.cpp:111
static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY
Maximum feefilter broadcast delay after significant change.
std::unique_ptr< avalanche::Processor > g_avalanche
Global avalanche instance.
Definition: processor.cpp:28
int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds)
Attempts to obfuscate tx time through exponentially distributed emitting.
Definition: net.cpp:3104
void scheduleEvery(Predicate p, std::chrono::milliseconds delta)
Repeat p until it returns false.
Definition: scheduler.cpp:127
bool OutboundTargetReached(bool historicalBlockServingLimit)
check if the outbound target is reached.
Definition: net.cpp:2950
An encapsulated public key.
Definition: pubkey.h:31
int64_t nPowTargetSpacing
Definition: params.h:104
static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE
Headers download timeout expressed in microseconds.
std::vector< CAddress > GetAddresses()
Definition: net.cpp:2799
CBlockIndex * Next(const CBlockIndex *pindex) const
Find the successor of a block in this chain, or nullptr if the given index is not found or is the tip...