Bitcoin ABC 0.33.3
P2P Digital Currency
peermanager.cpp
Go to the documentation of this file.
1// Copyright (c) 2020 The Bitcoin developers
2// Distributed under the MIT software license, see the accompanying
3// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
6
12#include <cashaddrenc.h>
13#include <common/args.h>
15#include <logging.h>
16#include <random.h>
17#include <scheduler.h>
18#include <threadsafety.h>
19#include <uint256.h>
20#include <util/fastrange.h>
21#include <util/fs_helpers.h>
22#include <util/strencodings.h>
23#include <util/time.h>
24#include <validation.h> // For ChainstateManager
25
26#include <algorithm>
27#include <cassert>
28#include <limits>
29
30namespace avalanche {
31static constexpr uint64_t PEERS_DUMP_VERSION{1};
32
33bool PeerManager::addNode(NodeId nodeid, const ProofId &proofid,
34 size_t max_elements) {
35 auto &pview = peers.get<by_proofid>();
36 auto it = pview.find(proofid);
37 if (it == pview.end()) {
38 // If the node exists, it is actually updating its proof to an unknown
39 // one. In this case we need to remove it so it is not both active and
40 // pending at the same time.
41 removeNode(nodeid);
42 pendingNodes.emplace(proofid, nodeid, max_elements);
43 return false;
44 }
45
46 return addOrUpdateNode(peers.project<0>(it), nodeid, max_elements);
47}
48
// Bind `nodeid` to the peer referenced by `it`, creating the Node entry or
// re-pointing an existing one, then clear any pending/dangling state for the
// peer's proof. Returns true on success.
bool PeerManager::addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid,
                                  size_t max_elements) {
    assert(it != peers.end());

    const PeerId peerid = it->peerid;

    auto nit = nodes.find(nodeid);
    if (nit == nodes.end()) {
        // Unknown node: create it already bound to this peer.
        if (!nodes.emplace(nodeid, peerid, max_elements).second) {
            return false;
        }
    } else {
        const PeerId oldpeerid = nit->peerid;
        if (!nodes.modify(nit, [&](Node &n) { n.peerid = peerid; })) {
            return false;
        }

        // We actually have this node already, we need to update it.
        bool success = removeNodeFromPeer(peers.find(oldpeerid));
        assert(success);
    }

    // Then increase the node counter, and create the slot if needed
    bool success = addNodeToPeer(it);
    assert(success);

    // If the added node was in the pending set, remove it
    pendingNodes.get<by_nodeid>().erase(nodeid);

    // If the proof was in the dangling pool, remove it
    const ProofId &proofid = it->getProofId();
    if (danglingProofPool.getProof(proofid)) {
        // NOTE(review): the statement removing the proof from
        // danglingProofPool is missing from this capture — confirm against
        // the upstream source.
    }

    // We know for sure there is at least 1 node. Note that this can fail if
    // there is more than 1, in this case it's a no-op.
    shareableProofs.insert(it->proof);

    return true;
}
90
91bool PeerManager::addNodeToPeer(const PeerSet::iterator &it) {
92 assert(it != peers.end());
93 return peers.modify(it, [&](Peer &p) {
94 if (p.node_count++ > 0) {
95 // We are done.
96 return;
97 }
98
99 // We need to allocate this peer.
100 p.index = uint32_t(slots.size());
101 const uint32_t score = p.getScore();
102 const uint64_t start = slotCount;
103 slots.emplace_back(start, score, it->peerid);
104 slotCount = start + score;
105
106 // Add to our allocated score when we allocate a new peer in the slots
107 connectedPeersScore += score;
108 });
109}
110
// NOTE(review): the function signature is missing from this capture; per the
// body this is the tail of PeerManager::removeNode(NodeId nodeid) — confirm
// against the upstream source.
    // Remove all the remote proofs from this node
    auto &remoteProofsView = remoteProofs.get<by_nodeid>();
    auto [begin, end] = remoteProofsView.equal_range(nodeid);
    remoteProofsView.erase(begin, end);

    if (pendingNodes.get<by_nodeid>().erase(nodeid) > 0) {
        // If this was a pending node, there is nothing else to do.
        return true;
    }

    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        // Not a known node either.
        return false;
    }

    const PeerId peerid = it->peerid;
    nodes.erase(it);

    // Keep the track of the reference count.
    bool success = removeNodeFromPeer(peers.find(peerid));
    assert(success);

    return true;
}
136
137bool PeerManager::removeNodeFromPeer(const PeerSet::iterator &it,
138 uint32_t count) {
139 // It is possible for nodes to be dangling. If there was an inflight query
140 // when the peer gets removed, the node was not erased. In this case there
141 // is nothing to do.
142 if (it == peers.end()) {
143 return true;
144 }
145
146 assert(count <= it->node_count);
147 if (count == 0) {
148 // This is a NOOP.
149 return false;
150 }
151
152 const uint32_t new_count = it->node_count - count;
153 if (!peers.modify(it, [&](Peer &p) { p.node_count = new_count; })) {
154 return false;
155 }
156
157 if (new_count > 0) {
158 // We are done.
159 return true;
160 }
161
162 // There are no more nodes left, we need to clean up. Remove from the radix
163 // tree (unless it's our local proof), subtract allocated score and remove
164 // from slots.
165 if (!localProof || it->getProofId() != localProof->getId()) {
166 const auto removed = shareableProofs.remove(it->getProofId());
167 assert(removed);
168 }
169
170 const size_t i = it->index;
171 assert(i < slots.size());
172 assert(connectedPeersScore >= slots[i].getScore());
173 connectedPeersScore -= slots[i].getScore();
174
175 if (i + 1 == slots.size()) {
176 slots.pop_back();
177 slotCount = slots.empty() ? 0 : slots.back().getStop();
178 } else {
179 fragmentation += slots[i].getScore();
180 slots[i] = slots[i].withPeerId(NO_PEER);
181 }
182
183 return true;
184}
185
// NOTE(review): the first signature line is missing from this capture; per
// the body this is the tail of PeerManager::updateNextRequestTime(
// NodeId nodeid, ...) — confirm against the upstream source.
                                        SteadyMilliseconds timeout,
                                        uint64_t round) {
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        // Unknown node, nothing to update.
        return false;
    }

    // Record when the node may next be polled and which round it relates to.
    return nodes.modify(it, [&](Node &n) {
        n.nextRequestTime = timeout;
        n.last_round = round;
    });
}
199
// NOTE(review): the first signature line is missing from this capture; per
// the body this takes a NodeId and a poll Response and applies the response
// cooldown — confirm the exact function name against the upstream source.
                                 const Response &response) {
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        // Unknown node.
        return false;
    }

    if (it->last_round > response.getRound()) {
        // This is a response for a previous round, ignore it.
        return false;
    }

    // Apply the cooldown requested by the remote before the next poll.
    auto timeout = Now<SteadyMilliseconds>() +
                   std::chrono::milliseconds(response.getCooldown());

    return nodes.modify(it, [&](Node &n) {
        n.nextRequestTime = timeout;
        n.last_round = response.getRound();
    });
}
220
// NOTE(review): the function signature is missing from this capture; per the
// body this latches a node's avaproofsSent flag, returning true only on the
// first transition — confirm the name against the upstream source.
    auto it = nodes.find(nodeid);
    if (it == nodes.end()) {
        // Unknown node.
        return false;
    }

    // True only if the flag was not already set and the modify succeeded.
    return !it->avaproofsSent &&
           nodes.modify(it, [&](Node &n) { n.avaproofsSent = true; });
}
230
// File-local helper: tells whether a proof failed validation only because it
// is immature (i.e. it may become valid later).
static bool isImmatureState(const ProofValidationState &state) {
    // NOTE(review): the return statement is missing from this capture —
    // presumably it compares state.GetResult() against the immature result
    // code(s); confirm against the upstream source.
}
234
// NOTE(review): the first signature line is missing from this capture; per
// the body this updates a peer's nextPossibleConflictTime, never moving it
// backwards — confirm the exact name against the upstream source.
                                 PeerId peerid, const std::chrono::seconds &nextTime) {
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        // No such peer
        return false;
    }

    // Make sure we don't move the time in the past.
    peers.modify(it, [&](Peer &p) {
        // NOTE(review): the assignment target line is missing from this
        // capture — as written the std::max result is discarded; upstream
        // presumably assigns it to p.nextPossibleConflictTime.
        std::max(p.nextPossibleConflictTime, nextTime);
    });

    // True when nextTime actually became the stored value (i.e. it was not in
    // the past with respect to the previous value).
    return it->nextPossibleConflictTime == nextTime;
}
251
// NOTE(review): the function signature is missing from this capture; per the
// body this marks a peer as finalized (takes a PeerId, returns bool) —
// confirm against the upstream source.
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        // No such peer
        return false;
    }

    // Latch the finalization status on the peer record.
    peers.modify(it, [&](Peer &p) { p.hasFinalized = true; });

    return true;
}
263
// For each proof in the container, drop the peer currently bound to it (if
// any) so the proof can be demoted out of the valid pool.
template <typename ProofContainer>
void PeerManager::moveToConflictingPool(const ProofContainer &proofs) {
    auto &peersView = peers.get<by_proofid>();
    for (const ProofRef &proof : proofs) {
        auto it = peersView.find(proof->getId());
        if (it != peersView.end()) {
            removePeer(it->peerid);
        }

        // NOTE(review): the statement adding the proof to the conflicting
        // pool is missing from this capture — confirm against the upstream
        // source.
    }
}
276
// NOTE(review): the first signature line is missing from this capture; per
// the body this is PeerManager::registerProof(const ProofRef &proof, ...).
// Several statements are also missing below (marked inline) — confirm all of
// them against the upstream source.
                                ProofRegistrationState &registrationState,
                                RegistrationMode mode) {
    assert(proof);

    const ProofId &proofid = proof->getId();

    // Helper tagging the registration state invalid with a proofid-bearing
    // debug message.
    auto invalidate = [&](ProofRegistrationResult result,
                          const std::string &message) {
        return registrationState.Invalid(
            result, message, strprintf("proofid: %s", proofid.ToString()));
    };

    if ((mode != RegistrationMode::FORCE_ACCEPT ||
         !isInConflictingPool(proofid)) &&
        exists(proofid)) {
        // In default mode, we expect the proof to be unknown, i.e. in none of
        // the pools.
        // In forced accept mode, the proof can be in the conflicting pool.
        // NOTE(review): the head of the invalidate(...) call is missing from
        // this capture; only its last argument remains below.
                          "proof-already-registered");
    }

    if (danglingProofPool.getProof(proofid) &&
        pendingNodes.count(proofid) == 0) {
        // Don't attempt to register a proof that we already evicted because it
        // was dangling, but rather attempt to retrieve an associated node.
        needMoreNodes = true;
        return invalidate(ProofRegistrationResult::DANGLING, "dangling-proof");
    }

    // Check the proof's validity.
    ProofValidationState validationState;
    if (!WITH_LOCK(cs_main, return proof->verify(stakeUtxoDustThreshold,
                                                 chainman, validationState))) {
        if (isImmatureState(validationState)) {
            // NOTE(review): the lines inserting the proof into the immature
            // pool (and the eviction condition) are missing from this
            // capture.
                // Adding this proof exceeds the immature pool limit, so evict
                // the lowest scoring proof.
            }

            return invalidate(ProofRegistrationResult::IMMATURE,
                              "immature-proof");
        }

        // NOTE(review): the right-hand side of this comparison and the
        // invalidate(...) call head are missing from this capture.
        if (validationState.GetResult() ==
                              "utxo-missing-or-spent");
        }

        // Reject invalid proof.
        return invalidate(ProofRegistrationResult::INVALID, "invalid-proof");
    }

    auto now = GetTime<std::chrono::seconds>();
    // NOTE(review): the default-value argument and closing parentheses of the
    // GetIntArg call are missing from this capture.
    auto nextCooldownTimePoint =
        now + std::chrono::seconds(gArgs.GetIntArg(
                  "-avalancheconflictingproofcooldown",

    ProofPool::ConflictingProofSet conflictingProofs;
    switch (validProofPool.addProofIfNoConflict(proof, conflictingProofs)) {
        case ProofPool::AddProofStatus::REJECTED: {
            if (mode != RegistrationMode::FORCE_ACCEPT) {
                auto bestPossibleConflictTime = std::chrono::seconds(0);
                auto &pview = peers.get<by_proofid>();
                for (auto &conflictingProof : conflictingProofs) {
                    auto it = pview.find(conflictingProof->getId());
                    assert(it != pview.end());

                    // Search the most recent time over the peers
                    bestPossibleConflictTime = std::max(
                        bestPossibleConflictTime, it->nextPossibleConflictTime);

                    // NOTE(review): the call head (presumably updating the
                    // peer's next possible conflict time) is missing from
                    // this capture.
                        nextCooldownTimePoint);
                }

                if (bestPossibleConflictTime > now) {
                    // Cooldown not elapsed, reject the proof.
                    // NOTE(review): the result-code argument of this
                    // invalidate call is missing from this capture.
                    return invalidate(
                        "cooldown-not-elapsed");
                }

                // Give the proof a chance to replace the conflicting ones.
                // NOTE(review): the condition line (presumably an
                // addProofIfPreferred call) is missing from this capture.
                    // If we have overridden other proofs due to conflict,
                    // remove the peers and attempt to move them to the
                    // conflicting pool.
                    moveToConflictingPool(conflictingProofs);

                    // Replacement is successful, continue to peer creation
                    break;
                }

                // Not the preferred proof, or replacement is not enabled
                // NOTE(review): the ternary-style invalidate call is garbled
                // by missing lines in this capture.
                        ProofPool::AddProofStatus::REJECTED
                        "rejected-proof")
                        "conflicting-utxos");
            }

            // NOTE(review): a closing brace / statement line is missing here.

            // Move the conflicting proofs from the valid pool to the
            // conflicting pool
            moveToConflictingPool(conflictingProofs);

            auto status = validProofPool.addProofIfNoConflict(proof);
            assert(status == ProofPool::AddProofStatus::SUCCEED);

            break;
        }
        case ProofPool::AddProofStatus::DUPLICATED:
            // If the proof was already in the pool, don't duplicate the peer.
            // NOTE(review): the invalidate(...) call head is missing from
            // this capture.
                              "proof-already-registered");
        case ProofPool::AddProofStatus::SUCCEED:
            break;

        // No default case, so the compiler can warn about missing cases
    }

    // At this stage we are going to create a peer so the proof should never
    // exist in the conflicting pool, but use belt and suspenders.
    // NOTE(review): the enforcing statement (presumably an assert or a
    // conflicting-pool removal) is missing from this capture.

    // New peer means new peerid!
    const PeerId peerid = nextPeerId++;

    // We have no peer for this proof, time to create it.
    auto inserted = peers.emplace(peerid, proof, nextCooldownTimePoint);
    assert(inserted.second);

    if (localProof && proof->getId() == localProof->getId()) {
        // Add it to the shareable proofs even if there is no node, we are the
        // node. Otherwise it will be inserted after a node is attached to the
        // proof.
        shareableProofs.insert(proof);
    }

    // Add to our registered score when adding to the peer list
    totalPeersScore += proof->getScore();

    // If there are nodes waiting for this proof, add them
    auto &pendingNodesView = pendingNodes.get<by_proofid>();
    auto range = pendingNodesView.equal_range(proofid);

    // We want to update the nodes then remove them from the pending set. That
    // will invalidate the range iterators, so we need to save the node ids
    // first before we can loop over them.
    std::vector<std::pair<NodeId, size_t>> nodeids_and_max_elements;
    nodeids_and_max_elements.reserve(std::distance(range.first, range.second));
    std::transform(range.first, range.second,
                   std::back_inserter(nodeids_and_max_elements),
                   [](const PendingNode &n) {
                       return std::make_pair(n.nodeid, n.max_elements);
                   });

    for (const auto &[nodeid, max_elements] : nodeids_and_max_elements) {
        addOrUpdateNode(inserted.first, nodeid, max_elements);
    }

    // NOTE(review): the guarding condition line is missing from this capture
    // (presumably a staking-preconsensus activation check).
        addStakeContender(proof);
    }

    return true;
}
454
// NOTE(review): the function signature is missing from this capture; per the
// body this is PeerManager::rejectProof(const ProofId &proofid,
// RejectionMode mode) — confirm against the upstream source. Several
// condition/statement lines are also missing below (marked inline).
    if (isDangling(proofid) && mode == RejectionMode::INVALIDATE) {
        // NOTE(review): a statement is missing here (presumably removing the
        // proof from the dangling pool).
        return true;
    }

    if (!exists(proofid)) {
        // Unknown proof, nothing to reject.
        return false;
    }

    if (immatureProofPool.removeProof(proofid)) {
        return true;
    }

    // NOTE(review): the tail of this condition is missing from this capture
    // (presumably a removal from the conflicting pool check).
    if (mode == RejectionMode::DEFAULT &&
        // In default mode we keep the proof in the conflicting pool
        return true;
    }

    // NOTE(review): the tail of this condition is missing from this capture.
    if (mode == RejectionMode::INVALIDATE &&
        // In invalidate mode we remove the proof completely
        return true;
    }

    auto &pview = peers.get<by_proofid>();
    auto it = pview.find(proofid);
    assert(it != pview.end());

    const ProofRef proof = it->proof;

    if (!removePeer(it->peerid)) {
        return false;
    }

    // If there was conflicting proofs, attempt to pull them back
    for (const SignedStake &ss : proof->getStakes()) {
        const ProofRef conflictingProof =
            conflictingProofPool.getProof(ss.getStake().getUTXO());
        if (!conflictingProof) {
            continue;
        }

        conflictingProofPool.removeProof(conflictingProof->getId());
        registerProof(conflictingProof);
    }

    if (mode == RejectionMode::DEFAULT) {
        // NOTE(review): a statement is missing here (presumably moving the
        // rejected proof to the conflicting pool).
    }

    return true;
}
509
// NOTE(review): the first signature line is missing from this capture; per
// the body this is PeerManager::cleanupDanglingProofs(...), filling
// registeredProofs with proofs pulled back from the dangling pool — confirm
// against the upstream source.
        std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
    registeredProofs.clear();
    const auto now = GetTime<std::chrono::seconds>();

    std::vector<ProofRef> newlyDanglingProofs;
    for (const Peer &peer : peers) {
        // If the peer is not our local proof, has been registered for some
        // time and has no node attached, discard it.
        if ((!localProof || peer.getProofId() != localProof->getId()) &&
            peer.node_count == 0 &&
            (peer.registration_time + Peer::DANGLING_TIMEOUT) <= now) {
            // Check the remotes status to determine if we should set the proof
            // as dangling. This prevents from dropping a proof on our own due
            // to a network issue. If the remote presence status is inconclusive
            // we assume our own position (missing = false).
            if (!getRemotePresenceStatus(peer.getProofId()).value_or(false)) {
                newlyDanglingProofs.push_back(peer.proof);
            }
        }
    }

    // Similarly, check if we have dangling proofs that could be pulled back
    // because the network says so.
    std::vector<ProofRef> previouslyDanglingProofs;
    danglingProofPool.forEachProof([&](const ProofRef &proof) {
        if (getRemotePresenceStatus(proof->getId()).value_or(false)) {
            previouslyDanglingProofs.push_back(proof);
        }
    });
    for (const ProofRef &proof : previouslyDanglingProofs) {
        danglingProofPool.removeProof(proof->getId());
        if (registerProof(proof)) {
            registeredProofs.insert(proof);
        }
    }

    for (const ProofRef &proof : newlyDanglingProofs) {
        rejectProof(proof->getId(), RejectionMode::INVALIDATE);
        // NOTE(review): the condition line (presumably adding the proof to
        // the dangling pool) and the log call head are missing from this
        // capture.
            // If the proof is added, it means there is no better conflicting
            // dangling proof and this is not a duplicated, so it's worth
            // printing a message to the log.
                     "Proof dangling for too long (no connected node): %s\n",
                     proof->getId().GetHex());
        }
    }

    // If we have dangling proof, this is a good indicator that we need to
    // request more nodes from our peers.
    needMoreNodes = !newlyDanglingProofs.empty();
}
563
// NOTE(review): the function signature is missing from this capture; per the
// body this selects a pollable node id (returning NO_NODE when none is
// available) — confirm against the upstream source.
    for (int retry = 0; retry < SELECT_NODE_MAX_RETRY; retry++) {
        const PeerId p = selectPeer();

        // If we cannot find a peer, it may be due to the fact that it is
        // unlikely due to high fragmentation, so compact and retry.
        if (p == NO_PEER) {
            compact();
            continue;
        }

        // See if that peer has an available node.
        auto &nview = nodes.get<next_request_time>();
        auto it = nview.lower_bound(boost::make_tuple(p, SteadyMilliseconds()));
        if (it != nview.end() && it->peerid == p &&
            it->nextRequestTime <= Now<SteadyMilliseconds>()) {
            return it->nodeid;
        }
    }

    // We failed to find a node to query, flag this so we can request more
    needMoreNodes = true;

    return NO_NODE;
}
589
// Re-validate all known proofs after a chain tip change: proofs that fail
// verification are invalidated, proofs that became immature are re-queued,
// and the immature pool is rescanned. Returns the set of proofs that got
// (re)registered by the rescan.
std::unordered_set<ProofRef, SaltedProofHasher> PeerManager::updatedBlockTip() {
    std::vector<ProofId> invalidProofIds;
    std::vector<ProofRef> newImmatures;

    {
        LOCK(cs_main);

        for (const auto &p : peers) {
            // NOTE(review): the declaration of the validation state object is
            // missing from this capture.
            if (!p.proof->verify(stakeUtxoDustThreshold, chainman, state)) {
                if (isImmatureState(state)) {
                    newImmatures.push_back(p.proof);
                }
                invalidProofIds.push_back(p.getProofId());

                // NOTE(review): the log call head is missing from this
                // capture.
                         "Invalidating proof %s: verification failed (%s)\n",
                         p.proof->getId().GetHex(), state.ToString());
            }
        }

        // Disable thread safety analysis here because it does not play nicely
        // with the lambda
        // NOTE(review): the call head iterating the dangling pool (and the
        // per-proof state declaration) are missing from this capture.
            [&](const ProofRef &proof) NO_THREAD_SAFETY_ANALYSIS {
                if (!proof->verify(stakeUtxoDustThreshold, chainman, state)) {
                    invalidProofIds.push_back(proof->getId());

                    // NOTE(review): the log category argument line is missing
                    // from this capture.
                    LogPrint(
                        "Invalidating dangling proof %s: verification failed "
                        "(%s)\n",
                        proof->getId().GetHex(), state.ToString());
                }
            });
    }

    // Remove the invalid proofs before the immature rescan. This makes it
    // possible to pull back proofs with utxos that conflicted with these
    // invalid proofs.
    for (const ProofId &invalidProofId : invalidProofIds) {
        rejectProof(invalidProofId, RejectionMode::INVALIDATE);
    }

    auto registeredProofs = immatureProofPool.rescan(*this);

    for (auto &p : newImmatures) {
        // NOTE(review): the statement handling each newly immature proof is
        // missing from this capture (presumably inserting it into the
        // immature pool).
    }

    return registeredProofs;
}
644
// NOTE(review): the function signature is missing from this capture; per the
// body this looks a proof up by id, checking the peer set first, then the
// conflicting and immature pools — confirm against the upstream source.
    ProofRef proof;

    // First check whether the proof is bound to a peer.
    forPeer(proofid, [&](const Peer &p) {
        proof = p.proof;
        return true;
    });

    if (!proof) {
        proof = conflictingProofPool.getProof(proofid);
    }

    if (!proof) {
        proof = immatureProofPool.getProof(proofid);
    }

    // Null if the proof is unknown in all three places.
    return proof;
}
663
664bool PeerManager::isBoundToPeer(const ProofId &proofid) const {
665 auto &pview = peers.get<by_proofid>();
666 return pview.find(proofid) != pview.end();
667}
668
669bool PeerManager::isImmature(const ProofId &proofid) const {
670 return immatureProofPool.getProof(proofid) != nullptr;
671}
672
673bool PeerManager::isInConflictingPool(const ProofId &proofid) const {
674 return conflictingProofPool.getProof(proofid) != nullptr;
675}
676
677bool PeerManager::isDangling(const ProofId &proofid) const {
678 return danglingProofPool.getProof(proofid) != nullptr;
679}
680
681void PeerManager::setInvalid(const ProofId &proofid) {
682 invalidProofs.insert(proofid);
683}
684
685bool PeerManager::isInvalid(const ProofId &proofid) const {
686 return invalidProofs.contains(proofid);
687}
688
691}
692
// Record that `nodeid` announced the proof `proofid` with the given presence
// status. Any previous record for the same (proofid, nodeid) pair is erased
// first so the re-insertion refreshes its lastUpdate timestamp, and a
// per-node cap bounds how many remote proofs a single node may store.
// Returns true if the record was (re)inserted.
bool PeerManager::saveRemoteProof(const ProofId &proofid, const NodeId nodeid,
                                  const bool present) {
    if (present && isStakingPreconsensusActivated() && isBoundToPeer(proofid) &&
        !isRemotelyPresentProof(proofid)) {
        // If this is the first time this peer's proof becomes a remote proof of
        // any node, ensure it is included in the contender cache. There is a
        // special case where the contender cache can lose track of a proof if
        // it is not saved as a remote proof before the next finalized block
        // (triggering promotion, where non-remote cache entries are dropped).
        // This does not happen in the hot path since receiving a proof
        // immediately saves it as a remote, however it becomes more likely if
        // the proof was loaded from a file (-persistavapeers) or added via RPC.
        addStakeContender(getProof(proofid));
    }

    // Get how many proofs this node has announced
    auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
    auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);

    // Limit the number of proofs a single node can save:
    // - At least MAX_REMOTE_PROOFS
    // - Up to 2x as much as we have
    // The MAX_REMOTE_PROOFS minimum is there to ensure we don't overlimit at
    // startup when we don't have proofs yet.
    while (size_t(std::distance(begin, end)) >=
           std::max(MAX_REMOTE_PROOFS, 2 * peers.size())) {
        // Remove the proof with the oldest update time
        begin = remoteProofsByLastUpdate.erase(begin);
    }

    // Drop any stale record for this exact (proofid, nodeid) pair so the
    // emplace below stores a fresh timestamp and presence flag.
    auto it = remoteProofs.find(boost::make_tuple(proofid, nodeid));
    if (it != remoteProofs.end()) {
        remoteProofs.erase(it);
    }

    return remoteProofs
        .emplace(RemoteProof{proofid, nodeid, GetTime<std::chrono::seconds>(),
                             present})
        .second;
}
733
// Return a copy of every remote proof record announced by the given node.
std::vector<RemoteProof>
// NOTE(review): the line carrying the function name and parameter list is
// missing from this capture (presumably
// PeerManager::getRemoteProofs(const NodeId nodeid) const) — confirm against
// the upstream source.
    std::vector<RemoteProof> nodeRemoteProofs;

    auto &remoteProofsByLastUpdate = remoteProofs.get<by_lastUpdate>();
    auto [begin, end] = remoteProofsByLastUpdate.equal_range(nodeid);

    // Copy each record for this node out of the index.
    for (auto &it = begin; it != end; it++) {
        nodeRemoteProofs.emplace_back(*it);
    }

    return nodeRemoteProofs;
}
747
748bool PeerManager::hasRemoteProofStatus(const ProofId &proofid) const {
749 auto &view = remoteProofs.get<by_proofid>();
750 return view.count(proofid) > 0;
751}
752
// NOTE(review): the function signature is missing from this capture; per the
// body this reports whether at least one node currently announces the proof
// as present — confirm the name against the upstream source.
    auto &view = remoteProofs.get<by_proofid>();
    auto [begin, end] = view.equal_range(proofid);
    // Scan all remote records for this proof; one `present` flag suffices.
    return std::any_of(begin, end, [](const auto &remoteProof) {
        return remoteProof.present;
    });
}
760
// Remove a peer entirely: detach its nodes (re-queueing idle ones as
// pending), release the proof's UTXOs from the valid pool, drop the proof
// from the radix tree and the unbroadcast set, and subtract its score.
// Returns false if the peer does not exist.
bool PeerManager::removePeer(const PeerId peerid) {
    auto it = peers.find(peerid);
    if (it == peers.end()) {
        return false;
    }

    // Remove all nodes from this peer.
    removeNodeFromPeer(it, it->node_count);

    auto &nview = nodes.get<next_request_time>();

    // Add the nodes to the pending set
    auto range = nview.equal_range(peerid);
    for (auto &nit = range.first; nit != range.second; ++nit) {
        pendingNodes.emplace(it->getProofId(), nit->nodeid, nit->maxElements);
    };

    // Remove nodes associated with this peer, unless their timeout is still
    // active. This ensure that we don't overquery them in case they are
    // subsequently added to another peer.
    nview.erase(
        nview.lower_bound(boost::make_tuple(peerid, SteadyMilliseconds())),
        nview.upper_bound(
            boost::make_tuple(peerid, Now<SteadyMilliseconds>())));

    // Release UTXOs attached to this proof.
    validProofPool.removeProof(it->getProofId());

    // If there were nodes attached, remove from the radix tree as well
    // NOTE(review): the result is deliberately not asserted here —
    // presumably because removal may legitimately fail when no node was
    // attached; confirm against the upstream source.
    auto removed = shareableProofs.remove(Uint256RadixKey(it->getProofId()));

    m_unbroadcast_proofids.erase(it->getProofId());

    // Remove the peer from the PeerSet and remove its score from the registered
    // score total.
    assert(totalPeersScore >= it->getScore());
    totalPeersScore -= it->getScore();
    peers.erase(it);
    return true;
}
801
// NOTE(review): the function signature is missing from this capture; per the
// body this draws a random PeerId weighted by slot score, retrying a bounded
// number of times — confirm against the upstream source.
    if (slots.empty() || slotCount == 0) {
        // No allocated slot, so no peer can be selected.
        return NO_PEER;
    }

    const uint64_t max = slotCount;
    for (int retry = 0; retry < SELECT_PEER_MAX_RETRY; retry++) {
        // Pick a random point in the slot space and map it to a peer.
        size_t i =
            selectPeerImpl(slots, FastRandomContext().randrange(max), max);
        if (i != NO_PEER) {
            return i;
        }
    }

    return NO_PEER;
}
818
// NOTE(review): the function signature is missing from this capture; per the
// body this rebuilds the slot vector without dead slots and returns the
// amount of slot space reclaimed — confirm against the upstream source.
    // There is nothing to compact.
    if (fragmentation == 0) {
        return 0;
    }

    std::vector<Slot> newslots;
    newslots.reserve(peers.size());

    uint64_t prevStop = 0;
    uint32_t i = 0;
    for (auto it = peers.begin(); it != peers.end(); it++) {
        if (it->node_count == 0) {
            // Peers without nodes hold no slot.
            continue;
        }

        newslots.emplace_back(prevStop, it->getScore(), it->peerid);
        // NOTE(review): reading `slots[i]` (the OLD vector) here looks
        // suspicious — the just-built `newslots[i]` seems intended; confirm
        // against the upstream source (possible scrape corruption).
        prevStop = slots[i].getStop();
        if (!peers.modify(it, [&](Peer &p) { p.index = i++; })) {
            return 0;
        }
    }

    slots = std::move(newslots);

    // The reclaimed space is whatever the old slot count exceeded the new
    // packed layout by.
    const uint64_t saved = slotCount - prevStop;
    slotCount = prevStop;
    fragmentation = 0;

    return saved;
}
850
// NOTE(review): the function signature is missing from this capture; per the
// body this is the PeerManager self-consistency check, returning false on the
// first violation found — confirm against the upstream source.
    uint64_t prevStop = 0;
    uint32_t scoreFromSlots = 0;
    for (size_t i = 0; i < slots.size(); i++) {
        const Slot &s = slots[i];

        // Slots must be in correct order.
        if (s.getStart() < prevStop) {
            return false;
        }

        prevStop = s.getStop();

        // If this is a dead slot, then nothing more needs to be checked.
        if (s.getPeerId() == NO_PEER) {
            continue;
        }

        // We have a live slot, verify index.
        auto it = peers.find(s.getPeerId());
        if (it == peers.end() || it->index != i) {
            return false;
        }

        // Accumulate score across slots
        scoreFromSlots += slots[i].getScore();
    }

    // Score across slots must be the same as our allocated score
    if (scoreFromSlots != connectedPeersScore) {
        return false;
    }

    uint32_t scoreFromAllPeers = 0;
    uint32_t scoreFromPeersWithNodes = 0;

    // Every stake UTXO seen across all peers; used to detect duplicates and
    // dangling entries in the valid proof pool.
    std::unordered_set<COutPoint, SaltedOutpointHasher> peersUtxos;
    for (const auto &p : peers) {
        // Accumulate the score across peers to compare with total known score
        scoreFromAllPeers += p.getScore();

        // A peer should have a proof attached
        if (!p.proof) {
            return false;
        }

        // Check proof pool consistency
        for (const auto &ss : p.proof->getStakes()) {
            const COutPoint &outpoint = ss.getStake().getUTXO();
            auto proof = validProofPool.getProof(outpoint);

            if (!proof) {
                // Missing utxo
                return false;
            }
            if (proof != p.proof) {
                // Wrong proof
                return false;
            }

            if (!peersUtxos.emplace(outpoint).second) {
                // Duplicated utxo
                return false;
            }
        }

        // Count node attached to this peer.
        const auto count_nodes = [&]() {
            size_t count = 0;
            auto &nview = nodes.get<next_request_time>();
            auto begin = nview.lower_bound(
                boost::make_tuple(p.peerid, SteadyMilliseconds()));
            auto end = nview.upper_bound(
                boost::make_tuple(p.peerid + 1, SteadyMilliseconds()));

            for (auto it = begin; it != end; ++it) {
                count++;
            }

            return count;
        };

        if (p.node_count != count_nodes()) {
            return false;
        }

        // If there are no nodes attached to this peer, then we are done.
        if (p.node_count == 0) {
            continue;
        }

        scoreFromPeersWithNodes += p.getScore();
        // The index must point to a slot refering to this peer.
        if (p.index >= slots.size() || slots[p.index].getPeerId() != p.peerid) {
            return false;
        }

        // If the score do not match, same thing.
        if (slots[p.index].getScore() != p.getScore()) {
            return false;
        }

        // Check the proof is in the radix tree only if there are nodes attached
        if (((localProof && p.getProofId() == localProof->getId()) ||
             p.node_count > 0) &&
            shareableProofs.get(p.getProofId()) == nullptr) {
            return false;
        }
        if (p.node_count == 0 &&
            shareableProofs.get(p.getProofId()) != nullptr) {
            return false;
        }
    }

    // Check our accumulated scores against our registred and allocated scores
    if (scoreFromAllPeers != totalPeersScore) {
        return false;
    }
    if (scoreFromPeersWithNodes != connectedPeersScore) {
        return false;
    }

    // We checked the utxo consistency for all our peers utxos already, so if
    // the pool size differs from the expected one there are dangling utxos.
    if (validProofPool.size() != peersUtxos.size()) {
        return false;
    }

    // Check there is no dangling proof in the radix tree
    // NOTE(review): the call head starting the radix-tree traversal is
    // missing from this capture (presumably
    // `return shareableProofs.forEachLeaf([&](const ProofRef &pLeaf) {`).
        return isBoundToPeer(pLeaf->getId());
    });
}
984
985PeerId selectPeerImpl(const std::vector<Slot> &slots, const uint64_t slot,
986 const uint64_t max) {
987 assert(slot <= max);
988
989 size_t begin = 0, end = slots.size();
990 uint64_t bottom = 0, top = max;
991
992 // Try to find the slot using dichotomic search.
993 while ((end - begin) > 8) {
994 // The slot we picked in not allocated.
995 if (slot < bottom || slot >= top) {
996 return NO_PEER;
997 }
998
999 // Guesstimate the position of the slot.
1000 size_t i = begin + ((slot - bottom) * (end - begin) / (top - bottom));
1001 assert(begin <= i && i < end);
1002
1003 // We have a match.
1004 if (slots[i].contains(slot)) {
1005 return slots[i].getPeerId();
1006 }
1007
1008 // We undershooted.
1009 if (slots[i].precedes(slot)) {
1010 begin = i + 1;
1011 if (begin >= end) {
1012 return NO_PEER;
1013 }
1014
1015 bottom = slots[begin].getStart();
1016 continue;
1017 }
1018
1019 // We overshooted.
1020 if (slots[i].follows(slot)) {
1021 end = i;
1022 top = slots[end].getStart();
1023 continue;
1024 }
1025
1026 // We have an unalocated slot.
1027 return NO_PEER;
1028 }
1029
1030 // Enough of that nonsense, let fallback to linear search.
1031 for (size_t i = begin; i < end; i++) {
1032 // We have a match.
1033 if (slots[i].contains(slot)) {
1034 return slots[i].getPeerId();
1035 }
1036 }
1037
1038 // We failed to find a slot, retry.
1039 return NO_PEER;
1040}
1041
// NOTE(review): the function signature is missing from this capture; per the
// body this queues a proof id for (re)announcement — confirm the name against
// the upstream source.
    // The proof should be bound to a peer
    if (isBoundToPeer(proofid)) {
        m_unbroadcast_proofids.insert(proofid);
    }
}
1048
// NOTE(review): the function signature is missing from this capture; per the
// body this drops a proof id from the unbroadcast set — confirm the name
// against the upstream source.
    m_unbroadcast_proofids.erase(proofid);
}
1052
// NOTE(review): the first signature line is missing from this capture; per
// the body this selects the staking reward winner(s) for the block following
// pprev and fills `winners` (winner first, alternates after) — confirm
// against the upstream source.
                                 const CBlockIndex *pprev,
                                 std::vector<std::pair<ProofId, CScript>> &winners) {
    if (!pprev) {
        return false;
    }

    // Don't select proofs that have not been known for long enough, i.e. at
    // least since twice the dangling proof cleanup timeout before the last
    // block time, so we're sure to not account for proofs more recent than the
    // previous block or lacking node connected.
    // The previous block time is capped to now for the unlikely event the
    // previous block time is in the future.
    // NOTE(review): the argument lines of the three duration_cast calls below
    // are missing from this capture.
    auto registrationDelay = std::chrono::duration_cast<std::chrono::seconds>(
    auto maxRegistrationDelay =
        std::chrono::duration_cast<std::chrono::seconds>(
    auto minRegistrationDelay =
        std::chrono::duration_cast<std::chrono::seconds>(

    const int64_t refTime = std::min(pprev->GetBlockTime(), GetTime());

    const int64_t targetRegistrationTime = refTime - registrationDelay.count();
    const int64_t maxRegistrationTime = refTime - minRegistrationDelay.count();
    const int64_t minRegistrationTime = refTime - maxRegistrationDelay.count();

    const BlockHash prevblockhash = pprev->GetBlockHash();

    std::vector<ProofRef> selectedProofs;
    ProofRef firstCompliantProof = ProofRef();
    while (selectedProofs.size() < peers.size()) {
        // Track the best-ranked proof among the not-yet-selected peers.
        double bestRewardRank = std::numeric_limits<double>::max();
        ProofRef selectedProof = ProofRef();
        int64_t selectedProofRegistrationTime{0};
        StakeContenderId bestRewardHash;

        for (const Peer &peer : peers) {
            if (!peer.proof) {
                // Should never happen, continue
                continue;
            }

            if (!peer.hasFinalized ||
                peer.registration_time.count() >= maxRegistrationTime) {
                continue;
            }

            // Skip proofs that were already selected in a previous round.
            if (std::find_if(selectedProofs.begin(), selectedProofs.end(),
                             [&peer](const ProofRef &proof) {
                                 return peer.getProofId() == proof->getId();
                             }) != selectedProofs.end()) {
                continue;
            }

            StakeContenderId proofRewardHash(prevblockhash, peer.getProofId());
            if (proofRewardHash == uint256::ZERO) {
                // This either the result of an incredibly unlikely lucky hash,
                // or a the hash is getting abused. In this case, skip the
                // proof.
                LogPrintf(
                    "Staking reward hash has a suspicious value of zero for "
                    "proof %s and blockhash %s, skipping\n",
                    peer.getProofId().ToString(), prevblockhash.ToString());
                continue;
            }

            double proofRewardRank =
                proofRewardHash.ComputeProofRewardRank(peer.getScore());
            // If selectedProof is nullptr, this means that bestRewardRank is
            // MAX_DOUBLE so the comparison will always select this proof as the
            // preferred one. As a consequence it is safe to use 0 as a proofid.
            // NOTE(review): the comparator call head is missing from this
            // capture (presumably `if (RewardRankComparator()(`).
                    proofRewardHash, proofRewardRank, peer.getProofId(),
                    bestRewardHash, bestRewardRank,
                    selectedProof ? selectedProof->getId()
                                  : ProofId(uint256::ZERO))) {
                bestRewardRank = proofRewardRank;
                selectedProof = peer.proof;
                selectedProofRegistrationTime = peer.registration_time.count();
                bestRewardHash = proofRewardHash;
            }
        }

        if (!selectedProof) {
            // No winner
            break;
        }

        if (!firstCompliantProof &&
            selectedProofRegistrationTime < targetRegistrationTime) {
            firstCompliantProof = selectedProof;
        }

        selectedProofs.push_back(selectedProof);

        // Stop once we hit a proof old enough to be reliable, unless it is
        // flagged flaky (in which case keep collecting alternates).
        if (selectedProofRegistrationTime < minRegistrationTime &&
            !isFlaky(selectedProof->getId())) {
            break;
        }
    }

    winners.clear();

    if (!firstCompliantProof) {
        return false;
    }

    winners.reserve(selectedProofs.size());

    // Find the winner
    for (const ProofRef &proof : selectedProofs) {
        if (proof->getId() == firstCompliantProof->getId()) {
            winners.push_back({proof->getId(), proof->getPayoutScript()});
        }
    }
    // Add the others (if any) after the winner
    for (const ProofRef &proof : selectedProofs) {
        if (proof->getId() != firstCompliantProof->getId()) {
            winners.push_back({proof->getId(), proof->getPayoutScript()});
        }
    }

    return true;
}
1179
1180bool PeerManager::setFlaky(const ProofId &proofid) {
1181 return manualFlakyProofids.insert(proofid).second;
1182}
1183
1184bool PeerManager::unsetFlaky(const ProofId &proofid) {
1185 return manualFlakyProofids.erase(proofid) > 0;
1186}
1187
/**
 * Heuristic deciding whether a proof should be treated as "flaky", i.e.
 * unreliable enough that its selection should not immediately stop the
 * staking reward winner iteration.
 *
 * A proof is flaky if it was manually flagged via setFlaky(), if we have no
 * node attached to it, or if peers accounting for more than 30% of the total
 * peer score report it as missing. Our own local proof is never flaky.
 *
 * @param proofid The proof to evaluate.
 * @return true if the proof is considered flaky.
 */
bool PeerManager::isFlaky(const ProofId &proofid) const {
    // Our own proof is always considered reliable.
    if (localProof && proofid == localProof->getId()) {
        return false;
    }

    // Proofs explicitly flagged through setFlaky() short-circuit the
    // score-based heuristic below.
    if (manualFlakyProofids.count(proofid) > 0) {
        return true;
    }

    // If we are missing connection to this proof, consider flaky
    if (forPeer(proofid,
                [](const Peer &peer) { return peer.node_count == 0; })) {
        return true;
    }

    auto &remoteProofsByNodeId = remoteProofs.get<by_nodeid>();
    auto &nview = nodes.get<next_request_time>();

    // Map from peer id to the proofs that peer's nodes reported as absent.
    std::unordered_map<PeerId, std::unordered_set<ProofId, SaltedProofIdHasher>>
        missing_per_peer;

    // Construct a set of missing proof ids per peer
    double total_score{0};
    for (const Peer &peer : peers) {
        const PeerId peerid = peer.peerid;

        total_score += peer.getScore();

        // Walk every node attached to this peer and collect the remote
        // proofs those nodes reported with present == false.
        auto nodes_range = nview.equal_range(peerid);
        for (auto &nit = nodes_range.first; nit != nodes_range.second; ++nit) {
            auto proofs_range = remoteProofsByNodeId.equal_range(nit->nodeid);
            for (auto &proofit = proofs_range.first;
                 proofit != proofs_range.second; ++proofit) {
                if (!proofit->present) {
                    missing_per_peer[peerid].insert(proofit->proofid);
                }
            }
        };
    }

    double missing_score{0};

    // Now compute a score for the missing proof
    for (const auto &[peerid, missingProofs] : missing_per_peer) {
        if (missingProofs.size() > 3) {
            // Ignore peers with too many missing proofs
            continue;
        }

        auto pit = peers.find(peerid);
        if (pit == peers.end()) {
            // Peer not found
            continue;
        }

        // Only count this peer's score if it reports *this* proof missing.
        if (missingProofs.count(proofid) > 0) {
            missing_score += pit->getScore();
        }
    }

    // Flaky when peers holding more than 30% of the total score miss it.
    // NOTE(review): if total_score is 0 (empty peer set) this divides 0/0,
    // yielding NaN which compares false, so the function returns not-flaky;
    // confirm this is the intended behavior.
    return (missing_score / total_score) > 0.3;
}
1250
1251std::optional<bool>
1253 auto &remoteProofsView = remoteProofs.get<by_proofid>();
1254 auto [begin, end] = remoteProofsView.equal_range(proofid);
1255
1256 if (begin == end) {
1257 // No remote registered anything yet, we are on our own
1258 return std::nullopt;
1259 }
1260
1261 double total_score{0};
1262 double present_score{0};
1263 double missing_score{0};
1264
1265 for (auto it = begin; it != end; it++) {
1266 auto nit = nodes.find(it->nodeid);
1267 if (nit == nodes.end()) {
1268 // No such node
1269 continue;
1270 }
1271
1272 const PeerId peerid = nit->peerid;
1273
1274 auto pit = peers.find(peerid);
1275 if (pit == peers.end()) {
1276 // Peer not found
1277 continue;
1278 }
1279
1280 uint32_t node_count = pit->node_count;
1281 if (localProof && pit->getProofId() == localProof->getId()) {
1282 // If that's our local proof, account for ourself
1283 ++node_count;
1284 }
1285
1286 if (node_count == 0) {
1287 // should never happen
1288 continue;
1289 }
1290
1291 const double score = double(pit->getScore()) / node_count;
1292
1293 total_score += score;
1294 if (it->present) {
1295 present_score += score;
1296 } else {
1297 missing_score += score;
1298 }
1299 }
1300
1301 if (localProof) {
1302 auto &peersByProofid = peers.get<by_proofid>();
1303
1304 // Do we have a node connected for that proof ?
1305 bool present = false;
1306 auto pit = peersByProofid.find(proofid);
1307 if (pit != peersByProofid.end()) {
1308 present = pit->node_count > 0;
1309 }
1310
1311 pit = peersByProofid.find(localProof->getId());
1312 if (pit != peersByProofid.end()) {
1313 // Also divide by node_count, we can have several nodes even for our
1314 // local proof.
1315 const double score =
1316 double(pit->getScore()) / (1 + pit->node_count);
1317
1318 total_score += score;
1319 if (present) {
1320 present_score += score;
1321 } else {
1322 missing_score += score;
1323 }
1324 }
1325 }
1326
1327 if (present_score / total_score > 0.55) {
1328 return std::make_optional(true);
1329 }
1330
1331 if (missing_score / total_score > 0.55) {
1332 return std::make_optional(false);
1333 }
1334
1335 return std::nullopt;
1336}
1337
1338bool PeerManager::dumpPeersToFile(const fs::path &dumpPath) const {
1339 try {
1340 const fs::path dumpPathTmp = dumpPath + ".new";
1341 FILE *filestr = fsbridge::fopen(dumpPathTmp, "wb");
1342 if (!filestr) {
1343 return false;
1344 }
1345
1346 AutoFile file{filestr};
1347 file << PEERS_DUMP_VERSION;
1348 file << uint64_t(peers.size());
1349 for (const Peer &peer : peers) {
1350 file << peer.proof;
1351 file << peer.hasFinalized;
1352 file << int64_t(peer.registration_time.count());
1353 file << int64_t(peer.nextPossibleConflictTime.count());
1354 }
1355
1356 if (!FileCommit(file.Get())) {
1357 throw std::runtime_error(strprintf("Failed to commit to file %s",
1358 PathToString(dumpPathTmp)));
1359 }
1360 file.fclose();
1361
1362 if (!RenameOver(dumpPathTmp, dumpPath)) {
1363 throw std::runtime_error(strprintf("Rename failed from %s to %s",
1364 PathToString(dumpPathTmp),
1365 PathToString(dumpPath)));
1366 }
1367 } catch (const std::exception &e) {
1368 LogPrint(BCLog::AVALANCHE, "Failed to dump the avalanche peers: %s.\n",
1369 e.what());
1370 return false;
1371 }
1372
1373 LogPrint(BCLog::AVALANCHE, "Successfully dumped %d peers to %s.\n",
1374 peers.size(), PathToString(dumpPath));
1375
1376 return true;
1377}
1378
1380 const fs::path &dumpPath,
1381 std::unordered_set<ProofRef, SaltedProofHasher> &registeredProofs) {
1382 registeredProofs.clear();
1383
1384 FILE *filestr = fsbridge::fopen(dumpPath, "rb");
1385 AutoFile file{filestr};
1386 if (file.IsNull()) {
1388 "Failed to open avalanche peers file from disk.\n");
1389 return false;
1390 }
1391
1392 try {
1393 uint64_t version;
1394 file >> version;
1395
1396 if (version != PEERS_DUMP_VERSION) {
1398 "Unsupported avalanche peers file version.\n");
1399 return false;
1400 }
1401
1402 uint64_t numPeers;
1403 file >> numPeers;
1404
1405 auto &peersByProofId = peers.get<by_proofid>();
1406
1407 for (uint64_t i = 0; i < numPeers; i++) {
1408 ProofRef proof;
1409 bool hasFinalized;
1410 int64_t registrationTime;
1411 int64_t nextPossibleConflictTime;
1412
1413 file >> proof;
1414 file >> hasFinalized;
1415 file >> registrationTime;
1416 file >> nextPossibleConflictTime;
1417
1418 if (registerProof(proof)) {
1419 auto it = peersByProofId.find(proof->getId());
1420 if (it == peersByProofId.end()) {
1421 // Should never happen
1422 continue;
1423 }
1424
1425 // We don't modify any key so we don't need to rehash.
1426 // If the modify fails, it means we don't get the full benefit
1427 // from the file but we still added our peer to the set. The
1428 // non-overridden fields will be set the normal way.
1429 peersByProofId.modify(it, [&](Peer &p) {
1430 p.hasFinalized = hasFinalized;
1432 std::chrono::seconds{registrationTime};
1434 std::chrono::seconds{nextPossibleConflictTime};
1435 });
1436
1437 registeredProofs.insert(proof);
1438 }
1439 }
1440 } catch (const std::exception &e) {
1442 "Failed to read the avalanche peers file data on disk: %s.\n",
1443 e.what());
1444 return false;
1445 }
1446
1447 return true;
1448}
1449
/**
 * Prune stake contender cache entries below the requested minimum height.
 * Thin wrapper exposing StakeContenderCache::cleanup on the PeerManager API.
 *
 * @param requestedMinHeight Height below which cached entries are removed.
 */
void PeerManager::cleanupStakeContenders(const int requestedMinHeight) {
    stakeContenderCache.cleanup(requestedMinHeight);
}
1453
    // NOTE(review): extraction artifact — the function signature opener was
    // dropped here. Per the class declaration this block is:
    //     void PeerManager::addStakeContender(const ProofRef &proof) {
    // Cache the proof as a stake contender for the current active tip.
    const CBlockIndex *tip = WITH_LOCK(cs_main, return chainman.ActiveTip());
    stakeContenderCache.add(tip, proof);

    const BlockHash blockhash = tip->GetBlockHash();
    const ProofId &proofid = proof->getId();
    // NOTE(review): the logging macro opener line (LogPrint or LogTrace with
    // BCLog::AVALANCHE — both are referenced by this file) was dropped by the
    // extraction; the format string and arguments below belong to it.
        "Cached stake contender with proofid %s, payout %s at block "
        "%s (height %d) with id %s\n",
        proofid.ToString(), HexStr(proof->getPayoutScript()),
        blockhash.ToString(), tip->nHeight,
        StakeContenderId(blockhash, proofid).ToString());
}
1467
1469 BlockHash &prevblockhashout) const {
1470 return stakeContenderCache.getVoteStatus(contenderId, prevblockhashout);
1471}
1472
1474 stakeContenderCache.accept(contenderId);
1475}
1476
1478 const StakeContenderId &contenderId, BlockHash &prevblockhash,
1479 std::vector<std::pair<ProofId, CScript>> &newWinners) {
1480 stakeContenderCache.finalize(contenderId);
1481
1482 // Get block hash related to this contender. We should not assume the
1483 // current chain tip is the block this contender is a winner for.
1484 getStakeContenderStatus(contenderId, prevblockhash);
1485
1486 // Calculate the new winners for this block
1487 stakeContenderCache.getWinners(prevblockhash, newWinners);
1488}
1489
1491 stakeContenderCache.reject(contenderId);
1492}
1493
1495 stakeContenderCache.promoteToBlock(pindex, [&](const ProofId &proofid) {
1496 return isBoundToPeer(proofid) ||
1497 // isDangling check appears redundant, but remote proofs are not
1498 // guaranteed to be cleaned up when one of our peers is removed
1499 // for dangling too long. Whether or not a proof is dangling is
1500 // gated by remote presence status, so only proofs that are very
1501 // poorly connected to the network will stop being promoted.
1502 (isRemotelyPresentProof(proofid) && isDangling(proofid));
1503 });
1504}
1505
    // NOTE(review): extraction artifact — the function signature opener was
    // dropped here. Per the class declaration this block is:
    //     bool PeerManager::setContenderStatusForLocalWinners(
    const CBlockIndex *prevblock,
    const std::vector<std::pair<ProofId, CScript>> winners, size_t maxPollable,
    std::vector<StakeContenderId> &pollableContenders) {
    // NOTE(review): `winners` is taken by value, copying the whole vector on
    // every call; a const reference would avoid the copy — confirm no caller
    // relies on the copy before changing.
    const BlockHash prevblockhash = prevblock->GetBlockHash();
    // Set status for local winners
    for (const auto &winner : winners) {
        const StakeContenderId contenderId(prevblockhash, winner.first);
        // Mark each locally computed winner as finalized in the cache.
        stakeContenderCache.finalize(contenderId);
        // NOTE(review): the logging macro opener line was dropped by the
        // extraction; the format string and arguments below belong to it.
            "Stake contender set as local winner: proofid %s, payout "
            "%s at block %s (height %d) with id %s\n",
            winner.first.ToString(), HexStr(winner.second),
            prevblockhash.ToString(), prevblock->nHeight,
            contenderId.ToString());
    }

    // Treat the highest ranking contender similarly to local winners except
    // that it is not automatically included in the winner set (unless it
    // happens to be selected as a local winner).
    if (stakeContenderCache.getPollableContenders(prevblockhash, maxPollable,
                                                  pollableContenders) > 0) {
        // Accept the highest ranking contender. This is a no-op if the highest
        // ranking contender is already the local winner.
        stakeContenderCache.accept(pollableContenders[0]);
        // NOTE(review): another logging macro opener line dropped by the
        // extraction; the format string and arguments below belong to it.
            "Stake contender set as best contender: id %s at block "
            "%s (height %d)\n",
            pollableContenders[0].ToString(), prevblockhash.ToString(),
            prevblock->nHeight);
        return true;
    }

    return false;
}
1541
1543 const CBlockIndex *pindex, const std::vector<CScript> &payoutScripts) {
1544 return stakeContenderCache.setWinners(pindex, payoutScripts);
1545}
1546
1547} // namespace avalanche
ArgsManager gArgs
Definition: args.cpp:39
static constexpr PeerId NO_PEER
Definition: node.h:16
uint32_t PeerId
Definition: node.h:15
static constexpr size_t AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN
Conflicting proofs cooldown time default value in seconds.
Definition: avalanche.h:21
int64_t GetIntArg(const std::string &strArg, int64_t nDefault) const
Return integer argument or default value.
Definition: args.cpp:494
Non-refcounted RAII wrapper for FILE*.
Definition: streams.h:430
The block chain is a tree shaped structure starting with the genesis block at the root,...
Definition: blockindex.h:25
int64_t GetBlockTime() const
Definition: blockindex.h:160
BlockHash GetBlockHash() const
Definition: blockindex.h:130
int nHeight
height of the entry in the chain. The genesis block has height 0
Definition: blockindex.h:38
void insert(Span< const uint8_t > vKey)
Definition: bloom.cpp:215
bool contains(Span< const uint8_t > vKey) const
Definition: bloom.cpp:249
CBlockIndex * ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex())
Definition: validation.h:1443
Fast randomness source.
Definition: random.h:411
bool Invalid(Result result, const std::string &reject_reason="", const std::string &debug_message="")
Definition: validation.h:101
Result GetResult() const
Definition: validation.h:122
std::string ToString() const
Definition: validation.h:125
bool selectStakingRewardWinner(const CBlockIndex *pprev, std::vector< std::pair< ProofId, CScript > > &winners)
Deterministically select a list of payout scripts based on the proof set and the previous block hash.
uint32_t connectedPeersScore
Definition: peermanager.h:240
std::vector< RemoteProof > getRemoteProofs(const NodeId nodeid) const
bool removeNode(NodeId nodeid)
bool setFinalized(PeerId peerid)
Latch on that this peer has a finalized proof.
bool dumpPeersToFile(const fs::path &dumpPath) const
RemoteProofSet remoteProofs
Remember which node sent which proof so we have an image of the proof set of our peers.
Definition: peermanager.h:284
bool updateNextRequestTimeForResponse(NodeId nodeid, const Response &response)
bool isDangling(const ProofId &proofid) const
bool addNode(NodeId nodeid, const ProofId &proofid, size_t max_elements)
Node API.
Definition: peermanager.cpp:33
bool unsetFlaky(const ProofId &proofid)
std::optional< bool > getRemotePresenceStatus(const ProofId &proofid) const
Get the presence remote status of a proof.
bool addNodeToPeer(const PeerSet::iterator &it)
Definition: peermanager.cpp:91
bool exists(const ProofId &proofid) const
Return true if the (valid) proof exists, but only for non-dangling proofs.
Definition: peermanager.h:415
PendingNodeSet pendingNodes
Definition: peermanager.h:226
bool verify() const
Perform consistency check on internal data structures.
bool hasRemoteProofStatus(const ProofId &proofid) const
bool forPeer(const ProofId &proofid, Callable &&func) const
Definition: peermanager.h:423
void finalizeStakeContender(const StakeContenderId &contenderId, BlockHash &prevblockhash, std::vector< std::pair< ProofId, CScript > > &newWinners)
bool latchAvaproofsSent(NodeId nodeid)
Flag that a node did send its compact proofs.
void cleanupStakeContenders(const int requestedMinHeight)
Make some of the contender cache API available.
bool updateNextRequestTimeForPoll(NodeId nodeid, SteadyMilliseconds timeout, uint64_t round)
static constexpr int SELECT_PEER_MAX_RETRY
Definition: peermanager.h:228
ProofIdSet m_unbroadcast_proofids
Track proof ids to broadcast.
Definition: peermanager.h:234
bool loadPeersFromFile(const fs::path &dumpPath, std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
RejectionMode
Rejection mode.
Definition: peermanager.h:403
void addUnbroadcastProof(const ProofId &proofid)
Proof broadcast API.
std::unordered_set< ProofRef, SaltedProofHasher > updatedBlockTip()
Update the peer set when a new block is connected.
void removeUnbroadcastProof(const ProofId &proofid)
void promoteStakeContendersToBlock(const CBlockIndex *pindex)
bool isBoundToPeer(const ProofId &proofid) const
bool setContenderStatusForLocalWinners(const CBlockIndex *prevblock, const std::vector< std::pair< ProofId, CScript > > winners, size_t maxPollable, std::vector< StakeContenderId > &pollableContenders)
ProofRadixTree shareableProofs
Definition: peermanager.h:192
bool saveRemoteProof(const ProofId &proofid, const NodeId nodeid, const bool present)
CRollingBloomFilter invalidProofs
Filter for proofs that are consensus-invalid or were recently invalidated by avalanche (finalized rej...
Definition: peermanager.h:298
bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid, size_t max_elements)
Definition: peermanager.cpp:49
uint64_t compact()
Trigger maintenance of internal data structures.
std::vector< Slot > slots
Definition: peermanager.h:164
uint32_t totalPeersScore
Quorum management.
Definition: peermanager.h:239
ProofPool danglingProofPool
Definition: peermanager.h:189
StakeContenderCache stakeContenderCache
Definition: peermanager.h:302
void setInvalid(const ProofId &proofid)
int getStakeContenderStatus(const StakeContenderId &contenderId, BlockHash &prevblockhashout) const
bool isFlaky(const ProofId &proofid) const
ChainstateManager & chainman
Definition: peermanager.h:244
bool isInvalid(const ProofId &proofid) const
std::unordered_set< ProofId, SaltedProofIdHasher > manualFlakyProofids
Definition: peermanager.h:300
bool removePeer(const PeerId peerid)
Remove an existing peer.
bool isImmature(const ProofId &proofid) const
bool rejectProof(const ProofId &proofid, RejectionMode mode=RejectionMode::DEFAULT)
ProofPool immatureProofPool
Definition: peermanager.h:188
RegistrationMode
Registration mode.
Definition: peermanager.h:380
ProofPool conflictingProofPool
Definition: peermanager.h:187
bool isStakingPreconsensusActivated() const
Definition: peermanager.h:571
static constexpr size_t MAX_REMOTE_PROOFS
Definition: peermanager.h:305
bool setFlaky(const ProofId &proofid)
void addStakeContender(const ProofRef &proof)
std::atomic< bool > needMoreNodes
Flag indicating that we failed to select a node and need to expand our node set.
Definition: peermanager.h:212
PeerId selectPeer() const
Randomly select a peer to poll.
bool isInConflictingPool(const ProofId &proofid) const
bool isRemotelyPresentProof(const ProofId &proofid) const
static constexpr int SELECT_NODE_MAX_RETRY
Definition: peermanager.h:229
void cleanupDanglingProofs(std::unordered_set< ProofRef, SaltedProofHasher > &registeredProofs)
void acceptStakeContender(const StakeContenderId &contenderId)
ProofRef getProof(const ProofId &proofid) const
bool registerProof(const ProofRef &proof, ProofRegistrationState &registrationState, RegistrationMode mode=RegistrationMode::DEFAULT)
void rejectStakeContender(const StakeContenderId &contenderId)
bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count=1)
bool updateNextPossibleConflictTime(PeerId peerid, const std::chrono::seconds &nextTime)
Proof and Peer related API.
void moveToConflictingPool(const ProofContainer &proofs)
bool setStakeContenderWinners(const CBlockIndex *pindex, const std::vector< CScript > &payoutScripts)
AddProofStatus addProofIfPreferred(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool.
Definition: proofpool.cpp:54
size_t size() const
Definition: proofpool.h:135
AddProofStatus addProofIfNoConflict(const ProofRef &proof, ConflictingProofSet &conflictingProofs)
Attempt to add a proof to the pool, and fail if there is a conflict on any UTXO.
Definition: proofpool.cpp:13
size_t countProofs() const
Definition: proofpool.cpp:129
bool removeProof(ProofId proofid)
Definition: proofpool.cpp:79
void forEachProof(Callable &&func) const
Definition: proofpool.h:118
ProofRef getProof(const ProofId &proofid) const
Definition: proofpool.cpp:112
std::set< ProofRef, ConflictingProofComparator > ConflictingProofSet
Definition: proofpool.h:88
ProofRef getLowestScoreProof() const
Definition: proofpool.cpp:123
std::unordered_set< ProofRef, SaltedProofHasher > rescan(PeerManager &peerManager)
Definition: proofpool.cpp:86
bool getWinners(const BlockHash &prevblockhash, std::vector< std::pair< ProofId, CScript > > &winners) const
bool accept(const StakeContenderId &contenderId)
Helpers to set avalanche state of a contender.
void cleanup(const int requestedMinHeight)
size_t getPollableContenders(const BlockHash &prevblockhash, size_t maxPollable, std::vector< StakeContenderId > &pollableContenders) const
Get the best ranking contenders, accepted contenders ranking first.
bool reject(const StakeContenderId &contenderId)
bool setWinners(const CBlockIndex *pindex, const std::vector< CScript > &payoutScripts)
Set proof(s) that should be treated as winners (already finalized).
bool add(const CBlockIndex *pindex, const ProofRef &proof, uint8_t status=StakeContenderStatus::UNKNOWN)
Add a proof to consider in staking rewards pre-consensus.
void promoteToBlock(const CBlockIndex *activeTip, std::function< bool(const ProofId &proofid)> const &shouldPromote)
Promote cache entries to a the active chain tip.
int getVoteStatus(const StakeContenderId &contenderId, BlockHash &prevblockhashout) const
Get contender acceptance state for avalanche voting.
bool finalize(const StakeContenderId &contenderId)
std::string ToString() const
Definition: uint256.h:80
Path class wrapper to block calls to the fs::path(std::string) implicit constructor and the fs::path:...
Definition: fs.h:30
static const uint256 ZERO
Definition: uint256.h:134
RecursiveMutex cs_main
Mutex to guard access to validation specific variables, such as reading or changing the chainstate.
Definition: cs_main.cpp:7
int64_t NodeId
Definition: eviction.h:16
bool RenameOver(fs::path src, fs::path dest)
Rename src to dest.
Definition: fs_helpers.cpp:258
bool FileCommit(FILE *file)
Ensure file contents are fully committed to disk, using a platform-specific feature analogous to fsyn...
Definition: fs_helpers.cpp:111
std::string HexStr(const Span< const uint8_t > s)
Convert a span of bytes to a lower-case hexadecimal string.
Definition: hex_base.cpp:30
#define LogPrint(category,...)
Definition: logging.h:452
#define LogTrace(category,...)
Definition: logging.h:448
#define LogPrintf(...)
Definition: logging.h:424
@ AVALANCHE
Definition: logging.h:91
ProofRegistrationResult
Definition: peermanager.h:146
static constexpr uint32_t AVALANCHE_MAX_IMMATURE_PROOFS
Maximum number of immature proofs the peer manager will accept from the network.
Definition: peermanager.h:46
static bool isImmatureState(const ProofValidationState &state)
static constexpr uint64_t PEERS_DUMP_VERSION
Definition: peermanager.cpp:31
PeerId selectPeerImpl(const std::vector< Slot > &slots, const uint64_t slot, const uint64_t max)
Internal methods that are exposed for testing purposes.
RCUPtr< const Proof > ProofRef
Definition: proof.h:186
static std::string PathToString(const path &path)
Convert path object to byte string.
Definition: fs.h:147
FILE * fopen(const fs::path &p, const char *mode)
Definition: fs.cpp:30
static constexpr NodeId NO_NODE
Special NodeId that represent no node.
Definition: nodeid.h:15
Response response
Definition: processor.cpp:536
static std::string ToString(const CService &ip)
Definition: db.h:36
A BlockHash is a unqiue identifier for a block.
Definition: blockhash.h:13
RCUPtr< T > remove(const KeyType &key)
Remove an element from the tree.
Definition: radix.h:181
RCUPtr< T > get(const KeyType &key)
Get the value corresponding to a key.
Definition: radix.h:118
bool forEachLeaf(Callable &&func) const
Definition: radix.h:144
bool insert(const RCUPtr< T > &value)
Insert a value into the tree.
Definition: radix.h:112
Facility for using an uint256 as a radix tree key.
uint64_t last_round
Definition: node.h:25
SteadyMilliseconds nextRequestTime
Definition: node.h:23
bool avaproofsSent
Definition: node.h:24
std::chrono::seconds registration_time
Definition: peermanager.h:95
std::chrono::seconds nextPossibleConflictTime
Definition: peermanager.h:96
uint32_t node_count
Definition: peermanager.h:89
static constexpr auto DANGLING_TIMEOUT
Consider dropping the peer if no node is attached after this timeout expired.
Definition: peermanager.h:102
uint32_t index
Definition: peermanager.h:88
uint32_t getScore() const
Definition: peermanager.h:111
ProofRef proof
Definition: peermanager.h:91
uint64_t getStop() const
Definition: peermanager.h:75
uint64_t getStart() const
Definition: peermanager.h:74
PeerId getPeerId() const
Definition: peermanager.h:77
StakeContenderIds are unique for each block to ensure that the peer polling for their acceptance has ...
double ComputeProofRewardRank(uint32_t proofScore) const
To make sure the selection is properly weighted according to the proof score, we normalize the conten...
#define LOCK(cs)
Definition: sync.h:306
#define WITH_LOCK(cs, code)
Run code while locking a mutex.
Definition: sync.h:357
static int count
#define NO_THREAD_SAFETY_ANALYSIS
Definition: threadsafety.h:58
int64_t GetTime()
DEPRECATED Use either ClockType::now() or Now<TimePointType>() if a cast is needed.
Definition: time.cpp:80
std::chrono::time_point< std::chrono::steady_clock, std::chrono::milliseconds > SteadyMilliseconds
Definition: time.h:33
#define strprintf
Format arguments and return the string or write to given std::ostream (see tinyformat::format doc for...
Definition: tinyformat.h:1202
AssertLockHeld(pool.cs)
assert(!tx.IsCoinBase())