Bitcoin ABC  0.23.2
P2P Digital Currency
validation.cpp
Go to the documentation of this file.
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Copyright (c) 2017-2020 The Bitcoin developers
4 // Distributed under the MIT software license, see the accompanying
5 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 
7 #include <validation.h>
8 
9 #include <arith_uint256.h>
10 #include <avalanche/processor.h>
11 #include <blockdb.h>
12 #include <blockvalidity.h>
13 #include <chainparams.h>
14 #include <checkpoints.h>
15 #include <checkqueue.h>
16 #include <config.h>
17 #include <consensus/activation.h>
18 #include <consensus/merkle.h>
19 #include <consensus/tx_check.h>
20 #include <consensus/tx_verify.h>
21 #include <consensus/validation.h>
22 #include <hash.h>
23 #include <index/txindex.h>
24 #include <logging.h>
25 #include <logging/timer.h>
26 #include <minerfund.h>
27 #include <node/ui_interface.h>
28 #include <policy/fees.h>
29 #include <policy/mempool.h>
30 #include <policy/policy.h>
31 #include <policy/settings.h>
32 #include <pow/aserti32d.h> // For ResetASERTAnchorBlockCache
33 #include <pow/pow.h>
34 #include <primitives/block.h>
35 #include <primitives/transaction.h>
36 #include <random.h>
37 #include <reverse_iterator.h>
38 #include <script/script.h>
39 #include <script/scriptcache.h>
40 #include <script/sigcache.h>
41 #include <shutdown.h>
42 #include <timedata.h>
43 #include <tinyformat.h>
44 #include <txdb.h>
45 #include <txmempool.h>
46 #include <undo.h>
47 #include <util/check.h> // For NDEBUG compile time check
48 #include <util/moneystr.h>
49 #include <util/strencodings.h>
50 #include <util/system.h>
51 #include <util/translation.h>
52 #include <validationinterface.h>
53 #include <warnings.h>
54 
55 #include <boost/algorithm/string/replace.hpp>
56 
57 #include <optional>
58 #include <string>
59 #include <thread>
60 
#define MICRO 0.000001
#define MILLI 0.001

/** Time between writing blocks/block index to disk. */
// NOTE(review): the doc-comment lines for these two constants were dropped
// from this copy; rewritten from the identifier meaning — verify upstream.
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time between flushing chainstate to disk. */
static constexpr std::chrono::hours DATABASE_FLUSH_INTERVAL{24};
/**
 * Human-readable descriptions of the -checklevel verification levels,
 * one entry per level plus a trailing note that levels are cumulative.
 */
const std::vector<std::string> CHECKLEVEL_DOC{
    "level 0 reads the blocks from disk",
    "level 1 verifies block validity",
    "level 2 verifies undo data",
    "level 3 checks disconnection of tip blocks",
    "level 4 tries to reconnect the blocks",
    "each level includes the checks of the previous levels",
};
76 
78 
80  LOCK(::cs_main);
81  assert(g_chainman.m_active_chainstate);
82  return *g_chainman.m_active_chainstate;
83 }
84 
86  LOCK(::cs_main);
88 }
89 
103 
106 std::condition_variable g_best_block_cv;
108 std::atomic_bool fImporting(false);
109 std::atomic_bool fReindex(false);
110 bool fHavePruned = false;
111 bool fPruneMode = false;
112 bool fRequireStandard = true;
113 bool fCheckBlockIndex = false;
115 uint64_t nPruneTarget = 0;
117 
120 
122 
124 
125 // Internal stuff
126 namespace {
127 CBlockIndex *pindexBestInvalid = nullptr;
128 CBlockIndex *pindexBestParked = nullptr;
129 
130 RecursiveMutex cs_LastBlockFile;
131 std::vector<CBlockFileInfo> vinfoBlockFile;
132 int nLastBlockFile = 0;
138 bool fCheckForPruning = false;
139 
141 std::set<const CBlockIndex *> setDirtyBlockIndex;
142 
144 std::set<int> setDirtyFileInfo;
145 } // namespace
146 
148  : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
149  checkMerkleRoot(true) {}
150 
152  AssertLockHeld(cs_main);
153  BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
154  return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
155 }
156 
158  const CBlockLocator &locator) {
159  AssertLockHeld(cs_main);
160 
161  // Find the latest block common to locator and chain - we expect that
162  // locator.vHave is sorted descending by height.
163  for (const BlockHash &hash : locator.vHave) {
164  CBlockIndex *pindex = LookupBlockIndex(hash);
165  if (pindex) {
166  if (chain.Contains(pindex)) {
167  return pindex;
168  }
169  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
170  return chain.Tip();
171  }
172  }
173  }
174  return chain.Genesis();
175 }
176 
177 std::unique_ptr<CBlockTreeDB> pblocktree;
178 
179 // See definition for documentation
180 static void FindFilesToPruneManual(ChainstateManager &chainman,
181  std::set<int> &setFilesToPrune,
182  int nManualPruneHeight);
183 static void FindFilesToPrune(ChainstateManager &chainman,
184  std::set<int> &setFilesToPrune,
185  uint64_t nPruneAfterHeight);
186 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
187  const CBlockIndex *pindex);
188 
190  AssertLockHeld(cs_main);
191  assert(lp);
192  // If there are relative lock times then the maxInputBlock will be set
193  // If there are no relative lock times, the LockPoints don't depend on the
194  // chain
195  if (lp->maxInputBlock) {
196  // Check whether ::ChainActive() is an extension of the block at which
197  // the LockPoints calculation was valid. If not LockPoints are no longer
198  // valid.
199  if (!::ChainActive().Contains(lp->maxInputBlock)) {
200  return false;
201  }
202  }
203 
204  // LockPoints still valid
205  return true;
206 }
207 
208 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
209  int flags, LockPoints *lp, bool useExistingLockPoints) {
210  AssertLockHeld(cs_main);
211  AssertLockHeld(pool.cs);
212 
213  CBlockIndex *tip = ::ChainActive().Tip();
214  assert(tip != nullptr);
215 
216  CBlockIndex index;
217  index.pprev = tip;
218  // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate height
219  // based locks because when SequenceLocks() is called within ConnectBlock(),
220  // the height of the block *being* evaluated is what is used. Thus if we
221  // want to know if a transaction can be part of the *next* block, we need to
222  // use one more than ::ChainActive().Height()
223  index.nHeight = tip->nHeight + 1;
224 
225  std::pair<int, int64_t> lockPair;
226  if (useExistingLockPoints) {
227  assert(lp);
228  lockPair.first = lp->height;
229  lockPair.second = lp->time;
230  } else {
231  // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
232  CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
233  std::vector<int> prevheights;
234  prevheights.resize(tx.vin.size());
235  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
236  const CTxIn &txin = tx.vin[txinIndex];
237  Coin coin;
238  if (!viewMemPool.GetCoin(txin.prevout, coin)) {
239  return error("%s: Missing input", __func__);
240  }
241  if (coin.GetHeight() == MEMPOOL_HEIGHT) {
242  // Assume all mempool transaction confirm in the next block
243  prevheights[txinIndex] = tip->nHeight + 1;
244  } else {
245  prevheights[txinIndex] = coin.GetHeight();
246  }
247  }
248  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
249  if (lp) {
250  lp->height = lockPair.first;
251  lp->time = lockPair.second;
252  // Also store the hash of the block with the highest height of all
253  // the blocks which have sequence locked prevouts. This hash needs
254  // to still be on the chain for these LockPoint calculations to be
255  // valid.
256  // Note: It is impossible to correctly calculate a maxInputBlock if
257  // any of the sequence locked inputs depend on unconfirmed txs,
258  // except in the special case where the relative lock time/height is
259  // 0, which is equivalent to no sequence lock. Since we assume input
260  // height of tip+1 for mempool txs and test the resulting lockPair
261  // from CalculateSequenceLocks against tip+1. We know
262  // EvaluateSequenceLocks will fail if there was a non-zero sequence
263  // lock on a mempool input, so we can use the return value of
264  // CheckSequenceLocks to indicate the LockPoints validity.
265  int maxInputHeight = 0;
266  for (const int height : prevheights) {
267  // Can ignore mempool inputs since we'll fail if they had
268  // non-zero locks.
269  if (height != tip->nHeight + 1) {
270  maxInputHeight = std::max(maxInputHeight, height);
271  }
272  }
273  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
274  }
275  }
276  return EvaluateSequenceLocks(index, lockPair);
277 }
278 
279 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
280 // cause the node to switch to replay protected SigHash ForkID value when the
281 // median timestamp of the previous 11 blocks is greater than or equal to
282 // <timestamp>. Defaults to the pre-defined timestamp when not set.
284  int64_t nMedianTimePast) {
285  return nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
286  params.selectronActivationTime);
287 }
288 
290  const CBlockIndex *pindexPrev) {
291  if (pindexPrev == nullptr) {
292  return false;
293  }
294 
295  return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
296 }
297 
298 // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
299 // were somehow broken and returning the wrong scriptPubKeys
301  const CTransaction &tx, TxValidationState &state,
302  const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
303  PrecomputedTransactionData &txdata, int &nSigChecksOut)
304  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
305  AssertLockHeld(cs_main);
306 
307  // pool.cs should be locked already, but go ahead and re-take the lock here
308  // to enforce that mempool doesn't change between when we check the view and
309  // when we actually call through to CheckInputScripts
310  LOCK(pool.cs);
311 
312  assert(!tx.IsCoinBase());
313  for (const CTxIn &txin : tx.vin) {
314  const Coin &coin = view.AccessCoin(txin.prevout);
315 
316  // AcceptToMemoryPoolWorker has already checked that the coins are
317  // available, so this shouldn't fail. If the inputs are not available
318  // here then return false.
319  if (coin.IsSpent()) {
320  return false;
321  }
322 
323  // Check equivalence for available inputs.
324  const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
325  if (txFrom) {
326  assert(txFrom->GetId() == txin.prevout.GetTxId());
327  assert(txFrom->vout.size() > txin.prevout.GetN());
328  assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
329  } else {
330  const Coin &coinFromDisk =
332  assert(!coinFromDisk.IsSpent());
333  assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
334  }
335  }
336 
337  // Call CheckInputScripts() to cache signature and script validity against
338  // current tip consensus rules.
339  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true,
340  /* cacheFullScriptStore = */ true, txdata,
341  nSigChecksOut);
342 }
343 
344 namespace {
345 
346 class MemPoolAccept {
347 public:
348  MemPoolAccept(CTxMemPool &mempool)
349  : m_pool(mempool), m_view(&m_dummy),
350  m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
351  m_limit_ancestors(
352  gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
353  m_limit_ancestor_size(
354  gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
355  1000),
356  m_limit_descendants(
357  gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
358  m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize",
360  1000) {}
361 
362  // We put the arguments we're handed into a struct, so we can pass them
363  // around easier.
364  struct ATMPArgs {
365  const Config &m_config;
366  TxValidationState &m_state;
367  const int64_t m_accept_time;
368  const bool m_bypass_limits;
369  const Amount &m_absurd_fee;
370  /*
371  * Return any outpoints which were not previously present in the coins
372  * cache, but were added as a result of validating the tx for mempool
373  * acceptance. This allows the caller to optionally remove the cache
374  * additions if the associated transaction ends up being rejected by
375  * the mempool.
376  */
377  std::vector<COutPoint> &m_coins_to_uncache;
378  const bool m_test_accept;
379  };
380 
381  // Single transaction acceptance
382  bool AcceptSingleTransaction(const CTransactionRef &ptx, ATMPArgs &args)
383  EXCLUSIVE_LOCKS_REQUIRED(cs_main);
384 
385 private:
386  // All the intermediate state that gets passed between the various levels
387  // of checking a given transaction.
388  struct Workspace {
389  Workspace(const CTransactionRef &ptx,
390  const uint32_t next_block_script_verify_flags)
391  : m_ptx(ptx),
392  m_next_block_script_verify_flags(next_block_script_verify_flags) {
393  }
394  CTxMemPool::setEntries m_ancestors;
395  std::unique_ptr<CTxMemPoolEntry> m_entry;
396 
397  Amount m_modified_fees;
398 
399  const CTransactionRef &m_ptx;
400 
401  // ABC specific flags that are used in both PreChecks and
402  // ConsensusScriptChecks
403  const uint32_t m_next_block_script_verify_flags;
404  int m_sig_checks_standard;
405  };
406 
407  // Run the policy checks on a given transaction, excluding any script
408  // checks. Looks up inputs, calculates feerate, considers replacement,
409  // evaluates package limits, etc. As this function can be invoked for "free"
410  // by a peer, only tests that are fast should be done here (to avoid CPU
411  // DoS).
412  bool PreChecks(ATMPArgs &args, Workspace &ws)
413  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
414 
415  // Re-run the script checks, using consensus flags, and try to cache the
416  // result in the scriptcache. This should be done after
417  // PolicyScriptChecks(). This requires that all inputs either be in our
418  // utxo set or in the mempool.
419  bool ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
421  EXCLUSIVE_LOCKS_REQUIRED(cs_main);
422 
423  // Try to add the transaction to the mempool, removing any conflicts first.
424  // Returns true if the transaction is in the mempool after any size
425  // limiting is performed, false otherwise.
426  bool Finalize(ATMPArgs &args, Workspace &ws)
427  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
428 
429 private:
430  CTxMemPool &m_pool;
431  CCoinsViewCache m_view;
432  CCoinsViewMemPool m_viewmempool;
433  CCoinsView m_dummy;
434 
435  // The package limits in effect at the time of invocation.
436  const size_t m_limit_ancestors;
437  const size_t m_limit_ancestor_size;
438  // These may be modified while evaluating a transaction (eg to account for
439  // in-mempool conflicts; see below).
440  size_t m_limit_descendants;
441  size_t m_limit_descendant_size;
442 };
443 
444 bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
445  const CTransactionRef &ptx = ws.m_ptx;
446  const CTransaction &tx = *ws.m_ptx;
447  const TxId &txid = ws.m_ptx->GetId();
448 
449  // Copy/alias what we need out of args
450  TxValidationState &state = args.m_state;
451  const int64_t nAcceptTime = args.m_accept_time;
452  const bool bypass_limits = args.m_bypass_limits;
453  const Amount &nAbsurdFee = args.m_absurd_fee;
454  std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
455 
456  // Alias what we need out of ws
457  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
458  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
459  Amount &nModifiedFees = ws.m_modified_fees;
460 
461  // Coinbase is only valid in a block, not as a loose transaction.
462  if (!CheckRegularTransaction(tx, state)) {
463  // state filled in by CheckRegularTransaction.
464  return false;
465  }
466 
467  // Rather not work on nonstandard transactions (unless -testnet)
468  std::string reason;
469  if (fRequireStandard && !IsStandardTx(tx, reason)) {
470  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
471  }
472 
473  // Only accept nLockTime-using transactions that can be mined in the next
474  // block; we don't want our mempool filled up with transactions that can't
475  // be mined yet.
476  TxValidationState ctxState;
478  args.m_config.GetChainParams().GetConsensus(), tx, ctxState,
480  // We copy the state from a dummy to ensure we don't increase the
481  // ban score of peer for transaction that could be valid in the future.
483  ctxState.GetRejectReason(),
484  ctxState.GetDebugMessage());
485  }
486 
487  // Is it already in the memory pool?
488  if (m_pool.exists(txid)) {
490  "txn-already-in-mempool");
491  }
492 
493  // Check for conflicts with in-memory transactions
494  for (const CTxIn &txin : tx.vin) {
495  auto itConflicting = m_pool.mapNextTx.find(txin.prevout);
496  if (itConflicting != m_pool.mapNextTx.end()) {
497  // Disable replacement feature for good
499  "txn-mempool-conflict");
500  }
501  }
502 
503  LockPoints lp;
504  m_view.SetBackend(m_viewmempool);
505 
506  CCoinsViewCache &coins_cache = ::ChainstateActive().CoinsTip();
507  // Do all inputs exist?
508  for (const CTxIn &txin : tx.vin) {
509  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
510  coins_to_uncache.push_back(txin.prevout);
511  }
512 
513  // Note: this call may add txin.prevout to the coins cache
514  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
515  // removed later (via coins_to_uncache) if this tx turns out to be
516  // invalid.
517  if (!m_view.HaveCoin(txin.prevout)) {
518  // Are inputs missing because we already have the tx?
519  for (size_t out = 0; out < tx.vout.size(); out++) {
520  // Optimistically just do efficient check of cache for
521  // outputs.
522  if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
524  "txn-already-known");
525  }
526  }
527 
528  // Otherwise assume this might be an orphan tx for which we just
529  // haven't seen parents yet.
531  "bad-txns-inputs-missingorspent");
532  }
533  }
534 
535  // Are the actual inputs available?
536  if (!m_view.HaveInputs(tx)) {
538  "bad-txns-inputs-spent");
539  }
540 
541  // Bring the best block into scope.
542  m_view.GetBestBlock();
543 
544  // we have all inputs cached now, so switch back to dummy (to protect
545  // against bugs where we pull more inputs from disk that miss being
546  // added to coins_to_uncache)
547  m_view.SetBackend(m_dummy);
548 
549  // Only accept BIP68 sequence locked transactions that can be mined in
550  // the next block; we don't want our mempool filled up with transactions
551  // that can't be mined yet. Must keep pool.cs for this unless we change
552  // CheckSequenceLocks to take a CoinsViewCache instead of create its
553  // own.
554  if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
556  "non-BIP68-final");
557  }
558 
559  Amount nFees = Amount::zero();
560  if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view),
561  nFees)) {
562  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
563  tx.GetId().ToString(), state.ToString());
564  }
565 
566  // Check for non-standard pay-to-script-hash in inputs
567  if (fRequireStandard &&
568  !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
570  "bad-txns-nonstandard-inputs");
571  }
572 
573  // nModifiedFees includes any fee deltas from PrioritiseTransaction
574  nModifiedFees = nFees;
575  m_pool.ApplyDelta(txid, nModifiedFees);
576 
577  // Keep track of transactions that spend a coinbase, which we re-scan
578  // during reorgs to ensure COINBASE_MATURITY is still met.
579  bool fSpendsCoinbase = false;
580  for (const CTxIn &txin : tx.vin) {
581  const Coin &coin = m_view.AccessCoin(txin.prevout);
582  if (coin.IsCoinBase()) {
583  fSpendsCoinbase = true;
584  break;
585  }
586  }
587 
588  unsigned int nSize = tx.GetTotalSize();
589 
590  // No transactions are allowed below minRelayTxFee except from disconnected
591  // blocks.
592  // Do not change this to use virtualsize without coordinating a network
593  // policy upgrade.
594  if (!bypass_limits && nModifiedFees < minRelayTxFee.GetFee(nSize)) {
595  return state.Invalid(
596  TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
597  strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
598  }
599 
600  if (nAbsurdFee != Amount::zero() && nFees > nAbsurdFee) {
602  "absurdly-high-fee",
603  strprintf("%d > %d", nFees, nAbsurdFee));
604  }
605 
606  // Validate input scripts against standard script flags.
607  const uint32_t scriptVerifyFlags =
608  ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
609  PrecomputedTransactionData txdata(tx);
610  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
611  txdata, ws.m_sig_checks_standard)) {
612  // State filled in by CheckInputScripts
613  return false;
614  }
615 
616  entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime,
617  ::ChainActive().Height(), fSpendsCoinbase,
618  ws.m_sig_checks_standard, lp));
619 
620  unsigned int nVirtualSize = entry->GetTxVirtualSize();
621 
622  Amount mempoolRejectFee =
623  m_pool
624  .GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
625  1000000)
626  .GetFee(nVirtualSize);
627  if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
628  nModifiedFees < mempoolRejectFee) {
629  return state.Invalid(
630  TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
631  strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
632  }
633 
634  // Calculate in-mempool ancestors, up to a limit.
635  std::string errString;
636  if (!m_pool.CalculateMemPoolAncestors(
637  *entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size,
638  m_limit_descendants, m_limit_descendant_size, errString)) {
640  "too-long-mempool-chain", errString);
641  }
642  return true;
643 }
644 
645 bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
646  PrecomputedTransactionData &txdata) {
647  const CTransaction &tx = *ws.m_ptx;
648  const TxId &txid = tx.GetId();
649 
650  TxValidationState &state = args.m_state;
651 
652  // Check again against the next block's script verification flags
653  // to cache our script execution flags.
654  //
655  // This is also useful in case of bugs in the standard flags that cause
656  // transactions to pass as valid when they're actually invalid. For
657  // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
658  // NOT scripts to pass, even though they were invalid.
659  //
660  // There is a similar check in CreateNewBlock() to prevent creating
661  // invalid blocks (using TestBlockValidity), however allowing such
662  // transactions into the mempool can be exploited as a DoS attack.
663  int nSigChecksConsensus;
664  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool,
665  ws.m_next_block_script_verify_flags,
666  txdata, nSigChecksConsensus)) {
667  // This can occur under some circumstances, if the node receives an
668  // unrequested tx which is invalid due to new consensus rules not
669  // being activated yet (during IBD).
670  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed "
671  "against next-block but not STANDARD flags %s, %s",
672  __func__, txid.ToString(), state.ToString());
673  }
674 
675  if (ws.m_sig_checks_standard != nSigChecksConsensus) {
676  // We can't accept this transaction as we've used the standard count
677  // for the mempool/mining, but the consensus count will be enforced
678  // in validation (we don't want to produce bad block templates).
679  return error(
680  "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
681  "standard and consensus flags in %s",
682  __func__, txid.ToString());
683  }
684  return true;
685 }
686 
687 bool MemPoolAccept::Finalize(ATMPArgs &args, Workspace &ws) {
688  const TxId &txid = ws.m_ptx->GetId();
689  TxValidationState &state = args.m_state;
690  const bool bypass_limits = args.m_bypass_limits;
691 
692  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
693  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
694 
695  // Store transaction in memory.
696  m_pool.addUnchecked(*entry, setAncestors);
697 
698  // Trim mempool and check if tx was trimmed.
699  if (!bypass_limits) {
700  m_pool.LimitSize(
701  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
702  std::chrono::hours{
703  gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
704  if (!m_pool.exists(txid)) {
706  "mempool full");
707  }
708  }
709  return true;
710 }
711 
712 bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
713  ATMPArgs &args) {
714  AssertLockHeld(cs_main);
715  // mempool "read lock" (held through
716  // GetMainSignals().TransactionAddedToMempool())
717  LOCK(m_pool.cs);
718 
719  Workspace workspace(ptx, GetNextBlockScriptFlags(
720  args.m_config.GetChainParams().GetConsensus(),
721  ::ChainActive().Tip()));
722 
723  if (!PreChecks(args, workspace)) {
724  return false;
725  }
726 
727  // Only compute the precomputed transaction data if we need to verify
728  // scripts (ie, other policy checks pass). We perform the inexpensive
729  // checks first and avoid hashing and signature verification unless those
730  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
731  PrecomputedTransactionData txdata(*ptx);
732 
733  if (!ConsensusScriptChecks(args, workspace, txdata)) {
734  return false;
735  }
736 
737  // Tx was accepted, but not added
738  if (args.m_test_accept) {
739  return true;
740  }
741 
742  if (!Finalize(args, workspace)) {
743  return false;
744  }
745 
747  return true;
748 }
749 
750 } // namespace
751 
755 static bool
757  TxValidationState &state, const CTransactionRef &tx,
758  int64_t nAcceptTime, bool bypass_limits,
759  const Amount nAbsurdFee, bool test_accept)
760  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
761  AssertLockHeld(cs_main);
762  std::vector<COutPoint> coins_to_uncache;
763  MemPoolAccept::ATMPArgs args{config, state, nAcceptTime,
764  bypass_limits, nAbsurdFee, coins_to_uncache,
765  test_accept};
766  bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
767  if (!res) {
768  // Remove coins that were not present in the coins cache before calling
769  // ATMPW; this is to prevent memory DoS in case we receive a large
770  // number of invalid transactions that attempt to overrun the in-memory
771  // coins cache
772  // (`CCoinsViewCache::cacheCoins`).
773 
774  for (const COutPoint &outpoint : coins_to_uncache) {
775  ::ChainstateActive().CoinsTip().Uncache(outpoint);
776  }
777  }
778 
779  // After we've (potentially) uncached entries, ensure our coins cache is
780  // still within its size limits
781  BlockValidationState stateDummy;
782  ::ChainstateActive().FlushStateToDisk(config.GetChainParams(), stateDummy,
784  return res;
785 }
786 
787 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
788  TxValidationState &state, const CTransactionRef &tx,
789  bool bypass_limits, const Amount nAbsurdFee,
790  bool test_accept) {
791  return AcceptToMemoryPoolWithTime(config, pool, state, tx, GetTime(),
792  bypass_limits, nAbsurdFee, test_accept);
793 }
794 
800 bool GetTransaction(const TxId &txid, CTransactionRef &txOut,
801  const Consensus::Params &params, BlockHash &hashBlock,
802  const CBlockIndex *const block_index) {
803  LOCK(cs_main);
804 
805  if (block_index == nullptr) {
806  CTransactionRef ptx = g_mempool.get(txid);
807  if (ptx) {
808  txOut = ptx;
809  return true;
810  }
811 
812  if (g_txindex) {
813  return g_txindex->FindTx(txid, hashBlock, txOut);
814  }
815  } else {
816  CBlock block;
817  if (ReadBlockFromDisk(block, block_index, params)) {
818  for (const auto &tx : block.vtx) {
819  if (tx->GetId() == txid) {
820  txOut = tx;
821  hashBlock = block_index->GetBlockHash();
822  return true;
823  }
824  }
825  }
826  }
827 
828  return false;
829 }
830 
832 //
833 // CBlock and CBlockIndex
834 //
835 
836 static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos,
837  const CMessageHeader::MessageMagic &messageStart) {
838  // Open history file to append
840  if (fileout.IsNull()) {
841  return error("WriteBlockToDisk: OpenBlockFile failed");
842  }
843 
844  // Write index header
845  unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
846  fileout << messageStart << nSize;
847 
848  // Write block
849  long fileOutPos = ftell(fileout.Get());
850  if (fileOutPos < 0) {
851  return error("WriteBlockToDisk: ftell failed");
852  }
853 
854  pos.nPos = (unsigned int)fileOutPos;
855  fileout << block;
856 
857  return true;
858 }
859 
860 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
861  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
862  // Force block reward to zero when right shift is undefined.
863  if (halvings >= 64) {
864  return Amount::zero();
865  }
866 
867  Amount nSubsidy = 50 * COIN;
868  // Subsidy is cut in half every 210,000 blocks which will occur
869  // approximately every 4 years.
870  return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
871 }
872 
873 CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
874  bool in_memory, bool should_wipe)
875  : m_dbview(GetDataDir() / ldb_name, cache_size_bytes, in_memory,
876  should_wipe),
877  m_catcherview(&m_dbview) {}
878 
879 void CoinsViews::InitCache() {
880  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
881 }
882 
884  BlockHash from_snapshot_blockhash)
885  : m_blockman(blockman), m_from_snapshot_blockhash(from_snapshot_blockhash) {
886 }
887 
888 void CChainState::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
889  bool should_wipe, std::string leveldb_name) {
891  leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
892  }
893  m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
894  in_memory, should_wipe);
895 }
896 
897 void CChainState::InitCoinsCache(size_t cache_size_bytes) {
898  assert(m_coins_views != nullptr);
899  m_coinstip_cache_size_bytes = cache_size_bytes;
900  m_coins_views->InitCache();
901 }
902 
903 // Note that though this is marked const, we may end up modifying
904 // `m_cached_finished_ibd`, which is a performance-related implementation
905 // detail. This function must be marked `const` so that `CValidationInterface`
906 // clients (which are given a `const CChainState*`) can call it.
907 //
909  // Optimization: pre-test latch before taking the lock.
910  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
911  return false;
912  }
913 
914  LOCK(cs_main);
915  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
916  return false;
917  }
918  if (fImporting || fReindex) {
919  return true;
920  }
921  if (m_chain.Tip() == nullptr) {
922  return true;
923  }
925  return true;
926  }
927  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
928  return true;
929  }
930  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
931  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
932  return false;
933 }
934 
935 static CBlockIndex const *pindexBestForkTip = nullptr;
936 static CBlockIndex const *pindexBestForkBase = nullptr;
937 
939  LOCK(::cs_main);
940  return g_chainman.m_blockman.m_block_index;
941 }
942 
943 static void AlertNotify(const std::string &strMessage) {
944  uiInterface.NotifyAlertChanged();
945 #if defined(HAVE_SYSTEM)
946  std::string strCmd = gArgs.GetArg("-alertnotify", "");
947  if (strCmd.empty()) {
948  return;
949  }
950 
951  // Alert text should be plain ascii coming from a trusted source, but to be
952  // safe we first strip anything not in safeChars, then add single quotes
953  // around the whole string before passing it to the shell:
954  std::string singleQuote("'");
955  std::string safeStatus = SanitizeString(strMessage);
956  safeStatus = singleQuote + safeStatus + singleQuote;
957  boost::replace_all(strCmd, "%s", safeStatus);
958 
959  std::thread t(runCommand, strCmd);
960  // thread runs free
961  t.detach();
962 #endif
963 }
964 
966  AssertLockHeld(cs_main);
967  // Before we get past initial download, we cannot reliably alert about forks
968  // (we assume we don't get stuck on a fork before finishing our initial
969  // sync)
971  return;
972  }
973 
974  // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
975  // mines it) of our head, drop it
976  if (pindexBestForkTip &&
977  ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72) {
978  pindexBestForkTip = nullptr;
979  }
980 
981  if (pindexBestForkTip ||
982  (pindexBestInvalid &&
983  pindexBestInvalid->nChainWork >
984  ::ChainActive().Tip()->nChainWork +
985  (GetBlockProof(*::ChainActive().Tip()) * 6))) {
986  if (!GetfLargeWorkForkFound() && pindexBestForkBase) {
987  std::string warning =
988  std::string("'Warning: Large-work fork detected, forking after "
989  "block ") +
990  pindexBestForkBase->phashBlock->ToString() + std::string("'");
991  AlertNotify(warning);
992  }
993 
994  if (pindexBestForkTip && pindexBestForkBase) {
995  LogPrintf("%s: Warning: Large fork found\n forking the "
996  "chain at height %d (%s)\n lasting to height %d "
997  "(%s).\nChain state database corruption likely.\n",
998  __func__, pindexBestForkBase->nHeight,
999  pindexBestForkBase->phashBlock->ToString(),
1000  pindexBestForkTip->nHeight,
1001  pindexBestForkTip->phashBlock->ToString());
1002  SetfLargeWorkForkFound(true);
1003  } else {
1004  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
1005  "longer than our best chain.\nChain state database "
1006  "corruption likely.\n",
1007  __func__);
1009  }
1010  } else {
1011  SetfLargeWorkForkFound(false);
1013  }
1014 }
1015 
// Record a newly-seen fork tip as the "best fork" candidate when it meets the
// warning condition below, remembering both the tip and its fork point with
// the active chain. Caller must hold cs_main.
//
// NOTE(review): this listing appears to be missing a trailing statement just
// before the closing brace (presumably a call to CheckForkWarningConditions())
// -- verify against the repository.
1016 static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
1017  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1018  AssertLockHeld(cs_main);
1019  // If we are on a fork that is sufficiently large, set a warning flag.
1020  const CBlockIndex *pfork = ::ChainActive().FindFork(pindexNewForkTip);
1021 
1022  // We define a condition where we should warn the user about as a fork of at
1023  // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
1024  // it) of ours. We use 7 blocks rather arbitrarily as it represents just
1025  // under 10% of sustained network hash rate operating on the fork, or a
1026  // chain that is entirely longer than ours and invalid (note that this
1027  // should be detected by both). We define it this way because it allows us
1028  // to only store the highest fork tip (+ base) which meets the 7-block
1029  // condition and from this always have the most-likely-to-cause-warning fork
1030  if (pfork &&
1031  (!pindexBestForkTip ||
1032  pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1033  pindexNewForkTip->nChainWork - pfork->nChainWork >
1034  (GetBlockProof(*pfork) * 7) &&
1035  ::ChainActive().Height() - pindexNewForkTip->nHeight < 72) {
1036  pindexBestForkTip = pindexNewForkTip;
1037  pindexBestForkBase = pfork;
1038  }
1039 
1041 }
1042 
1043 // Called both upon regular invalid block discovery *and* InvalidateBlock
1045  AssertLockHeld(cs_main);
1046  if (!pindexBestInvalid ||
1047  pindexNew->nChainWork > pindexBestInvalid->nChainWork) {
1048  pindexBestInvalid = pindexNew;
1049  }
1050  if (pindexBestHeader != nullptr &&
1051  pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1052  pindexBestHeader = ::ChainActive().Tip();
1053  }
1054 
1055  // If the invalid chain found is supposed to be finalized, we need to move
1056  // back the finalization point.
1057  if (IsBlockFinalized(pindexNew)) {
1058  m_finalizedBlockIndex = pindexNew->pprev;
1059  }
1060 
1061  LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n",
1062  __func__, pindexNew->GetBlockHash().ToString(),
1063  pindexNew->nHeight,
1064  log(pindexNew->nChainWork.getdouble()) / log(2.0),
1065  FormatISO8601DateTime(pindexNew->GetBlockTime()));
1066  CBlockIndex *tip = ::ChainActive().Tip();
1067  assert(tip);
1068  LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n",
1069  __func__, tip->GetBlockHash().ToString(),
1070  ::ChainActive().Height(),
1071  log(tip->nChainWork.getdouble()) / log(2.0),
1073 }
1074 
1075 // Same as InvalidChainFound, above, except not called directly from
1076 // InvalidateBlock, which does its own setBlockIndexCandidates management.
1078  const BlockValidationState &state) {
1080  pindex->nStatus = pindex->nStatus.withFailed();
1081  m_blockman.m_failed_blocks.insert(pindex);
1082  setDirtyBlockIndex.insert(pindex);
1083  InvalidChainFound(pindex);
1084  }
1085 }
1086 
1087 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1088  int nHeight) {
1089  // Mark inputs spent.
1090  if (tx.IsCoinBase()) {
1091  return;
1092  }
1093 
1094  txundo.vprevout.reserve(tx.vin.size());
1095  for (const CTxIn &txin : tx.vin) {
1096  txundo.vprevout.emplace_back();
1097  bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1098  assert(is_spent);
1099  }
1100 }
1101 
1102 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1103  int nHeight) {
1104  SpendCoins(view, tx, txundo, nHeight);
1105  AddCoins(view, tx, nHeight);
1106 }
1107 
1108 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) {
1109  // Mark inputs spent.
1110  if (!tx.IsCoinBase()) {
1111  for (const CTxIn &txin : tx.vin) {
1112  bool is_spent = view.SpendCoin(txin.prevout);
1113  assert(is_spent);
1114  }
1115  }
1116 
1117  // Add outputs.
1118  AddCoins(view, tx, nHeight);
1119 }
1120 
1122  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1123  if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
1125  ptxTo, nIn, m_tx_out.nValue, cacheStore, txdata),
1126  metrics, &error)) {
1127  return false;
1128  }
1129  if ((pTxLimitSigChecks &&
1130  !pTxLimitSigChecks->consume_and_check(metrics.nSigChecks)) ||
1131  (pBlockLimitSigChecks &&
1132  !pBlockLimitSigChecks->consume_and_check(metrics.nSigChecks))) {
1133  // we can't assign a meaningful script error (since the script
1134  // succeeded), but remove the ScriptError::OK which could be
1135  // misinterpreted.
1137  return false;
1138  }
1139  return true;
1140 }
1141 
1142 int GetSpendHeight(const CCoinsViewCache &inputs) {
1143  LOCK(cs_main);
1144  CBlockIndex *pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1145  return pindexPrev->nHeight + 1;
1146 }
1147 
1149  const CCoinsViewCache &inputs, const uint32_t flags,
1150  bool sigCacheStore, bool scriptCacheStore,
1151  const PrecomputedTransactionData &txdata,
1152  int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
1153  CheckInputsLimiter *pBlockLimitSigChecks,
1154  std::vector<CScriptCheck> *pvChecks) {
1155  AssertLockHeld(cs_main);
1156  assert(!tx.IsCoinBase());
1157 
1158  if (pvChecks) {
1159  pvChecks->reserve(tx.vin.size());
1160  }
1161 
1162  // First check if script executions have been cached with the same flags.
1163  // Note that this assumes that the inputs provided are correct (ie that the
1164  // transaction hash which is in tx's prevouts properly commits to the
1165  // scriptPubKey in the inputs view of that transaction).
1166  ScriptCacheKey hashCacheEntry(tx, flags);
1167  if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
1168  if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
1169  (pBlockLimitSigChecks &&
1170  !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
1172  "too-many-sigchecks");
1173  }
1174  return true;
1175  }
1176 
1177  int nSigChecksTotal = 0;
1178 
1179  for (size_t i = 0; i < tx.vin.size(); i++) {
1180  const COutPoint &prevout = tx.vin[i].prevout;
1181  const Coin &coin = inputs.AccessCoin(prevout);
1182  assert(!coin.IsSpent());
1183 
1184  // We very carefully only pass in things to CScriptCheck which are
1185  // clearly committed to by tx's hash. This provides a sanity
1186  // check that our caching is not introducing consensus failures through
1187  // additional data in, eg, the coins being spent being checked as a part
1188  // of CScriptCheck.
1189 
1190  // Verify signature
1191  CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
1192  &txLimitSigChecks, pBlockLimitSigChecks);
1193 
1194  // If pvChecks is not null, defer the check execution to the caller.
1195  if (pvChecks) {
1196  pvChecks->push_back(std::move(check));
1197  continue;
1198  }
1199 
1200  if (!check()) {
1201  ScriptError scriptError = check.GetScriptError();
1202  // Compute flags without the optional standardness flags.
1203  // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
1204  // additional upgrade flags (see AcceptToMemoryPoolWorker variable
1205  // extraFlags).
1206  uint32_t mandatoryFlags =
1208  if (flags != mandatoryFlags) {
1209  // Check whether the failure was caused by a non-mandatory
1210  // script verification check. If so, ensure we return
1211  // NOT_STANDARD instead of CONSENSUS to avoid downstream users
1212  // splitting the network between upgraded and non-upgraded nodes
1213  // by banning CONSENSUS-failing data providers.
1214  CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
1215  sigCacheStore, txdata);
1216  if (check2()) {
1217  return state.Invalid(
1219  strprintf("non-mandatory-script-verify-flag (%s)",
1220  ScriptErrorString(scriptError)));
1221  }
1222  // update the error message to reflect the mandatory violation.
1223  scriptError = check2.GetScriptError();
1224  }
1225 
1226  // MANDATORY flag failures correspond to
1227  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
1228  // the most serious case of validation failures, we may need to
1229  // consider using RECENT_CONSENSUS_CHANGE for any script failure
1230  // that could be due to non-upgraded nodes which we may want to
1231  // support, to avoid splitting the network (but this depends on the
1232  // details of how net_processing handles such errors).
1233  return state.Invalid(
1235  strprintf("mandatory-script-verify-flag-failed (%s)",
1236  ScriptErrorString(scriptError)));
1237  }
1238 
1239  nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
1240  }
1241 
1242  nSigChecksOut = nSigChecksTotal;
1243 
1244  if (scriptCacheStore && !pvChecks) {
1245  // We executed all of the provided scripts, and were told to cache the
1246  // result. Do so now.
1247  AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
1248  }
1249 
1250  return true;
1251 }
1252 
// Serialize a block's undo data into the undo (rev) file, preceded by the
// network magic and payload size, and followed by a checksum committing to
// `hashBlock` and the undo payload. On success `pos.nPos` is set to the
// payload's file offset. Returns false on I/O failure.
//
// NOTE(review): this listing appears to be missing the declarations of
// `fileout` (a stream over the opened undo file) and `hasher` (the checksum
// writer) -- verify the exact stream setup against the repository.
1253 static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos,
1254  const BlockHash &hashBlock,
1255  const CMessageHeader::MessageMagic &messageStart) {
1256  // Open history file to append
1258  if (fileout.IsNull()) {
1259  return error("%s: OpenUndoFile failed", __func__);
1260  }
1261 
1262  // Write index header
1263  unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1264  fileout << messageStart << nSize;
1265 
1266  // Write undo data
1267  long fileOutPos = ftell(fileout.Get());
1268  if (fileOutPos < 0) {
1269  return error("%s: ftell failed", __func__);
1270  }
1271  pos.nPos = (unsigned int)fileOutPos;
1272  fileout << blockundo;
1273 
1274  // calculate & write checksum
1276  hasher << hashBlock;
1277  hasher << blockundo;
1278  fileout << hasher.GetHash();
1279 
1280  return true;
1281 }
1282 
1283 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) {
1284  FlatFilePos pos = pindex->GetUndoPos();
1285  if (pos.IsNull()) {
1286  return error("%s: no undo data available", __func__);
1287  }
1288 
1289  // Open history file to read
1290  CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1291  if (filein.IsNull()) {
1292  return error("%s: OpenUndoFile failed", __func__);
1293  }
1294 
1295  // Read block
1296  uint256 hashChecksum;
1297  // We need a CHashVerifier as reserializing may lose data
1298  CHashVerifier<CAutoFile> verifier(&filein);
1299  try {
1300  verifier << pindex->pprev->GetBlockHash();
1301  verifier >> blockundo;
1302  filein >> hashChecksum;
1303  } catch (const std::exception &e) {
1304  return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1305  }
1306 
1307  // Verify checksum
1308  if (hashChecksum != verifier.GetHash()) {
1309  return error("%s: Checksum mismatch", __func__);
1310  }
1311 
1312  return true;
1313 }
1314 
1316 static bool AbortNode(const std::string &strMessage,
1317  bilingual_str user_message = bilingual_str()) {
1318  SetMiscWarning(strMessage);
1319  LogPrintf("*** %s\n", strMessage);
1320  if (!user_message.empty()) {
1321  user_message =
1322  _("A fatal internal error occurred, see debug.log for details");
1323  }
1324  AbortError(user_message);
1325  StartShutdown();
1326  return false;
1327 }
1328 
1329 static bool AbortNode(BlockValidationState &state,
1330  const std::string &strMessage,
1331  const bilingual_str &userMessage = bilingual_str()) {
1332  AbortNode(strMessage, userMessage);
1333  return state.Error(strMessage);
1334 }
1335 
1338  const COutPoint &out) {
1339  bool fClean = true;
1340 
1341  if (view.HaveCoin(out)) {
1342  // Overwriting transaction output.
1343  fClean = false;
1344  }
1345 
1346  if (undo.GetHeight() == 0) {
1347  // Missing undo metadata (height and coinbase). Older versions included
1348  // this information only in undo records for the last spend of a
1349  // transactions' outputs. This implies that it must be present for some
1350  // other output of the same tx.
1351  const Coin &alternate = AccessByTxid(view, out.GetTxId());
1352  if (alternate.IsSpent()) {
1353  // Adding output for transaction without known metadata
1354  return DisconnectResult::FAILED;
1355  }
1356 
1357  // This is somewhat ugly, but hopefully utility is limited. This is only
1358  // useful when working from legacy on disk data. In any case, putting
1359  // the correct information in there doesn't hurt.
1360  const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
1361  alternate.IsCoinBase());
1362  }
1363 
1364  // If the coin already exists as an unspent coin in the cache, then the
1365  // possible_overwrite parameter to AddCoin must be set to true. We have
1366  // already checked whether an unspent coin exists above using HaveCoin, so
1367  // we don't need to guess. When fClean is false, an unspent coin already
1368  // existed and it is an overwrite.
1369  view.AddCoin(out, std::move(undo), !fClean);
1370 
1372 }
1373 
1379  const CBlockIndex *pindex,
1380  CCoinsViewCache &view) {
1381  CBlockUndo blockUndo;
1382  if (!UndoReadFromDisk(blockUndo, pindex)) {
1383  error("DisconnectBlock(): failure reading undo data");
1384  return DisconnectResult::FAILED;
1385  }
1386 
1387  return ApplyBlockUndo(blockUndo, block, pindex, view);
1388 }
1389 
1391  const CBlock &block, const CBlockIndex *pindex,
1392  CCoinsViewCache &view) {
1393  bool fClean = true;
1394 
1395  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1396  error("DisconnectBlock(): block and undo data inconsistent");
1397  return DisconnectResult::FAILED;
1398  }
1399 
1400  // First, restore inputs.
1401  for (size_t i = 1; i < block.vtx.size(); i++) {
1402  const CTransaction &tx = *(block.vtx[i]);
1403  const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
1404  if (txundo.vprevout.size() != tx.vin.size()) {
1405  error("DisconnectBlock(): transaction and undo data inconsistent");
1406  return DisconnectResult::FAILED;
1407  }
1408 
1409  for (size_t j = 0; j < tx.vin.size(); j++) {
1410  const COutPoint &out = tx.vin[j].prevout;
1411  const Coin &undo = txundo.vprevout[j];
1412  DisconnectResult res = UndoCoinSpend(undo, view, out);
1413  if (res == DisconnectResult::FAILED) {
1414  return DisconnectResult::FAILED;
1415  }
1416  fClean = fClean && res != DisconnectResult::UNCLEAN;
1417  }
1418  }
1419 
1420  // Second, revert created outputs.
1421  for (const auto &ptx : block.vtx) {
1422  const CTransaction &tx = *ptx;
1423  const TxId &txid = tx.GetId();
1424  const bool is_coinbase = tx.IsCoinBase();
1425 
1426  // Check that all outputs are available and match the outputs in the
1427  // block itself exactly.
1428  for (size_t o = 0; o < tx.vout.size(); o++) {
1429  if (tx.vout[o].scriptPubKey.IsUnspendable()) {
1430  continue;
1431  }
1432 
1433  COutPoint out(txid, o);
1434  Coin coin;
1435  bool is_spent = view.SpendCoin(out, &coin);
1436  if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
1437  uint32_t(pindex->nHeight) != coin.GetHeight() ||
1438  is_coinbase != coin.IsCoinBase()) {
1439  // transaction output mismatch
1440  fClean = false;
1441  }
1442  }
1443  }
1444 
1445  // Move best block pointer to previous block.
1446  view.SetBestBlock(block.hashPrevBlock);
1447 
1449 }
1450 
1451 static void FlushUndoFile(int block_file, bool finalize = false) {
1452  FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
1453  if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
1454  AbortNode("Flushing undo file to disk failed. This is likely the "
1455  "result of an I/O error.");
1456  }
1457 }
1458 
1459 static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false) {
1460  LOCK(cs_LastBlockFile);
1461  FlatFilePos block_pos_old(nLastBlockFile,
1462  vinfoBlockFile[nLastBlockFile].nSize);
1463  if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
1464  AbortNode("Flushing block file to disk failed. This is likely the "
1465  "result of an I/O error.");
1466  }
1467  // we do not always flush the undo file, as the chain tip may be lagging
1468  // behind the incoming blocks,
1469  // e.g. during IBD or a sync after a node going offline
1470  if (!fFinalize || finalize_undo) {
1471  FlushUndoFile(nLastBlockFile, finalize_undo);
1472  }
1473 }
1474 
1475 static bool FindUndoPos(BlockValidationState &state, int nFile,
1476  FlatFilePos &pos, unsigned int nAddSize);
1477 
1478 static bool WriteUndoDataForBlock(const CBlockUndo &blockundo,
1479  BlockValidationState &state,
1480  CBlockIndex *pindex,
1481  const CChainParams &chainparams) {
1482  // Write undo information to disk
1483  if (pindex->GetUndoPos().IsNull()) {
1484  FlatFilePos _pos;
1485  if (!FindUndoPos(state, pindex->nFile, _pos,
1486  ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
1487  return error("ConnectBlock(): FindUndoPos failed");
1488  }
1489  if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
1490  chainparams.DiskMagic())) {
1491  return AbortNode(state, "Failed to write undo data");
1492  }
1493  // rev files are written in block height order, whereas blk files are
1494  // written as blocks come in (often out of order) we want to flush the
1495  // rev (undo) file once we've written the last block, which is indicated
1496  // by the last height in the block file info as below; note that this
1497  // does not catch the case where the undo writes are keeping up with the
1498  // block writes (usually when a synced up node is getting newly mined
1499  // blocks) -- this case is caught in the FindBlockPos function
1500  if (_pos.nFile < nLastBlockFile &&
1501  static_cast<uint32_t>(pindex->nHeight) ==
1502  vinfoBlockFile[_pos.nFile].nHeightLast) {
1503  FlushUndoFile(_pos.nFile, true);
1504  }
1505 
1506  // update nUndoPos in block index
1507  pindex->nUndoPos = _pos.nPos;
1508  pindex->nStatus = pindex->nStatus.withUndo();
1509  setDirtyBlockIndex.insert(pindex);
1510  }
1511 
1512  return true;
1513 }
1514 
1516 
1517 void ThreadScriptCheck(int worker_num) {
1518  util::ThreadRename(strprintf("scriptch.%i", worker_num));
1519  scriptcheckqueue.Thread();
1520 }
1521 
/** Cache of version-bits deployment threshold states; guarded by cs_main. */
1522 VersionBitsCache versionbitscache GUARDED_BY(cs_main);
1523 
// Compute the nVersion field for a block built on top of `pindexPrev`:
// starting from VERSIONBITS_TOP_BITS, set the bit of every version-bits
// deployment currently in the STARTED or LOCKED_IN state, then clear the low
// 4 bits reserved for miner fund activation signaling.
//
// NOTE(review): the line declaring `state` (presumably a VersionBitsState()
// call) is missing from this listing -- verify against the repository.
1524 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
1525  const Consensus::Params &params) {
1526  LOCK(cs_main);
1527  int32_t nVersion = VERSIONBITS_TOP_BITS;
1528 
1529  for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1531  pindexPrev, params, static_cast<Consensus::DeploymentPos>(i),
1532  versionbitscache);
1533  if (state == ThresholdState::LOCKED_IN ||
1534  state == ThresholdState::STARTED) {
1535  nVersion |= VersionBitsMask(
1536  params, static_cast<Consensus::DeploymentPos>(i));
1537  }
1538  }
1539 
1540  // Clear the last 4 bits (miner fund activation).
1541  return nVersion & ~uint32_t(0x0f);
1542 }
1543 
1544 // Returns the script flags which should be checked for the block after
1545 // the given block.
// Accumulate the script-verification flags that apply to the block AFTER
// `pindex`, based on which height-based rules and network upgrades are active
// at that point.
//
// NOTE(review): several `flags |= ...;` lines are missing from this listing
// (the empty conditional bodies below) -- verify each against the repository.
1546 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
1547  const CBlockIndex *pindex) {
1548  uint32_t flags = SCRIPT_VERIFY_NONE;
1549 
1550  // Start enforcing P2SH (BIP16)
1551  if ((pindex->nHeight + 1) >= params.BIP16Height) {
1552  flags |= SCRIPT_VERIFY_P2SH;
1553  }
1554 
1555  // Start enforcing the DERSIG (BIP66) rule.
1556  if ((pindex->nHeight + 1) >= params.BIP66Height) {
1557  flags |= SCRIPT_VERIFY_DERSIG;
1558  }
1559 
1560  // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
1561  if ((pindex->nHeight + 1) >= params.BIP65Height) {
  // NOTE(review): flag assignment missing in this listing.
1563  }
1564 
1565  // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
1566  if ((pindex->nHeight + 1) >= params.CSVHeight) {
  // NOTE(review): flag assignment missing in this listing.
1568  }
1569 
1570  // If the UAHF is enabled, we start accepting replay protected txns
1571  if (IsUAHFenabled(params, pindex)) {
1572  flags |= SCRIPT_VERIFY_STRICTENC;
  // NOTE(review): a second flag assignment is missing in this listing.
1574  }
1575 
1576  // If the DAA HF is enabled, we start rejecting transactions that use a high
1577  // s in their signature. We also make sure that signatures that are supposed
1578  // to fail (for instance in multisig or other forms of smart contracts) are
1579  // null.
1580  if (IsDAAEnabled(params, pindex)) {
1581  flags |= SCRIPT_VERIFY_LOW_S;
1582  flags |= SCRIPT_VERIFY_NULLFAIL;
1583  }
1584 
1585  // When the magnetic anomaly fork is enabled, we start accepting
1586  // transactions using the OP_CHECKDATASIG opcode and its verify
1587  // alternative. We also start enforcing push only signatures and
1588  // clean stack.
1589  if (IsMagneticAnomalyEnabled(params, pindex)) {
  // NOTE(review): a flag assignment is missing in this listing.
1591  flags |= SCRIPT_VERIFY_SIGPUSHONLY;
1592  flags |= SCRIPT_VERIFY_CLEANSTACK;
1593  }
1594 
1595  if (IsGravitonEnabled(params, pindex)) {
  // NOTE(review): a flag assignment is missing in this listing.
1597  flags |= SCRIPT_VERIFY_MINIMALDATA;
1598  }
1599 
1600  if (IsPhononEnabled(params, pindex)) {
1601  flags |= SCRIPT_ENFORCE_SIGCHECKS;
1602  }
1603 
1604  // We make sure this node will have replay protection during the next hard
1605  // fork.
1606  if (IsReplayProtectionEnabled(params, pindex)) {
  // NOTE(review): flag assignment missing in this listing.
1608  }
1609 
1610  return flags;
1611 }
1612 
// Cumulative wall-clock accounting (microseconds) for the phases of block
// connection, reported via BCLog::BENCH log lines; nBlocksTotal counts
// connected blocks so per-block averages can be derived.
1613 static int64_t nTimeCheck = 0;
1614 static int64_t nTimeForks = 0;
1615 static int64_t nTimeVerify = 0;
1616 static int64_t nTimeConnect = 0;
1617 static int64_t nTimeIndex = 0;
1618 static int64_t nTimeCallbacks = 0;
1619 static int64_t nTimeTotal = 0;
1620 static int64_t nBlocksTotal = 0;
1621 
1629  CBlockIndex *pindex, CCoinsViewCache &view,
1630  const CChainParams &params,
1631  BlockValidationOptions options,
1632  bool fJustCheck) {
1633  AssertLockHeld(cs_main);
1634  assert(pindex);
1635  assert(*pindex->phashBlock == block.GetHash());
1636  int64_t nTimeStart = GetTimeMicros();
1637 
1638  const Consensus::Params &consensusParams = params.GetConsensus();
1639 
1640  // Check it again in case a previous version let a bad block in
1641  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1642  // ContextualCheckBlockHeader() here. This means that if we add a new
1643  // consensus rule that is enforced in one of those two functions, then we
1644  // may have let in a block that violates the rule prior to updating the
1645  // software, and we would NOT be enforcing the rule here. Fully solving
1646  // upgrade from one software version to the next after a consensus rule
1647  // change is potentially tricky and issue-specific.
1648  // Also, currently the rule against blocks more than 2 hours in the future
1649  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1650  // re-enforce that rule here (at least until we make it impossible for
1651  // GetAdjustedTime() to go backward).
1652  if (!CheckBlock(block, state, consensusParams,
1653  options.withCheckPoW(!fJustCheck)
1654  .withCheckMerkleRoot(!fJustCheck))) {
1656  // We don't write down blocks to disk if they may have been
1657  // corrupted, so this should be impossible unless we're having
1658  // hardware problems.
1659  return AbortNode(state, "Corrupt block found indicating potential "
1660  "hardware failure; shutting down");
1661  }
1662  return error("%s: Consensus::CheckBlock: %s", __func__,
1663  state.ToString());
1664  }
1665 
1666  // Verify that the view's current state corresponds to the previous block
1667  BlockHash hashPrevBlock =
1668  pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
1669  assert(hashPrevBlock == view.GetBestBlock());
1670 
1671  nBlocksTotal++;
1672 
1673  // Special case for the genesis block, skipping connection of its
1674  // transactions (its coinbase is unspendable)
1675  if (block.GetHash() == consensusParams.hashGenesisBlock) {
1676  if (!fJustCheck) {
1677  view.SetBestBlock(pindex->GetBlockHash());
1678  }
1679 
1680  return true;
1681  }
1682 
1683  bool fScriptChecks = true;
1684  if (!hashAssumeValid.IsNull()) {
1685  // We've been configured with the hash of a block which has been
1686  // externally verified to have a valid history. A suitable default value
1687  // is included with the software and updated from time to time. Because
1688  // validity relative to a piece of software is an objective fact these
1689  // defaults can be easily reviewed. This setting doesn't force the
1690  // selection of any particular chain but makes validating some faster by
1691  // effectively caching the result of part of the verification.
1692  BlockMap::const_iterator it =
1693  m_blockman.m_block_index.find(hashAssumeValid);
1694  if (it != m_blockman.m_block_index.end()) {
1695  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1696  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1697  pindexBestHeader->nChainWork >= nMinimumChainWork) {
1698  // This block is a member of the assumed verified chain and an
1699  // ancestor of the best header.
1700  // Script verification is skipped when connecting blocks under
1701  // the assumevalid block. Assuming the assumevalid block is
1702  // valid this is safe because block merkle hashes are still
1703  // computed and checked. Of course, if an assumed valid block is
1704  // invalid due to false scriptSigs this optimization would allow
1705  // an invalid chain to be accepted.
1706  // The equivalent time check discourages hash power from
1707  // extorting the network via DOS attack into accepting an
1708  // invalid block through telling users they must manually set
1709  // assumevalid. Requiring a software change or burying the
1710  // invalid block, regardless of the setting, makes it hard to
1711  // hide the implication of the demand. This also avoids having
1712  // release candidates that are hardly doing any signature
1713  // verification at all in testing without having to artificially
1714  // set the default assumed verified block further back. The test
1715  // against nMinimumChainWork prevents the skipping when denied
1716  // access to any chain at least as good as the expected chain.
1717  fScriptChecks =
1719  *pindexBestHeader, *pindex, *pindexBestHeader,
1720  consensusParams) <= 60 * 60 * 24 * 7 * 2);
1721  }
1722  }
1723  }
1724 
1725  int64_t nTime1 = GetTimeMicros();
1726  nTimeCheck += nTime1 - nTimeStart;
1727  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1728  MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
1729  nTimeCheck * MILLI / nBlocksTotal);
1730 
1731  // Do not allow blocks that contain transactions which 'overwrite' older
1732  // transactions, unless those are already completely spent. If such
1733  // overwrites are allowed, coinbases and transactions depending upon those
1734  // can be duplicated to remove the ability to spend the first instance --
1735  // even after being sent to another address.
1736  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
1737  // for more information. This logic is not necessary for memory pool
1738  // transactions, as AcceptToMemoryPool already refuses previously-known
1739  // transaction ids entirely. This rule was originally applied to all blocks
1740  // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
1741  // chain is irreversibly beyond that time it is applied to all blocks
1742  // except the two in the chain that violate it. This prevents exploiting
1743  // the issue against nodes during their initial block download.
1744  bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
1745  pindex->GetBlockHash() ==
1746  uint256S("0x00000000000a4d0a398161ffc163c503763"
1747  "b1f4360639393e0e4c8e300e0caec")) ||
1748  (pindex->nHeight == 91880 &&
1749  pindex->GetBlockHash() ==
1750  uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
1751  "610ae9601ac046a38084ccb7cd721")));
1752 
1753  // Once BIP34 activated it was not possible to create new duplicate
1754  // coinbases and thus other than starting with the 2 existing duplicate
1755  // coinbase pairs, not possible to create overwriting txs. But by the time
1756  // BIP34 activated, in each of the existing pairs the duplicate coinbase had
1757  // overwritten the first before the first had been spent. Since those
1758  // coinbases are sufficiently buried it's no longer possible to create
1759  // further duplicate transactions descending from the known pairs either. If
1760  // we're on the known chain at height greater than where BIP34 activated, we
1761  // can save the db accesses needed for the BIP30 check.
1762 
1763  // BIP34 requires that a block at height X (block X) has its coinbase
1764  // scriptSig start with a CScriptNum of X (indicated height X). The above
1765  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1766  // case that there is a block X before the BIP34 height of 227,931 which has
1767  // an indicated height Y where Y is greater than X. The coinbase for block
1768  // X would also be a valid coinbase for block Y, which could be a BIP30
1769  // violation. An exhaustive search of all mainnet coinbases before the
1770  // BIP34 height which have an indicated height greater than the block height
1771  // reveals many occurrences. The 3 lowest indicated heights found are
1772  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1773  // heights would be the first opportunity for BIP30 to be violated.
1774 
1775  // The search reveals a great many blocks which have an indicated height
1776  // greater than 1,983,702, so we simply remove the optimization to skip
1777  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1778  // that block in another 25 years or so, we should take advantage of a
1779  // future consensus change to do a new and improved version of BIP34 that
1780  // will actually prevent ever creating any duplicate coinbases in the
1781  // future.
1782  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1783 
1784  // There is no potential to create a duplicate coinbase at block 209,921
1785  // because this is still before the BIP34 height and so explicit BIP30
1786  // checking is still active.
1787 
1788  // The final case is block 176,684 which has an indicated height of
1789  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1790  // before block 490,897 so there was not much opportunity to address this
1791  // case other than to carefully analyze it and determine it would not be a
1792  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1793  // block 176,684, but it is important to note that even if it hadn't been or
1794  // is remined on an alternate fork with a duplicate coinbase, we would still
1795  // not run into a BIP30 violation. This is because the coinbase for 176,684
1796  // is spent in block 185,956 in transaction
1797  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1798  // spending transaction can't be duplicated because it also spends coinbase
1799  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1800  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1801  // duplicatable until that height, and it's currently impossible to create a
1802  // chain that long. Nevertheless we may wish to consider a future soft fork
1803  // which retroactively prevents block 490,897 from creating a duplicate
1804  // coinbase. The two historical BIP30 violations often provide a confusing
1805  // edge case when manipulating the UTXO and it would be simpler not to have
1806  // another edge case to deal with.
1807 
1808  // testnet3 has no blocks before the BIP34 height with indicated heights
1809  // post BIP34 before approximately height 486,000,000 and presumably will
1810  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1811  // BIP30 checking again.
1812  assert(pindex->pprev);
1813  CBlockIndex *pindexBIP34height =
1814  pindex->pprev->GetAncestor(consensusParams.BIP34Height);
1815  // Only continue to enforce if we're below BIP34 activation height or the
1816  // block hash at that height doesn't correspond.
1817  fEnforceBIP30 =
1818  fEnforceBIP30 &&
1819  (!pindexBIP34height ||
1820  !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
1821 
1822  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
1823  // a consensus change that ensures coinbases at those heights can not
1824  // duplicate earlier coinbases.
1825  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1826  for (const auto &tx : block.vtx) {
1827  for (size_t o = 0; o < tx->vout.size(); o++) {
1828  if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
1829  LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
1830  "transaction\n");
1832  "bad-txns-BIP30");
1833  }
1834  }
1835  }
1836  }
1837 
1838  // Start enforcing BIP68 (sequence locks).
1839  int nLockTimeFlags = 0;
1840  if (pindex->nHeight >= consensusParams.CSVHeight) {
1841  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1842  }
1843 
1844  const uint32_t flags =
1845  GetNextBlockScriptFlags(consensusParams, pindex->pprev);
1846 
1847  int64_t nTime2 = GetTimeMicros();
1848  nTimeForks += nTime2 - nTime1;
1849  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1850  MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
1851  nTimeForks * MILLI / nBlocksTotal);
1852 
1853  std::vector<int> prevheights;
1854  Amount nFees = Amount::zero();
1855  int nInputs = 0;
1856 
1857  // Limit the total executed signature operations in the block, a consensus
1858  // rule. Tracking during the CPU-consuming part (validation of uncached
1859  // inputs) is per-input atomic and validation in each thread stops very
1860  // quickly after the limit is exceeded, so an adversary cannot cause us to
1861  // exceed the limit by much at all.
1862  CheckInputsLimiter nSigChecksBlockLimiter(
1864 
1865  std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
1866  nSigChecksTxLimiters.resize(block.vtx.size() - 1);
1867 
1868  CBlockUndo blockundo;
1869  blockundo.vtxundo.resize(block.vtx.size() - 1);
1870 
1871  CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
1872  : nullptr);
1873 
1874  // Add all outputs
1875  try {
1876  for (const auto &ptx : block.vtx) {
1877  AddCoins(view, *ptx, pindex->nHeight);
1878  }
1879  } catch (const std::logic_error &e) {
1880  // This error will be thrown from AddCoin if we try to connect a block
1881  // containing duplicate transactions. Such a thing should normally be
1882  // caught early nowadays (due to ContextualCheckBlock's CTOR
1883  // enforcement) however some edge cases can escape that:
1884  // - ContextualCheckBlock does not get re-run after saving the block to
1885  // disk, and older versions may have saved a weird block.
1886  // - its checks are not applied to pre-CTOR chains, which we might visit
1887  // with checkpointing off.
1888  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
1890  "tx-duplicate");
1891  }
1892 
1893  size_t txIndex = 0;
1894  for (const auto &ptx : block.vtx) {
1895  const CTransaction &tx = *ptx;
1896  const bool isCoinBase = tx.IsCoinBase();
1897  nInputs += tx.vin.size();
1898 
1899  {
1900  Amount txfee = Amount::zero();
1901  TxValidationState tx_state;
1902  if (!isCoinBase &&
1903  !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
1904  txfee)) {
1905  // Any transaction validation failure in ConnectBlock is a block
1906  // consensus failure.
1908  tx_state.GetRejectReason(),
1909  tx_state.GetDebugMessage());
1910 
1911  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
1912  tx.GetId().ToString(), state.ToString());
1913  }
1914  nFees += txfee;
1915  }
1916 
1917  if (!MoneyRange(nFees)) {
1918  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
1919  __func__);
1921  "bad-txns-accumulated-fee-outofrange");
1922  }
1923 
1924  // The following checks do not apply to the coinbase.
1925  if (isCoinBase) {
1926  continue;
1927  }
1928 
1929  // Check that transaction is BIP68 final BIP68 lock checks (as
1930  // opposed to nLockTime checks) must be in ConnectBlock because they
1931  // require the UTXO set.
1932  prevheights.resize(tx.vin.size());
1933  for (size_t j = 0; j < tx.vin.size(); j++) {
1934  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
1935  }
1936 
1937  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
1938  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
1939  __func__);
1941  "bad-txns-nonfinal");
1942  }
1943 
1944  // Don't cache results if we're actually connecting blocks (still
1945  // consult the cache, though).
1946  bool fCacheResults = fJustCheck;
1947 
1948  const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
1949  if (!fEnforceSigCheck) {
1950  // Historically, there has been transactions with a very high
1951  // sigcheck count, so we need to disable this check for such
1952  // transactions.
1953  nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
1954  }
1955 
1956  std::vector<CScriptCheck> vChecks;
1957  // nSigChecksRet may be accurate (found in cache) or 0 (checks were
1958  // deferred into vChecks).
1959  int nSigChecksRet;
1960  TxValidationState tx_state;
1961  if (fScriptChecks &&
1962  !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
1963  fCacheResults, PrecomputedTransactionData(tx),
1964  nSigChecksRet, nSigChecksTxLimiters[txIndex],
1965  &nSigChecksBlockLimiter, &vChecks)) {
1966  // Any transaction validation failure in ConnectBlock is a block
1967  // consensus failure
1969  tx_state.GetRejectReason(),
1970  tx_state.GetDebugMessage());
1971  return error(
1972  "ConnectBlock(): CheckInputScripts on %s failed with %s",
1973  tx.GetId().ToString(), state.ToString());
1974  }
1975 
1976  control.Add(vChecks);
1977 
1978  // Note: this must execute in the same iteration as CheckTxInputs (not
1979  // in a separate loop) in order to detect double spends. However,
1980  // this does not prevent double-spending by duplicated transaction
1981  // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
1982  // done in CheckBlock (CheckRegularTransaction).
1983  SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
1984  txIndex++;
1985  }
1986 
1987  int64_t nTime3 = GetTimeMicros();
1988  nTimeConnect += nTime3 - nTime2;
1990  " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
1991  "[%.2fs (%.2fms/blk)]\n",
1992  (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
1993  MILLI * (nTime3 - nTime2) / block.vtx.size(),
1994  nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
1995  nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
1996 
1997  Amount blockReward =
1998  nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
1999  if (block.vtx[0]->GetValueOut() > blockReward) {
2000  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
2001  "limit=%d)\n",
2002  block.vtx[0]->GetValueOut(), blockReward);
2004  "bad-cb-amount");
2005  }
2006 
2007  const std::vector<CTxDestination> whitelist =
2008  GetMinerFundWhitelist(consensusParams, pindex->pprev);
2009  if (!whitelist.empty()) {
2010  const Amount required = GetMinerFundAmount(blockReward);
2011 
2012  for (auto &o : block.vtx[0]->vout) {
2013  if (o.nValue < required) {
2014  // This output doesn't qualify because its amount is too low.
2015  continue;
2016  }
2017 
2018  CTxDestination address;
2019  if (!ExtractDestination(o.scriptPubKey, address)) {
2020  // Cannot decode address.
2021  continue;
2022  }
2023 
2024  if (std::find(whitelist.begin(), whitelist.end(), address) !=
2025  whitelist.end()) {
2026  goto MinerFundSuccess;
2027  }
2028  }
2029 
2030  // We did not find an output that match the miner fund requirements.
2032  "bad-cb-minerfund");
2033  }
2034 
2035 MinerFundSuccess:
2036 
2037  if (!control.Wait()) {
2039  "blk-bad-inputs", "parallel script check failed");
2040  }
2041 
2042  int64_t nTime4 = GetTimeMicros();
2043  nTimeVerify += nTime4 - nTime2;
2044  LogPrint(
2045  BCLog::BENCH,
2046  " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2047  nInputs - 1, MILLI * (nTime4 - nTime2),
2048  nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2049  nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2050 
2051  if (fJustCheck) {
2052  return true;
2053  }
2054 
2055  if (!WriteUndoDataForBlock(blockundo, state, pindex, params)) {
2056  return false;
2057  }
2058 
2059  if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2061  setDirtyBlockIndex.insert(pindex);
2062  }
2063 
2064  assert(pindex->phashBlock);
2065  // add this block to the view's block chain
2066  view.SetBestBlock(pindex->GetBlockHash());
2067 
2068  int64_t nTime5 = GetTimeMicros();
2069  nTimeIndex += nTime5 - nTime4;
2070  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2071  MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
2072  nTimeIndex * MILLI / nBlocksTotal);
2073 
2074  int64_t nTime6 = GetTimeMicros();
2075  nTimeCallbacks += nTime6 - nTime5;
2076  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n",
2077  MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO,
2078  nTimeCallbacks * MILLI / nBlocksTotal);
2079 
2080  return true;
2081 }
2082 
// Convenience overload: classify the coins-cache memory usage using the
// configured coinstip cache budget plus the -maxmempool limit (given in MB
// on the command line, converted to bytes here via * 1000000).
// NOTE(review): the return-type line (original line 2083) was lost in the
// doxygen extraction — presumably `CoinsCacheSizeState`; confirm upstream.
2084 CChainState::GetCoinsCacheSizeState(const CTxMemPool &tx_pool) {
2085  return this->GetCoinsCacheSizeState(
2086  tx_pool, m_coinstip_cache_size_bytes,
2087  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2088 }
2089 
// Classify the current coins-cache usage against its budget. The effective
// budget is the coins cache size plus any headroom the mempool is not using
// (unused -maxmempool space is lent to the coins cache).
// NOTE(review): extraction dropped the return-type line (2090) and the
// return statements inside the two branches below (2109 and 2111) —
// presumably `return CoinsCacheSizeState::CRITICAL;` and
// `return CoinsCacheSizeState::LARGE;` respectively; confirm upstream.
2091 CChainState::GetCoinsCacheSizeState(const CTxMemPool &tx_pool,
2092  size_t max_coins_cache_size_bytes,
2093  size_t max_mempool_size_bytes) {
2094  int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
2095  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
2096  int64_t nTotalSpace =
2097  max_coins_cache_size_bytes +
2098  std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2099 
 // "Large" means within 10% or 10MB of the total budget, whichever bound
 // is hit first.
2101  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2102  10 * 1024 * 1024; // 10MB
2103  int64_t large_threshold = std::max(
2104  (9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2105 
2106  if (cacheSize > nTotalSpace) {
2107  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2108  nTotalSpace);
2110  } else if (cacheSize > large_threshold) {
2112  }
2113  return CoinsCacheSizeState::OK;
2114 }
2115 
// Flush chainstate and/or block-index data to disk, depending on `mode`:
// - ALWAYS: unconditional full flush;
// - PERIODIC: flush/write only if the respective interval elapsed or the
//   cache grew LARGE;
// - IF_NEEDED: flush only when the cache is CRITICAL (over budget);
// plus a forced flush when files are about to be pruned.
// Returns false (via AbortNode) on disk-space or database-write failure.
// NOTE(review): several original lines were dropped by the doxygen
// extraction inside this function (2140, 2194, 2242, 2270) — apparently
// LOG_TIME_* openers, a strprintf-logging opener, and the
// ChainStateFlushed notification; confirm against the upstream file.
2116 bool CChainState::FlushStateToDisk(const CChainParams &chainparams,
2117  BlockValidationState &state,
2118  FlushStateMode mode,
2119  int nManualPruneHeight) {
2120  LOCK(cs_main);
2121  assert(this->CanFlushToDisk());
 // Function-local statics: persisted across calls to implement the
 // periodic write/flush intervals.
2122  static std::chrono::microseconds nLastWrite{0};
2123  static std::chrono::microseconds nLastFlush{0};
2124  std::set<int> setFilesToPrune;
2125  bool full_flush_completed = false;
2126 
2127  const size_t coins_count = CoinsTip().GetCacheSize();
2128  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2129 
2130  try {
2131  {
2132  bool fFlushForPrune = false;
2133  bool fDoFullFlush = false;
2134  CoinsCacheSizeState cache_state =
2135  GetCoinsCacheSizeState(::g_mempool);
2136  LOCK(cs_LastBlockFile);
 // Decide which block files can be pruned (manual prune takes a
 // target height; automatic prune respects PruneAfterHeight).
2137  if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
2138  !fReindex) {
2139  if (nManualPruneHeight > 0) {
2141  "find files to prune (manual)", BCLog::BENCH);
2142  FindFilesToPruneManual(g_chainman, setFilesToPrune,
2143  nManualPruneHeight);
2144  } else {
2145  LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune",
2146  BCLog::BENCH);
2147  FindFilesToPrune(g_chainman, setFilesToPrune,
2148  chainparams.PruneAfterHeight());
2149  fCheckForPruning = false;
2150  }
2151  if (!setFilesToPrune.empty()) {
2152  fFlushForPrune = true;
 // Record on first prune so a restarted node knows the
 // block files are incomplete.
2153  if (!fHavePruned) {
2154  pblocktree->WriteFlag("prunedblockfiles", true);
2155  fHavePruned = true;
2156  }
2157  }
2158  }
2159  const auto nNow = GetTime<std::chrono::microseconds>();
2160  // Avoid writing/flushing immediately after startup.
2161  if (nLastWrite.count() == 0) {
2162  nLastWrite = nNow;
2163  }
2164  if (nLastFlush.count() == 0) {
2165  nLastFlush = nNow;
2166  }
2167  // The cache is large and we're within 10% and 10 MiB of the limit,
2168  // but we have time now (not in the middle of a block processing).
2169  bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2170  cache_state >= CoinsCacheSizeState::LARGE;
2171  // The cache is over the limit, we have to write now.
2172  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
2173  cache_state >= CoinsCacheSizeState::CRITICAL;
2174  // It's been a while since we wrote the block index to disk. Do this
2175  // frequently, so we don't need to redownload after a crash.
2176  bool fPeriodicWrite = mode == FlushStateMode::PERIODIC &&
2177  nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
2178  // It's been very long since we flushed the cache. Do this
2179  // infrequently, to optimize cache usage.
2180  bool fPeriodicFlush = mode == FlushStateMode::PERIODIC &&
2181  nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
2182  // Combine all conditions that result in a full cache flush.
2183  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
2184  fCacheCritical || fPeriodicFlush || fFlushForPrune;
2185  // Write blocks and block index to disk.
2186  if (fDoFullFlush || fPeriodicWrite) {
2187  // Depend on nMinDiskSpace to ensure we can write block index
2188  if (!CheckDiskSpace(GetBlocksDir())) {
2189  return AbortNode(state, "Disk space is too low!",
2190  _("Disk space is too low!"));
2191  }
2192 
2193  {
 // NOTE(review): LOG_TIME_* opener (line 2194) lost in
 // extraction.
2195  "write block and undo data to disk", BCLog::BENCH);
2196 
2197  // First make sure all block and undo data is flushed to
2198  // disk.
2199  FlushBlockFile();
2200  }
2201  // Then update all block file information (which may refer to
2202  // block and undo files).
2203  {
2204  LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk",
2205  BCLog::BENCH);
2206 
2207  std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
2208  vFiles.reserve(setDirtyFileInfo.size());
2209  for (int i : setDirtyFileInfo) {
2210  vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i]));
2211  }
2212 
2213  setDirtyFileInfo.clear();
2214 
2215  std::vector<const CBlockIndex *> vBlocks;
2216  vBlocks.reserve(setDirtyBlockIndex.size());
2217  for (const CBlockIndex *cbi : setDirtyBlockIndex) {
2218  vBlocks.push_back(cbi);
2219  }
2220 
2221  setDirtyBlockIndex.clear();
2222 
2223  if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
2224  vBlocks)) {
2225  return AbortNode(
2226  state, "Failed to write to block index database");
2227  }
2228  }
2229 
2230  // Finally remove any pruned files
2231  if (fFlushForPrune) {
2232  LOG_TIME_MILLIS_WITH_CATEGORY("unlink pruned files",
2233  BCLog::BENCH);
2234 
2235  UnlinkPrunedFiles(setFilesToPrune);
2236  }
2237  nLastWrite = nNow;
2238  }
2239  // Flush best chain related state. This can only be done if the
2240  // blocks / block index write was also done.
2241  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
 // NOTE(review): logging-scope opener (line 2242) lost in
 // extraction.
2243  strprintf("write coins cache to disk (%d coins, %.2fkB)",
2244  coins_count, coins_mem_usage / 1000));
2245 
2246  // Typical Coin structures on disk are around 48 bytes in size.
2247  // Pushing a new one to the database can cause it to be written
2248  // twice (once in the log, and once in the tables). This is
2249  // already an overestimation, as most will delete an existing
2250  // entry or overwrite one. Still, use a conservative safety
2251  // factor of 2.
2252  if (!CheckDiskSpace(GetDataDir(),
2253  48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2254  return AbortNode(state, "Disk space is too low!",
2255  _("Disk space is too low!"));
2256  }
2257 
2258  // Flush the chainstate (which may refer to block index
2259  // entries).
2260  if (!CoinsTip().Flush()) {
2261  return AbortNode(state, "Failed to write to coin database");
2262  }
2263  nLastFlush = nNow;
2264  full_flush_completed = true;
2265  }
2266  }
2267 
2268  if (full_flush_completed) {
2269  // Update best block in wallet (so we can detect restored wallets).
 // NOTE(review): the notification call (line 2270, presumably
 // GetMainSignals().ChainStateFlushed(...)) lost in extraction.
2271  }
2272  } catch (const std::runtime_error &e) {
2273  return AbortNode(state, std::string("System error while flushing: ") +
2274  e.what());
2275  }
2276  return true;
2277 }
2278 
// Unconditionally flush chainstate to disk (FlushStateMode::ALWAYS),
// logging (but not propagating) any failure.
// NOTE(review): the signature line (original 2279, presumably
// `void CChainState::ForceFlushStateToDisk() {`) was lost in extraction.
2280  BlockValidationState state;
2281  const CChainParams &chainparams = Params();
2282  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2283  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2284  state.ToString());
2285  }
2286 }
2287 
// Request pruning on the next flush (fCheckForPruning = true) and invoke
// FlushStateToDisk with mode NONE; failures are logged, not propagated.
// NOTE(review): the signature line (original 2288, presumably
// `void CChainState::PruneAndFlush() {`) was lost in extraction.
2289  BlockValidationState state;
2290  fCheckForPruning = true;
2291  const CChainParams &chainparams = Params();
2292  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2293  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2294  state.ToString());
2295  }
2296 }
2297 
// Update bookkeeping after the active chain tip changes: bump the
// mempool's transactions-updated counter, publish the new best block hash
// (waking any g_best_block_cv waiters, e.g. miners), and log a summary
// line for the new tip.
// NOTE(review): two LogPrintf argument lines (originals 2318 and 2320 —
// apparently the coins-cache MiB/utxo-count arguments) were lost in
// extraction.
2299 static void UpdateTip(const CChainParams &params, CBlockIndex *pindexNew)
2300  EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
2301  // New best block
2302  g_mempool.AddTransactionsUpdated(1);
2303 
2304  {
2305  LOCK(g_best_block_mutex);
2306  g_best_block = pindexNew->GetBlockHash();
2307  g_best_block_cv.notify_all();
2308  }
2309 
2310  LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%ld "
2311  "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2312  __func__, pindexNew->GetBlockHash().ToString(),
2313  pindexNew->nHeight, pindexNew->nVersion,
2314  log(pindexNew->nChainWork.getdouble()) / log(2.0),
2315  pindexNew->GetChainTxCount(),
2316  FormatISO8601DateTime(pindexNew->GetBlockTime()),
2317  GuessVerificationProgress(params.TxData(), pindexNew),
2319  (1.0 / (1 << 20)),
2321 }
2322 
// Disconnect the current chain tip: read its block from disk, undo it
// against the UTXO view, flush if needed, hand its transactions to
// `disconnectpool` (if provided) for later re-addition to the mempool,
// roll back finalization if the tip was the finalized block, and move the
// tip to pprev. Returns false on failure.
// NOTE(review): the function opener (originals 2323-2334, presumably the
// doc comment plus `bool CChainState::DisconnectTip(const CChainParams
// &params,`) was lost in extraction.
2335  BlockValidationState &state,
2336  DisconnectedBlockTransactions *disconnectpool) {
2337  AssertLockHeld(cs_main);
2338  CBlockIndex *pindexDelete = m_chain.Tip();
2339  const Consensus::Params &consensusParams = params.GetConsensus();
2340 
2341  assert(pindexDelete);
2342 
2343  // Read block from disk.
2344  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2345  CBlock &block = *pblock;
2346  if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
2347  return error("DisconnectTip(): Failed to read block");
2348  }
2349 
2350  // Apply the block atomically to the chain state.
2351  int64_t nStart = GetTimeMicros();
2352  {
2353  CCoinsViewCache view(&CoinsTip());
2354  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
 // NOTE(review): the comparand line (original 2356, presumably
 // `DisconnectResult::OK) {`) was lost in extraction.
2355  if (DisconnectBlock(block, pindexDelete, view) !=
2357  return error("DisconnectTip(): DisconnectBlock %s failed",
2358  pindexDelete->GetBlockHash().ToString());
2359  }
2360 
2361  bool flushed = view.Flush();
2362  assert(flushed);
2363  }
2364 
2365  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2366  (GetTimeMicros() - nStart) * MILLI);
2367 
2368  // Write the chain state to disk, if necessary.
2369  if (!FlushStateToDisk(params, state, FlushStateMode::IF_NEEDED)) {
2370  return false;
2371  }
2372 
2373  // If this block is deactivating a fork, we move all mempool transactions
2374  // in front of disconnectpool for reprocessing in a future
2375  // updateMempoolForReorg call
2376  if (pindexDelete->pprev != nullptr &&
2377  GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
2378  GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
 // NOTE(review): LogPrint opener (original 2379) lost in extraction.
2380  "Disconnecting mempool due to rewind of upgrade block\n");
2381  if (disconnectpool) {
2382  disconnectpool->importMempool(g_mempool);
2383  }
2384  g_mempool.clear();
2385  }
2386 
2387  if (disconnectpool) {
2388  disconnectpool->addForBlock(block.vtx, g_mempool);
2389  }
2390 
2391  // If the tip is finalized, then undo it.
2392  if (m_finalizedBlockIndex == pindexDelete) {
2393  m_finalizedBlockIndex = pindexDelete->pprev;
2394  }
2395 
2396  m_chain.SetTip(pindexDelete->pprev);
2397 
2398  // Update ::ChainActive() and related variables.
2399  UpdateTip(params, pindexDelete->pprev);
2400  // Let wallets know transactions went from 1-confirmed to
2401  // 0-confirmed or conflicted:
2402  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2403  return true;
2404 }
2405 
// Cumulative per-stage timings (microseconds) accumulated by ConnectTip
// below and reported via the BCLog::BENCH log category.
2406 static int64_t nTimeReadFromDisk = 0;
2407 static int64_t nTimeConnectTotal = 0;
2408 static int64_t nTimeFlush = 0;
2409 static int64_t nTimeChainState = 0;
2410 static int64_t nTimePostConnect = 0;
2411 
// Per-block record kept by ConnectTrace: the connected block's index entry
// and its shared block pointer.
// NOTE(review): the struct header (original 2412, presumably
// `struct PerBlockConnectTrace {`) and line 2415 were lost in extraction.
2413  CBlockIndex *pindex = nullptr;
2414  std::shared_ptr<const CBlock> pblock;
2416 };
2417 
// Interior of class ConnectTrace: accumulates the blocks connected during
// a single ActivateBestChain step so that BlockConnected signals can be
// fired afterwards.
// NOTE(review): the class header and its doc comment (originals
// 2418-2425) and the BlockConnected signature opener (original 2432,
// presumably `void BlockConnected(CBlockIndex *pindex,`) were lost in
// extraction.
2426 private:
 // Always holds one trailing empty sentinel entry (see GetBlocksConnected).
2427  std::vector<PerBlockConnectTrace> blocksConnected;
2428 
2429 public:
2430  explicit ConnectTrace() : blocksConnected(1) {}
2431 
 // Record a newly connected block; takes shared ownership of the block.
2433  std::shared_ptr<const CBlock> pblock) {
2434  assert(!blocksConnected.back().pindex);
2435  assert(pindex);
2436  assert(pblock);
2437  blocksConnected.back().pindex = pindex;
2438  blocksConnected.back().pblock = std::move(pblock);
2439  blocksConnected.emplace_back();
2440  }
2441 
2442  std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
2443  // We always keep one extra block at the end of our list because blocks
2444  // are added after all the conflicted transactions have been filled in.
2445  // Thus, the last entry should always be an empty one waiting for the
2446  // transactions from the next block. We pop the last entry here to make
2447  // sure the list we return is sane.
2448  assert(!blocksConnected.back().pindex);
2449  blocksConnected.pop_back();
2450  return blocksConnected;
2451  }
2452 };
2453 
// Attempt to mark `pindex` as the finalized block (Avalanche/ABC
// anti-reorg mechanism). Rejects invalid blocks and blocks on a different
// fork than the currently finalized one; a no-op success if the block is
// already finalized.
// NOTE(review): extraction dropped the signature opener (original 2454,
// presumably `bool CChainState::MarkBlockAsFinal(BlockValidationState
// &state,`) and the `return state.Invalid(...)` openers at originals 2461
// and 2471; confirm against the upstream file.
2455  const CBlockIndex *pindex) {
2456  AssertLockHeld(cs_main);
2457  if (pindex->nStatus.isInvalid()) {
2458  // We try to finalize an invalid block.
2459  LogPrintf("ERROR: %s: Trying to finalize invalid block %s\n", __func__,
2460  pindex->GetBlockHash().ToString());
2462  "finalize-invalid-block");
2463  }
2464 
2465  // Check that the request is consistent with current finalization.
2466  if (m_finalizedBlockIndex &&
2467  !AreOnTheSameFork(pindex, m_finalizedBlockIndex)) {
2468  LogPrintf("ERROR: %s: Trying to finalize block %s which conflicts with "
2469  "already finalized block\n",
2470  __func__, pindex->GetBlockHash().ToString());
2472  "bad-fork-prior-finalized");
2473  }
2474 
2475  if (IsBlockFinalized(pindex)) {
2476  // The block is already finalized.
2477  return true;
2478  }
2479 
2480  // We have a new block to finalize.
2481  m_finalizedBlockIndex = pindex;
2482  return true;
2483 }
2484 
// Pick the ancestor of `pindexNew` at depth -maxreorgdepth as the
// finalization candidate, walking back further until one has been known
// for at least -finalizationdelay seconds. Returns nullptr when
// finalization should not happen (delay since startup not expired, or no
// eligible ancestor).
// NOTE(review): the signature line (original 2485, presumably
// `const CBlockIndex *CChainState::FindBlockToFinalize(CBlockIndex
// *pindexNew)`) was lost in extraction.
2486  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
2487  AssertLockHeld(cs_main);
2488 
2489  const int32_t maxreorgdepth =
2490  gArgs.GetArg("-maxreorgdepth", DEFAULT_MAX_REORG_DEPTH);
2491 
2492  const int64_t finalizationdelay =
2493  gArgs.GetArg("-finalizationdelay", DEFAULT_MIN_FINALIZATION_DELAY);
2494 
2495  // Find our candidate.
2496  // If maxreorgdepth is < 0 pindex will be null and auto finalization
2497  // disabled
2498  const CBlockIndex *pindex =
2499  pindexNew->GetAncestor(pindexNew->nHeight - maxreorgdepth);
2500 
2501  int64_t now = GetTime();
2502 
2503  // If the finalization delay is not expired since the startup time,
2504  // finalization should be avoided. Header receive time is not saved to disk
2505  // and so cannot be anterior to startup time.
2506  if (now < (GetStartupTime() + finalizationdelay)) {
2507  return nullptr;
2508  }
2509 
2510  // While our candidate is not eligible (finalization delay not expired), try
2511  // the previous one.
2512  while (pindex && (pindex != ::ChainstateActive().GetFinalizedBlock())) {
2513  // Check that the block to finalize is known for a long enough time.
2514  // This test will ensure that an attacker could not cause a block to
2515  // finalize by forking the chain with a depth > maxreorgdepth.
2516  // If the block is loaded from disk, header receive time is 0 and the
2517  // block will be finalized. This is safe because the delay since the
2518  // node startup is already expired.
2519  auto headerReceivedTime = pindex->GetHeaderReceivedTime();
2520 
2521  // If finalization delay is <= 0, finalization always occurs immediately
2522  if (now >= (headerReceivedTime + finalizationdelay)) {
2523  return pindex;
2524  }
2525 
2526  pindex = pindex->pprev;
2527  }
2528 
2529  return nullptr;
2530 }
2531 
// Connect `pindexNew` (which must extend the current tip) to the active
// chain: read the block if not supplied, run ConnectBlock against a UTXO
// sub-view, update the finalized block, flush state if needed, update the
// mempool (including a full mempool disconnect when the block activates a
// fork upgrade), set the new tip and record the block in `connectTrace`.
// NOTE(review): extraction dropped the function opener (originals
// 2532-2540, presumably the doc comment plus `bool CChainState::ConnectTip(
// const Config &config, BlockValidationState &state,`) and several
// LogPrint openers (originals 2599, 2621, 2636, 2648) plus the
// `FlushStateMode::IF_NEEDED))` continuation at original 2615; confirm
// against the upstream file.
2541  CBlockIndex *pindexNew,
2542  const std::shared_ptr<const CBlock> &pblock,
2543  ConnectTrace &connectTrace,
2544  DisconnectedBlockTransactions &disconnectpool) {
2545  AssertLockHeld(cs_main);
2546  AssertLockHeld(g_mempool.cs);
2547 
2548  const CChainParams &params = config.GetChainParams();
2549  const Consensus::Params &consensusParams = params.GetConsensus();
2550 
2551  assert(pindexNew->pprev == m_chain.Tip());
2552  // Read block from disk.
2553  int64_t nTime1 = GetTimeMicros();
2554  std::shared_ptr<const CBlock> pthisBlock;
2555  if (!pblock) {
2556  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2557  if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
2558  return AbortNode(state, "Failed to read block");
2559  }
2560  pthisBlock = pblockNew;
2561  } else {
2562  pthisBlock = pblock;
2563  }
2564 
2565  const CBlock &blockConnecting = *pthisBlock;
2566 
2567  // Apply the block atomically to the chain state.
2568  int64_t nTime2 = GetTimeMicros();
2569  nTimeReadFromDisk += nTime2 - nTime1;
2570  int64_t nTime3;
2571  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
2572  (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2573  {
2574  CCoinsViewCache view(&CoinsTip());
2575  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, params,
2576  BlockValidationOptions(config));
 // Notify listeners of the validation result before acting on failure.
2577  GetMainSignals().BlockChecked(blockConnecting, state);
2578  if (!rv) {
2579  if (state.IsInvalid()) {
2580  InvalidBlockFound(pindexNew, state);
2581  }
2582 
2583  return error("%s: ConnectBlock %s failed, %s", __func__,
2584  pindexNew->GetBlockHash().ToString(),
2585  state.ToString());
2586  }
2587 
2588  // Update the finalized block.
2589  const CBlockIndex *pindexToFinalize = FindBlockToFinalize(pindexNew);
2590  if (pindexToFinalize && !MarkBlockAsFinal(state, pindexToFinalize)) {
2591  return error("ConnectTip(): MarkBlockAsFinal %s failed (%s)",
2592  pindexNew->GetBlockHash().ToString(),
2593  state.ToString());
2594  }
2595 
2596  nTime3 = GetTimeMicros();
2597  nTimeConnectTotal += nTime3 - nTime2;
2598  assert(nBlocksTotal > 0);
2600  " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
2601  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
2602  nTimeConnectTotal * MILLI / nBlocksTotal);
2603  bool flushed = view.Flush();
2604  assert(flushed);
2605  }
2606 
2607  int64_t nTime4 = GetTimeMicros();
2608  nTimeFlush += nTime4 - nTime3;
2609  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
2610  (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
2611  nTimeFlush * MILLI / nBlocksTotal);
2612 
2613  // Write the chain state to disk, if necessary.
2614  if (!FlushStateToDisk(config.GetChainParams(), state,
2616  return false;
2617  }
2618 
2619  int64_t nTime5 = GetTimeMicros();
2620  nTimeChainState += nTime5 - nTime4;
2622  " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
2623  (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
2624  nTimeChainState * MILLI / nBlocksTotal);
2625 
2626  // Remove conflicting transactions from the mempool.;
2627  g_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2628  disconnectpool.removeForBlock(blockConnecting.vtx);
2629 
2630  // If this block is activating a fork, we move all mempool transactions
2631  // in front of disconnectpool for reprocessing in a future
2632  // updateMempoolForReorg call
2633  if (pindexNew->pprev != nullptr &&
2634  GetNextBlockScriptFlags(consensusParams, pindexNew) !=
2635  GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
2637  "Disconnecting mempool due to acceptance of upgrade block\n");
2638  disconnectpool.importMempool(g_mempool);
2639  }
2640 
2641  // Update m_chain & related variables.
2642  m_chain.SetTip(pindexNew);
2643  UpdateTip(params, pindexNew);
2644 
2645  int64_t nTime6 = GetTimeMicros();
2646  nTimePostConnect += nTime6 - nTime5;
2647  nTimeTotal += nTime6 - nTime1;
2649  " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
2650  (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
2651  nTimePostConnect * MILLI / nBlocksTotal);
2652  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
2653  (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
2654  nTimeTotal * MILLI / nBlocksTotal);
2655 
2656  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2657  return true;
2658 }
2659 
2665  AssertLockHeld(cs_main);
2666  do {
2667  CBlockIndex *pindexNew = nullptr;
2668 
2669  // Find the best candidate header.
2670  {
2671  std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
2672  it = setBlockIndexCandidates.rbegin();
2673  if (it == setBlockIndexCandidates.rend()) {
2674  return nullptr;
2675  }
2676  pindexNew = *it;
2677  }
2678 
2679  // If this block will cause a finalized block to be reorged, then we
2680  // mark it as invalid.
2681  if (m_finalizedBlockIndex &&
2682  !AreOnTheSameFork(pindexNew, m_finalizedBlockIndex)) {
2683  LogPrintf("Mark block %s invalid because it forks prior to the "
2684  "finalization point %d.\n",
2685  pindexNew->GetBlockHash().ToString(),
2686  m_finalizedBlockIndex->nHeight);
2687  pindexNew->nStatus = pindexNew->nStatus.withFailed();
2688  InvalidChainFound(pindexNew);
2689  }
2690 
2691  const bool fAvalancheEnabled =
2692  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED);
2693  const bool fAutoUnpark =
2694  gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);
2695 
2696  const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
2697 
2698  // Check whether all blocks on the path between the currently active
2699  // chain and the candidate are valid. Just going until the active chain
2700  // is an optimization, as we know all blocks in it are valid already.
2701  CBlockIndex *pindexTest = pindexNew;
2702  bool hasValidAncestor = true;
2703  while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
2704  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2705 
2706  // If this is a parked chain, but it has enough PoW, clear the park
2707  // state.
2708  bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
2709  if (fAutoUnpark && fParkedChain) {
2710  const CBlockIndex *pindexTip = m_chain.Tip();
2711 
2712  // During initialization, pindexTip and/or pindexFork may be
2713  // null. In this case, we just ignore the fact that the chain is
2714  // parked.
2715  if (!pindexTip || !pindexFork) {
2716  UnparkBlock(pindexTest);
2717  continue;
2718  }
2719 
2720  // A parked chain can be unparked if it has twice as much PoW
2721  // accumulated as the main chain has since the fork block.
2722  CBlockIndex const *pindexExtraPow = pindexTip;
2723  arith_uint256 requiredWork = pindexTip->nChainWork;
2724  switch (pindexTip->nHeight - pindexFork->nHeight) {
2725  // Limit the penality for depth 1, 2 and 3 to half a block
2726  // worth of work to ensure we don't fork accidentally.
2727  case 3:
2728  case 2:
2729  pindexExtraPow = pindexExtraPow->pprev;
2730  // FALLTHROUGH
2731  case 1: {
2732  const arith_uint256 deltaWork =
2733  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2734  requiredWork += (deltaWork >> 1);
2735  break;
2736  }
2737  default:
2738  requiredWork +=
2739  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2740  break;
2741  }
2742 
2743  if (pindexNew->nChainWork > requiredWork) {
2744  // We have enough, clear the parked state.
2745  LogPrintf("Unpark chain up to block %s as it has "
2746  "accumulated enough PoW.\n",
2747  pindexNew->GetBlockHash().ToString());
2748  fParkedChain = false;
2749  UnparkBlock(pindexTest);
2750  }
2751  }
2752 
2753  // Pruned nodes may have entries in setBlockIndexCandidates for
2754  // which block files have been deleted. Remove those as candidates
2755  // for the most work chain if we come across them; we can't switch
2756  // to a chain unless we have all the non-active-chain parent blocks.
2757  bool fInvalidChain = pindexTest->nStatus.isInvalid();
2758  bool fMissingData = !pindexTest->nStatus.hasData();
2759  if (!(fInvalidChain || fParkedChain || fMissingData)) {
2760  // The current block is acceptable, move to the parent, up to
2761  // the fork point.
2762  pindexTest = pindexTest->pprev;
2763  continue;
2764  }
2765 
2766  // Candidate chain is not usable (either invalid or parked or
2767  // missing data)
2768  hasValidAncestor = false;
2769  setBlockIndexCandidates.erase(pindexTest);
2770 
2771  if (fInvalidChain &&
2772  (pindexBestInvalid == nullptr ||
2773  pindexNew->nChainWork > pindexBestInvalid->nChainWork)) {
2774  pindexBestInvalid = pindexNew;
2775  }
2776 
2777  if (fParkedChain &&
2778  (pindexBestParked == nullptr ||
2779  pindexNew->nChainWork > pindexBestParked->nChainWork)) {
2780  pindexBestParked = pindexNew;
2781  }
2782 
2783  LogPrintf("Considered switching to better tip %s but that chain "
2784  "contains a%s%s%s block.\n",
2785  pindexNew->GetBlockHash().ToString(),
2786  fInvalidChain ? "n invalid" : "",
2787  fParkedChain ? " parked" : "",
2788  fMissingData ? " missing-data" : "");
2789 
2790  CBlockIndex *pindexFailed = pindexNew;
2791  // Remove the entire chain from the set.
2792  while (pindexTest != pindexFailed) {
2793  if (fInvalidChain || fParkedChain) {
2794  pindexFailed->nStatus =
2795  pindexFailed->nStatus.withFailedParent(fInvalidChain)
2796  .withParkedParent(fParkedChain);
2797  } else if (fMissingData) {
2798  // If we're missing data, then add back to
2799  // m_blocks_unlinked, so that if the block arrives in the
2800  // future we can try adding to setBlockIndexCandidates
2801  // again.
2803  std::make_pair(pindexFailed->pprev, pindexFailed));
2804  }
2805  setBlockIndexCandidates.erase(pindexFailed);
2806  pindexFailed = pindexFailed->pprev;
2807  }
2808 
2809  if (fInvalidChain || fParkedChain) {
2810  // We discovered a new chain tip that is either parked or
2811  // invalid, we may want to warn.
2813  }
2814  }
2815 
2816  if (fAvalancheEnabled && g_avalanche) {
2817  g_avalanche->addBlockToReconcile(pindexNew);
2818  }
2819 
2820  // We found a candidate that has valid ancestors. This is our guy.
2821  if (hasValidAncestor) {
2822  return pindexNew;
2823  }
2824  } while (true);
2825 }
2826 
2832  // Note that we can't delete the current block itself, as we may need to
2833  // return to it later in case a reorganization to a better block fails.
2834  auto it = setBlockIndexCandidates.begin();
2835  while (it != setBlockIndexCandidates.end() &&
2836  setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
2837  setBlockIndexCandidates.erase(it++);
2838  }
2839 
2840  // Either the current tip or a successor of it we're working towards is left
2841  // in setBlockIndexCandidates.
2842  assert(!setBlockIndexCandidates.empty());
2843 }
2844 
2853  const Config &config, BlockValidationState &state,
2854  CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
2855  bool &fInvalidFound, ConnectTrace &connectTrace) {
2856  AssertLockHeld(cs_main);
2857 
2858  const CBlockIndex *pindexOldTip = m_chain.Tip();
2859  const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2860 
2861  // Disconnect active blocks which are no longer in the best chain.
2862  bool fBlocksDisconnected = false;
2863  DisconnectedBlockTransactions disconnectpool;
2864  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2865  if (!DisconnectTip(config.GetChainParams(), state, &disconnectpool)) {
2866  // This is likely a fatal error, but keep the mempool consistent,
2867  // just in case. Only remove from the mempool in this case.
2868  disconnectpool.updateMempoolForReorg(config, false, g_mempool);
2869 
2870  // If we're unable to disconnect a block during normal operation,
2871  // then that is a failure of our local system -- we should abort
2872  // rather than stay on a less work chain.
2873  AbortNode(state,
2874  "Failed to disconnect block; see debug.log for details");
2875  return false;
2876  }
2877 
2878  fBlocksDisconnected = true;
2879  }
2880 
2881  // Build list of new blocks to connect.
2882  std::vector<CBlockIndex *> vpindexToConnect;
2883  bool fContinue = true;
2884  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2885  while (fContinue && nHeight != pindexMostWork->nHeight) {
2886  // Don't iterate the entire list of potential improvements toward the
2887  // best tip, as we likely only need a few blocks along the way.
2888  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2889  vpindexToConnect.clear();
2890  vpindexToConnect.reserve(nTargetHeight - nHeight);
2891  CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2892  while (pindexIter && pindexIter->nHeight != nHeight) {
2893  vpindexToConnect.push_back(pindexIter);
2894  pindexIter = pindexIter->pprev;
2895  }
2896 
2897  nHeight = nTargetHeight;
2898 
2899  // Connect new blocks.
2900  for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
2901  if (!ConnectTip(config, state, pindexConnect,
2902  pindexConnect == pindexMostWork
2903  ? pblock
2904  : std::shared_ptr<const CBlock>(),
2905  connectTrace, disconnectpool)) {
2906  if (state.IsInvalid()) {
2907  // The block violates a consensus rule.
2908  if (state.GetResult() !=
2910  InvalidChainFound(vpindexToConnect.back());
2911  }
2912  state = BlockValidationState();
2913  fInvalidFound = true;
2914  fContinue = false;
2915  break;
2916  }
2917 
2918  // A system error occurred (disk space, database error, ...).
2919  // Make the mempool consistent with the current tip, just in
2920  // case any observers try to use it before shutdown.
2921  disconnectpool.updateMempoolForReorg(config, false, g_mempool);
2922  return false;
2923  } else {
2925  if (!pindexOldTip ||
2926  m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2927  // We're in a better position than we were. Return
2928  // temporarily to release the lock.
2929  fContinue = false;
2930  break;
2931  }
2932  }
2933  }
2934  }
2935 
2936  if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
2937  // If any blocks were disconnected, we need to update the mempool even
2938  // if disconnectpool is empty. The disconnectpool may also be non-empty
2939  // if the mempool was imported due to new validation rules being in
2940  // effect.
2941  LogPrint(BCLog::MEMPOOL, "Updating mempool due to reorganization or "
2942  "rules upgrade/downgrade\n");
2943  disconnectpool.updateMempoolForReorg(config, true, g_mempool);
2944  }
2945 
2946  g_mempool.check(&CoinsTip());
2947 
2948  // Callbacks/notifications for a new best chain.
2949  if (fInvalidFound) {
2950  CheckForkWarningConditionsOnNewFork(pindexMostWork);
2951  } else {
2953  }
2954 
2955  return true;
2956 }
2957 
2959  if (!init) {
2961  }
2962  if (::fReindex) {
2964  }
2966 }
2967 
2968 static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
2969  bool fNotify = false;
2970  bool fInitialBlockDownload = false;
2971  static CBlockIndex *pindexHeaderOld = nullptr;
2972  CBlockIndex *pindexHeader = nullptr;
2973  {
2974  LOCK(cs_main);
2975  pindexHeader = pindexBestHeader;
2976 
2977  if (pindexHeader != pindexHeaderOld) {
2978  fNotify = true;
2979  fInitialBlockDownload =
2981  pindexHeaderOld = pindexHeader;
2982  }
2983  }
2984 
2985  // Send block tip changed notifications without cs_main
2986  if (fNotify) {
2987  uiInterface.NotifyHeaderTip(
2988  GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2989  }
2990  return fNotify;
2991 }
2992 
2994  AssertLockNotHeld(cs_main);
2995 
2996  if (GetMainSignals().CallbacksPending() > 10) {
2998  }
2999 }
3000 
3002  BlockValidationState &state,
3003  std::shared_ptr<const CBlock> pblock) {
3004  // Note that while we're often called here from ProcessNewBlock, this is
3005  // far from a guarantee. Things in the P2P/RPC will often end up calling
3006  // us in the middle of ProcessNewBlock - do not assume pblock is set
3007  // sanely for performance or correctness!
3008  AssertLockNotHeld(cs_main);
3009 
3010  const CChainParams &params = config.GetChainParams();
3011 
3012  // ABC maintains a fair degree of expensive-to-calculate internal state
3013  // because this function periodically releases cs_main so that it does not
3014  // lock up other threads for too long during large connects - and to allow
3015  // for e.g. the callback queue to drain we use m_cs_chainstate to enforce
3016  // mutual exclusion so that only one caller may execute this function at a
3017  // time
3019 
3020  CBlockIndex *pindexMostWork = nullptr;
3021  CBlockIndex *pindexNewTip = nullptr;
3022  int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
3023  do {
3024  // Block until the validation queue drains. This should largely
3025  // never happen in normal operation, however may happen during
3026  // reindex, causing memory blowup if we run too far ahead.
3027  // Note that if a validationinterface callback ends up calling
3028  // ActivateBestChain this may lead to a deadlock! We should
3029  // probably have a DEBUG_LOCKORDER test for this in the future.
3031 
3032  {
3033  // Lock transaction pool for at least as long as it takes for
3034  // connectTrace to be consumed
3035  LOCK2(cs_main, ::g_mempool.cs);
3036  CBlockIndex *starting_tip = m_chain.Tip();
3037  bool blocks_connected = false;
3038  do {
3039  // We absolutely may not unlock cs_main until we've made forward
3040  // progress (with the exception of shutdown due to hardware
3041  // issues, low disk space, etc).
3042 
3043  // Destructed before cs_main is unlocked
3044  ConnectTrace connectTrace;
3045 
3046  if (pindexMostWork == nullptr) {
3047  pindexMostWork = FindMostWorkChain();
3048  }
3049 
3050  // Whether we have anything to do at all.
3051  if (pindexMostWork == nullptr ||
3052  pindexMostWork == m_chain.Tip()) {
3053  break;
3054  }
3055 
3056  bool fInvalidFound = false;
3057  std::shared_ptr<const CBlock> nullBlockPtr;
3058  if (!ActivateBestChainStep(
3059  config, state, pindexMostWork,
3060  pblock && pblock->GetHash() ==
3061  pindexMostWork->GetBlockHash()
3062  ? pblock
3063  : nullBlockPtr,
3064  fInvalidFound, connectTrace)) {
3065  // A system error occurred
3066  return false;
3067  }
3068  blocks_connected = true;
3069 
3070  if (fInvalidFound) {
3071  // Wipe cache, we may need another branch now.
3072  pindexMostWork = nullptr;
3073  }
3074 
3075  pindexNewTip = m_chain.Tip();
3076  for (const PerBlockConnectTrace &trace :
3077  connectTrace.GetBlocksConnected()) {
3078  assert(trace.pblock && trace.pindex);
3079  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3080  }
3081  } while (!m_chain.Tip() ||
3082  (starting_tip && CBlockIndexWorkComparator()(
3083  m_chain.Tip(), starting_tip)));
3084 
3085  // Check the index once we're done with the above loop, since
3086  // we're going to release cs_main soon. If the index is in a bad
3087  // state now, then it's better to know immediately rather than
3088  // randomly have it cause a problem in a race.
3089  CheckBlockIndex(params.GetConsensus());
3090 
3091  if (!blocks_connected) {
3092  return true;
3093  }
3094 
3095  const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
3096  bool fInitialDownload = IsInitialBlockDownload();
3097 
3098  // Notify external listeners about the new tip.
3099  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is
3100  // called in the order in which blocks are connected
3101  if (pindexFork != pindexNewTip) {
3102  // Notify ValidationInterface subscribers
3103  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
3104  fInitialDownload);
3105 
3106  // Always notify the UI if a new block tip was connected
3107  uiInterface.NotifyBlockTip(
3108  GetSynchronizationState(fInitialDownload), pindexNewTip);
3109  }
3110  }
3111  // When we reach this point, we switched to a new tip (stored in
3112  // pindexNewTip).
3113 
3114  if (nStopAtHeight && pindexNewTip &&
3115  pindexNewTip->nHeight >= nStopAtHeight) {
3116  StartShutdown();
3117  }
3118 
3119  // We check shutdown only after giving ActivateBestChainStep a chance to
3120  // run once so that we never shutdown before connecting the genesis
3121  // block during LoadChainTip(). Previously this caused an assert()
3122  // failure during shutdown in such cases as the UTXO DB flushing checks
3123  // that the best block hash is non-null.
3124  if (ShutdownRequested()) {
3125  break;
3126  }
3127  } while (pindexNewTip != pindexMostWork);
3128 
3129  // Write changes periodically to disk, after relay.
3130  if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
3131  return false;
3132  }
3133 
3134  return true;
3135 }
3136 
3137 bool ActivateBestChain(const Config &config, BlockValidationState &state,
3138  std::shared_ptr<const CBlock> pblock) {
3140  std::move(pblock));
3141 }
3142 
3144  BlockValidationState &state,
3145  CBlockIndex *pindex) {
3146  {
3147  LOCK(cs_main);
3148  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3149  // Nothing to do, this block is not at the tip.
3150  return true;
3151  }
3152 
3154  // The chain has been extended since the last call, reset the
3155  // counter.
3157  }
3158 
3160  setBlockIndexCandidates.erase(pindex);
3162  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3163  // We can't keep reducing the counter if somebody really wants to
3164  // call preciousblock 2**31-1 times on the same set of tips...
3166  }
3167 
3168  // In case this was parked, unpark it.
3169  UnparkBlock(pindex);
3170 
3171  // Make sure it is added to the candidate list if appropriate.
3172  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3173  pindex->HaveTxsDownloaded()) {
3174  setBlockIndexCandidates.insert(pindex);
3176  }
3177  }
3178 
3179  return ActivateBestChain(config, state);
3180 }
3181 
3182 bool PreciousBlock(const Config &config, BlockValidationState &state,
3183  CBlockIndex *pindex) {
3184  return ::ChainstateActive().PreciousBlock(config, state, pindex);
3185 }
3186 
3188  CBlockIndex *pindex, bool invalidate) {
3189  CBlockIndex *to_mark_failed_or_parked = pindex;
3190  bool pindex_was_in_chain = false;
3191  int disconnected = 0;
3192  const CChainParams &chainparams = config.GetChainParams();
3193 
3194  // We do not allow ActivateBestChain() to run while UnwindBlock() is
3195  // running, as that could cause the tip to change while we disconnect
3196  // blocks. (Note for backport of Core PR16849: we acquire
3197  // LOCK(m_cs_chainstate) in the Park, Invalidate and FinalizeBlock functions
3198  // due to differences in our code)
3200 
3201  // We'll be acquiring and releasing cs_main below, to allow the validation
3202  // callbacks to run. However, we should keep the block index in a
3203  // consistent state as we disconnect blocks -- in particular we need to
3204  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3205  // To avoid walking the block index repeatedly in search of candidates,
3206  // build a map once so that we can look up candidate blocks by chain
3207  // work as we go.
3208  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3209 
3210  {
3211  LOCK(cs_main);
3212  for (const auto &entry : m_blockman.m_block_index) {
3213  CBlockIndex *candidate = entry.second;
3214  // We don't need to put anything in our active chain into the
3215  // multimap, because those candidates will be found and considered
3216  // as we disconnect.
3217  // Instead, consider only non-active-chain blocks that have at
3218  // least as much work as where we expect the new tip to end up.
3219  if (!m_chain.Contains(candidate) &&
3220  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3221  candidate->IsValid(BlockValidity::TRANSACTIONS) &&
3222  candidate->HaveTxsDownloaded()) {
3223  candidate_blocks_by_work.insert(
3224  std::make_pair(candidate->nChainWork, candidate));
3225  }
3226  }
3227  }
3228 
3229  // Disconnect (descendants of) pindex, and mark them invalid.
3230  while (true) {
3231  if (ShutdownRequested()) {
3232  break;
3233  }
3234 
3235  // Make sure the queue of validation callbacks doesn't grow unboundedly.
3237 
3238  LOCK(cs_main);
3239  // Lock for as long as disconnectpool is in scope to make sure
3240  // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3241  // in between
3242  LOCK(::g_mempool.cs);
3243 
3244  if (!m_chain.Contains(pindex)) {
3245  break;
3246  }
3247 
3248  pindex_was_in_chain = true;
3249  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3250 
3251  // ActivateBestChain considers blocks already in m_chain
3252  // unconditionally valid already, so force disconnect away from it.
3253 
3254  DisconnectedBlockTransactions disconnectpool;
3255 
3256  bool ret = DisconnectTip(chainparams, state, &disconnectpool);
3257 
3258  // DisconnectTip will add transactions to disconnectpool.
3259  // Adjust the mempool to be consistent with the new tip, adding
3260  // transactions back to the mempool if disconnecting was successful,
3261  // and we're not doing a very deep invalidation (in which case
3262  // keeping the mempool up to date is probably futile anyway).
3263  disconnectpool.updateMempoolForReorg(
3264  config, /* fAddToMempool = */ (++disconnected <= 10) && ret,
3265  ::g_mempool);
3266 
3267  if (!ret) {
3268  return false;
3269  }
3270 
3271  assert(invalid_walk_tip->pprev == m_chain.Tip());
3272 
3273  // We immediately mark the disconnected blocks as invalid.
3274  // This prevents a case where pruned nodes may fail to invalidateblock
3275  // and be left unable to start as they have no tip candidates (as there
3276  // are no blocks that meet the "have data and are not invalid per
3277  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3278 
3279  invalid_walk_tip->nStatus =
3280  invalidate ? invalid_walk_tip->nStatus.withFailed()
3281  : invalid_walk_tip->nStatus.withParked();
3282 
3283  setDirtyBlockIndex.insert(invalid_walk_tip);
3284  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3285 
3286  if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
3287  (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3288  : to_mark_failed_or_parked->nStatus.isParked())) {
3289  // We only want to mark the last disconnected block as
3290  // Failed (or Parked); its children need to be FailedParent (or
3291  // ParkedParent) instead.
3292  to_mark_failed_or_parked->nStatus =
3293  (invalidate
3294  ? to_mark_failed_or_parked->nStatus.withFailed(false)
3295  .withFailedParent()
3296  : to_mark_failed_or_parked->nStatus.withParked(false)
3297  .withParkedParent());
3298 
3299  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3300  }
3301 
3302  // Add any equal or more work headers to setBlockIndexCandidates
3303  auto candidate_it = candidate_blocks_by_work.lower_bound(
3304  invalid_walk_tip->pprev->nChainWork);
3305  while (candidate_it != candidate_blocks_by_work.end()) {
3306  if (!CBlockIndexWorkComparator()(candidate_it->second,
3307  invalid_walk_tip->pprev)) {
3308  setBlockIndexCandidates.insert(candidate_it->second);
3309  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3310  } else {
3311  ++candidate_it;
3312  }
3313  }
3314 
3315  // Track the last disconnected block, so we can correct its
3316  // FailedParent (or ParkedParent) status in future iterations, or, if
3317  // it's the last one, call InvalidChainFound on it.
3318  to_mark_failed_or_parked = invalid_walk_tip;
3319  }
3320 
3321  CheckBlockIndex(chainparams.GetConsensus());
3322 
3323  {
3324  LOCK(cs_main);
3325  if (m_chain.Contains(to_mark_failed_or_parked)) {
3326  // If the to-be-marked invalid block is in the active chain,
3327  // something is interfering and we can't proceed.
3328  return false;
3329  }
3330 
3331  // Mark pindex (or the last disconnected block) as invalid (or parked),
3332  // even when it never was in the main chain.
3333  to_mark_failed_or_parked->nStatus =
3334  invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3335  : to_mark_failed_or_parked->nStatus.withParked();
3336  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3337  if (invalidate) {
3338  m_blockman.m_failed_blocks.insert(to_mark_failed_or_parked);
3339  }
3340 
3341  // If any new blocks somehow arrived while we were disconnecting
3342  // (above), then the pre-calculation of what should go into
3343  // setBlockIndexCandidates may have missed entries. This would
3344  // technically be an inconsistency in the block index, but if we clean
3345  // it up here, this should be an essentially unobservable error.
3346  // Loop back over all block index entries and add any missing entries
3347  // to setBlockIndexCandidates.
3348  for (const std::pair<const BlockHash, CBlockIndex *> &it :
3349  m_blockman.m_block_index) {
3350  CBlockIndex *i = it.second;
3352  i->HaveTxsDownloaded() &&
3353  !setBlockIndexCandidates.value_comp()(i, m_chain.Tip())) {
3354  setBlockIndexCandidates.insert(i);
3355  }
3356  }
3357 
3358  if (invalidate) {
3359  InvalidChainFound(to_mark_failed_or_parked);
3360  }
3361  }
3362 
3363  // Only notify about a new block tip if the active chain was modified.
3364  if (pindex_was_in_chain) {
3365  uiInterface.NotifyBlockTip(
3367  to_mark_failed_or_parked->pprev);
3368  }
3369  return true;
3370 }
3371 
3373  BlockValidationState &state,
3374  CBlockIndex *pindex) {
3376  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3378 
3379  return UnwindBlock(config, state, pindex, true);
3380 }
3381 
3383  CBlockIndex *pindex) {
3385  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3387 
3388  return UnwindBlock(config, state, pindex, false);
3389 }
3390 
3392  BlockValidationState &state,
3393  CBlockIndex *pindex) {
3395  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3397 
3398  AssertLockNotHeld(cs_main);
3399  CBlockIndex *pindexToInvalidate = nullptr;
3400  {
3401  LOCK(cs_main);
3402  if (!MarkBlockAsFinal(state, pindex)) {
3403  // state is set by MarkBlockAsFinal.
3404  return false;
3405  }
3406 
3407  // We have a valid candidate, make sure it is not parked.
3408  if (pindex->nStatus.isOnParkedChain()) {
3409  UnparkBlock(pindex);
3410  }
3411 
3412  // If the finalized block is on the active chain, there is no need to
3413  // rewind.
3414  if (::ChainActive().Contains(pindex)) {
3415  return true;
3416  }
3417 
3418  // If the finalized block is not on the active chain, that chain is
3419  // invalid
3420  // ...
3421  const CBlockIndex *pindexFork = ::ChainActive().FindFork(pindex);
3422  pindexToInvalidate = ::ChainActive().Next(pindexFork);
3423  if (!pindexToInvalidate) {
3424  return false;
3425  }
3426  } // end of locked cs_main scope
3427 
3428  // ... therefore, we invalidate the block on the active chain that comes
3429  // immediately after it
3430  return UnwindBlock(config, state, pindexToInvalidate,
3431  true /* invalidating */);
3432 }
3433 
3434 template <typename F>
3436  CBlockIndex *pindex, F f) {
3437  BlockStatus newStatus = f(pindex->nStatus);
3438  if (pindex->nStatus != newStatus &&
3439  (!pindexBase ||
3440  pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3441  pindex->nStatus = newStatus;
3442  setDirtyBlockIndex.insert(pindex);
3443  if (newStatus.isValid()) {
3444  m_blockman.m_failed_blocks.erase(pindex);
3445  }
3446 
3447  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3448  pindex->HaveTxsDownloaded() &&
3449  setBlockIndexCandidates.value_comp()(::ChainActive().Tip(),
3450  pindex)) {
3451  setBlockIndexCandidates.insert(pindex);
3452  }
3453  return true;
3454  }
3455  return false;
3456 }
3457 
3458 template <typename F, typename C, typename AC>
3460  F f, C fChild, AC fAncestorWasChanged) {
3461  AssertLockHeld(cs_main);
3462 
3463  // Update the current block and ancestors; while we're doing this, identify
3464  // which was the deepest ancestor we changed.
3465  CBlockIndex *pindexDeepestChanged = pindex;
3466  for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3467  pindexAncestor = pindexAncestor->pprev) {
3468  if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3469  pindexDeepestChanged = pindexAncestor;
3470  }
3471  }
3472 
3473  if (pindexReset &&
3474  pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3475  pindexDeepestChanged) {
3476  // reset pindexReset if it had a modified ancestor.
3477  pindexReset = nullptr;
3478  }
3479 
3480  // Update all blocks under modified blocks.
3481  BlockMap::iterator it = m_blockman.m_block_index.begin();
3482  while (it != m_blockman.m_block_index.end()) {
3483  UpdateFlagsForBlock(pindex, it->second, fChild);
3484  UpdateFlagsForBlock(pindexDeepestChanged, it->second,
3485  fAncestorWasChanged);
3486  it++;
3487  }
3488 }
3489 
3491  AssertLockHeld(cs_main);
3492 
3493  // In case we are reconsidering something before the finalization point,
3494  // move the finalization point to the last common ancestor.
3495  if (m_finalizedBlockIndex) {
3496  m_finalizedBlockIndex =
3497  LastCommonAncestor(pindex, m_finalizedBlockIndex);
3498  }
3499 
3500  UpdateFlags(
3501  pindex, pindexBestInvalid,
3502  [](const BlockStatus status) {
3503  return status.withClearedFailureFlags();
3504  },
3505  [](const BlockStatus status) {
3506  return status.withClearedFailureFlags();
3507  },
3508  [](const BlockStatus status) {
3509  return status.withFailedParent(false);
3510  });
3511 }
3512 
3515 }
3516 
3517 void CChainState::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
3518  AssertLockHeld(cs_main);
3519 
3520  UpdateFlags(
3521  pindex, pindexBestParked,
3522  [](const BlockStatus status) {
3523  return status.withClearedParkedFlags();
3524  },
3525  [fClearChildren](const BlockStatus status) {
3526  return fClearChildren ? status.withClearedParkedFlags()
3527  : status.withParkedParent(false);
3528  },
3529  [](const BlockStatus status) {
3530  return status.withParkedParent(false);
3531  });
3532 }
3533 
3536 }
3537 
3538 void UnparkBlock(CBlockIndex *pindex) {
3539  return ::ChainstateActive().UnparkBlockImpl(pindex, false);
3540 }
3541 
3542 bool CChainState::IsBlockFinalized(const CBlockIndex *pindex) const {
3543  AssertLockHeld(cs_main);
3544  return m_finalizedBlockIndex &&
3545  m_finalizedBlockIndex->GetAncestor(pindex->nHeight) == pindex;
3546 }
3547 
3550  AssertLockHeld(cs_main);
3551  return m_finalizedBlockIndex;
3552 }
3553 
3555  AssertLockHeld(cs_main);
3556 
3557  // Check for duplicate
3558  BlockHash hash = block.GetHash();
3559  BlockMap::iterator it = m_block_index.find(hash);
3560  if (it != m_block_index.end()) {
3561  return it->second;
3562  }
3563 
3564  // Construct new block index object
3565  CBlockIndex *pindexNew = new CBlockIndex(block);
3566  // We assign the sequence id to blocks only when the full data is available,
3567  // to avoid miners withholding blocks but broadcasting headers, to get a
3568  // competitive advantage.
3569  pindexNew->nSequenceId = 0;
3570  BlockMap::iterator mi =
3571  m_block_index.insert(std::make_pair(hash, pindexNew)).first;
3572  pindexNew->phashBlock = &((*mi).first);
3573  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
3574  if (miPrev != m_block_index.end()) {
3575  pindexNew->pprev = (*miPrev).second;
3576  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3577  pindexNew->BuildSkip();
3578  }
3579  pindexNew->nTimeReceived = GetTime();
3580  pindexNew->nTimeMax =
3581  (pindexNew->pprev
3582  ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
3583  : pindexNew->nTime);
3584  pindexNew->nChainWork =
3585  (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
3586  GetBlockProof(*pindexNew);
3587  pindexNew->RaiseValidity(BlockValidity::TREE);
3588  if (pindexBestHeader == nullptr ||
3589  pindexBestHeader->nChainWork < pindexNew->nChainWork) {
3590  pindexBestHeader = pindexNew;
3591  }
3592 
3593  setDirtyBlockIndex.insert(pindexNew);
3594  return pindexNew;
3595 }
3596 
3602  CBlockIndex *pindexNew,
3603  const FlatFilePos &pos) {
3604  pindexNew->nTx = block.vtx.size();
3605  pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3606  pindexNew->nFile = pos.nFile;
3607  pindexNew->nDataPos = pos.nPos;
3608  pindexNew->nUndoPos = 0;
3609  pindexNew->nStatus = pindexNew->nStatus.withData();
3611  setDirtyBlockIndex.insert(pindexNew);
3612 
3613  if (pindexNew->UpdateChainStats()) {
3614  // If pindexNew is the genesis block or all parents are
3615  // BLOCK_VALID_TRANSACTIONS.
3616  std::deque<CBlockIndex *> queue;
3617  queue.push_back(pindexNew);
3618 
3619  // Recursively process any descendant blocks that now may be eligible to
3620  // be connected.
3621  while (!queue.empty()) {
3622  CBlockIndex *pindex = queue.front();
3623  queue.pop_front();
3624  pindex->UpdateChainStats();
3625  if (pindex->nSequenceId == 0) {
3626  // We assign a sequence is when transaction are received to
3627  // prevent a miner from being able to broadcast a block but not
3628  // its content. However, a sequence id may have been set
3629  // manually, for instance via PreciousBlock, in which case, we
3630  // don't need to assign one.
3631  pindex->nSequenceId = nBlockSequenceId++;
3632  }
3633 
3634  if (m_chain.Tip() == nullptr ||
3635  !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3636  setBlockIndexCandidates.insert(pindex);
3637  }
3638 
3639  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
3640  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
3641  range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3642  while (range.first != range.second) {
3643  std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
3644  range.first;
3645  queue.push_back(it->second);
3646  range.first++;
3647  m_blockman.m_blocks_unlinked.erase(it);
3648  }
3649  }
3650  } else if (pindexNew->pprev &&
3651  pindexNew->pprev->IsValid(BlockValidity::TREE)) {
3653  std::make_pair(pindexNew->pprev, pindexNew));
3654  }
3655 }
3656 
// Find (and reserve) a position in a block file where nAddSize bytes can be
// written, rolling over to a new file when the current one would exceed
// MAX_BLOCKFILE_SIZE. With fKnown=true (e.g. reindex of existing files) the
// position already stored in `pos` is used and only the in-memory file
// statistics are refreshed. Returns false (via AbortNode) when the disk runs
// out of space. Takes cs_LastBlockFile; mutates vinfoBlockFile,
// nLastBlockFile, setDirtyFileInfo and possibly fCheckForPruning.
3657 static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
3658  unsigned int nHeight, uint64_t nTime,
3659  bool fKnown = false) {
3660  LOCK(cs_LastBlockFile);
3661 
3662  unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3663  if (vinfoBlockFile.size() <= nFile) {
3664  vinfoBlockFile.resize(nFile + 1);
3665  }
3666 
3667  bool finalize_undo = false;
3668  if (!fKnown) {
// Advance to the first file that still has room for this block.
3669  while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3670  // when the undo file is keeping up with the block file, we want to
3671  // flush it explicitly when it is lagging behind (more blocks arrive
3672  // than are being connected), we let the undo block write case
3673  // handle it
3674  finalize_undo = (vinfoBlockFile[nFile].nHeightLast ==
3675  (unsigned int)ChainActive().Tip()->nHeight);
3676  nFile++;
3677  if (vinfoBlockFile.size() <= nFile) {
3678  vinfoBlockFile.resize(nFile + 1);
3679  }
3680  }
3681  pos.nFile = nFile;
3682  pos.nPos = vinfoBlockFile[nFile].nSize;
3683  }
3684 
// We are switching to a different file than the last one written: flush the
// file being left behind (and finalize its undo file when appropriate).
3685  if ((int)nFile != nLastBlockFile) {
3686  if (!fKnown) {
3687  LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
3688  vinfoBlockFile[nLastBlockFile].ToString());
3689  }
3690  FlushBlockFile(!fKnown, finalize_undo);
3691  nLastBlockFile = nFile;
3692  }
3693 
3694  vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3695  if (fKnown) {
// Known position: only extend the recorded size if this block reaches
// beyond the current end of the file.
3696  vinfoBlockFile[nFile].nSize =
3697  std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3698  } else {
3699  vinfoBlockFile[nFile].nSize += nAddSize;
3700  }
3701 
3702  if (!fKnown) {
// Pre-allocate chunk space on disk for the new data; newly allocated
// space counts toward the prune target.
3703  bool out_of_space;
3704  size_t bytes_allocated =
3705  BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3706  if (out_of_space) {
3707  return AbortNode("Disk space is too low!",
3708  _("Disk space is too low!"));
3709  }
3710  if (bytes_allocated != 0 && fPruneMode) {
3711  fCheckForPruning = true;
3712  }
3713  }
3714 
3715  setDirtyFileInfo.insert(nFile);
3716  return true;
3717 }
3718 
3719 static bool FindUndoPos(BlockValidationState &state, int nFile,
3720  FlatFilePos &pos, unsigned int nAddSize) {
3721  pos.nFile = nFile;
3722 
3723  LOCK(cs_LastBlockFile);
3724 
3725  pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3726  vinfoBlockFile[nFile].nUndoSize += nAddSize;
3727  setDirtyFileInfo.insert(nFile);
3728 
3729  bool out_of_space;
3730  size_t bytes_allocated =
3731  UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3732  if (out_of_space) {
3733  return AbortNode(state, "Disk space is too low!",
3734  _("Disk space is too low!"));
3735  }
3736  if (bytes_allocated != 0 && fPruneMode) {
3737  fCheckForPruning = true;
3738  }
3739 
3740  return true;
3741 }
3742 
// Context-free header validity check: verifies the header's proof of work
// against its claimed nBits target (skipped when validationOptions disables
// PoW validation, e.g. for some test setups). On failure the header is
// rejected through `state` with reason "high-hash".
3751 static bool CheckBlockHeader(const CBlockHeader &block,
3752  BlockValidationState &state,
3753  const Consensus::Params &params,
3754  BlockValidationOptions validationOptions) {
3755  // Check proof of work matches claimed amount
3756  if (validationOptions.shouldValidatePoW() &&
3757  !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
3759  "high-hash", "proof of work failed");
3760  }
3761 
3762  return true;
3763 }
3764 
// Context-independent block validity checks: header PoW, merkle root (and
// CVE-2012-2459 malleability), plausible size bounds, coinbase placement,
// and per-transaction sanity. A successful full check is cached in
// block.fChecked so repeat calls are cheap; the cache is only set when both
// PoW and merkle-root validation actually ran.
3765 bool CheckBlock(const CBlock &block, BlockValidationState &state,
3766  const Consensus::Params &params,
3767  BlockValidationOptions validationOptions) {
3768  // These are checks that are independent of context.
3769  if (block.fChecked) {
3770  return true;
3771  }
3772 
3773  // Check that the header is valid (particularly PoW). This is mostly
3774  // redundant with the call in AcceptBlockHeader.
3775  if (!CheckBlockHeader(block, state, params, validationOptions)) {
3776  return false;
3777  }
3778 
3779  // Check the merkle root.
3780  if (validationOptions.shouldValidateMerkleRoot()) {
3781  bool mutated;
3782  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3783  if (block.hashMerkleRoot != hashMerkleRoot2) {
3785  "bad-txnmrklroot", "hashMerkleRoot mismatch");
3786  }
3787 
3788  // Check for merkle tree malleability (CVE-2012-2459): repeating
3789  // sequences of transactions in a block without affecting the merkle
3790  // root of a block, while still invalidating it.
3791  if (mutated) {
3793  "bad-txns-duplicate", "duplicate transaction");
3794  }
3795  }
3796 
3797  // All potential-corruption validation must be done before we do any
3798  // transaction validation, as otherwise we may mark the header as invalid
3799  // because we receive the wrong transactions for it.
3800 
3801  // First transaction must be coinbase.
3802  if (block.vtx.empty()) {
3804  "bad-cb-missing", "first tx is not coinbase");
3805  }
3806 
3807  // Size limits.
3808  auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
3809 
3810  // Bail early if there is no way this block is of reasonable size.
// Cheap lower bound: every transaction occupies at least
// MIN_TRANSACTION_SIZE bytes, so this rejects without serializing.
3811  if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
3813  "bad-blk-length", "size limits failed");
3814  }
3815 
3816  auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3817  if (currentBlockSize > nMaxBlockSize) {
3819  "bad-blk-length", "size limits failed");
3820  }
3821 
3822  // And a valid coinbase.
3823  TxValidationState tx_state;
3824  if (!CheckCoinbase(*block.vtx[0], tx_state)) {
3826  tx_state.GetRejectReason(),
3827  strprintf("Coinbase check failed (txid %s) %s",
3828  block.vtx[0]->GetId().ToString(),
3829  tx_state.GetDebugMessage()));
3830  }
3831 
3832  // Check transactions for regularity, skipping the first. Note that this
3833  // is the first time we check that all after the first are !IsCoinBase.
3834  for (size_t i = 1; i < block.vtx.size(); i++) {
3835  auto *tx = block.vtx[i].get();
3836  if (!CheckRegularTransaction(*tx, tx_state)) {
3837  return state.Invalid(
3839  tx_state.GetRejectReason(),
3840  strprintf("Transaction check failed (txid %s) %s",
3841  tx->GetId().ToString(), tx_state.GetDebugMessage()));
3842  }
3843  }
3844 
// Only cache the result when nothing was skipped; a partial check must not
// short-circuit a later full one.
3845  if (validationOptions.shouldValidatePoW() &&
3846  validationOptions.shouldValidateMerkleRoot()) {
3847  block.fChecked = true;
3848  }
3849 
3850  return true;
3851 }
3852 
// Header checks that require chain context (the previous block index):
// correct difficulty (nBits), checkpoint conformance, timestamp bounds
// against median-time-past and adjusted network time, and the BIP34/65/66
// version gates. Caller must hold cs_main; pindexPrev must be non-null
// (the genesis header never reaches this function).
3863 static bool ContextualCheckBlockHeader(const CChainParams &params,
3864  const CBlockHeader &block,
3865  BlockValidationState &state,
3866  const CBlockIndex *pindexPrev,
3867  int64_t nAdjustedTime)
3868  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
3869  assert(pindexPrev != nullptr);
3870  const int nHeight = pindexPrev->nHeight + 1;
3871 
3872  // Check proof of work
3873  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
3874  LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
3876  "bad-diffbits", "incorrect proof of work");
3877  }
3878 
3879  // Check against checkpoints
3880  if (fCheckpointsEnabled) {
3881  const CCheckpointData &checkpoints = params.Checkpoints();
3882 
3883  // Check that the block chain matches the known block chain up to a
3884  // checkpoint.
3885  if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
3886  LogPrintf("ERROR: %s: rejected by checkpoint lock-in at %d\n",
3887  __func__, nHeight);
3889  "checkpoint mismatch");
3890  }
3891 
3892  // Don't accept any forks from the main chain prior to last checkpoint.
3893  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
3894  // in our BlockIndex().
3895  CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints);
3896  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3897  LogPrintf("ERROR: %s: forked chain older than last checkpoint "
3898  "(height %d)\n",
3899  __func__, nHeight);
3901  "bad-fork-prior-to-checkpoint");
3902  }
3903  }
3904 
3905  // Check timestamp against prev
// A block must be strictly newer than the median time of its 11 ancestors.
3906  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
3908  "time-too-old", "block's timestamp is too early");
3909  }
3910 
3911  // Check timestamp
3912  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
3914  "time-too-new",
3915  "block timestamp too far in the future");
3916  }
3917 
3918  // Reject outdated version blocks when 95% (75% on testnet) of the network
3919  // has upgraded:
3920  // check for version 2, 3 and 4 upgrades
3921  const Consensus::Params &consensusParams = params.GetConsensus();
3922  if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3923  (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3924  (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
3925  return state.Invalid(
3927  strprintf("bad-version(0x%08x)", block.nVersion),
3928  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3929  }
3930 
3931  return true;
3932 }
3933 
// NOTE(review): the leading signature line was lost in extraction; per the
// comment in the body this appears to be
// ContextualCheckTransactionForCurrentBlock(...) — confirm against the
// upstream source. It evaluates whether `tx` would be valid (final) in the
// *next* block built on the current tip, under cs_main.
3935  const CTransaction &tx,
3936  TxValidationState &state,
3937  int flags) {
3938  AssertLockHeld(cs_main);
3939 
3940  // By convention a negative value for flags indicates that the current
3941  // network-enforced consensus rules should be used. In a future soft-fork
3942  // scenario that would mean checking which rules would be enforced for the
3943  // next block and setting the appropriate flags. At the present time no
3944  // soft-forks are scheduled, so no flags are set.
3945  flags = std::max(flags, 0);
3946 
3947  // ContextualCheckTransactionForCurrentBlock() uses
3948  // ::ChainActive().Height()+1 to evaluate nLockTime because when IsFinalTx()
3949  // is called within CBlock::AcceptBlock(), the height of the block *being*
3950  // evaluated is what is used. Thus if we want to know if a transaction can
3951  // be part of the *next* block, we need to call ContextualCheckTransaction()
3952  // with one more than ::ChainActive().Height().
3953  const int nBlockHeight = ::ChainActive().Height() + 1;
3954 
3955  // BIP113 will require that time-locked transactions have nLockTime set to
3956  // less than the median time of the previous block they're contained in.
3957  // When the next block is created its previous block will be the current
3958  // chain tip, so we use that to calculate the median time passed to
3959  // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
// When there is no tip yet, fall back to 0 (the expression fetching the
// tip's median time past was partially lost in extraction).
3960  const int64_t nMedianTimePast =
3961  ::ChainActive().Tip() == nullptr
3962  ? 0
3964  const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
3965  ? nMedianTimePast
3966  : GetAdjustedTime();
3967 
3968  return ContextualCheckTransaction(params, tx, state, nBlockHeight,
3969  nLockTimeCutoff, nMedianTimePast);
3970 }
3971 
// Block checks that require chain context: canonical transaction ordering
// (once Magnetic Anomaly is active), per-transaction finality under the
// applicable locktime rules (BIP113 median-time-past from CSVHeight on),
// and the BIP34 requirement that the coinbase scriptSig commits to the
// block height. pindexPrev may be null only for the genesis block.
3979 static bool ContextualCheckBlock(const CBlock &block,
3980  BlockValidationState &state,
3981  const Consensus::Params &params,
3982  const CBlockIndex *pindexPrev) {
3983  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3984 
3985  // Start enforcing BIP113 (Median Time Past).
3986  int nLockTimeFlags = 0;
3987  if (nHeight >= params.CSVHeight) {
3988  assert(pindexPrev != nullptr);
3989  nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3990  }
3991 
3992  const int64_t nMedianTimePast =
3993  pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
3994 
3995  const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3996  ? nMedianTimePast
3997  : block.GetBlockTime();
3998 
3999  const bool fIsMagneticAnomalyEnabled =
4000  IsMagneticAnomalyEnabled(params, pindexPrev);
4001 
4002  // Check transactions:
4003  // - canonical ordering
4004  // - ensure they are finalized
4005  // - perform a preliminary block-sigops count (they will be recounted more
4006  // strictly during ConnectBlock).
4007  // - perform a transaction-sigops check (again, a more strict check will
4008  // happen in ConnectBlock).
4009  const CTransaction *prevTx = nullptr;
4010  for (const auto &ptx : block.vtx) {
4011  const CTransaction &tx = *ptx;
4012  if (fIsMagneticAnomalyEnabled) {
// Enforce strictly increasing txids (CTOR); equal ids mean a
// duplicated transaction, decreasing ids mean bad ordering.
4013  if (prevTx && (tx.GetId() <= prevTx->GetId())) {
4014  if (tx.GetId() == prevTx->GetId()) {
4016  "tx-duplicate",
4017  strprintf("Duplicated transaction %s",
4018  tx.GetId().ToString()));
4019  }
4020 
4021  return state.Invalid(
4023  strprintf("Transaction order is invalid (%s < %s)",
4024  tx.GetId().ToString(),
4025  prevTx->GetId().ToString()));
4026  }
4027 
// The coinbase is exempt from the ordering check, so it never
// becomes the comparison baseline.
4028  if (prevTx || !tx.IsCoinBase()) {
4029  prevTx = &tx;
4030  }
4031  }
4032 
4033  TxValidationState tx_state;
4034  if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
4035  nLockTimeCutoff, nMedianTimePast)) {
4037  tx_state.GetRejectReason(),
4038  tx_state.GetDebugMessage());
4039  }
4040  }
4041 
4042  // Enforce rule that the coinbase starts with serialized block height
4043  if (nHeight >= params.BIP34Height) {
4044  CScript expect = CScript() << nHeight;
4045  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
4046  !std::equal(expect.begin(), expect.end(),
4047  block.vtx[0]->vin[0].scriptSig.begin())) {
4049  "bad-cb-height",
4050  "block height mismatch in coinbase");
4051  }
4052  }
4053 
4054  return true;
4055 }
4056 
// NOTE(review): the leading signature line was lost in extraction; callers
// below invoke this as m_blockman.AcceptBlockHeader(config, header, state,
// &pindex), so this appears to be BlockManager::AcceptBlockHeader — confirm
// against the upstream source. Validates a header (context-free and
// contextual checks), propagates failure markers down chains descending
// from known-invalid blocks, and adds the header to the block index.
// On success *ppindex points at the (possibly pre-existing) index entry.
 const CBlockHeader &block,
4064  BlockValidationState &state,
4065  CBlockIndex **ppindex) {
4066  AssertLockHeld(cs_main);
4067  const CChainParams &chainparams = config.GetChainParams();
4068 
4069  // Check for duplicate
4070  BlockHash hash = block.GetHash();
4071  BlockMap::iterator miSelf = m_block_index.find(hash);
4072  CBlockIndex *pindex = nullptr;
// The genesis block skips all of these checks and is added directly below.
4073  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4074  if (miSelf != m_block_index.end()) {
4075  // Block header is already known.
4076  pindex = miSelf->second;
4077  if (ppindex) {
4078  *ppindex = pindex;
4079  }
4080 
4081  if (pindex->nStatus.isInvalid()) {
4082  LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__,
4083  hash.ToString());
4084  return state.Invalid(
4086  }
4087 
4088  return true;
4089  }
4090 
4091  if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4092  BlockValidationOptions(config))) {
4093  return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__,
4094  hash.ToString(), state.ToString());
4095  }
4096 
4097  // Get prev block index
4098  BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
4099  if (mi == m_block_index.end()) {
4100  LogPrintf("ERROR: %s: prev block not found\n", __func__);
4102  "prev-blk-not-found");
4103  }
4104 
4105  CBlockIndex *pindexPrev = (*mi).second;
4106  assert(pindexPrev);
4107  if (pindexPrev->nStatus.isInvalid()) {
4108  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4110  "bad-prevblk");
4111  }
4112 
4113  if (!ContextualCheckBlockHeader(chainparams, block, state, pindexPrev,
4114  GetAdjustedTime())) {
4115  return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
4116  __func__, hash.ToString(), state.ToString());
4117  }
4118 
4119  /* Determine if this block descends from any block which has been found
4120  * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4121  * between them as failed. For example:
4122  *
4123  * D3
4124  * /
4125  * B2 - C2
4126  * / \
4127  * A D2 - E2 - F2
4128  * \
4129  * B1 - C1 - D1 - E1
4130  *
4131  * In the case that we attempted to reorg from E1 to F2, only to find
4132  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4133  * but NOT D3 (it was not in any of our candidate sets at the time).
4134  *
4135  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4136  * in LoadBlockIndex.
4137  */
4138  if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4139  // The above does not mean "invalid": it checks if the previous
4140  // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4141  // a performance optimization, in the common case of adding a new
4142  // block to the tip, we don't need to iterate over the failed blocks
4143  // list.
4144  for (const CBlockIndex *failedit : m_failed_blocks) {
4145  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4146  assert(failedit->nStatus.hasFailed());
4147  CBlockIndex *invalid_walk = pindexPrev;
// Walk back from pindexPrev to the known-failed ancestor,
// stamping every intermediate block as failed-parent.
4148  while (invalid_walk != failedit) {
4149  invalid_walk->nStatus =
4150  invalid_walk->nStatus.withFailedParent();
4151  setDirtyBlockIndex.insert(invalid_walk);
4152  invalid_walk = invalid_walk->pprev;
4153  }
4154  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4155  return state.Invalid(
4157  "bad-prevblk");
4158  }
4159  }
4160  }
4161  }
4162 
4163  if (pindex == nullptr) {
4164  pindex = AddToBlockIndex(block);
4165  }
4166 
4167  if (ppindex) {
4168  *ppindex = pindex;
4169  }
4170 
4171  return true;
4172 }
4173 
4174 // Exposed wrapper for AcceptBlockHeader
// NOTE(review): the signature's leading line was lost in extraction; this
// appears to be ChainstateManager::ProcessNewBlockHeaders — confirm against
// the upstream source. Accepts a batch of headers under cs_main, reports the
// last accepted index through *ppindex, and logs header-sync progress during
// initial block download.
 const Config &config, const std::vector<CBlockHeader> &headers,
4177  BlockValidationState &state, const CBlockIndex **ppindex) {
4178  AssertLockNotHeld(cs_main);
4179  {
4180  LOCK(cs_main);
4181  for (const CBlockHeader &header : headers) {
4182  // Use a temp pindex instead of ppindex to avoid a const_cast
4183  CBlockIndex *pindex = nullptr;
4184  bool accepted =
4185  m_blockman.AcceptBlockHeader(config, header, state, &pindex);
4187  config.GetChainParams().GetConsensus());
4188 
// Stop at the first header that fails validation; `state` carries
// the reason.
4189  if (!accepted) {
4190  return false;
4191  }
4192 
4193  if (ppindex) {
4194  *ppindex = pindex;
4195  }
4196  }
4197  }
4198 
4199  if (NotifyHeaderTip()) {
4200  if (::ChainstateActive().IsInitialBlockDownload() && ppindex &&
4201  *ppindex) {
4202  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4203  (*ppindex)->nHeight,
4204  100.0 /
4205  ((*ppindex)->nHeight +
4206  (GetAdjustedTime() - (*ppindex)->GetBlockTime()) /
4208  (*ppindex)->nHeight);
4209  }
4210  }
4211  return true;
4212 }
4213 
4218 static FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight,
4219  const CChainParams &chainparams,
4220  const FlatFilePos *dbp) {
4221  unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
4222  FlatFilePos blockPos;
4223  if (dbp != nullptr) {
4224  blockPos = *dbp;
4225  }
4226  if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(),
4227  dbp != nullptr)) {
4228  error("%s: FindBlockPos failed", __func__);
4229  return FlatFilePos();
4230  }
4231  if (dbp == nullptr) {
4232  if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
4233  AbortNode("Failed to write block");
4234  return FlatFilePos();
4235  }
4236  }
4237  return blockPos;
4238 }
4239 
// NOTE(review): the signature's leading line was lost in extraction; given
// the body (m_chain, m_blockman, SaveBlockToDisk) this appears to be
// CChainState::AcceptBlock — confirm against the upstream source. Validates
// a full block, decides whether an unrequested block is worth storing,
// optionally parks deep-reorg blocks, and persists the block to disk.
// Requires cs_main. *fNewBlock is set when the block's data is stored for
// the first time.
 const std::shared_ptr<const CBlock> &pblock,
4253  BlockValidationState &state, bool fRequested,
4254  const FlatFilePos *dbp, bool *fNewBlock) {
4255  AssertLockHeld(cs_main);
4256 
4257  const CBlock &block = *pblock;
4258  if (fNewBlock) {
4259  *fNewBlock = false;
4260  }
4261 
4262  CBlockIndex *pindex = nullptr;
4263 
4264  bool accepted_header =
4265  m_blockman.AcceptBlockHeader(config, block, state, &pindex);
4267 
4268  if (!accepted_header) {
4269  return false;
4270  }
4271 
4272  // Try to process all requested blocks that we don't have, but only
4273  // process an unrequested block if it's new and has enough work to
4274  // advance our tip, and isn't too many blocks ahead.
4275  bool fAlreadyHave = pindex->nStatus.hasData();
4276 
4277  // TODO: deal better with return value and error conditions for duplicate
4278  // and unrequested blocks.
4279  if (fAlreadyHave) {
4280  return true;
4281  }
4282 
4283  // Compare block header timestamps and received times of the block and the
4284  // chaintip. If they have the same chain height, use these diffs as a
4285  // tie-breaker, attempting to pick the more honestly-mined block.
4286  int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
4287  int64_t chainTipTimeDiff =
4288  m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
4289 
// Note: "same height" is detected via equal cumulative chain work, not via
// the literal nHeight field.
4290  bool isSameHeight =
4291  m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
4292  if (isSameHeight) {
4293  LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
4294  "diff=%d\n",
4295  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
4296  LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
4297  "diff=%d\n",
4298  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
4299  }
4300 
4301  bool fHasMoreOrSameWork =
4302  (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
4303  : true);
4304 
4305  // Blocks that are too out-of-order needlessly limit the effectiveness of
4306  // pruning, because pruning will not delete block files that contain any
4307  // blocks which are too close in height to the tip. Apply this test
4308  // regardless of whether pruning is enabled; it should generally be safe to
4309  // not process unrequested blocks.
4310  bool fTooFarAhead =
4311  (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
4312 
4313  // TODO: Decouple this function from the block download logic by removing
4314  // fRequested
4315  // This requires some new chain data structure to efficiently look up if a
4316  // block is in a chain leading to a candidate for best tip, despite not
4317  // being such a candidate itself.
4318 
4319  // If we didn't ask for it:
4320  if (!fRequested) {
4321  // This is a previously-processed block that was pruned.
4322  if (pindex->nTx != 0) {
4323  return true;
4324  }
4325 
4326  // Don't process less-work chains.
4327  if (!fHasMoreOrSameWork) {
4328  return true;
4329  }
4330 
4331  // Block height is too high.
4332  if (fTooFarAhead) {
4333  return true;
4334  }
4335 
4336  // Protect against DoS attacks from low-work chains.
4337  // If our tip is behind, a peer could try to send us
4338  // low-work blocks on a fake chain that we would never
4339  // request; don't process these.
4340  if (pindex->nChainWork < nMinimumChainWork) {
4341  return true;
4342  }
4343  }
4344 
4345  const CChainParams &chainparams = config.GetChainParams();
4346  const Consensus::Params &consensusParams = chainparams.GetConsensus();
4347 
4348  if (!CheckBlock(block, state, consensusParams,
4349  BlockValidationOptions(config)) ||
4350  !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
4351  if (state.IsInvalid() &&
4353  pindex->nStatus = pindex->nStatus.withFailed();
4354  setDirtyBlockIndex.insert(pindex);
4355  }
4356 
4357  return error("%s: %s (block %s)", __func__, state.ToString(),
4358  block.GetHash().ToString());
4359  }
4360 
4361  // If connecting the new block would require rewinding more than one block
4362  // from the active chain (i.e., a "deep reorg"), then mark the new block as
4363  // parked. If it has enough work then it will be automatically unparked
4364  // later, during FindMostWorkChain. We mark the block as parked at the very
4365  // last minute so we can make sure everything is ready to be reorged if
4366  // needed.
4367  if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
4368  const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
4369  if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
4370  LogPrintf("Park block %s as it would cause a deep reorg.\n",
4371  pindex->GetBlockHash().ToString());
4372  pindex->nStatus = pindex->nStatus.withParked();
4373  setDirtyBlockIndex.insert(pindex);
4374  }
4375  }
4376 
4377  // Header is valid/has work and the merkle tree is good.
4378  // Relay now, but if it does not build on our best tip, let the
4379  // SendMessages loop relay it.
4380  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
4381  GetMainSignals().NewPoWValidBlock(pindex, pblock);
4382  }
4383 
4384  // Write block to history file
4385  if (fNewBlock) {
4386  *fNewBlock = true;
4387  }
4388  try {
4389  FlatFilePos blockPos =
4390  SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
4391  if (blockPos.IsNull()) {
4392  state.Error(strprintf(
4393  "%s: Failed to find position to write new block to disk",
4394  __func__));
4395  return false;
4396  }
4397  ReceivedBlockTransactions(block, pindex, blockPos);
4398  } catch (const std::runtime_error &e) {
4399  return AbortNode(state, std::string("System error: ") + e.what());
4400  }
4401 
4402  FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
4403 
4404  CheckBlockIndex(consensusParams);
4405 
4406  return true;
4407 }
4408 
// NOTE(review): the signature's leading line was lost in extraction; this
// appears to be ChainstateManager::ProcessNewBlock (or the equivalent
// top-level entry point) — confirm against the upstream source. Runs
// CheckBlock + AcceptBlock under cs_main, then tries to make the new block
// part of the best chain via ActivateBestChain.
 const Config &config, const std::shared_ptr<const CBlock> pblock,
4411  bool fForceProcessing, bool *fNewBlock) {
4412  AssertLockNotHeld(cs_main);
4413 
4414  {
4415  if (fNewBlock) {
4416  *fNewBlock = false;
4417  }
4418 
4419  BlockValidationState state;
4420 
4421  // CheckBlock() does not support multi-threaded block validation
4422  // because CBlock::fChecked can cause data race.
4423  // Therefore, the following critical section must include the
4424  // CheckBlock() call as well.
4425  LOCK(cs_main);
4426 
4427  // Ensure that CheckBlock() passes before calling AcceptBlock, as
4428  // belt-and-suspenders.
4429  bool ret =
4430  CheckBlock(*pblock, state, config.GetChainParams().GetConsensus(),
4431  BlockValidationOptions(config));
4432  if (ret) {
4433  // Store to disk
4435  config, pblock, state, fForceProcessing, nullptr, fNewBlock);
4436  }
4437 
4438  if (!ret) {
// Signal the failure to subscribers (e.g. miners / peer banning)
// before returning.
4439  GetMainSignals().BlockChecked(*pblock, state);
4440  return error("%s: AcceptBlock FAILED (%s)", __func__,
4441  state.ToString());
4442  }
4443  }
4444 
4445  NotifyHeaderTip();
4446 
4447  // Only used to report errors, not invalidity - ignore it
4448  BlockValidationState state;
4449  if (!::ChainstateActive().ActivateBestChain(config, state, pblock)) {
4450  return error("%s: ActivateBestChain failed (%s)", __func__,
4451  state.ToString());
4452  }
4453 
4454  return true;
4455 }
4456 
// NOTE(review): the signature's leading line was lost in extraction; given
// the error strings and the dummy-index pattern this appears to be
// TestBlockValidity — confirm against the upstream source. Runs the full
// validity pipeline (contextual header check, CheckBlock, ContextualCheck-
// Block, ConnectBlock) against a throwaway coins view and block index, so
// nothing is committed. pindexPrev must be the current tip.
 const CBlock &block, CBlockIndex *pindexPrev,
4459  BlockValidationOptions validationOptions) {
4460  AssertLockHeld(cs_main);
4461  assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
4462  CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
4463  BlockHash block_hash(block.GetHash());
4464  CBlockIndex indexDummy(block);
4465  indexDummy.pprev = pindexPrev;
4466  indexDummy.nHeight = pindexPrev->nHeight + 1;
4467  indexDummy.phashBlock = &block_hash;
4468 
4469  // NOTE: CheckBlockHeader is called by CheckBlock
4470  if (!ContextualCheckBlockHeader(params, block, state, pindexPrev,
4471  GetAdjustedTime())) {
4472  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
4473  state.ToString());
4474  }
4475 
4476  if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
4477  return error("%s: Consensus::CheckBlock: %s", __func__,
4478  state.ToString());
4479  }
4480 
4481  if (!ContextualCheckBlock(block, state, params.GetConsensus(),
4482  pindexPrev)) {
4483  return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
4484  state.ToString());
4485  }
4486 
// The trailing `true` is presumably ConnectBlock's just-check flag (dry
// run, no state committed) — confirm against ConnectBlock's signature.
4487  if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew,
4488  params, validationOptions, true)) {
4489  return false;
4490  }
4491 
4492  assert(state.IsValid());
4493  return true;
4494 }
4495 
// NOTE(review): the signature line was lost in extraction; this body sums
// block-file plus undo-file sizes across all files, which matches
// CalculateCurrentUsage (returning a uint64_t byte total) — confirm against
// the upstream source.
 LOCK(cs_LastBlockFile);
4505 
4506  uint64_t retval = 0;
4507  for (const CBlockFileInfo &file : vinfoBlockFile) {
4508  retval += file.nSize + file.nUndoSize;
4509  }
4510 
4511  return retval;
4512 }
4513 
4514 void ChainstateManager::PruneOneBlockFile(const int fileNumber) {
4515  AssertLockHeld(cs_main);
4516  LOCK(cs_LastBlockFile);
4517 
4518  for (const auto &entry : m_blockman.m_block_index) {
4519  CBlockIndex *pindex = entry.second;
4520  if (pindex->nFile == fileNumber) {
4521  pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
4522  pindex->nFile = 0;
4523  pindex->nDataPos = 0;
4524  pindex->nUndoPos = 0;
4525  setDirtyBlockIndex.insert(pindex);
4526 
4527  // Prune from m_blocks_unlinked -- any block we prune would have
4528  // to be downloaded again in order to consider its chain, at which
4529  // point it would be considered as a candidate for
4530  // m_blocks_unlinked or setBlockIndexCandidates.
4531  auto range =
4532  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4533  while (range.first != range.second) {
4534  std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
4535  range.first;
4536  range.first++;
4537  if (_it->second == pindex) {
4538  m_blockman.m_blocks_unlinked.erase(_it);
4539  }
4540  }
4541  }
4542  }
4543 
4544  vinfoBlockFile[fileNumber].SetNull();
4545  setDirtyFileInfo.insert(fileNumber);
4546 }
4547 
4548 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
4549  for (const int i : setFilesToPrune) {
4550  FlatFilePos pos(i, 0);
4551  fs::remove(BlockFileSeq().FileName(pos));
4552  fs::remove(UndoFileSeq().FileName(pos));
4553  LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i);
4554  }
4555 }
4556 
// NOTE(review): the signature's leading line was lost in extraction; this
// appears to be FindFilesToPruneManual(ChainstateManager &chainman, ...) —
// confirm against the upstream source. Selects, for a manual (RPC-driven)
// prune, every block file whose highest block is at or below the requested
// height, capped so at least MIN_BLOCKS_TO_KEEP recent blocks remain.
 std::set<int> &setFilesToPrune,
4563  int nManualPruneHeight) {
4564  assert(fPruneMode && nManualPruneHeight > 0);
4565 
4566  LOCK2(cs_main, cs_LastBlockFile);
4567  if (::ChainActive().Tip() == nullptr) {
4568  return;
4569  }
4570 
4571  // last block to prune is the lesser of (user-specified height,
4572  // MIN_BLOCKS_TO_KEEP from the tip)
4573  unsigned int nLastBlockWeCanPrune =
4574  std::min((unsigned)nManualPruneHeight,
4575  ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
4576  int count = 0;
4577  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
// Skip empty files and files still containing blocks above the prune
// height.
4578  if (vinfoBlockFile[fileNumber].nSize == 0 ||
4579  vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4580  continue;
4581  }
4582  chainman.PruneOneBlockFile(fileNumber);
4583  setFilesToPrune.insert(fileNumber);
4584  count++;
4585  }
4586  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
4587  nLastBlockWeCanPrune, count);
4588 }
4589 
4590 /* This function is called from the RPC code for pruneblockchain */
4591 void PruneBlockFilesManual(int nManualPruneHeight) {
4592  BlockValidationState state;
4593  const CChainParams &chainparams = Params();
4594  if (!::ChainstateActive().FlushStateToDisk(
4595  chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
4596  LogPrintf("%s: failed to flush state (%s)\n", __func__,
4597  state.ToString());
4598  }
4599 }
4600 
// Select block files to prune automatically so total blk/rev usage drops
// back under nPruneTarget (with a chunk-sized buffer, enlarged during
// initial sync), never pruning files that may hold blocks within
// MIN_BLOCKS_TO_KEEP of the tip, and never before nPruneAfterHeight.
// NOTE(review): a few interior lines of this function were lost in
// extraction (the nLastBlockWeCanPrune initializer, the IBD condition
// guarding the buffer bump, and the head of the final LogPrint call) —
// confirm against the upstream source.
4622 static void FindFilesToPrune(ChainstateManager &chainman,
4623  std::set<int> &setFilesToPrune,
4624  uint64_t nPruneAfterHeight) {
4625  LOCK2(cs_main, cs_LastBlockFile);
4626  if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
4627  return;
4628  }
4629  if (uint64_t(::ChainActive().Tip()->nHeight) <= nPruneAfterHeight) {
4630  return;
4631  }
4632 
4633  unsigned int nLastBlockWeCanPrune =
4635  uint64_t nCurrentUsage = CalculateCurrentUsage();
4636  // We don't check to prune until after we've allocated new space for files,
4637  // so we should leave a buffer under our target to account for another
4638  // allocation before the next pruning.
4639  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
4640  uint64_t nBytesToPrune;
4641  int count = 0;
4642 
4643  if (nCurrentUsage + nBuffer >= nPruneTarget) {
4644  // On a prune event, the chainstate DB is flushed.
4645  // To avoid excessive prune events negating the benefit of high dbcache
4646  // values, we should not prune too rapidly.
4647  // So when pruning in IBD, increase the buffer a bit to avoid a re-prune
4648  // too soon.
4650  // Since this is only relevant during IBD, we use a fixed 10%
4651  nBuffer += nPruneTarget / 10;
4652  }
4653 
4654  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4655  nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
4656  vinfoBlockFile[fileNumber].nUndoSize;
4657 
4658  if (vinfoBlockFile[fileNumber].nSize == 0) {
4659  continue;
4660  }
4661 
4662  // are we below our target?
4663  if (nCurrentUsage + nBuffer < nPruneTarget) {
4664  break;
4665  }
4666 
4667  // don't prune files that could have a block within
4668  // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
4669  if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4670  continue;
4671  }
4672 
4673  chainman.PruneOneBlockFile(fileNumber);
4674  // Queue up the files for removal
4675  setFilesToPrune.insert(fileNumber);
4676  nCurrentUsage -= nBytesToPrune;
4677  count++;
4678  }
4679  }
4680 
4682  "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
4683  "max_prune_height=%d removed %d blk/rev pairs\n",
4684  nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
4685  ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
4686  nLastBlockWeCanPrune, count);
4687 }
4688 
// NOTE(review): the signature line was lost in extraction; this is the
// InsertBlockIndex(hash) helper referenced by the LoadBlockIndexGuts
// callback below — confirm the exact signature against the upstream source.
// Returns the existing index entry for `hash`, or inserts and returns a
// fresh empty one; returns nullptr for the null hash. Requires cs_main.
 AssertLockHeld(cs_main);
4691 
4692  if (hash.IsNull()) {
4693  return nullptr;
4694  }
4695 
4696  // Return existing
4697  BlockMap::iterator mi = m_block_index.find(hash);
4698  if (mi != m_block_index.end()) {
4699  return (*mi).second;
4700  }
4701 
4702  // Create new
4703  CBlockIndex *pindexNew = new CBlockIndex();
4704  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
// Point the index entry at the map's own copy of the hash so the pointer
// stays valid for the lifetime of the map node.
4705  pindexNew->phashBlock = &((*mi).first);
4706 
4707  return pindexNew;
4709 
// BlockManager::LoadBlockIndex (the opening line of the signature is elided
// in this rendering): load all block-index entries from `blocktree`, then
// walk them in height order to derive per-block chain state — cumulative
// chainwork, max timestamp, unlinked-block tracking, failed-parent
// propagation, candidate-tip collection, and the best invalid / parked /
// header pointers. Returns false on DB failure or shutdown request.
4711  const Consensus::Params &params, CBlockTreeDB &blocktree,
4712  std::set<CBlockIndex *, CBlockIndexWorkComparator>
4713  &block_index_candidates) {
4714  AssertLockHeld(cs_main);
4715  if (!blocktree.LoadBlockIndexGuts(
4716  params, [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(
4717  cs_main) { return this->InsertBlockIndex(hash); })) {
4718  return false;
4719  }
4720 
4721  // Calculate nChainWork
// Sort by height so each block's pprev has already been processed when the
// block itself is visited below.
4722  std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
4723  vSortedByHeight.reserve(m_block_index.size());
4724  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4725  m_block_index) {
4726  CBlockIndex *pindex = item.second;
4727  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
4728  }
4729 
4730  sort(vSortedByHeight.begin(), vSortedByHeight.end());
4731  for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
4732  if (ShutdownRequested()) {
4733  return false;
4734  }
4735  CBlockIndex *pindex = item.second;
// Cumulative work = parent's cumulative work + this block's own proof.
4736  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
4737  GetBlockProof(*pindex);
4738  pindex->nTimeMax =
4739  (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
4740  : pindex->nTime);
4741  // We can link the chain of blocks for which we've received transactions
4742  // at some point. Pruned nodes may have deleted the block.
4743  if (pindex->nTx > 0) {
4744  if (!pindex->UpdateChainStats() && pindex->pprev) {
4745  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
4746  }
4747  }
4748 
// Propagate failure downward: a child of a failed block is marked with a
// failed parent and queued for persisting.
4749  if (!pindex->nStatus.hasFailed() && pindex->pprev &&
4750  pindex->pprev->nStatus.hasFailed()) {
4751  pindex->nStatus = pindex->nStatus.withFailedParent();
4752  setDirtyBlockIndex.insert(pindex);
4753  }
4754  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
4755  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
4756  block_index_candidates.insert(pindex);
4757  }
4758 
4759  if (pindex->nStatus.isInvalid() &&
4760  (!pindexBestInvalid ||
4761  pindex->nChainWork > pindexBestInvalid->nChainWork)) {
4762  pindexBestInvalid = pindex;
4763  }
4764 
4765  if (pindex->nStatus.isOnParkedChain() &&
4766  (!pindexBestParked ||
4767  pindex->nChainWork > pindexBestParked->nChainWork)) {
4768  pindexBestParked = pindex;
4769  }
4770 
4771  if (pindex->pprev) {
4772  pindex->BuildSkip();
4773  }
4774 
// NOTE(review): the third operand of this condition (line 4777, presumably
// a CBlockIndexWorkComparator comparison against pindexBestHeader) is
// elided in this rendering.
4775  if (pindex->IsValid(BlockValidity::TREE) &&
4776  (pindexBestHeader == nullptr ||
4778  pindexBestHeader = pindex;
4779  }
4780  }
4781 
4782  return true;
4783 }
4784 
// BlockManager::Unload (signature line elided in this rendering): release
// every heap-allocated CBlockIndex and clear all block-index bookkeeping
// containers. Only safe while no peer-processing is running, since that
// logic assumes a stable block index (see the comments around the global
// UnloadBlockIndex below).
4786  m_failed_blocks.clear();
4787  m_blocks_unlinked.clear();
4788 
4789  for (const BlockMap::value_type &entry : m_block_index) {
4790  delete entry.second;
4791  }
4792 
4793  m_block_index.clear();
4795 
// Load the block index and block-file metadata from the on-disk block tree
// database into `chainman`. Also verifies that every blk?????.dat file
// referenced by an indexed block is present, and restores the pruning and
// reindexing flags. Returns false if the index cannot be loaded or a
// required block file is missing. Requires cs_main.
4796 static bool LoadBlockIndexDB(ChainstateManager &chainman,
4797  const Consensus::Params &params)
4798  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
// NOTE(review): the final argument to LoadBlockIndex (line 4801, presumably
// the setBlockIndexCandidates of the active chainstate) is elided in this
// rendering.
4799  if (!chainman.m_blockman.LoadBlockIndex(
4800  params, *pblocktree,
4802  return false;
4803  }
4804 
4805  // Load block file info
4806  pblocktree->ReadLastBlockFile(nLastBlockFile);
4807  vinfoBlockFile.resize(nLastBlockFile + 1);
4808  LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4809  for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4810  pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4811  }
4812  LogPrintf("%s: last block file info: %s\n", __func__,
4813  vinfoBlockFile[nLastBlockFile].ToString());
// Keep reading past the recorded last file in case extra block files exist
// (e.g. after an unclean shutdown); stop at the first missing entry.
4814  for (int nFile = nLastBlockFile + 1; true; nFile++) {
4815  CBlockFileInfo info;
4816  if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4817  vinfoBlockFile.push_back(info);
4818  } else {
4819  break;
4820  }
4821  }
4822 
4823  // Check presence of blk files
4824  LogPrintf("Checking all blk files are present...\n");
4825  std::set<int> setBlkDataFiles;
4826  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4827  chainman.BlockIndex()) {
4828  CBlockIndex *pindex = item.second;
4829  if (pindex->nStatus.hasData()) {
4830  setBlkDataFiles.insert(pindex->nFile);
4831  }
4832  }
4833 
// NOTE(review): the file-open expression guarding this check (line 4836)
// is elided in this rendering; the visible logic fails the load when a
// referenced block file cannot be opened.
4834  for (const int i : setBlkDataFiles) {
4835  FlatFilePos pos(i, 0);
4837  .IsNull()) {
4838  return false;
4839  }
4840  }
4841 
4842  // Check whether we have ever pruned block & undo files
4843  pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4844  if (fHavePruned) {
4845  LogPrintf(
4846  "LoadBlockIndexDB(): Block files have previously been pruned\n");
4847  }
4848 
4849  // Check whether we need to continue reindexing
4850  if (pblocktree->IsReindexing()) {
4851  fReindex = true;
4852  }
4853 
4854  return true;
4855 }
4856 
// Point m_chain at the block recorded as best by the coins database.
// Returns true if the tip was already correct or was successfully set;
// false if the coins DB's best block is not present in the block index.
// Requires cs_main; must not be called with an empty coins view.
4857 bool CChainState::LoadChainTip(const CChainParams &chainparams) {
4858  AssertLockHeld(cs_main);
4859  const CCoinsViewCache &coins_cache = CoinsTip();
4860  // Never called when the coins view is empty
4861  assert(!coins_cache.GetBestBlock().IsNull());
4862  const CBlockIndex *tip = m_chain.Tip();
4863 
// Fast path: the in-memory chain already matches the coins DB.
4864  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4865  return true;
4866  }
4867 
4868  // Load pointer to end of best chain
4869  CBlockIndex *pindex = LookupBlockIndex(coins_cache.GetBestBlock());
4870  if (!pindex) {
4871  return false;
4872  }
// NOTE(review): line 4874 between SetTip and the re-read of the tip is
// elided in this rendering (presumably PruneBlockIndexCandidates()).
4873  m_chain.SetTip(pindex);
4875 
4876  tip = m_chain.Tip();
4877  LogPrintf(
4878  "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4879  tip->GetBlockHash().ToString(), m_chain.Height(),
4880  FormatISO8601DateTime(tip->GetBlockTime()),
4881  GuessVerificationProgress(chainparams.TxData(), tip));
4882  return true;
4883 }
4884 
// Presumably the CVerifyDB constructor (signature line elided in this
// rendering): start the "Verifying blocks" UI progress indicator at 0%.
4886  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4887 }
4888 
// Presumably the CVerifyDB destructor (signature line elided in this
// rendering): close out the UI progress indicator at 100%.
4890  uiInterface.ShowProgress("", 100, false);
4891 }
4892 
// Verify the integrity of the last `nCheckDepth` blocks of the active chain
// at thoroughness `nCheckLevel` (clamped to [0,4]):
//   0 - blocks can be read from disk;
//   1 - blocks pass CheckBlock;
//   2 - undo data can be read;
//   3 - tip blocks can be disconnected in memory without coins-db
//       inconsistencies;
//   4 - the disconnected blocks can be reconnected.
// Returns true on success or early shutdown; false (via error()) on any
// detected corruption.
4893 bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
4894  int nCheckLevel, int nCheckDepth) {
4895  LOCK(cs_main);
4896 
4897  const CChainParams &params = config.GetChainParams();
4898  const Consensus::Params &consensusParams = params.GetConsensus();
4899 
// Nothing to verify with fewer than two blocks.
4900  if (::ChainActive().Tip() == nullptr ||
4901  ::ChainActive().Tip()->pprev == nullptr) {
4902  return true;
4903  }
4904 
4905  // Verify blocks in the best chain
// NOTE(review): the guard on line 4906 (presumably checking nCheckDepth for
// a non-positive or out-of-range value) is elided in this rendering.
4907  nCheckDepth = ::ChainActive().Height();
4908  }
4909 
4910  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4911  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
4912  nCheckLevel);
4913 
4914  CCoinsViewCache coins(coinsview);
4915  CBlockIndex *pindex;
4916  CBlockIndex *pindexFailure = nullptr;
4917  int nGoodTransactions = 0;
4918  BlockValidationState state;
4919  int reportDone = 0;
4920  LogPrintfToBeContinued("[0%%]...");
// Walk backwards from the tip; at level 4 the backward pass only accounts
// for the first 50% of the progress bar (reconnection takes the rest).
4921  for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev;
4922  pindex = pindex->pprev) {
4923  const int percentageDone =
4924  std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() -
4925  pindex->nHeight)) /
4926  (double)nCheckDepth *
4927  (nCheckLevel >= 4 ? 50 : 100))));
4928  if (reportDone < percentageDone / 10) {
4929  // report every 10% step
4930  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4931  reportDone = percentageDone / 10;
4932  }
4933 
4934  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4935  percentageDone, false);
4936  if (pindex->nHeight <= ::ChainActive().Height() - nCheckDepth) {
4937  break;
4938  }
4939 
4940  if (fPruneMode && !pindex->nStatus.hasData()) {
4941  // If pruning, only go back as far as we have data.
4942  LogPrintf("VerifyDB(): block verification stopping at height %d "
4943  "(pruning, no data)\n",
4944  pindex->nHeight);
4945  break;
4946  }
4947 
4948  CBlock block;
4949 
4950  // check level 0: read from disk
4951  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4952  return error(
4953  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
4954  pindex->nHeight, pindex->GetBlockHash().ToString());
4955  }
4956 
4957  // check level 1: verify block validity
4958  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
4959  BlockValidationOptions(config))) {
4960  return error("%s: *** found bad block at %d, hash=%s (%s)\n",
4961  __func__, pindex->nHeight,
4962  pindex->GetBlockHash().ToString(), state.ToString());
4963  }
4964 
4965  // check level 2: verify undo validity
4966  if (nCheckLevel >= 2 && pindex) {
4967  CBlockUndo undo;
4968  if (!pindex->GetUndoPos().IsNull()) {
4969  if (!UndoReadFromDisk(undo, pindex)) {
4970  return error(
4971  "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
4972  pindex->nHeight, pindex->GetBlockHash().ToString());
4973  }
4974  }
4975  }
4976  // check level 3: check for inconsistencies during memory-only
4977  // disconnect of tip blocks
// NOTE(review): the memory-budget bound of this condition (lines 4980-4981)
// is elided in this rendering; the visible logic only disconnects while the
// cache stays within some usage limit.
4978  if (nCheckLevel >= 3 &&
4979  (coins.DynamicMemoryUsage() +
4982  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4983  DisconnectResult res =
4984  ::ChainstateActive().DisconnectBlock(block, pindex, coins);
4985  if (res == DisconnectResult::FAILED) {
4986  return error("VerifyDB(): *** irrecoverable inconsistency in "
4987  "block data at %d, hash=%s",
4988  pindex->nHeight,
4989  pindex->GetBlockHash().ToString());
4990  }
4991 
// An unclean disconnect is recoverable: remember where it happened and
// reset the good-transaction counter rather than aborting.
4992  if (res == DisconnectResult::UNCLEAN) {
4993  nGoodTransactions = 0;
4994  pindexFailure = pindex;
4995  } else {
4996  nGoodTransactions += block.vtx.size();
4997  }
4998  }
4999 
5000  if (ShutdownRequested()) {
5001  return true;
5002  }
5003  }
5004 
5005  if (pindexFailure) {
5006  return error("VerifyDB(): *** coin database inconsistencies found "
5007  "(last %i blocks, %i good transactions before that)\n",
5008  ::ChainActive().Height() - pindexFailure->nHeight + 1,
5009  nGoodTransactions);
5010  }
5011 
5012  // store block count as we move pindex at check level >= 4
5013  int block_count = ::ChainActive().Height() - pindex->nHeight;
5014 
5015  // check level 4: try reconnecting blocks
5016  if (nCheckLevel >= 4) {
5017  while (pindex != ::ChainActive().Tip()) {
5018  const int percentageDone = std::max(
5019  1, std::min(99, 100 - int(double(::ChainActive().Height() -
5020  pindex->nHeight) /
5021  double(nCheckDepth) * 50)));
5022  if (reportDone < percentageDone / 10) {
5023  // report every 10% step
5024  LogPrintfToBeContinued("[%d%%]...", percentageDone);
5025  reportDone = percentageDone / 10;
5026  }
5027  uiInterface.ShowProgress(_("Verifying blocks...").translated,
5028  percentageDone, false);
5029  pindex = ::ChainActive().Next(pindex);
5030  CBlock block;
5031  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
5032  return error(
5033  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
5034  pindex->nHeight, pindex->GetBlockHash().ToString());
5035  }
5036  if (!::ChainstateActive().ConnectBlock(
5037  block, state, pindex, coins, params,
5038  BlockValidationOptions(config))) {
5039  return error("VerifyDB(): *** found unconnectable block at %d, "
5040  "hash=%s (%s)",
5041  pindex->nHeight, pindex->GetBlockHash().ToString(),
5042  state.ToString());
5043  }
5044  if (ShutdownRequested()) {
5045  return true;
5046  }
5047  }
5048  }
5049 
5050  LogPrintf("[DONE].\n");
5051  LogPrintf("No coin database inconsistencies in last %i blocks (%i "
5052  "transactions)\n",
5053  block_count, nGoodTransactions);
5054 
5055  return true;
5056 }
5057 
// Re-apply a block's effects to `view` during crash-recovery replay
// (presumably static RollforwardBlock(const CBlockIndex *, ...) — the first
// signature line is elided in this rendering). Unlike ConnectBlock this
// performs no validation: it only re-adds outputs and re-spends inputs, and
// both operations are idempotent so re-applying a partially applied block
// is safe.
5063  CCoinsViewCache &view,
5064  const Consensus::Params &params) {
5065  // TODO: merge with ConnectBlock
5066  CBlock block;
5067  if (!ReadBlockFromDisk(block, pindex, params)) {
5068  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
5069  pindex->nHeight, pindex->GetBlockHash().ToString());
5070  }
5071 
// First pass: (re-)create every output of every transaction.
5072  for (const CTransactionRef &tx : block.vtx) {
5073  // Pass check = true as every addition may be an overwrite.
5074  AddCoins(view, *tx, pindex->nHeight, true);
5075  }
5076 
// Second pass: spend the inputs. Done after all additions so that
// transactions spending outputs created earlier in the same block work.
5077  for (const CTransactionRef &tx : block.vtx) {
5078  if (tx->IsCoinBase()) {
5079  continue;
5080  }
5081 
5082  for (const CTxIn &txin : tx->vin) {
5083  view.SpendCoin(txin.prevout);
5084  }
5085  }
5086 
5087  return true;
5088 }
5089 
// CChainState::ReplayBlocks (signature line elided in this rendering):
// bring the coins database back to a consistent state after an interrupted
// flush. The coins DB records the two "head" blocks it was transitioning
// between; this rolls the UTXO set back along the old branch to the common
// ancestor, then rolls forward to the new head, and finally flushes.
// Returns true when no replay was needed or the replay succeeded.
5091  LOCK(cs_main);
5092 
5093  CCoinsView &db = this->CoinsDB();
5094  CCoinsViewCache cache(&db);
5095 
5096  std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
5097  if (hashHeads.empty()) {
5098  // We're already in a consistent state.
5099  return true;
5100  }
5101  if (hashHeads.size() != 2) {
5102  return error("ReplayBlocks(): unknown inconsistent state");
5103  }
5104 
5105  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
5106  LogPrintf("Replaying blocks\n");
5107 
5108  // Old tip during the interrupted flush.
5109  const CBlockIndex *pindexOld = nullptr;
5110  // New tip during the interrupted flush.
5111  const CBlockIndex *pindexNew;
5112  // Latest block common to both the old and the new tip.
5113  const CBlockIndex *pindexFork = nullptr;
5114 
5115  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
5116  return error(
5117  "ReplayBlocks(): reorganization to unknown block requested");
5118  }
5119 
5120  pindexNew = m_blockman.m_block_index[hashHeads[0]];
5121 
5122  if (!hashHeads[1].IsNull()) {
5123  // The old tip is allowed to be 0, indicating it's the first flush.
5124  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
5125  return error(
5126  "ReplayBlocks(): reorganization from unknown block requested");
5127  }
5128 
5129  pindexOld = m_blockman.m_block_index[hashHeads[1]];
5130  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
5131  assert(pindexFork != nullptr);
5132  }
5133 
5134  // Rollback along the old branch.
5135  while (pindexOld != pindexFork) {
5136  if (pindexOld->nHeight > 0) {
5137  // Never disconnect the genesis block.
5138  CBlock block;
5139  if (!ReadBlockFromDisk(block, pindexOld, params)) {
5140  return error("RollbackBlock(): ReadBlockFromDisk() failed at "
5141  "%d, hash=%s",
5142  pindexOld->nHeight,
5143  pindexOld->GetBlockHash().ToString());
5144  }
5145 
5146  LogPrintf("Rolling back %s (%i)\n",
5147  pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
5148  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
5149  if (res == DisconnectResult::FAILED) {
5150  return error(
5151  "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
5152  pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
5153  }
5154 
5155  // If DisconnectResult::UNCLEAN is returned, it means a non-existing
5156  // UTXO was deleted, or an existing UTXO was overwritten. It
5157  // corresponds to cases where the block-to-be-disconnect never had
5158  // all its operations applied to the UTXO set. However, as both
5159  // writing a UTXO and deleting a UTXO are idempotent operations, the
5160  // result is still a version of the UTXO set with the effects of
5161  // that block undone.
5162  }
5163  pindexOld = pindexOld->pprev;
5164  }
5165 
5166  // Roll forward from the forking point to the new tip.
5167  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
5168  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
5169  ++nHeight) {
5170  const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
5171  LogPrintf("Rolling forward %s (%i)\n",
5172  pindex->GetBlockHash().ToString(), nHeight);
5173  uiInterface.ShowProgress(_("Replaying blocks...").translated,
5174  (int)((nHeight - nForkHeight) * 100.0 /
5175  (pindexNew->nHeight - nForkHeight)),
5176  false);
5177  if (!RollforwardBlock(pindex, cache, params)) {
5178  return false;
5179  }
5180  }
5181 
// Record the new tip and persist the replayed UTXO changes.
5182  cache.SetBestBlock(pindexNew->GetBlockHash());
5183  cache.Flush();
5184  uiInterface.ShowProgress("", 100, false);
5185  return true;
5186 }
5187 
5188 // May NOT be used after any connections are up as much of the peer-processing
5189 // logic assumes a consistent block index state
// CChainState::UnloadBlockIndex (signature line elided in this rendering):
// reset this chainstate's per-run block-index state before the index
// entries themselves are freed by BlockManager::Unload.
5191  nBlockSequenceId = 1;
5192  setBlockIndexCandidates.clear();
5193 
5194  // Do not point to CBlockIndex that will be free'd
5195  m_finalizedBlockIndex = nullptr;
5196 }
5197 
5198 // May NOT be used after any connections are up as much
5199 // of the peer-processing logic assumes a consistent
5200 // block index state
// Global UnloadBlockIndex (signature line elided in this rendering): tear
// down all global block-index state — the chainstate manager, the various
// best-block pointers, the mempool, and the block-file bookkeeping.
// NOTE(review): line 5209 between the fork pointers and the mempool clear
// is elided in this rendering.
5202  LOCK(cs_main);
5203  g_chainman.Unload();
5204  pindexBestInvalid = nullptr;
5205  pindexBestParked = nullptr;
5206  pindexBestHeader = nullptr;
5207  pindexBestForkTip = nullptr;
5208  pindexBestForkBase = nullptr;
5210  g_mempool.clear();
5211  vinfoBlockFile.clear();
5212  nLastBlockFile = 0;
5213  setDirtyBlockIndex.clear();
5214  setDirtyFileInfo.clear();
5215  fHavePruned = false;
5216 }
5217 
// ChainstateManager::LoadBlockIndex (signature line elided in this
// rendering): load the block index from disk unless a reindex is in
// progress; report (via logging) when databases need to be initialized
// from scratch. Returns false only if the DB load itself fails.
5219  AssertLockHeld(cs_main);
5220  // Load block index from databases
5221  bool needs_init = fReindex;
5222  if (!fReindex) {
5223  bool ret = LoadBlockIndexDB(*this, params);
5224  if (!ret) {
5225  return false;
5226  }
5227 
// An empty index after a successful load means a brand-new datadir.
5228  needs_init = m_blockman.m_block_index.empty();
5229  }
5230 
5231  if (needs_init) {
5232  // Everything here is for *new* reindex/DBs. Thus, though
5233  // LoadBlockIndexDB may have set fReindex if we shut down
5234  // mid-reindex previously, we don't check fReindex and
5235  // instead only check it prior to LoadBlockIndexDB to set
5236  // needs_init.
5237 
5238  LogPrintf("Initializing databases...\n");
5239  }
5240  return true;
5241 }
5242 
// CChainState::LoadGenesisBlock (signature line elided in this rendering):
// ensure the genesis block is written to disk and registered in the block
// index. Idempotent — returns true immediately when genesis is already
// indexed. Returns false if writing the block to disk fails.
5244  LOCK(cs_main);
5245 
5246  // Check whether we're already initialized by checking for genesis in
5247  // m_blockman.m_block_index. Note that we can't use m_chain here, since it
5248  // is set based on the coins db, not the block index db, which is the only
5249  // thing loaded at this point.
5250  if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash())) {
5251  return true;
5252  }
5253 
5254  try {
5255  const CBlock &block = chainparams.GenesisBlock();
// Height 0, no known position yet (nullptr dbp): append to a block file.
5256  FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
5257  if (blockPos.IsNull()) {
5258  return error("%s: writing genesis block to disk failed", __func__);
5259  }
5260  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
5261  ReceivedBlockTransactions(block, pindex, blockPos);
5262  } catch (const std::runtime_error &e) {
5263  return error("%s: failed to write genesis block: %s", __func__,
5264  e.what());
5265  }
5266 
5267  return true;
5268 }
5269 
5270 bool LoadGenesisBlock(const CChainParams &chainparams) {
5272 }
5273 
// Import blocks from an external blk?????.dat-format file (used by -reindex
// and -loadblock). Scans for the network's disk magic, deserializes each
// block, and feeds it to AcceptBlock; blocks whose parent is not yet known
// are remembered in mapBlocksUnknownParent and retried once the parent
// arrives. Takes ownership of `fileIn` (closed by CBufferedFile). `dbp`,
// when non-null, receives/carries the on-disk position of each block.
5274 void LoadExternalBlockFile(const Config &config, FILE *fileIn,
5275  FlatFilePos *dbp) {
5276  // Map of disk positions for blocks with unknown parent (only used for
5277  // reindex)
5278  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
5279  int64_t nStart = GetTimeMillis();
5280 
5281  const CChainParams &chainparams = config.GetChainParams();
5282 
5283  int nLoaded = 0;
5284  try {
5285  // This takes over fileIn and calls fclose() on it in the CBufferedFile
5286  // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
5287  // so any transaction can fit in the buffer.
5288  CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
5289  CLIENT_VERSION);
5290  uint64_t nRewind = blkdat.GetPos();
5291  while (!blkdat.eof()) {
5292  if (ShutdownRequested()) {
5293  return;
5294  }
5295 
5296  blkdat.SetPos(nRewind);
5297  // Start one byte further next time, in case of failure.
5298  nRewind++;
5299  // Remove former limit.
5300  blkdat.SetLimit();
5301  unsigned int nSize = 0;
5302  try {
5303  // Locate a header.
// NOTE(review): the declaration of `buf` (line 5304, presumably a byte
// array sized to the message-start magic) and the memcmp length argument
// (line 5309) are elided in this rendering.
5305  blkdat.FindByte(chainparams.DiskMagic()[0]);
5306  nRewind = blkdat.GetPos() + 1;
5307  blkdat >> buf;
5308  if (memcmp(buf, chainparams.DiskMagic().data(),
5310  continue;
5311  }
5312 
5313  // Read size.
5314  blkdat >> nSize;
// Anything smaller than a bare block header (80 bytes) cannot be a block.
5315  if (nSize < 80) {
5316  continue;
5317  }
5318  } catch (const std::exception &) {
5319  // No valid block header found; don't complain.
5320  break;
5321  }
5322 
5323  try {
5324  // read block
5325  uint64_t nBlockPos = blkdat.GetPos();
5326  if (dbp) {
5327  dbp->nPos = nBlockPos;
5328  }
5329  blkdat.SetLimit(nBlockPos + nSize);
5330  blkdat.SetPos(nBlockPos);
5331  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5332  CBlock &block = *pblock;
5333  blkdat >> block;
5334  nRewind = blkdat.GetPos();
5335 
5336  const BlockHash hash = block.GetHash();
5337  {
5338  LOCK(cs_main);
5339  // detect out of order blocks, and store them for later
5340  if (hash != chainparams.GetConsensus().hashGenesisBlock &&
5341  !LookupBlockIndex(block.hashPrevBlock)) {
5342  LogPrint(
5344  "%s: Out of order block %s, parent %s not known\n",
5345  __func__, hash.ToString(),
5346  block.hashPrevBlock.ToString());
5347  if (dbp) {
5348  mapBlocksUnknownParent.insert(
5349  std::make_pair(block.hashPrevBlock, *dbp));
5350  }
5351  continue;
5352  }
5353 
5354  // process in case the block isn't known yet
5355  CBlockIndex *pindex = LookupBlockIndex(hash);
5356  if (!pindex || !pindex->nStatus.hasData()) {
5357  BlockValidationState state;
5358  if (::ChainstateActive().AcceptBlock(
5359  config, pblock, state, true, dbp, nullptr)) {
5360  nLoaded++;
5361  }
5362  if (state.IsError()) {
5363  break;
5364  }
5365  } else if (hash != chainparams.GetConsensus()
5366  .hashGenesisBlock &&
5367  pindex->nHeight % 1000 == 0) {
5368  LogPrint(
5370  "Block Import: already had block %s at height %d\n",
5371  hash.ToString(), pindex->nHeight);
5372  }
5373  }
5374 
5375  // Activate the genesis block so normal node progress can
5376  // continue
5377  if (hash == chainparams.GetConsensus().hashGenesisBlock) {
5378  BlockValidationState state;
5379  if (!ActivateBestChain(config, state, nullptr)) {
5380  break;
5381  }
5382  }
5383 
5384  NotifyHeaderTip();
5385 
5386  // Recursively process earlier encountered successors of this
5387  // block
5388  std::deque<uint256> queue;
5389  queue.push_back(hash);
5390  while (!queue.empty()) {
5391  uint256 head = queue.front();
5392  queue.pop_front();
5393  std::pair<std::multimap<uint256, FlatFilePos>::iterator,
5394  std::multimap<uint256, FlatFilePos>::iterator>
5395  range = mapBlocksUnknownParent.equal_range(head);
5396  while (range.first != range.second) {
5397  std::multimap<uint256, FlatFilePos>::iterator it =
5398  range.first;
5399  std::shared_ptr<CBlock> pblockrecursive =
5400  std::make_shared<CBlock>();
5401  if (ReadBlockFromDisk(*pblockrecursive, it->second,
5402  chainparams.GetConsensus())) {
5403  LogPrint(
5405  "%s: Processing out of order child %s of %s\n",
5406  __func__, pblockrecursive->GetHash().ToString(),
5407  head.ToString());
5408  LOCK(cs_main);
5409  BlockValidationState dummy;
5410  if (::ChainstateActive().AcceptBlock(
5411  config, pblockrecursive, dummy, true,
5412  &it->second, nullptr)) {
5413  nLoaded++;
// A newly accepted child may itself unblock further descendants.
5414  queue.push_back(pblockrecursive->GetHash());
5415  }
5416  }
5417  range.first++;
5418  mapBlocksUnknownParent.erase(it);
5419  NotifyHeaderTip();
5420  }
5421  }
5422  } catch (const std::exception &e) {
// Per-block failures are logged and skipped; the scan continues at the
// next candidate magic.
5423  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
5424  e.what());
5425  }
5426  }
5427  } catch (const std::runtime_error &e) {
5428  AbortNode(std::string("System error: ") + e.what());
5429  }
5430 
5431  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
5432  GetTimeMillis() - nStart);
5433 }
5434 
5435 void CChainState::CheckBlockIndex(const Consensus::Params &consensusParams) {
5436  if (!fCheckBlockIndex) {
5437  return;
5438  }
5439 
5440  LOCK(cs_main);
5441 
5442  // During a reindex, we read the genesis block and call CheckBlockIndex
5443  // before ActivateBestChain, so we have the genesis block in
5444  // m_blockman.m_block_index but no active chain. (A few of the tests when
5445  // iterating the block tree require that m_chain has been initialized.)
5446  if (m_chain.Height() < 0) {
5447  assert(m_blockman.m_block_index.size() <= 1);
5448  return;
5449  }
5450 
5451  // Build forward-pointing map of the entire block tree.
5452  std::multimap<CBlockIndex *, CBlockIndex *> forward;
5453  for (const auto &entry : m_blockman.m_block_index) {
5454  forward.emplace(entry.second->pprev, entry.second);
5455  }
5456 
5457  assert(forward.size() == m_blockman.m_block_index.size());
5458 
5459  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5460  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5461  rangeGenesis = forward.equal_range(nullptr);
5462  CBlockIndex *pindex = rangeGenesis.first->second;
5463  rangeGenesis.first++;
5464  // There is only one index entry with parent nullptr.
5465  assert(rangeGenesis.first == rangeGenesis.second);
5466 
5467  // Iterate over the entire block tree, using depth-first search.
5468  // Along the way, remember whether there are blocks on the path from genesis
5469  // block being explored which are the first to have certain properties.
5470  size_t nNodes = 0;
5471  int nHeight = 0;
5472  // Oldest ancestor of pindex which is invalid.
5473  CBlockIndex *pindexFirstInvalid = nullptr;
5474  // Oldest ancestor of pindex which is parked.
5475  CBlockIndex *pindexFirstParked = nullptr;
5476  // Oldest ancestor of pindex which does not have data available.
5477  CBlockIndex *pindexFirstMissing = nullptr;
5478  // Oldest ancestor of pindex for which nTx == 0.
5479  CBlockIndex *pindexFirstNeverProcessed = nullptr;
5480  // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
5481  // (regardless of being valid or not).
5482  CBlockIndex *pindexFirstNotTreeValid = nullptr;
5483  // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
5484  // (regardless of being valid or not).
5485  CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
5486  // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
5487  // (regardless of being valid or not).
5488  CBlockIndex *pindexFirstNotChainValid = nullptr;
5489  // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
5490  // (regardless of being valid or not).
5491  CBlockIndex *pindexFirstNotScriptsValid = nullptr;
5492  while (pindex != nullptr) {
5493  nNodes++;
5494  if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
5495  pindexFirstInvalid = pindex;
5496  }
5497  if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
5498  pindexFirstParked = pindex;
5499  }
5500  if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) {
5501  pindexFirstMissing = pindex;
5502  }
5503  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
5504  pindexFirstNeverProcessed = pindex;
5505  }
5506  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
5507  pindex->nStatus.getValidity() < BlockValidity::TREE) {
5508  pindexFirstNotTreeValid = pindex;
5509  }
5510  if (pindex->pprev != nullptr &&
5511  pindexFirstNotTransactionsValid == nullptr &&
5513  pindexFirstNotTransactionsValid = pindex;
5514  }
5515  if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
5517  pindexFirstNotChainValid = pindex;
5518  }
5519  if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
5521  pindexFirstNotScriptsValid = pindex;
5522  }
5523 
5524  // Begin: actual consistency checks.
5525  if (pindex->pprev == nullptr) {
5526  // Genesis block checks.
5527  // Genesis block's hash must match.
5528  assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
5529  // The current active chain's genesis block must be this block.
5530  assert(pindex == m_chain.Genesis());
5531  }
5532  if (!pindex->HaveTxsDownloaded()) {
5533  // nSequenceId can't be set positive for blocks that aren't linked
5534  // (negative is used for preciousblock)
5535  assert(pindex->nSequenceId <= 0);
5536  }
5537  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
5538  // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
5539  // (or VALID_TRANSACTIONS) if no pruning has occurred.
5540  if (!fHavePruned) {
5541  // If we've never pruned, then HAVE_DATA should be equivalent to nTx
5542  // > 0
5543  assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
5544  assert(pindexFirstMissing == pindexFirstNeverProcessed);
5545  } else if (pindex->nStatus.hasData()) {
5546  // If we have pruned, then we can only say that HAVE_DATA implies
5547  // nTx > 0
5548  assert(pindex->nTx > 0);
5549  }
5550  if (pindex->nStatus.hasUndo()) {
5551  assert(pindex->nStatus.hasData());
5552  }
5553  // This is pruning-independent.
5554  assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) ==
5555  (pindex->nTx > 0));
5556  // All parents having had data (at some point) is equivalent to all
5557  // parents being VALID_TRANSACTIONS, which is equivalent to
5558  // HaveTxsDownloaded(). All parents having had data (at some point) is
5559  // equivalent to all parents being VALID_TRANSACTIONS, which is
5560  // equivalent to HaveTxsDownloaded().
5561  assert((pindexFirstNeverProcessed == nullptr) ==
5562  (pindex->HaveTxsDownloaded()));
5563  assert((pindexFirstNotTransactionsValid == nullptr) ==
5564  (pindex->HaveTxsDownloaded()));
5565  // nHeight must be consistent.
5566  assert(pindex->nHeight == nHeight);
5567  // For every block except the genesis block, the chainwork must be
5568  // larger than the parent's.
5569  assert(pindex->pprev == nullptr ||
5570  pindex->nChainWork >= pindex->pprev->nChainWork);
5571  // The pskip pointer must point back for all but the first 2 blocks.
5572  assert(nHeight < 2 ||
5573  (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
5574  // All m_blockman.m_block_index entries must at least be TREE valid
5575  assert(pindexFirstNotTreeValid == nullptr);
5576  if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
5577  // TREE valid implies all parents are TREE valid
5578  assert(pindexFirstNotTreeValid == nullptr);
5579  }
5580  if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
5581  // CHAIN valid implies all parents are CHAIN valid
5582  assert(pindexFirstNotChainValid == nullptr);
5583  }
5584  if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
5585  // SCRIPTS valid implies all parents are SCRIPTS valid
5586  assert(pindexFirstNotScriptsValid == nullptr);
5587  }
5588  if (pindexFirstInvalid == nullptr) {
5589  // Checks for not-invalid blocks.
5590  // The failed mask cannot be set for blocks without invalid parents.
5591  assert(!pindex->nStatus.isInvalid());
5592  }
5593  if (pindexFirstParked == nullptr) {
5594  // Checks for not-parked blocks.
5595  // The parked mask cannot be set for blocks without parked parents.
5596  // (i.e., hasParkedParent only if an ancestor is properly parked).
5597  assert(!pindex->nStatus.isOnParkedChain());
5598  }
5599  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5600  pindexFirstNeverProcessed == nullptr) {
5601  if (pindexFirstInvalid == nullptr) {
5602  // If this block sorts at least as good as the current tip and
5603  // is valid and we have all data for its parents, it must be in
5604  // setBlockIndexCandidates or be parked.
5605  if (pindexFirstMissing == nullptr) {
5606  assert(pindex->nStatus.isOnParkedChain() ||
5607  setBlockIndexCandidates.count(pindex));
5608  }
5609  // m_chain.Tip() must also be there even if some data has
5610  // been pruned.
5611  if (pindex == m_chain.Tip()) {
5612  assert(setBlockIndexCandidates.count(pindex));
5613  }
5614  // If some parent is missing, then it could be that this block
5615  // was in setBlockIndexCandidates but had to be removed because
5616  // of the missing data. In this case it must be in
5617  // m_blocks_unlinked -- see test below.
5618  }
5619  } else {
5620  // If this block sorts worse than the current tip or some ancestor's
5621  // block has never been seen, it cannot be in
5622  // setBlockIndexCandidates.
5623  assert(setBlockIndexCandidates.count(pindex) == 0);
5624  }
5625  // Check whether this block is in m_blocks_unlinked.
5626  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5627  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5628  rangeUnlinked =
5629  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
5630  bool foundInUnlinked = false;
5631  while (rangeUnlinked.first != rangeUnlinked.second) {
5632  assert(rangeUnlinked.first->first == pindex->pprev);
5633  if (rangeUnlinked.first->second == pindex) {
5634  foundInUnlinked = true;
5635  break;
5636  }
5637  rangeUnlinked.first++;
5638  }
5639  if (pindex->pprev && pindex->nStatus.hasData() &&
5640  pindexFirstNeverProcessed != nullptr &&
5641  pindexFirstInvalid == nullptr) {
5642  // If this block has block data available, some parent was never
5643  // received, and has no invalid parents, it must be in
5644  // m_blocks_unlinked.
5645  assert(foundInUnlinked);
5646  }
5647  if (!pindex->nStatus.hasData()) {
5648  // Can't be in m_blocks_unlinked if we don't HAVE_DATA
5649  assert(!foundInUnlinked);
5650  }
5651  if (pindexFirstMissing == nullptr) {
5652  // We aren't missing data for any parent -- cannot be in
5653  // m_blocks_unlinked.
5654  assert(!foundInUnlinked);
5655  }
5656  if (pindex->pprev && pindex->nStatus.hasData() &&
5657  pindexFirstNeverProcessed == nullptr &&
5658  pindexFirstMissing != nullptr) {
5659  // We HAVE_DATA for this block, have received data for all parents
5660  // at some point, but we're currently missing data for some parent.
5661  // We must have pruned.
5662  assert(fHavePruned);
5663  // This block may have entered m_blocks_unlinked if:
5664  // - it has a descendant that at some point had more work than the
5665  // tip, and
5666  // - we tried switching to that descendant but were missing
5667  // data for some intermediate block between m_chain and the
5668  // tip.
5669  // So if this block is itself better than m_chain.Tip() and it
5670  // wasn't in
5671  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
5672  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5673  setBlockIndexCandidates.count(pindex) == 0) {
5674  if (pindexFirstInvalid == nullptr) {
5675  assert(foundInUnlinked);
5676  }
5677  }
5678  }
5679  // Perhaps too slow
5680  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
5681  // End: actual consistency checks.
5682 
5683  // Try descending into the first subnode.
5684  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5685  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5686  range = forward.equal_range(pindex);
5687  if (range.first != range.second) {
5688  // A subnode was found.
5689  pindex = range.first->second;
5690  nHeight++;
5691  continue;
5692  }
5693  // This is a leaf node. Move upwards until we reach a node of which we
5694  // have not yet visited the last child.
5695  while (pindex) {
5696  // We are going to either move to a parent or a sibling of pindex.
5697  // If pindex was the first with a certain property, unset the
5698  // corresponding variable.
5699  if (pindex == pindexFirstInvalid) {
5700  pindexFirstInvalid = nullptr;
5701  }
5702  if (pindex == pindexFirstParked) {
5703  pindexFirstParked = nullptr;
5704  }
5705  if (pindex == pindexFirstMissing) {
5706  pindexFirstMissing = nullptr;
5707  }
5708  if (pindex == pindexFirstNeverProcessed) {
5709  pindexFirstNeverProcessed = nullptr;
5710  }
5711  if (pindex == pindexFirstNotTreeValid) {
5712  pindexFirstNotTreeValid = nullptr;
5713  }
5714  if (pindex == pindexFirstNotTransactionsValid) {
5715  pindexFirstNotTransactionsValid = nullptr;
5716  }
5717  if (pindex == pindexFirstNotChainValid) {
5718  pindexFirstNotChainValid = nullptr;
5719  }
5720  if (pindex == pindexFirstNotScriptsValid) {
5721  pindexFirstNotScriptsValid = nullptr;
5722  }
5723  // Find our parent.
5724  CBlockIndex *pindexPar = pindex->pprev;
5725  // Find which child we just visited.
5726  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5727  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5728  rangePar = forward.equal_range(pindexPar);
5729  while (rangePar.first->second != pindex) {
5730  // Our parent must have at least the node we're coming from as
5731  // child.
5732  assert(rangePar.first != rangePar.second);
5733  rangePar.first++;
5734  }
5735  // Proceed to the next one.
5736  rangePar.first++;
5737  if (rangePar.first != rangePar.second) {
5738  // Move to the sibling.
5739  pindex = rangePar.first->second;
5740  break;
5741  } else {
5742  // Move up further.
5743  pindex = pindexPar;
5744  nHeight--;
5745  continue;
5746  }
5747  }
5748  }
5749 
5750  // Check that we actually traversed the entire map.
5751  assert(nNodes == forward.size());
5752 }
5753 
5754 std::string CChainState::ToString() {
5755  CBlockIndex *tip = m_chain.Tip();
5756  return strprintf("Chainstate [%s] @ height %d (%s)",
5757  m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
5758  tip ? tip->