// Bitcoin ABC 0.22.13 — P2P Digital Currency
// validation.cpp (source listing recovered from a documentation export)
1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2018 The Bitcoin Core developers
3 // Copyright (c) 2017-2020 The Bitcoin developers
4 // Distributed under the MIT software license, see the accompanying
5 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
6 
7 #include <validation.h>
8 
9 #include <arith_uint256.h>
10 #include <avalanche/processor.h>
11 #include <blockdb.h>
12 #include <blockvalidity.h>
13 #include <chainparams.h>
14 #include <checkpoints.h>
15 #include <checkqueue.h>
16 #include <config.h>
17 #include <consensus/activation.h>
18 #include <consensus/merkle.h>
19 #include <consensus/tx_check.h>
20 #include <consensus/tx_verify.h>
21 #include <consensus/validation.h>
22 #include <hash.h>
23 #include <index/txindex.h>
24 #include <logging.h>
25 #include <logging/timer.h>
26 #include <minerfund.h>
27 #include <node/ui_interface.h>
28 #include <policy/fees.h>
29 #include <policy/mempool.h>
30 #include <policy/policy.h>
31 #include <policy/settings.h>
32 #include <pow/aserti32d.h> // For ResetASERTAnchorBlockCache
33 #include <pow/pow.h>
34 #include <primitives/block.h>
35 #include <primitives/transaction.h>
36 #include <random.h>
37 #include <reverse_iterator.h>
38 #include <script/script.h>
39 #include <script/scriptcache.h>
40 #include <script/sigcache.h>
41 #include <shutdown.h>
42 #include <timedata.h>
43 #include <tinyformat.h>
44 #include <txdb.h>
45 #include <txmempool.h>
46 #include <undo.h>
47 #include <util/check.h> // For NDEBUG compile time check
48 #include <util/moneystr.h>
49 #include <util/strencodings.h>
50 #include <util/system.h>
51 #include <util/translation.h>
52 #include <validationinterface.h>
53 #include <warnings.h>
54 
55 #include <boost/algorithm/string/replace.hpp>
56 #include <boost/thread.hpp> // boost::this_thread::interruption_point() (mingw)
57 
58 #include <optional>
59 #include <string>
60 #include <thread>
61 
#define MICRO 0.000001
#define MILLI 0.001

/** Time between writing blocks/block index to disk (seconds). */
static const unsigned int DATABASE_WRITE_INTERVAL = 60 * 60;
/** Time between flushing chainstate to disk (seconds). */
static const unsigned int DATABASE_FLUSH_INTERVAL = 24 * 60 * 60;
69 
71 
73  LOCK(::cs_main);
74  assert(g_chainman.m_active_chainstate);
75  return *g_chainman.m_active_chainstate;
76 }
77 
79  LOCK(::cs_main);
81 }
82 
96 
99 std::condition_variable g_best_block_cv;
101 std::atomic_bool fImporting(false);
102 std::atomic_bool fReindex(false);
103 bool fHavePruned = false;
104 bool fPruneMode = false;
105 bool fRequireStandard = true;
106 bool fCheckBlockIndex = false;
108 size_t nCoinCacheUsage = 5000 * 300;
109 uint64_t nPruneTarget = 0;
111 
114 
116 
118 
119 // Internal stuff
120 namespace {
121 CBlockIndex *pindexBestInvalid = nullptr;
122 CBlockIndex *pindexBestParked = nullptr;
123 
124 RecursiveMutex cs_LastBlockFile;
125 std::vector<CBlockFileInfo> vinfoBlockFile;
126 int nLastBlockFile = 0;
132 bool fCheckForPruning = false;
133 
135 std::set<const CBlockIndex *> setDirtyBlockIndex;
136 
138 std::set<int> setDirtyFileInfo;
139 } // namespace
140 
142  : excessiveBlockSize(config.GetMaxBlockSize()), checkPoW(true),
143  checkMerkleRoot(true) {}
144 
146  AssertLockHeld(cs_main);
147  BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash);
148  return it == g_chainman.BlockIndex().end() ? nullptr : it->second;
149 }
150 
152  const CBlockLocator &locator) {
153  AssertLockHeld(cs_main);
154 
155  // Find the latest block common to locator and chain - we expect that
156  // locator.vHave is sorted descending by height.
157  for (const BlockHash &hash : locator.vHave) {
158  CBlockIndex *pindex = LookupBlockIndex(hash);
159  if (pindex) {
160  if (chain.Contains(pindex)) {
161  return pindex;
162  }
163  if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
164  return chain.Tip();
165  }
166  }
167  }
168  return chain.Genesis();
169 }
170 
171 std::unique_ptr<CBlockTreeDB> pblocktree;
172 
173 // See definition for documentation
174 static void FindFilesToPruneManual(ChainstateManager &chainman,
175  std::set<int> &setFilesToPrune,
176  int nManualPruneHeight);
177 static void FindFilesToPrune(ChainstateManager &chainman,
178  std::set<int> &setFilesToPrune,
179  uint64_t nPruneAfterHeight);
180 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
181  const CBlockIndex *pindex);
182 
184  AssertLockHeld(cs_main);
185  assert(lp);
186  // If there are relative lock times then the maxInputBlock will be set
187  // If there are no relative lock times, the LockPoints don't depend on the
188  // chain
189  if (lp->maxInputBlock) {
190  // Check whether ::ChainActive() is an extension of the block at which
191  // the LockPoints calculation was valid. If not LockPoints are no longer
192  // valid.
193  if (!::ChainActive().Contains(lp->maxInputBlock)) {
194  return false;
195  }
196  }
197 
198  // LockPoints still valid
199  return true;
200 }
201 
202 bool CheckSequenceLocks(const CTxMemPool &pool, const CTransaction &tx,
203  int flags, LockPoints *lp, bool useExistingLockPoints) {
204  AssertLockHeld(cs_main);
205  AssertLockHeld(pool.cs);
206 
207  CBlockIndex *tip = ::ChainActive().Tip();
208  assert(tip != nullptr);
209 
210  CBlockIndex index;
211  index.pprev = tip;
212  // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate height
213  // based locks because when SequenceLocks() is called within ConnectBlock(),
214  // the height of the block *being* evaluated is what is used. Thus if we
215  // want to know if a transaction can be part of the *next* block, we need to
216  // use one more than ::ChainActive().Height()
217  index.nHeight = tip->nHeight + 1;
218 
219  std::pair<int, int64_t> lockPair;
220  if (useExistingLockPoints) {
221  assert(lp);
222  lockPair.first = lp->height;
223  lockPair.second = lp->time;
224  } else {
225  // CoinsTip() contains the UTXO set for ::ChainActive().Tip()
226  CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool);
227  std::vector<int> prevheights;
228  prevheights.resize(tx.vin.size());
229  for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
230  const CTxIn &txin = tx.vin[txinIndex];
231  Coin coin;
232  if (!viewMemPool.GetCoin(txin.prevout, coin)) {
233  return error("%s: Missing input", __func__);
234  }
235  if (coin.GetHeight() == MEMPOOL_HEIGHT) {
236  // Assume all mempool transaction confirm in the next block
237  prevheights[txinIndex] = tip->nHeight + 1;
238  } else {
239  prevheights[txinIndex] = coin.GetHeight();
240  }
241  }
242  lockPair = CalculateSequenceLocks(tx, flags, prevheights, index);
243  if (lp) {
244  lp->height = lockPair.first;
245  lp->time = lockPair.second;
246  // Also store the hash of the block with the highest height of all
247  // the blocks which have sequence locked prevouts. This hash needs
248  // to still be on the chain for these LockPoint calculations to be
249  // valid.
250  // Note: It is impossible to correctly calculate a maxInputBlock if
251  // any of the sequence locked inputs depend on unconfirmed txs,
252  // except in the special case where the relative lock time/height is
253  // 0, which is equivalent to no sequence lock. Since we assume input
254  // height of tip+1 for mempool txs and test the resulting lockPair
255  // from CalculateSequenceLocks against tip+1. We know
256  // EvaluateSequenceLocks will fail if there was a non-zero sequence
257  // lock on a mempool input, so we can use the return value of
258  // CheckSequenceLocks to indicate the LockPoints validity.
259  int maxInputHeight = 0;
260  for (const int height : prevheights) {
261  // Can ignore mempool inputs since we'll fail if they had
262  // non-zero locks.
263  if (height != tip->nHeight + 1) {
264  maxInputHeight = std::max(maxInputHeight, height);
265  }
266  }
267  lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
268  }
269  }
270  return EvaluateSequenceLocks(index, lockPair);
271 }
272 
273 // Command-line argument "-replayprotectionactivationtime=<timestamp>" will
274 // cause the node to switch to replay protected SigHash ForkID value when the
275 // median timestamp of the previous 11 blocks is greater than or equal to
276 // <timestamp>. Defaults to the pre-defined timestamp when not set.
278  int64_t nMedianTimePast) {
279  return nMedianTimePast >= gArgs.GetArg("-replayprotectionactivationtime",
280  params.tachyonActivationTime);
281 }
282 
284  const CBlockIndex *pindexPrev) {
285  if (pindexPrev == nullptr) {
286  return false;
287  }
288 
289  return IsReplayProtectionEnabled(params, pindexPrev->GetMedianTimePast());
290 }
291 
292 // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
293 // were somehow broken and returning the wrong scriptPubKeys
295  const CTransaction &tx, TxValidationState &state,
296  const CCoinsViewCache &view, const CTxMemPool &pool, const uint32_t flags,
297  PrecomputedTransactionData &txdata, int &nSigChecksOut)
298  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
299  AssertLockHeld(cs_main);
300 
301  // pool.cs should be locked already, but go ahead and re-take the lock here
302  // to enforce that mempool doesn't change between when we check the view and
303  // when we actually call through to CheckInputScripts
304  LOCK(pool.cs);
305 
306  assert(!tx.IsCoinBase());
307  for (const CTxIn &txin : tx.vin) {
308  const Coin &coin = view.AccessCoin(txin.prevout);
309 
310  // AcceptToMemoryPoolWorker has already checked that the coins are
311  // available, so this shouldn't fail. If the inputs are not available
312  // here then return false.
313  if (coin.IsSpent()) {
314  return false;
315  }
316 
317  // Check equivalence for available inputs.
318  const CTransactionRef &txFrom = pool.get(txin.prevout.GetTxId());
319  if (txFrom) {
320  assert(txFrom->GetId() == txin.prevout.GetTxId());
321  assert(txFrom->vout.size() > txin.prevout.GetN());
322  assert(txFrom->vout[txin.prevout.GetN()] == coin.GetTxOut());
323  } else {
324  const Coin &coinFromDisk =
326  assert(!coinFromDisk.IsSpent());
327  assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
328  }
329  }
330 
331  // Call CheckInputScripts() to cache signature and script validity against
332  // current tip consensus rules.
333  return CheckInputScripts(tx, state, view, flags, /* cacheSigStore = */ true,
334  /* cacheFullScriptStore = */ true, txdata,
335  nSigChecksOut);
336 }
337 
338 namespace {
339 
340 class MemPoolAccept {
341 public:
342  MemPoolAccept(CTxMemPool &mempool)
343  : m_pool(mempool), m_view(&m_dummy),
344  m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
345  m_limit_ancestors(
346  gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
347  m_limit_ancestor_size(
348  gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) *
349  1000),
350  m_limit_descendants(
351  gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
352  m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize",
354  1000) {}
355 
356  // We put the arguments we're handed into a struct, so we can pass them
357  // around easier.
358  struct ATMPArgs {
359  const Config &m_config;
360  TxValidationState &m_state;
361  const int64_t m_accept_time;
362  const bool m_bypass_limits;
363  const Amount &m_absurd_fee;
364  /*
365  * Return any outpoints which were not previously present in the coins
366  * cache, but were added as a result of validating the tx for mempool
367  * acceptance. This allows the caller to optionally remove the cache
368  * additions if the associated transaction ends up being rejected by
369  * the mempool.
370  */
371  std::vector<COutPoint> &m_coins_to_uncache;
372  const bool m_test_accept;
373  };
374 
375  // Single transaction acceptance
376  bool AcceptSingleTransaction(const CTransactionRef &ptx, ATMPArgs &args)
377  EXCLUSIVE_LOCKS_REQUIRED(cs_main);
378 
379 private:
380  // All the intermediate state that gets passed between the various levels
381  // of checking a given transaction.
382  struct Workspace {
383  Workspace(const CTransactionRef &ptx,
384  const uint32_t next_block_script_verify_flags)
385  : m_ptx(ptx),
386  m_next_block_script_verify_flags(next_block_script_verify_flags) {
387  }
388  CTxMemPool::setEntries m_ancestors;
389  std::unique_ptr<CTxMemPoolEntry> m_entry;
390 
391  Amount m_modified_fees;
392 
393  const CTransactionRef &m_ptx;
394 
395  // ABC specific flags that are used in both PreChecks and
396  // ConsensusScriptChecks
397  const uint32_t m_next_block_script_verify_flags;
398  int m_sig_checks_standard;
399  };
400 
401  // Run the policy checks on a given transaction, excluding any script
402  // checks. Looks up inputs, calculates feerate, considers replacement,
403  // evaluates package limits, etc. As this function can be invoked for "free"
404  // by a peer, only tests that are fast should be done here (to avoid CPU
405  // DoS).
406  bool PreChecks(ATMPArgs &args, Workspace &ws)
407  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
408 
409  // Re-run the script checks, using consensus flags, and try to cache the
410  // result in the scriptcache. This should be done after
411  // PolicyScriptChecks(). This requires that all inputs either be in our
412  // utxo set or in the mempool.
413  bool ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
415  EXCLUSIVE_LOCKS_REQUIRED(cs_main);
416 
417  // Try to add the transaction to the mempool, removing any conflicts first.
418  // Returns true if the transaction is in the mempool after any size
419  // limiting is performed, false otherwise.
420  bool Finalize(ATMPArgs &args, Workspace &ws)
421  EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
422 
423 private:
424  CTxMemPool &m_pool;
425  CCoinsViewCache m_view;
426  CCoinsViewMemPool m_viewmempool;
427  CCoinsView m_dummy;
428 
429  // The package limits in effect at the time of invocation.
430  const size_t m_limit_ancestors;
431  const size_t m_limit_ancestor_size;
432  // These may be modified while evaluating a transaction (eg to account for
433  // in-mempool conflicts; see below).
434  size_t m_limit_descendants;
435  size_t m_limit_descendant_size;
436 };
437 
438 bool MemPoolAccept::PreChecks(ATMPArgs &args, Workspace &ws) {
439  const CTransactionRef &ptx = ws.m_ptx;
440  const CTransaction &tx = *ws.m_ptx;
441  const TxId &txid = ws.m_ptx->GetId();
442 
443  // Copy/alias what we need out of args
444  TxValidationState &state = args.m_state;
445  const int64_t nAcceptTime = args.m_accept_time;
446  const bool bypass_limits = args.m_bypass_limits;
447  const Amount &nAbsurdFee = args.m_absurd_fee;
448  std::vector<COutPoint> &coins_to_uncache = args.m_coins_to_uncache;
449 
450  // Alias what we need out of ws
451  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
452  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
453  Amount &nModifiedFees = ws.m_modified_fees;
454 
455  // Coinbase is only valid in a block, not as a loose transaction.
456  if (!CheckRegularTransaction(tx, state)) {
457  // state filled in by CheckRegularTransaction.
458  return false;
459  }
460 
461  // Rather not work on nonstandard transactions (unless -testnet)
462  std::string reason;
463  if (fRequireStandard && !IsStandardTx(tx, reason)) {
464  return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
465  }
466 
467  // Only accept nLockTime-using transactions that can be mined in the next
468  // block; we don't want our mempool filled up with transactions that can't
469  // be mined yet.
470  TxValidationState ctxState;
472  args.m_config.GetChainParams().GetConsensus(), tx, ctxState,
474  // We copy the state from a dummy to ensure we don't increase the
475  // ban score of peer for transaction that could be valid in the future.
477  ctxState.GetRejectReason(),
478  ctxState.GetDebugMessage());
479  }
480 
481  // Is it already in the memory pool?
482  if (m_pool.exists(txid)) {
484  "txn-already-in-mempool");
485  }
486 
487  // Check for conflicts with in-memory transactions
488  for (const CTxIn &txin : tx.vin) {
489  auto itConflicting = m_pool.mapNextTx.find(txin.prevout);
490  if (itConflicting != m_pool.mapNextTx.end()) {
491  // Disable replacement feature for good
493  "txn-mempool-conflict");
494  }
495  }
496 
497  LockPoints lp;
498  m_view.SetBackend(m_viewmempool);
499 
500  CCoinsViewCache &coins_cache = ::ChainstateActive().CoinsTip();
501  // Do all inputs exist?
502  for (const CTxIn &txin : tx.vin) {
503  if (!coins_cache.HaveCoinInCache(txin.prevout)) {
504  coins_to_uncache.push_back(txin.prevout);
505  }
506 
507  // Note: this call may add txin.prevout to the coins cache
508  // (coins_cache.cacheCoins) by way of FetchCoin(). It should be
509  // removed later (via coins_to_uncache) if this tx turns out to be
510  // invalid.
511  if (!m_view.HaveCoin(txin.prevout)) {
512  // Are inputs missing because we already have the tx?
513  for (size_t out = 0; out < tx.vout.size(); out++) {
514  // Optimistically just do efficient check of cache for
515  // outputs.
516  if (coins_cache.HaveCoinInCache(COutPoint(txid, out))) {
518  "txn-already-known");
519  }
520  }
521 
522  // Otherwise assume this might be an orphan tx for which we just
523  // haven't seen parents yet.
525  "bad-txns-inputs-missingorspent");
526  }
527  }
528 
529  // Are the actual inputs available?
530  if (!m_view.HaveInputs(tx)) {
532  "bad-txns-inputs-spent");
533  }
534 
535  // Bring the best block into scope.
536  m_view.GetBestBlock();
537 
538  // we have all inputs cached now, so switch back to dummy (to protect
539  // against bugs where we pull more inputs from disk that miss being
540  // added to coins_to_uncache)
541  m_view.SetBackend(m_dummy);
542 
543  // Only accept BIP68 sequence locked transactions that can be mined in
544  // the next block; we don't want our mempool filled up with transactions
545  // that can't be mined yet. Must keep pool.cs for this unless we change
546  // CheckSequenceLocks to take a CoinsViewCache instead of create its
547  // own.
548  if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
550  "non-BIP68-final");
551  }
552 
553  Amount nFees = Amount::zero();
554  if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view),
555  nFees)) {
556  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
557  tx.GetId().ToString(), state.ToString());
558  }
559 
560  // Check for non-standard pay-to-script-hash in inputs
561  if (fRequireStandard &&
562  !AreInputsStandard(tx, m_view, ws.m_next_block_script_verify_flags)) {
564  "bad-txns-nonstandard-inputs");
565  }
566 
567  // nModifiedFees includes any fee deltas from PrioritiseTransaction
568  nModifiedFees = nFees;
569  m_pool.ApplyDelta(txid, nModifiedFees);
570 
571  // Keep track of transactions that spend a coinbase, which we re-scan
572  // during reorgs to ensure COINBASE_MATURITY is still met.
573  bool fSpendsCoinbase = false;
574  for (const CTxIn &txin : tx.vin) {
575  const Coin &coin = m_view.AccessCoin(txin.prevout);
576  if (coin.IsCoinBase()) {
577  fSpendsCoinbase = true;
578  break;
579  }
580  }
581 
582  unsigned int nSize = tx.GetTotalSize();
583 
584  // No transactions are allowed below minRelayTxFee except from disconnected
585  // blocks.
586  // Do not change this to use virtualsize without coordinating a network
587  // policy upgrade.
588  if (!bypass_limits && nModifiedFees < minRelayTxFee.GetFee(nSize)) {
589  return state.Invalid(
590  TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met",
591  strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
592  }
593 
594  if (nAbsurdFee != Amount::zero() && nFees > nAbsurdFee) {
596  "absurdly-high-fee",
597  strprintf("%d > %d", nFees, nAbsurdFee));
598  }
599 
600  // Validate input scripts against standard script flags.
601  const uint32_t scriptVerifyFlags =
602  ws.m_next_block_script_verify_flags | STANDARD_SCRIPT_VERIFY_FLAGS;
603  PrecomputedTransactionData txdata(tx);
604  if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false,
605  txdata, ws.m_sig_checks_standard)) {
606  // State filled in by CheckInputScripts
607  return false;
608  }
609 
610  entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime,
611  ::ChainActive().Height(), fSpendsCoinbase,
612  ws.m_sig_checks_standard, lp));
613 
614  unsigned int nVirtualSize = entry->GetTxVirtualSize();
615 
616  Amount mempoolRejectFee =
617  m_pool
618  .GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
619  1000000)
620  .GetFee(nVirtualSize);
621  if (!bypass_limits && mempoolRejectFee > Amount::zero() &&
622  nModifiedFees < mempoolRejectFee) {
623  return state.Invalid(
624  TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met",
625  strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
626  }
627 
628  // Calculate in-mempool ancestors, up to a limit.
629  std::string errString;
630  if (!m_pool.CalculateMemPoolAncestors(
631  *entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size,
632  m_limit_descendants, m_limit_descendant_size, errString)) {
634  "too-long-mempool-chain", errString);
635  }
636  return true;
637 }
638 
639 bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs &args, Workspace &ws,
640  PrecomputedTransactionData &txdata) {
641  const CTransaction &tx = *ws.m_ptx;
642  const TxId &txid = tx.GetId();
643 
644  TxValidationState &state = args.m_state;
645 
646  // Check again against the next block's script verification flags
647  // to cache our script execution flags.
648  //
649  // This is also useful in case of bugs in the standard flags that cause
650  // transactions to pass as valid when they're actually invalid. For
651  // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
652  // NOT scripts to pass, even though they were invalid.
653  //
654  // There is a similar check in CreateNewBlock() to prevent creating
655  // invalid blocks (using TestBlockValidity), however allowing such
656  // transactions into the mempool can be exploited as a DoS attack.
657  int nSigChecksConsensus;
658  if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool,
659  ws.m_next_block_script_verify_flags,
660  txdata, nSigChecksConsensus)) {
661  // This can occur under some circumstances, if the node receives an
662  // unrequested tx which is invalid due to new consensus rules not
663  // being activated yet (during IBD).
664  return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed "
665  "against next-block but not STANDARD flags %s, %s",
666  __func__, txid.ToString(), state.ToString());
667  }
668 
669  if (ws.m_sig_checks_standard != nSigChecksConsensus) {
670  // We can't accept this transaction as we've used the standard count
671  // for the mempool/mining, but the consensus count will be enforced
672  // in validation (we don't want to produce bad block templates).
673  return error(
674  "%s: BUG! PLEASE REPORT THIS! SigChecks count differed between "
675  "standard and consensus flags in %s",
676  __func__, txid.ToString());
677  }
678  return true;
679 }
680 
681 bool MemPoolAccept::Finalize(ATMPArgs &args, Workspace &ws) {
682  const TxId &txid = ws.m_ptx->GetId();
683  TxValidationState &state = args.m_state;
684  const bool bypass_limits = args.m_bypass_limits;
685 
686  CTxMemPool::setEntries &setAncestors = ws.m_ancestors;
687  std::unique_ptr<CTxMemPoolEntry> &entry = ws.m_entry;
688 
689  // Store transaction in memory.
690  m_pool.addUnchecked(*entry, setAncestors);
691 
692  // Trim mempool and check if tx was trimmed.
693  if (!bypass_limits) {
694  m_pool.LimitSize(
695  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
696  std::chrono::hours{
697  gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
698  if (!m_pool.exists(txid)) {
700  "mempool full");
701  }
702  }
703  return true;
704 }
705 
706 bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef &ptx,
707  ATMPArgs &args) {
708  AssertLockHeld(cs_main);
709  // mempool "read lock" (held through
710  // GetMainSignals().TransactionAddedToMempool())
711  LOCK(m_pool.cs);
712 
713  Workspace workspace(ptx, GetNextBlockScriptFlags(
714  args.m_config.GetChainParams().GetConsensus(),
715  ::ChainActive().Tip()));
716 
717  if (!PreChecks(args, workspace)) {
718  return false;
719  }
720 
721  // Only compute the precomputed transaction data if we need to verify
722  // scripts (ie, other policy checks pass). We perform the inexpensive
723  // checks first and avoid hashing and signature verification unless those
724  // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
725  PrecomputedTransactionData txdata(*ptx);
726 
727  if (!ConsensusScriptChecks(args, workspace, txdata)) {
728  return false;
729  }
730 
731  // Tx was accepted, but not added
732  if (args.m_test_accept) {
733  return true;
734  }
735 
736  if (!Finalize(args, workspace)) {
737  return false;
738  }
739 
741  return true;
742 }
743 
744 } // namespace
745 
749 static bool
751  TxValidationState &state, const CTransactionRef &tx,
752  int64_t nAcceptTime, bool bypass_limits,
753  const Amount nAbsurdFee, bool test_accept)
754  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
755  AssertLockHeld(cs_main);
756  std::vector<COutPoint> coins_to_uncache;
757  MemPoolAccept::ATMPArgs args{config, state, nAcceptTime,
758  bypass_limits, nAbsurdFee, coins_to_uncache,
759  test_accept};
760  bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
761  if (!res) {
762  // Remove coins that were not present in the coins cache before calling
763  // ATMPW; this is to prevent memory DoS in case we receive a large
764  // number of invalid transactions that attempt to overrun the in-memory
765  // coins cache
766  // (`CCoinsViewCache::cacheCoins`).
767 
768  for (const COutPoint &outpoint : coins_to_uncache) {
769  ::ChainstateActive().CoinsTip().Uncache(outpoint);
770  }
771  }
772 
773  // After we've (potentially) uncached entries, ensure our coins cache is
774  // still within its size limits
775  BlockValidationState stateDummy;
776  ::ChainstateActive().FlushStateToDisk(config.GetChainParams(), stateDummy,
778  return res;
779 }
780 
781 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
782  TxValidationState &state, const CTransactionRef &tx,
783  bool bypass_limits, const Amount nAbsurdFee,
784  bool test_accept) {
785  return AcceptToMemoryPoolWithTime(config, pool, state, tx, GetTime(),
786  bypass_limits, nAbsurdFee, test_accept);
787 }
788 
794 bool GetTransaction(const TxId &txid, CTransactionRef &txOut,
795  const Consensus::Params &params, BlockHash &hashBlock,
796  const CBlockIndex *const block_index) {
797  LOCK(cs_main);
798 
799  if (block_index == nullptr) {
800  CTransactionRef ptx = g_mempool.get(txid);
801  if (ptx) {
802  txOut = ptx;
803  return true;
804  }
805 
806  if (g_txindex) {
807  return g_txindex->FindTx(txid, hashBlock, txOut);
808  }
809  } else {
810  CBlock block;
811  if (ReadBlockFromDisk(block, block_index, params)) {
812  for (const auto &tx : block.vtx) {
813  if (tx->GetId() == txid) {
814  txOut = tx;
815  hashBlock = block_index->GetBlockHash();
816  return true;
817  }
818  }
819  }
820  }
821 
822  return false;
823 }
824 
//
// CBlock and CBlockIndex
//
829 
830 static bool WriteBlockToDisk(const CBlock &block, FlatFilePos &pos,
831  const CMessageHeader::MessageMagic &messageStart) {
832  // Open history file to append
834  if (fileout.IsNull()) {
835  return error("WriteBlockToDisk: OpenBlockFile failed");
836  }
837 
838  // Write index header
839  unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
840  fileout << messageStart << nSize;
841 
842  // Write block
843  long fileOutPos = ftell(fileout.Get());
844  if (fileOutPos < 0) {
845  return error("WriteBlockToDisk: ftell failed");
846  }
847 
848  pos.nPos = (unsigned int)fileOutPos;
849  fileout << block;
850 
851  return true;
852 }
853 
854 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
855  int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
856  // Force block reward to zero when right shift is undefined.
857  if (halvings >= 64) {
858  return Amount::zero();
859  }
860 
861  Amount nSubsidy = 50 * COIN;
862  // Subsidy is cut in half every 210,000 blocks which will occur
863  // approximately every 4 years.
864  return ((nSubsidy / SATOSHI) >> halvings) * SATOSHI;
865 }
866 
867 CoinsViews::CoinsViews(std::string ldb_name, size_t cache_size_bytes,
868  bool in_memory, bool should_wipe)
869  : m_dbview(GetDataDir() / ldb_name, cache_size_bytes, in_memory,
870  should_wipe),
871  m_catcherview(&m_dbview) {}
872 
873 void CoinsViews::InitCache() {
874  m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
875 }
876 
878  BlockHash from_snapshot_blockhash)
879  : m_blockman(blockman), m_from_snapshot_blockhash(from_snapshot_blockhash) {
880 }
881 
882 void CChainState::InitCoinsDB(size_t cache_size_bytes, bool in_memory,
883  bool should_wipe, std::string leveldb_name) {
885  leveldb_name += "_" + m_from_snapshot_blockhash.ToString();
886  }
887  m_coins_views = std::make_unique<CoinsViews>(leveldb_name, cache_size_bytes,
888  in_memory, should_wipe);
889 }
890 
891 void CChainState::InitCoinsCache() {
892  assert(m_coins_views != nullptr);
893  m_coins_views->InitCache();
894 }
895 
896 // Note that though this is marked const, we may end up modifying
897 // `m_cached_finished_ibd`, which is a performance-related implementation
898 // detail. This function must be marked `const` so that `CValidationInterface`
899 // clients (which are given a `const CChainState*`) can call it.
900 //
902  // Optimization: pre-test latch before taking the lock.
903  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
904  return false;
905  }
906 
907  LOCK(cs_main);
908  if (m_cached_finished_ibd.load(std::memory_order_relaxed)) {
909  return false;
910  }
911  if (fImporting || fReindex) {
912  return true;
913  }
914  if (m_chain.Tip() == nullptr) {
915  return true;
916  }
918  return true;
919  }
920  if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) {
921  return true;
922  }
923  LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
924  m_cached_finished_ibd.store(true, std::memory_order_relaxed);
925  return false;
926 }
927 
928 static CBlockIndex const *pindexBestForkTip = nullptr;
929 static CBlockIndex const *pindexBestForkBase = nullptr;
930 
932  LOCK(::cs_main);
933  return g_chainman.m_blockman.m_block_index;
934 }
935 
936 static void AlertNotify(const std::string &strMessage) {
937  uiInterface.NotifyAlertChanged();
938 #if defined(HAVE_SYSTEM)
939  std::string strCmd = gArgs.GetArg("-alertnotify", "");
940  if (strCmd.empty()) {
941  return;
942  }
943 
944  // Alert text should be plain ascii coming from a trusted source, but to be
945  // safe we first strip anything not in safeChars, then add single quotes
946  // around the whole string before passing it to the shell:
947  std::string singleQuote("'");
948  std::string safeStatus = SanitizeString(strMessage);
949  safeStatus = singleQuote + safeStatus + singleQuote;
950  boost::replace_all(strCmd, "%s", safeStatus);
951 
952  std::thread t(runCommand, strCmd);
953  // thread runs free
954  t.detach();
955 #endif
956 }
957 
959  AssertLockHeld(cs_main);
960  // Before we get past initial download, we cannot reliably alert about forks
961  // (we assume we don't get stuck on a fork before finishing our initial
962  // sync)
964  return;
965  }
966 
967  // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
968  // mines it) of our head, drop it
969  if (pindexBestForkTip &&
970  ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72) {
971  pindexBestForkTip = nullptr;
972  }
973 
974  if (pindexBestForkTip ||
975  (pindexBestInvalid &&
976  pindexBestInvalid->nChainWork >
977  ::ChainActive().Tip()->nChainWork +
978  (GetBlockProof(*::ChainActive().Tip()) * 6))) {
979  if (!GetfLargeWorkForkFound() && pindexBestForkBase) {
980  std::string warning =
981  std::string("'Warning: Large-work fork detected, forking after "
982  "block ") +
983  pindexBestForkBase->phashBlock->ToString() + std::string("'");
984  AlertNotify(warning);
985  }
986 
987  if (pindexBestForkTip && pindexBestForkBase) {
988  LogPrintf("%s: Warning: Large fork found\n forking the "
989  "chain at height %d (%s)\n lasting to height %d "
990  "(%s).\nChain state database corruption likely.\n",
991  __func__, pindexBestForkBase->nHeight,
992  pindexBestForkBase->phashBlock->ToString(),
993  pindexBestForkTip->nHeight,
994  pindexBestForkTip->phashBlock->ToString());
996  } else {
997  LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
998  "longer than our best chain.\nChain state database "
999  "corruption likely.\n",
1000  __func__);
1002  }
1003  } else {
1004  SetfLargeWorkForkFound(false);
1006  }
1007 }
1008 
1009 static void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
1010  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
1011  AssertLockHeld(cs_main);
1012  // If we are on a fork that is sufficiently large, set a warning flag.
1013  const CBlockIndex *pfork = ::ChainActive().FindFork(pindexNewForkTip);
1014 
1015  // We define a condition where we should warn the user about as a fork of at
1016  // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
1017  // it) of ours. We use 7 blocks rather arbitrarily as it represents just
1018  // under 10% of sustained network hash rate operating on the fork, or a
1019  // chain that is entirely longer than ours and invalid (note that this
1020  // should be detected by both). We define it this way because it allows us
1021  // to only store the highest fork tip (+ base) which meets the 7-block
1022  // condition and from this always have the most-likely-to-cause-warning fork
1023  if (pfork &&
1024  (!pindexBestForkTip ||
1025  pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
1026  pindexNewForkTip->nChainWork - pfork->nChainWork >
1027  (GetBlockProof(*pfork) * 7) &&
1028  ::ChainActive().Height() - pindexNewForkTip->nHeight < 72) {
1029  pindexBestForkTip = pindexNewForkTip;
1030  pindexBestForkBase = pfork;
1031  }
1032 
1034 }
1035 
1036 // Called both upon regular invalid block discovery *and* InvalidateBlock
1038  AssertLockHeld(cs_main);
1039  if (!pindexBestInvalid ||
1040  pindexNew->nChainWork > pindexBestInvalid->nChainWork) {
1041  pindexBestInvalid = pindexNew;
1042  }
1043  if (pindexBestHeader != nullptr &&
1044  pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) {
1045  pindexBestHeader = ::ChainActive().Tip();
1046  }
1047 
1048  // If the invalid chain found is supposed to be finalized, we need to move
1049  // back the finalization point.
1050  if (IsBlockFinalized(pindexNew)) {
1051  m_finalizedBlockIndex = pindexNew->pprev;
1052  }
1053 
1054  LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n",
1055  __func__, pindexNew->GetBlockHash().ToString(),
1056  pindexNew->nHeight,
1057  log(pindexNew->nChainWork.getdouble()) / log(2.0),
1058  FormatISO8601DateTime(pindexNew->GetBlockTime()));
1059  CBlockIndex *tip = ::ChainActive().Tip();
1060  assert(tip);
1061  LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n",
1062  __func__, tip->GetBlockHash().ToString(),
1063  ::ChainActive().Height(),
1064  log(tip->nChainWork.getdouble()) / log(2.0),
1066 }
1067 
1068 // Same as InvalidChainFound, above, except not called directly from
1069 // InvalidateBlock, which does its own setBlockIndexCandidates management.
1071  const BlockValidationState &state) {
1073  pindex->nStatus = pindex->nStatus.withFailed();
1074  m_blockman.m_failed_blocks.insert(pindex);
1075  setDirtyBlockIndex.insert(pindex);
1076  InvalidChainFound(pindex);
1077  }
1078 }
1079 
1080 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
1081  int nHeight) {
1082  // Mark inputs spent.
1083  if (tx.IsCoinBase()) {
1084  return;
1085  }
1086 
1087  txundo.vprevout.reserve(tx.vin.size());
1088  for (const CTxIn &txin : tx.vin) {
1089  txundo.vprevout.emplace_back();
1090  bool is_spent = view.SpendCoin(txin.prevout, &txundo.vprevout.back());
1091  assert(is_spent);
1092  }
1093 }
1094 
/**
 * Apply the effects of tx to the view: spend its inputs (recording undo data
 * into txundo) and create its outputs at height nHeight.
 */
void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight) {
    SpendCoins(view, tx, txundo, nHeight);
    AddCoins(view, tx, nHeight);
}
1100 
1101 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, int nHeight) {
1102  // Mark inputs spent.
1103  if (!tx.IsCoinBase()) {
1104  for (const CTxIn &txin : tx.vin) {
1105  bool is_spent = view.SpendCoin(txin.prevout);
1106  assert(is_spent);
1107  }
1108  }
1109 
1110  // Add outputs.
1111  AddCoins(view, tx, nHeight);
1112 }
1113 
1115  const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
1116  if (!VerifyScript(scriptSig, m_tx_out.scriptPubKey, nFlags,
1118  ptxTo, nIn, m_tx_out.nValue, cacheStore, txdata),
1119  metrics, &error)) {
1120  return false;
1121  }
1122  if ((pTxLimitSigChecks &&
1123  !pTxLimitSigChecks->consume_and_check(metrics.nSigChecks)) ||
1124  (pBlockLimitSigChecks &&
1125  !pBlockLimitSigChecks->consume_and_check(metrics.nSigChecks))) {
1126  // we can't assign a meaningful script error (since the script
1127  // succeeded), but remove the ScriptError::OK which could be
1128  // misinterpreted.
1130  return false;
1131  }
1132  return true;
1133 }
1134 
1135 int GetSpendHeight(const CCoinsViewCache &inputs) {
1136  LOCK(cs_main);
1137  CBlockIndex *pindexPrev = LookupBlockIndex(inputs.GetBestBlock());
1138  return pindexPrev->nHeight + 1;
1139 }
1140 
1142  const CCoinsViewCache &inputs, const uint32_t flags,
1143  bool sigCacheStore, bool scriptCacheStore,
1144  const PrecomputedTransactionData &txdata,
1145  int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
1146  CheckInputsLimiter *pBlockLimitSigChecks,
1147  std::vector<CScriptCheck> *pvChecks) {
1148  AssertLockHeld(cs_main);
1149  assert(!tx.IsCoinBase());
1150 
1151  if (pvChecks) {
1152  pvChecks->reserve(tx.vin.size());
1153  }
1154 
1155  // First check if script executions have been cached with the same flags.
1156  // Note that this assumes that the inputs provided are correct (ie that the
1157  // transaction hash which is in tx's prevouts properly commits to the
1158  // scriptPubKey in the inputs view of that transaction).
1159  ScriptCacheKey hashCacheEntry(tx, flags);
1160  if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore, nSigChecksOut)) {
1161  if (!txLimitSigChecks.consume_and_check(nSigChecksOut) ||
1162  (pBlockLimitSigChecks &&
1163  !pBlockLimitSigChecks->consume_and_check(nSigChecksOut))) {
1165  "too-many-sigchecks");
1166  }
1167  return true;
1168  }
1169 
1170  int nSigChecksTotal = 0;
1171 
1172  for (size_t i = 0; i < tx.vin.size(); i++) {
1173  const COutPoint &prevout = tx.vin[i].prevout;
1174  const Coin &coin = inputs.AccessCoin(prevout);
1175  assert(!coin.IsSpent());
1176 
1177  // We very carefully only pass in things to CScriptCheck which are
1178  // clearly committed to by tx's hash. This provides a sanity
1179  // check that our caching is not introducing consensus failures through
1180  // additional data in, eg, the coins being spent being checked as a part
1181  // of CScriptCheck.
1182 
1183  // Verify signature
1184  CScriptCheck check(coin.GetTxOut(), tx, i, flags, sigCacheStore, txdata,
1185  &txLimitSigChecks, pBlockLimitSigChecks);
1186 
1187  // If pvChecks is not null, defer the check execution to the caller.
1188  if (pvChecks) {
1189  pvChecks->push_back(std::move(check));
1190  continue;
1191  }
1192 
1193  if (!check()) {
1194  ScriptError scriptError = check.GetScriptError();
1195  // Compute flags without the optional standardness flags.
1196  // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains
1197  // additional upgrade flags (see AcceptToMemoryPoolWorker variable
1198  // extraFlags).
1199  uint32_t mandatoryFlags =
1201  if (flags != mandatoryFlags) {
1202  // Check whether the failure was caused by a non-mandatory
1203  // script verification check. If so, ensure we return
1204  // NOT_STANDARD instead of CONSENSUS to avoid downstream users
1205  // splitting the network between upgraded and non-upgraded nodes
1206  // by banning CONSENSUS-failing data providers.
1207  CScriptCheck check2(coin.GetTxOut(), tx, i, mandatoryFlags,
1208  sigCacheStore, txdata);
1209  if (check2()) {
1210  return state.Invalid(
1212  strprintf("non-mandatory-script-verify-flag (%s)",
1213  ScriptErrorString(scriptError)));
1214  }
1215  // update the error message to reflect the mandatory violation.
1216  scriptError = check2.GetScriptError();
1217  }
1218 
1219  // MANDATORY flag failures correspond to
1220  // TxValidationResult::TX_CONSENSUS. Because CONSENSUS failures are
1221  // the most serious case of validation failures, we may need to
1222  // consider using RECENT_CONSENSUS_CHANGE for any script failure
1223  // that could be due to non-upgraded nodes which we may want to
1224  // support, to avoid splitting the network (but this depends on the
1225  // details of how net_processing handles such errors).
1226  return state.Invalid(
1228  strprintf("mandatory-script-verify-flag-failed (%s)",
1229  ScriptErrorString(scriptError)));
1230  }
1231 
1232  nSigChecksTotal += check.GetScriptExecutionMetrics().nSigChecks;
1233  }
1234 
1235  nSigChecksOut = nSigChecksTotal;
1236 
1237  if (scriptCacheStore && !pvChecks) {
1238  // We executed all of the provided scripts, and were told to cache the
1239  // result. Do so now.
1240  AddKeyInScriptCache(hashCacheEntry, nSigChecksTotal);
1241  }
1242 
1243  return true;
1244 }
1245 
1246 static bool UndoWriteToDisk(const CBlockUndo &blockundo, FlatFilePos &pos,
1247  const BlockHash &hashBlock,
1248  const CMessageHeader::MessageMagic &messageStart) {
1249  // Open history file to append
1251  if (fileout.IsNull()) {
1252  return error("%s: OpenUndoFile failed", __func__);
1253  }
1254 
1255  // Write index header
1256  unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
1257  fileout << messageStart << nSize;
1258 
1259  // Write undo data
1260  long fileOutPos = ftell(fileout.Get());
1261  if (fileOutPos < 0) {
1262  return error("%s: ftell failed", __func__);
1263  }
1264  pos.nPos = (unsigned int)fileOutPos;
1265  fileout << blockundo;
1266 
1267  // calculate & write checksum
1269  hasher << hashBlock;
1270  hasher << blockundo;
1271  fileout << hasher.GetHash();
1272 
1273  return true;
1274 }
1275 
1276 bool UndoReadFromDisk(CBlockUndo &blockundo, const CBlockIndex *pindex) {
1277  FlatFilePos pos = pindex->GetUndoPos();
1278  if (pos.IsNull()) {
1279  return error("%s: no undo data available", __func__);
1280  }
1281 
1282  // Open history file to read
1283  CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
1284  if (filein.IsNull()) {
1285  return error("%s: OpenUndoFile failed", __func__);
1286  }
1287 
1288  // Read block
1289  uint256 hashChecksum;
1290  // We need a CHashVerifier as reserializing may lose data
1291  CHashVerifier<CAutoFile> verifier(&filein);
1292  try {
1293  verifier << pindex->pprev->GetBlockHash();
1294  verifier >> blockundo;
1295  filein >> hashChecksum;
1296  } catch (const std::exception &e) {
1297  return error("%s: Deserialize or I/O error - %s", __func__, e.what());
1298  }
1299 
1300  // Verify checksum
1301  if (hashChecksum != verifier.GetHash()) {
1302  return error("%s: Checksum mismatch", __func__);
1303  }
1304 
1305  return true;
1306 }
1307 
1309 static bool AbortNode(const std::string &strMessage,
1310  bilingual_str user_message = bilingual_str()) {
1311  SetMiscWarning(strMessage);
1312  LogPrintf("*** %s\n", strMessage);
1313  if (!user_message.empty()) {
1314  user_message =
1315  _("A fatal internal error occurred, see debug.log for details");
1316  }
1317  AbortError(user_message);
1318  StartShutdown();
1319  return false;
1320 }
1321 
/**
 * Abort the node (see AbortNode above) and additionally record the failure
 * in the validation state, so the in-progress validation reports an Error.
 */
static bool AbortNode(BlockValidationState &state,
                      const std::string &strMessage,
                      const bilingual_str &userMessage = bilingual_str()) {
    AbortNode(strMessage, userMessage);
    return state.Error(strMessage);
}
1328 
1331  const COutPoint &out) {
1332  bool fClean = true;
1333 
1334  if (view.HaveCoin(out)) {
1335  // Overwriting transaction output.
1336  fClean = false;
1337  }
1338 
1339  if (undo.GetHeight() == 0) {
1340  // Missing undo metadata (height and coinbase). Older versions included
1341  // this information only in undo records for the last spend of a
1342  // transactions' outputs. This implies that it must be present for some
1343  // other output of the same tx.
1344  const Coin &alternate = AccessByTxid(view, out.GetTxId());
1345  if (alternate.IsSpent()) {
1346  // Adding output for transaction without known metadata
1347  return DisconnectResult::FAILED;
1348  }
1349 
1350  // This is somewhat ugly, but hopefully utility is limited. This is only
1351  // useful when working from legacy on disck data. In any case, putting
1352  // the correct information in there doesn't hurt.
1353  const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
1354  alternate.IsCoinBase());
1355  }
1356 
1357  // If the coin already exists as an unspent coin in the cache, then the
1358  // possible_overwrite parameter to AddCoin must be set to true. We have
1359  // already checked whether an unspent coin exists above using HaveCoin, so
1360  // we don't need to guess. When fClean is false, an unspent coin already
1361  // existed and it is an overwrite.
1362  view.AddCoin(out, std::move(undo), !fClean);
1363 
1365 }
1366 
1372  const CBlockIndex *pindex,
1373  CCoinsViewCache &view) {
1374  CBlockUndo blockUndo;
1375  if (!UndoReadFromDisk(blockUndo, pindex)) {
1376  error("DisconnectBlock(): failure reading undo data");
1377  return DisconnectResult::FAILED;
1378  }
1379 
1380  return ApplyBlockUndo(blockUndo, block, pindex, view);
1381 }
1382 
1384  const CBlock &block, const CBlockIndex *pindex,
1385  CCoinsViewCache &view) {
1386  bool fClean = true;
1387 
1388  if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
1389  error("DisconnectBlock(): block and undo data inconsistent");
1390  return DisconnectResult::FAILED;
1391  }
1392 
1393  // First, restore inputs.
1394  for (size_t i = 1; i < block.vtx.size(); i++) {
1395  const CTransaction &tx = *(block.vtx[i]);
1396  const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
1397  if (txundo.vprevout.size() != tx.vin.size()) {
1398  error("DisconnectBlock(): transaction and undo data inconsistent");
1399  return DisconnectResult::FAILED;
1400  }
1401 
1402  for (size_t j = 0; j < tx.vin.size(); j++) {
1403  const COutPoint &out = tx.vin[j].prevout;
1404  const Coin &undo = txundo.vprevout[j];
1405  DisconnectResult res = UndoCoinSpend(undo, view, out);
1406  if (res == DisconnectResult::FAILED) {
1407  return DisconnectResult::FAILED;
1408  }
1409  fClean = fClean && res != DisconnectResult::UNCLEAN;
1410  }
1411  }
1412 
1413  // Second, revert created outputs.
1414  for (const auto &ptx : block.vtx) {
1415  const CTransaction &tx = *ptx;
1416  const TxId &txid = tx.GetId();
1417  const bool is_coinbase = tx.IsCoinBase();
1418 
1419  // Check that all outputs are available and match the outputs in the
1420  // block itself exactly.
1421  for (size_t o = 0; o < tx.vout.size(); o++) {
1422  if (tx.vout[o].scriptPubKey.IsUnspendable()) {
1423  continue;
1424  }
1425 
1426  COutPoint out(txid, o);
1427  Coin coin;
1428  bool is_spent = view.SpendCoin(out, &coin);
1429  if (!is_spent || tx.vout[o] != coin.GetTxOut() ||
1430  uint32_t(pindex->nHeight) != coin.GetHeight() ||
1431  is_coinbase != coin.IsCoinBase()) {
1432  // transaction output mismatch
1433  fClean = false;
1434  }
1435  }
1436  }
1437 
1438  // Move best block pointer to previous block.
1439  view.SetBestBlock(block.hashPrevBlock);
1440 
1442 }
1443 
1444 static void FlushBlockFile(bool fFinalize = false) {
1445  LOCK(cs_LastBlockFile);
1446 
1447  FlatFilePos block_pos_old(nLastBlockFile,
1448  vinfoBlockFile[nLastBlockFile].nSize);
1449  FlatFilePos undo_pos_old(nLastBlockFile,
1450  vinfoBlockFile[nLastBlockFile].nUndoSize);
1451 
1452  bool status = true;
1453  status &= BlockFileSeq().Flush(block_pos_old, fFinalize);
1454  status &= UndoFileSeq().Flush(undo_pos_old, fFinalize);
1455  if (!status) {
1456  AbortNode("Flushing block file to disk failed. This is likely the "
1457  "result of an I/O error.");
1458  }
1459 }
1460 
// Forward declaration; being `static`, the definition must follow in this
// translation unit.
static bool FindUndoPos(BlockValidationState &state, int nFile,
                        FlatFilePos &pos, unsigned int nAddSize);
1463 
1464 static bool WriteUndoDataForBlock(const CBlockUndo &blockundo,
1465  BlockValidationState &state,
1466  CBlockIndex *pindex,
1467  const CChainParams &chainparams) {
1468  // Write undo information to disk
1469  if (pindex->GetUndoPos().IsNull()) {
1470  FlatFilePos _pos;
1471  if (!FindUndoPos(state, pindex->nFile, _pos,
1472  ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
1473  return error("ConnectBlock(): FindUndoPos failed");
1474  }
1475  if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
1476  chainparams.DiskMagic())) {
1477  return AbortNode(state, "Failed to write undo data");
1478  }
1479 
1480  // update nUndoPos in block index
1481  pindex->nUndoPos = _pos.nPos;
1482  pindex->nStatus = pindex->nStatus.withUndo();
1483  setDirtyBlockIndex.insert(pindex);
1484  }
1485 
1486  return true;
1487 }
1488 
1490 
1491 void ThreadScriptCheck(int worker_num) {
1492  util::ThreadRename(strprintf("scriptch.%i", worker_num));
1493  scriptcheckqueue.Thread();
1494 }
1495 
// Cache of version-bits deployment threshold states, protected by cs_main.
VersionBitsCache versionbitscache GUARDED_BY(cs_main);
1497 
1498 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
1499  const Consensus::Params &params) {
1500  LOCK(cs_main);
1501  int32_t nVersion = VERSIONBITS_TOP_BITS;
1502 
1503  for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
1505  pindexPrev, params, static_cast<Consensus::DeploymentPos>(i),
1506  versionbitscache);
1507  if (state == ThresholdState::LOCKED_IN ||
1508  state == ThresholdState::STARTED) {
1509  nVersion |= VersionBitsMask(
1510  params, static_cast<Consensus::DeploymentPos>(i));
1511  }
1512  }
1513 
1514  // Clear the last 4 bits (miner fund activation).
1515  return nVersion & ~uint32_t(0x0f);
1516 }
1517 
1518 // Returns the script flags which should be checked for the block after
1519 // the given block.
1520 static uint32_t GetNextBlockScriptFlags(const Consensus::Params &params,
1521  const CBlockIndex *pindex) {
1522  uint32_t flags = SCRIPT_VERIFY_NONE;
1523 
1524  // Start enforcing P2SH (BIP16)
1525  if ((pindex->nHeight + 1) >= params.BIP16Height) {
1526  flags |= SCRIPT_VERIFY_P2SH;
1527  }
1528 
1529  // Start enforcing the DERSIG (BIP66) rule.
1530  if ((pindex->nHeight + 1) >= params.BIP66Height) {
1531  flags |= SCRIPT_VERIFY_DERSIG;
1532  }
1533 
1534  // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule.
1535  if ((pindex->nHeight + 1) >= params.BIP65Height) {
1537  }
1538 
1539  // Start enforcing CSV (BIP68, BIP112 and BIP113) rule.
1540  if ((pindex->nHeight + 1) >= params.CSVHeight) {
1542  }
1543 
1544  // If the UAHF is enabled, we start accepting replay protected txns
1545  if (IsUAHFenabled(params, pindex)) {
1546  flags |= SCRIPT_VERIFY_STRICTENC;
1548  }
1549 
1550  // If the DAA HF is enabled, we start rejecting transaction that use a high
1551  // s in their signature. We also make sure that signature that are supposed
1552  // to fail (for instance in multisig or other forms of smart contracts) are
1553  // null.
1554  if (IsDAAEnabled(params, pindex)) {
1555  flags |= SCRIPT_VERIFY_LOW_S;
1556  flags |= SCRIPT_VERIFY_NULLFAIL;
1557  }
1558 
1559  // When the magnetic anomaly fork is enabled, we start accepting
1560  // transactions using the OP_CHECKDATASIG opcode and it's verify
1561  // alternative. We also start enforcing push only signatures and
1562  // clean stack.
1563  if (IsMagneticAnomalyEnabled(params, pindex)) {
1565  flags |= SCRIPT_VERIFY_SIGPUSHONLY;
1566  flags |= SCRIPT_VERIFY_CLEANSTACK;
1567  }
1568 
1569  if (IsGravitonEnabled(params, pindex)) {
1571  flags |= SCRIPT_VERIFY_MINIMALDATA;
1572  }
1573 
1574  if (IsPhononEnabled(params, pindex)) {
1575  flags |= SCRIPT_ENFORCE_SIGCHECKS;
1576  }
1577 
1578  // We make sure this node will have replay protection during the next hard
1579  // fork.
1580  if (IsReplayProtectionEnabled(params, pindex)) {
1582  }
1583 
1584  return flags;
1585 }
1586 
// Cumulative benchmarking counters (in microseconds, except nBlocksTotal),
// reported via BCLog::BENCH log lines in ConnectBlock.
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
static int64_t nBlocksTotal = 0;
1595 
1603  CBlockIndex *pindex, CCoinsViewCache &view,
1604  const CChainParams &params,
1605  BlockValidationOptions options,
1606  bool fJustCheck) {
1607  AssertLockHeld(cs_main);
1608  assert(pindex);
1609  assert(*pindex->phashBlock == block.GetHash());
1610  int64_t nTimeStart = GetTimeMicros();
1611 
1612  const Consensus::Params &consensusParams = params.GetConsensus();
1613 
1614  // Check it again in case a previous version let a bad block in
1615  // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
1616  // ContextualCheckBlockHeader() here. This means that if we add a new
1617  // consensus rule that is enforced in one of those two functions, then we
1618  // may have let in a block that violates the rule prior to updating the
1619  // software, and we would NOT be enforcing the rule here. Fully solving
1620  // upgrade from one software version to the next after a consensus rule
1621  // change is potentially tricky and issue-specific.
1622  // Also, currently the rule against blocks more than 2 hours in the future
1623  // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
1624  // re-enforce that rule here (at least until we make it impossible for
1625  // GetAdjustedTime() to go backward).
1626  if (!CheckBlock(block, state, consensusParams,
1627  options.withCheckPoW(!fJustCheck)
1628  .withCheckMerkleRoot(!fJustCheck))) {
1630  // We don't write down blocks to disk if they may have been
1631  // corrupted, so this should be impossible unless we're having
1632  // hardware problems.
1633  return AbortNode(state, "Corrupt block found indicating potential "
1634  "hardware failure; shutting down");
1635  }
1636  return error("%s: Consensus::CheckBlock: %s", __func__,
1637  state.ToString());
1638  }
1639 
1640  // Verify that the view's current state corresponds to the previous block
1641  BlockHash hashPrevBlock =
1642  pindex->pprev == nullptr ? BlockHash() : pindex->pprev->GetBlockHash();
1643  assert(hashPrevBlock == view.GetBestBlock());
1644 
1645  nBlocksTotal++;
1646 
1647  // Special case for the genesis block, skipping connection of its
1648  // transactions (its coinbase is unspendable)
1649  if (block.GetHash() == consensusParams.hashGenesisBlock) {
1650  if (!fJustCheck) {
1651  view.SetBestBlock(pindex->GetBlockHash());
1652  }
1653 
1654  return true;
1655  }
1656 
1657  bool fScriptChecks = true;
1658  if (!hashAssumeValid.IsNull()) {
1659  // We've been configured with the hash of a block which has been
1660  // externally verified to have a valid history. A suitable default value
1661  // is included with the software and updated from time to time. Because
1662  // validity relative to a piece of software is an objective fact these
1663  // defaults can be easily reviewed. This setting doesn't force the
1664  // selection of any particular chain but makes validating some faster by
1665  // effectively caching the result of part of the verification.
1666  BlockMap::const_iterator it =
1667  m_blockman.m_block_index.find(hashAssumeValid);
1668  if (it != m_blockman.m_block_index.end()) {
1669  if (it->second->GetAncestor(pindex->nHeight) == pindex &&
1670  pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
1671  pindexBestHeader->nChainWork >= nMinimumChainWork) {
1672  // This block is a member of the assumed verified chain and an
1673  // ancestor of the best header.
1674  // Script verification is skipped when connecting blocks under
1675  // the assumevalid block. Assuming the assumevalid block is
1676  // valid this is safe because block merkle hashes are still
1677  // computed and checked, Of course, if an assumed valid block is
1678  // invalid due to false scriptSigs this optimization would allow
1679  // an invalid chain to be accepted.
1680  // The equivalent time check discourages hash power from
1681  // extorting the network via DOS attack into accepting an
1682  // invalid block through telling users they must manually set
1683  // assumevalid. Requiring a software change or burying the
1684  // invalid block, regardless of the setting, makes it hard to
1685  // hide the implication of the demand. This also avoids having
1686  // release candidates that are hardly doing any signature
1687  // verification at all in testing without having to artificially
1688  // set the default assumed verified block further back. The test
1689  // against nMinimumChainWork prevents the skipping when denied
1690  // access to any chain at least as good as the expected chain.
1691  fScriptChecks =
1693  *pindexBestHeader, *pindex, *pindexBestHeader,
1694  consensusParams) <= 60 * 60 * 24 * 7 * 2);
1695  }
1696  }
1697  }
1698 
1699  int64_t nTime1 = GetTimeMicros();
1700  nTimeCheck += nTime1 - nTimeStart;
1701  LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1702  MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO,
1703  nTimeCheck * MILLI / nBlocksTotal);
1704 
1705  // Do not allow blocks that contain transactions which 'overwrite' older
1706  // transactions, unless those are already completely spent. If such
1707  // overwrites are allowed, coinbases and transactions depending upon those
1708  // can be duplicated to remove the ability to spend the first instance --
1709  // even after being sent to another address.
1710  // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html
1711  // for more information. This logic is not necessary for memory pool
1712  // transactions, as AcceptToMemoryPool already refuses previously-known
1713  // transaction ids entirely. This rule was originally applied to all blocks
1714  // with a timestamp after March 15, 2012, 0:00 UTC. Now that the whole
1715  // chain is irreversibly beyond that time it is applied to all blocks
1716  // except the two in the chain that violate it. This prevents exploiting
1717  // the issue against nodes during their initial block download.
1718  bool fEnforceBIP30 = !((pindex->nHeight == 91842 &&
1719  pindex->GetBlockHash() ==
1720  uint256S("0x00000000000a4d0a398161ffc163c503763"
1721  "b1f4360639393e0e4c8e300e0caec")) ||
1722  (pindex->nHeight == 91880 &&
1723  pindex->GetBlockHash() ==
1724  uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
1725  "610ae9601ac046a38084ccb7cd721")));
1726 
1727  // Once BIP34 activated it was not possible to create new duplicate
1728  // coinbases and thus other than starting with the 2 existing duplicate
1729  // coinbase pairs, not possible to create overwriting txs. But by the time
1730  // BIP34 activated, in each of the existing pairs the duplicate coinbase had
1731  // overwritten the first before the first had been spent. Since those
1732  // coinbases are sufficiently buried it's no longer possible to create
1733  // further duplicate transactions descending from the known pairs either. If
1734  // we're on the known chain at height greater than where BIP34 activated, we
1735  // can save the db accesses needed for the BIP30 check.
1736 
1737  // BIP34 requires that a block at height X (block X) has its coinbase
1738  // scriptSig start with a CScriptNum of X (indicated height X). The above
1739  // logic of no longer requiring BIP30 once BIP34 activates is flawed in the
1740  // case that there is a block X before the BIP34 height of 227,931 which has
1741  // an indicated height Y where Y is greater than X. The coinbase for block
1742  // X would also be a valid coinbase for block Y, which could be a BIP30
1743  // violation. An exhaustive search of all mainnet coinbases before the
1744  // BIP34 height which have an indicated height greater than the block height
1745  // reveals many occurrences. The 3 lowest indicated heights found are
1746  // 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
1747  // heights would be the first opportunity for BIP30 to be violated.
1748 
1749  // The search reveals a great many blocks which have an indicated height
1750  // greater than 1,983,702, so we simply remove the optimization to skip
1751  // BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
1752  // that block in another 25 years or so, we should take advantage of a
1753  // future consensus change to do a new and improved version of BIP34 that
1754  // will actually prevent ever creating any duplicate coinbases in the
1755  // future.
1756  static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
1757 
1758  // There is no potential to create a duplicate coinbase at block 209,921
1759  // because this is still before the BIP34 height and so explicit BIP30
1760  // checking is still active.
1761 
1762  // The final case is block 176,684 which has an indicated height of
1763  // 490,897. Unfortunately, this issue was not discovered until about 2 weeks
1764  // before block 490,897 so there was not much opportunity to address this
1765  // case other than to carefully analyze it and determine it would not be a
1766  // problem. Block 490,897 was, in fact, mined with a different coinbase than
1767  // block 176,684, but it is important to note that even if it hadn't been or
1768  // is remined on an alternate fork with a duplicate coinbase, we would still
1769  // not run into a BIP30 violation. This is because the coinbase for 176,684
1770  // is spent in block 185,956 in transaction
1771  // d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
1772  // spending transaction can't be duplicated because it also spends coinbase
1773  // 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
1774  // coinbase has an indicated height of over 4.2 billion, and wouldn't be
1775  // duplicatable until that height, and it's currently impossible to create a
1776  // chain that long. Nevertheless we may wish to consider a future soft fork
1777  // which retroactively prevents block 490,897 from creating a duplicate
1778  // coinbase. The two historical BIP30 violations often provide a confusing
1779  // edge case when manipulating the UTXO and it would be simpler not to have
1780  // another edge case to deal with.
1781 
1782  // testnet3 has no blocks before the BIP34 height with indicated heights
1783  // post BIP34 before approximately height 486,000,000 and presumably will
1784  // be reset before it reaches block 1,983,702 and starts doing unnecessary
1785  // BIP30 checking again.
1786  assert(pindex->pprev);
1787  CBlockIndex *pindexBIP34height =
1788  pindex->pprev->GetAncestor(consensusParams.BIP34Height);
1789  // Only continue to enforce if we're below BIP34 activation height or the
1790  // block hash at that height doesn't correspond.
1791  fEnforceBIP30 =
1792  fEnforceBIP30 &&
1793  (!pindexBIP34height ||
1794  !(pindexBIP34height->GetBlockHash() == consensusParams.BIP34Hash));
1795 
1796  // TODO: Remove BIP30 checking from block height 1,983,702 on, once we have
1797  // a consensus change that ensures coinbases at those heights can not
1798  // duplicate earlier coinbases.
1799  if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
1800  for (const auto &tx : block.vtx) {
1801  for (size_t o = 0; o < tx->vout.size(); o++) {
1802  if (view.HaveCoin(COutPoint(tx->GetId(), o))) {
1803  LogPrintf("ERROR: ConnectBlock(): tried to overwrite "
1804  "transaction\n");
1806  "bad-txns-BIP30");
1807  }
1808  }
1809  }
1810  }
1811 
1812  // Start enforcing BIP68 (sequence locks).
1813  int nLockTimeFlags = 0;
1814  if (pindex->nHeight >= consensusParams.CSVHeight) {
1815  nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
1816  }
1817 
1818  const uint32_t flags =
1819  GetNextBlockScriptFlags(consensusParams, pindex->pprev);
1820 
1821  int64_t nTime2 = GetTimeMicros();
1822  nTimeForks += nTime2 - nTime1;
1823  LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
1824  MILLI * (nTime2 - nTime1), nTimeForks * MICRO,
1825  nTimeForks * MILLI / nBlocksTotal);
1826 
1827  std::vector<int> prevheights;
1828  Amount nFees = Amount::zero();
1829  int nInputs = 0;
1830 
1831  // Limit the total executed signature operations in the block, a consensus
1832  // rule. Tracking during the CPU-consuming part (validation of uncached
1833  // inputs) is per-input atomic and validation in each thread stops very
1834  // quickly after the limit is exceeded, so an adversary cannot cause us to
1835  // exceed the limit by much at all.
1836  CheckInputsLimiter nSigChecksBlockLimiter(
1838 
1839  std::vector<TxSigCheckLimiter> nSigChecksTxLimiters;
1840  nSigChecksTxLimiters.resize(block.vtx.size() - 1);
1841 
1842  CBlockUndo blockundo;
1843  blockundo.vtxundo.resize(block.vtx.size() - 1);
1844 
1845  CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
1846  : nullptr);
1847 
1848  // Add all outputs
1849  try {
1850  for (const auto &ptx : block.vtx) {
1851  AddCoins(view, *ptx, pindex->nHeight);
1852  }
1853  } catch (const std::logic_error &e) {
1854  // This error will be thrown from AddCoin if we try to connect a block
1855  // containing duplicate transactions. Such a thing should normally be
1856  // caught early nowadays (due to ContextualCheckBlock's CTOR
1857  // enforcement) however some edge cases can escape that:
1858  // - ContextualCheckBlock does not get re-run after saving the block to
1859  // disk, and older versions may have saved a weird block.
1860  // - its checks are not applied to pre-CTOR chains, which we might visit
1861  // with checkpointing off.
1862  LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
1864  "tx-duplicate");
1865  }
1866 
1867  size_t txIndex = 0;
1868  for (const auto &ptx : block.vtx) {
1869  const CTransaction &tx = *ptx;
1870  const bool isCoinBase = tx.IsCoinBase();
1871  nInputs += tx.vin.size();
1872 
1873  {
1874  Amount txfee = Amount::zero();
1875  TxValidationState tx_state;
1876  if (!isCoinBase &&
1877  !Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight,
1878  txfee)) {
1879  // Any transaction validation failure in ConnectBlock is a block
1880  // consensus failure.
1882  tx_state.GetRejectReason(),
1883  tx_state.GetDebugMessage());
1884 
1885  return error("%s: Consensus::CheckTxInputs: %s, %s", __func__,
1886  tx.GetId().ToString(), state.ToString());
1887  }
1888  nFees += txfee;
1889  }
1890 
1891  if (!MoneyRange(nFees)) {
1892  LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n",
1893  __func__);
1895  "bad-txns-accumulated-fee-outofrange");
1896  }
1897 
1898  // The following checks do not apply to the coinbase.
1899  if (isCoinBase) {
1900  continue;
1901  }
1902 
1903  // Check that transaction is BIP68 final BIP68 lock checks (as
1904  // opposed to nLockTime checks) must be in ConnectBlock because they
1905  // require the UTXO set.
1906  prevheights.resize(tx.vin.size());
1907  for (size_t j = 0; j < tx.vin.size(); j++) {
1908  prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
1909  }
1910 
1911  if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) {
1912  LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n",
1913  __func__);
1915  "bad-txns-nonfinal");
1916  }
1917 
1918  // Don't cache results if we're actually connecting blocks (still
1919  // consult the cache, though).
1920  bool fCacheResults = fJustCheck;
1921 
1922  const bool fEnforceSigCheck = flags & SCRIPT_ENFORCE_SIGCHECKS;
1923  if (!fEnforceSigCheck) {
1924  // Historically, there has been transactions with a very high
1925  // sigcheck count, so we need to disable this check for such
1926  // transactions.
1927  nSigChecksTxLimiters[txIndex] = TxSigCheckLimiter::getDisabled();
1928  }
1929 
1930  std::vector<CScriptCheck> vChecks;
1931  // nSigChecksRet may be accurate (found in cache) or 0 (checks were
1932  // deferred into vChecks).
1933  int nSigChecksRet;
1934  TxValidationState tx_state;
1935  if (fScriptChecks &&
1936  !CheckInputScripts(tx, tx_state, view, flags, fCacheResults,
1937  fCacheResults, PrecomputedTransactionData(tx),
1938  nSigChecksRet, nSigChecksTxLimiters[txIndex],
1939  &nSigChecksBlockLimiter, &vChecks)) {
1940  // Any transaction validation failure in ConnectBlock is a block
1941  // consensus failure
1943  tx_state.GetRejectReason(),
1944  tx_state.GetDebugMessage());
1945  return error(
1946  "ConnectBlock(): CheckInputScripts on %s failed with %s",
1947  tx.GetId().ToString(), state.ToString());
1948  }
1949 
1950  control.Add(vChecks);
1951 
1952  // Note: this must execute in the same iteration as CheckTxInputs (not
1953  // in a separate loop) in order to detect double spends. However,
1954  // this does not prevent double-spending by duplicated transaction
1955  // inputs in the same transaction (cf. CVE-2018-17144) -- that check is
1956  // done in CheckBlock (CheckRegularTransaction).
1957  SpendCoins(view, tx, blockundo.vtxundo.at(txIndex), pindex->nHeight);
1958  txIndex++;
1959  }
1960 
1961  int64_t nTime3 = GetTimeMicros();
1962  nTimeConnect += nTime3 - nTime2;
1964  " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) "
1965  "[%.2fs (%.2fms/blk)]\n",
1966  (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2),
1967  MILLI * (nTime3 - nTime2) / block.vtx.size(),
1968  nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs - 1),
1969  nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
1970 
1971  Amount blockReward =
1972  nFees + GetBlockSubsidy(pindex->nHeight, consensusParams);
1973  if (block.vtx[0]->GetValueOut() > blockReward) {
1974  LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs "
1975  "limit=%d)\n",
1976  block.vtx[0]->GetValueOut(), blockReward);
1978  "bad-cb-amount");
1979  }
1980 
1981  const std::vector<CTxDestination> whitelist =
1982  GetMinerFundWhitelist(consensusParams, pindex->pprev);
1983  if (!whitelist.empty()) {
1984  const Amount required = GetMinerFundAmount(blockReward);
1985 
1986  for (auto &o : block.vtx[0]->vout) {
1987  if (o.nValue < required) {
1988  // This output doesn't qualify because its amount is too low.
1989  continue;
1990  }
1991 
1992  CTxDestination address;
1993  if (!ExtractDestination(o.scriptPubKey, address)) {
1994  // Cannot decode address.
1995  continue;
1996  }
1997 
1998  if (std::find(whitelist.begin(), whitelist.end(), address) !=
1999  whitelist.end()) {
2000  goto MinerFundSuccess;
2001  }
2002  }
2003 
2004  // We did not find an output that match the miner fund requirements.
2006  "bad-cb-minerfund");
2007  }
2008 
2009 MinerFundSuccess:
2010 
2011  if (!control.Wait()) {
2013  "blk-bad-inputs", "parallel script check failed");
2014  }
2015 
2016  int64_t nTime4 = GetTimeMicros();
2017  nTimeVerify += nTime4 - nTime2;
2018  LogPrint(
2019  BCLog::BENCH,
2020  " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n",
2021  nInputs - 1, MILLI * (nTime4 - nTime2),
2022  nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs - 1),
2023  nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
2024 
2025  if (fJustCheck) {
2026  return true;
2027  }
2028 
2029  if (!WriteUndoDataForBlock(blockundo, state, pindex, params)) {
2030  return false;
2031  }
2032 
2033  if (!pindex->IsValid(BlockValidity::SCRIPTS)) {
2035  setDirtyBlockIndex.insert(pindex);
2036  }
2037 
2038  assert(pindex->phashBlock);
2039  // add this block to the view's block chain
2040  view.SetBestBlock(pindex->GetBlockHash());
2041 
2042  int64_t nTime5 = GetTimeMicros();
2043  nTimeIndex += nTime5 - nTime4;
2044  LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
2045  MILLI * (nTime5 - nTime4), nTimeIndex * MICRO,
2046  nTimeIndex * MILLI / nBlocksTotal);
2047 
2048  int64_t nTime6 = GetTimeMicros();
2049  nTimeCallbacks += nTime6 - nTime5;
2050  LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n",
2051  MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO,
2052  nTimeCallbacks * MILLI / nBlocksTotal);
2053 
2054  return true;
2055 }
2056 
// Convenience overload: classify the coins-cache size using the default
// budgets — the global coin cache target (nCoinCacheUsage) and the
// -maxmempool command-line limit (given in MB; converted to bytes here).
// NOTE(review): the return-type line (presumably CoinsCacheSizeState) was
// dropped by the doxygen extraction — confirm against the original file.
2058 CChainState::GetCoinsCacheSizeState(const CTxMemPool &tx_pool) {
2059  return this->GetCoinsCacheSizeState(
2060  tx_pool, nCoinCacheUsage,
2061  gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000);
2062 }
2063 
// Classify how full the in-memory coins cache is, relative to the combined
// budget of the coin cache itself plus whatever part of the mempool budget
// is currently unused. The caller (FlushStateToDisk) uses the result to
// decide whether a flush to disk is needed.
// NOTE(review): the return-type line and the two early-return lines
// (presumably `return CoinsCacheSizeState::CRITICAL;` after the LogPrintf
// and `return CoinsCacheSizeState::LARGE;` in the else-if branch) were
// dropped by the doxygen extraction — confirm against the original file.
2065 CChainState::GetCoinsCacheSizeState(const CTxMemPool &tx_pool,
2066  size_t max_coins_cache_size_bytes,
2067  size_t max_mempool_size_bytes) {
2068  int64_t nMempoolUsage = tx_pool.DynamicMemoryUsage();
2069  int64_t cacheSize = CoinsTip().DynamicMemoryUsage();
 // Total budget = coin cache budget + unused mempool headroom (clamped so
 // a mempool over its own limit never shrinks the coin cache budget).
2070  int64_t nTotalSpace =
2071  max_coins_cache_size_bytes +
2072  std::max<int64_t>(max_mempool_size_bytes - nMempoolUsage, 0);
2073 
 // "Large" means within both 10% and 10MB of the total budget (the
 // threshold is the max of the two, so both margins must be breached).
2075  static constexpr int64_t MAX_BLOCK_COINSDB_USAGE_BYTES =
2076  10 * 1024 * 1024; // 10MB
2077  int64_t large_threshold = std::max(
2078  (9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE_BYTES);
2079 
2080  if (cacheSize > nTotalSpace) {
2081  LogPrintf("Cache size (%s) exceeds total space (%s)\n", cacheSize,
2082  nTotalSpace);
2084  } else if (cacheSize > large_threshold) {
2086  }
2087  return CoinsCacheSizeState::OK;
2088 }
2089 
// Flush block-file data, dirty block-index entries, and (possibly) the
// coins cache to disk, depending on the requested mode:
//  - IF_NEEDED: full flush only when the coins cache is critically large;
//  - PERIODIC: write the block index on DATABASE_WRITE_INTERVAL, flush the
//    cache on DATABASE_FLUSH_INTERVAL or when the cache is large;
//  - ALWAYS: unconditionally do a full flush.
// When pruning is active, also computes and unlinks prunable block files
// (a prune always forces a full flush so no pruned data is referenced).
// Returns false via AbortNode on disk-space or database-write failure.
// NOTE(review): the function signature line was dropped by the doxygen
// extraction, as were a few statements (e.g. the LOG_TIME_MILLIS opening
// for the coins flush around the strprintf below, and presumably a
// GetMainSignals().ChainStateFlushed(...) call in the full_flush_completed
// branch) — confirm against the original file.
2091  BlockValidationState &state,
2092  FlushStateMode mode,
2093  int nManualPruneHeight) {
2094  LOCK(cs_main);
2095  assert(this->CanFlushToDisk());
 // Timestamps of the last block-index write / last coins-cache flush,
 // shared across calls (microseconds; 0 means "not yet initialized").
2096  static int64_t nLastWrite = 0;
2097  static int64_t nLastFlush = 0;
2098  std::set<int> setFilesToPrune;
2099  bool full_flush_completed = false;
2100 
 // Snapshot cache stats up-front for logging.
2101  const size_t coins_count = CoinsTip().GetCacheSize();
2102  const size_t coins_mem_usage = CoinsTip().DynamicMemoryUsage();
2103 
2104  try {
2105  {
2106  bool fFlushForPrune = false;
2107  bool fDoFullFlush = false;
2108  CoinsCacheSizeState cache_state =
2109  GetCoinsCacheSizeState(::g_mempool);
2110  LOCK(cs_LastBlockFile);
 // Determine which block files can be pruned; a manual prune height
 // takes precedence over automatic target-driven pruning.
2111  if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
2112  !fReindex) {
2113  if (nManualPruneHeight > 0) {
2114  LOG_TIME_MILLIS("find files to prune (manual)",
2115  BCLog::BENCH);
2116  FindFilesToPruneManual(g_chainman, setFilesToPrune,
2117  nManualPruneHeight);
2118  } else {
2119  LOG_TIME_MILLIS("find files to prune", BCLog::BENCH);
2120  FindFilesToPrune(g_chainman, setFilesToPrune,
2121  chainparams.PruneAfterHeight());
2122  fCheckForPruning = false;
2123  }
2124  if (!setFilesToPrune.empty()) {
2125  fFlushForPrune = true;
 // Persist the fact that we have pruned, once, so a later
 // restart knows the block files are incomplete.
2126  if (!fHavePruned) {
2127  pblocktree->WriteFlag("prunedblockfiles", true);
2128  fHavePruned = true;
2129  }
2130  }
2131  }
2132  int64_t nNow = GetTimeMicros();
2133  // Avoid writing/flushing immediately after startup.
2134  if (nLastWrite == 0) {
2135  nLastWrite = nNow;
2136  }
2137  if (nLastFlush == 0) {
2138  nLastFlush = nNow;
2139  }
2140  // The cache is large and we're within 10% and 10 MiB of the limit,
2141  // but we have time now (not in the middle of a block processing).
2142  bool fCacheLarge = mode == FlushStateMode::PERIODIC &&
2143  cache_state >= CoinsCacheSizeState::LARGE;
2144  // The cache is over the limit, we have to write now.
2145  bool fCacheCritical = mode == FlushStateMode::IF_NEEDED &&
2146  cache_state >= CoinsCacheSizeState::CRITICAL;
2147  // It's been a while since we wrote the block index to disk. Do this
2148  // frequently, so we don't need to redownload after a crash.
2149  bool fPeriodicWrite =
2150  mode == FlushStateMode::PERIODIC &&
2151  nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
2152  // It's been very long since we flushed the cache. Do this
2153  // infrequently, to optimize cache usage.
2154  bool fPeriodicFlush =
2155  mode == FlushStateMode::PERIODIC &&
2156  nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
2157  // Combine all conditions that result in a full cache flush.
2158  fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge ||
2159  fCacheCritical || fPeriodicFlush || fFlushForPrune;
2160  // Write blocks and block index to disk.
2161  if (fDoFullFlush || fPeriodicWrite) {
2162  // Depend on nMinDiskSpace to ensure we can write block index
2163  if (!CheckDiskSpace(GetBlocksDir())) {
2164  return AbortNode(state, "Disk space is too low!",
2165  _("Disk space is too low!"));
2166  }
2167 
2168  {
2169  LOG_TIME_MILLIS("write block and undo data to disk",
2170  BCLog::BENCH);
2171 
2172  // First make sure all block and undo data is flushed to
2173  // disk.
2174  FlushBlockFile();
2175  }
2176  // Then update all block file information (which may refer to
2177  // block and undo files).
2178  {
2179  LOG_TIME_MILLIS("write block index to disk", BCLog::BENCH);
2180 
2181  std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
2182  vFiles.reserve(setDirtyFileInfo.size());
2183  for (int i : setDirtyFileInfo) {
2184  vFiles.push_back(std::make_pair(i, &vinfoBlockFile[i]));
2185  }
2186 
2187  setDirtyFileInfo.clear();
2188 
2189  std::vector<const CBlockIndex *> vBlocks;
2190  vBlocks.reserve(setDirtyBlockIndex.size());
2191  for (const CBlockIndex *cbi : setDirtyBlockIndex) {
2192  vBlocks.push_back(cbi);
2193  }
2194 
2195  setDirtyBlockIndex.clear();
2196 
2197  if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
2198  vBlocks)) {
2199  return AbortNode(
2200  state, "Failed to write to block index database");
2201  }
2202  }
2203 
2204  // Finally remove any pruned files
2205  if (fFlushForPrune) {
2206  LOG_TIME_MILLIS("unlink pruned files", BCLog::BENCH);
2207 
2208  UnlinkPrunedFiles(setFilesToPrune);
2209  }
2210  nLastWrite = nNow;
2211  }
2212  // Flush best chain related state. This can only be done if the
2213  // blocks / block index write was also done.
2214  if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
2216  strprintf("write coins cache to disk (%d coins, %.2fkB)",
2217  coins_count, coins_mem_usage / 1000));
2218 
2219  // Typical Coin structures on disk are around 48 bytes in size.
2220  // Pushing a new one to the database can cause it to be written
2221  // twice (once in the log, and once in the tables). This is
2222  // already an overestimation, as most will delete an existing
2223  // entry or overwrite one. Still, use a conservative safety
2224  // factor of 2.
2225  if (!CheckDiskSpace(GetDataDir(),
2226  48 * 2 * 2 * CoinsTip().GetCacheSize())) {
2227  return AbortNode(state, "Disk space is too low!",
2228  _("Disk space is too low!"));
2229  }
2230 
2231  // Flush the chainstate (which may refer to block index
2232  // entries).
2233  if (!CoinsTip().Flush()) {
2234  return AbortNode(state, "Failed to write to coin database");
2235  }
2236  nLastFlush = nNow;
2237  full_flush_completed = true;
2238  }
2239  }
2240 
2241  if (full_flush_completed) {
2242  // Update best block in wallet (so we can detect restored wallets).
2244  }
2245  } catch (const std::runtime_error &e) {
2246  return AbortNode(state, std::string("System error while flushing: ") +
2247  e.what());
2248  }
2249  return true;
2250 }
2251 
// Unconditionally flush the full chain state to disk
// (FlushStateMode::ALWAYS). Failures are only logged, not propagated.
// NOTE(review): the enclosing function signature line (presumably
// `void CChainState::ForceFlushStateToDisk()`) was dropped by the doxygen
// extraction — confirm against the original file.
2253  BlockValidationState state;
2254  const CChainParams &chainparams = Params();
2255  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
2256  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2257  state.ToString());
2258  }
2259 }
2260 
// Request pruning (by raising fCheckForPruning) and trigger a flush with
// FlushStateMode::NONE so that only the prune-related work is performed.
// Failures are only logged, not propagated.
// NOTE(review): the enclosing function signature line was dropped by the
// doxygen extraction — confirm against the original file.
2262  BlockValidationState state;
2263  fCheckForPruning = true;
2264  const CChainParams &chainparams = Params();
2265  if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
2266  LogPrintf("%s: failed to flush state (%s)\n", __func__,
2267  state.ToString());
2268  }
2269 }
2270 
// Bookkeeping after the active chain tip changed: bump the mempool's
// transactions-updated counter, publish the new best block hash under
// g_best_block_mutex (waking any waiters on g_best_block_cv, e.g. the
// miner's longpoll), and log a one-line summary of the new tip.
// NOTE(review): the last two argument lines of the LogPrintf call
// (presumably the coins-cache memory usage and coin count feeding the
// "cache=%.1fMiB(%utxo)" fields) were dropped by the doxygen extraction —
// confirm against the original file.
2272 static void UpdateTip(const CChainParams &params, CBlockIndex *pindexNew)
2273  EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
2274  // New best block
2275  g_mempool.AddTransactionsUpdated(1);
2276 
2277  {
2278  LOCK(g_best_block_mutex);
2279  g_best_block = pindexNew->GetBlockHash();
2280  g_best_block_cv.notify_all();
2281  }
2282 
2283  LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%ld "
2284  "date='%s' progress=%f cache=%.1fMiB(%utxo)\n",
2285  __func__, pindexNew->GetBlockHash().ToString(),
2286  pindexNew->nHeight, pindexNew->nVersion,
2287  log(pindexNew->nChainWork.getdouble()) / log(2.0),
2288  pindexNew->GetChainTxCount(),
2289  FormatISO8601DateTime(pindexNew->GetBlockTime()),
2290  GuessVerificationProgress(params.TxData(), pindexNew),
2292  (1.0 / (1 << 20)),
2294 }
2295 
// Disconnect the current m_chain tip from the chain state. On success the
// tip becomes pindexDelete->pprev. The disconnected block's transactions
// are handed to disconnectpool (if provided) for later reprocessing by
// updateMempoolForReorg; if the disconnect rewinds past a script-flag
// upgrade boundary, the entire mempool is first imported into
// disconnectpool and cleared, since its contents may not be valid under the
// pre-upgrade rules. Also rolls the finalization point back one block if
// the disconnected block was finalized. Returns false on block-read,
// disconnect, or flush failure.
// NOTE(review): the function signature line and a couple of statement
// openings (the DISCONNECT_OK comparison constant, the LogPrintf opening
// for the "Disconnecting mempool" message) were dropped by the doxygen
// extraction — confirm against the original file.
2308  BlockValidationState &state,
2309  DisconnectedBlockTransactions *disconnectpool) {
2310  AssertLockHeld(cs_main);
2311  CBlockIndex *pindexDelete = m_chain.Tip();
2312  const Consensus::Params &consensusParams = params.GetConsensus();
2313 
2314  assert(pindexDelete);
2315 
2316  // Read block from disk.
2317  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
2318  CBlock &block = *pblock;
2319  if (!ReadBlockFromDisk(block, pindexDelete, consensusParams)) {
2320  return error("DisconnectTip(): Failed to read block");
2321  }
2322 
2323  // Apply the block atomically to the chain state.
2324  int64_t nStart = GetTimeMicros();
2325  {
2326  CCoinsViewCache view(&CoinsTip());
2327  assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
2328  if (DisconnectBlock(block, pindexDelete, view) !=
2330  return error("DisconnectTip(): DisconnectBlock %s failed",
2331  pindexDelete->GetBlockHash().ToString());
2332  }
2333 
 // Write the UTXO changes into the parent (chainstate) cache; this
 // cannot fail for an in-memory view, hence the assert.
2334  bool flushed = view.Flush();
2335  assert(flushed);
2336  }
2337 
2338  LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
2339  (GetTimeMicros() - nStart) * MILLI);
2340 
2341  // Write the chain state to disk, if necessary.
2342  if (!FlushStateToDisk(params, state, FlushStateMode::IF_NEEDED)) {
2343  return false;
2344  }
2345 
2346  // If this block is deactivating a fork, we move all mempool transactions
2347  // in front of disconnectpool for reprocessing in a future
2348  // updateMempoolForReorg call
2349  if (pindexDelete->pprev != nullptr &&
2350  GetNextBlockScriptFlags(consensusParams, pindexDelete) !=
2351  GetNextBlockScriptFlags(consensusParams, pindexDelete->pprev)) {
2353  "Disconnecting mempool due to rewind of upgrade block\n");
2354  if (disconnectpool) {
2355  disconnectpool->importMempool(g_mempool);
2356  }
2357  g_mempool.clear();
2358  }
2359 
2360  if (disconnectpool) {
2361  disconnectpool->addForBlock(block.vtx, g_mempool);
2362  }
2363 
2364  // If the tip is finalized, then undo it.
2365  if (m_finalizedBlockIndex == pindexDelete) {
2366  m_finalizedBlockIndex = pindexDelete->pprev;
2367  }
2368 
2369  m_chain.SetTip(pindexDelete->pprev);
2370 
2371  // Update ::ChainActive() and related variables.
2372  UpdateTip(params, pindexDelete->pprev);
2373  // Let wallets know transactions went from 1-confirmed to
2374  // 0-confirmed or conflicted:
2375  GetMainSignals().BlockDisconnected(pblock, pindexDelete);
2376  return true;
2377 }
2378 
// Cumulative benchmarking accumulators (microseconds) for the phases of
// ConnectTip; reported via BCLog::BENCH log lines.
2379 static int64_t nTimeReadFromDisk = 0;
2380 static int64_t nTimeConnectTotal = 0;
2381 static int64_t nTimeFlush = 0;
2382 static int64_t nTimeChainState = 0;
2383 static int64_t nTimePostConnect = 0;
2384 
// One (block index, block) pair recorded while connecting blocks; the
// elements of ConnectTrace::blocksConnected below.
// NOTE(review): the struct header line and possibly further member lines
// were dropped by the doxygen extraction — confirm against the original
// file.
2386  CBlockIndex *pindex = nullptr;
2387  std::shared_ptr<const CBlock> pblock;
2389 };
2390 
// Tracks the blocks connected during a single chain-activation step so the
// caller can fire per-block notifications afterwards. Blocks are recorded
// via BlockConnected() and retrieved, in connection order, via
// GetBlocksConnected().
// NOTE(review): the class declaration line and the BlockConnected signature
// opening were dropped by the doxygen extraction — confirm against the
// original file.
2399 private:
2400  std::vector<PerBlockConnectTrace> blocksConnected;
2401 
2402 public:
 // Start with one empty trailing entry; see GetBlocksConnected() for why.
2403  explicit ConnectTrace() : blocksConnected(1) {}
2404 
 // Record a newly connected (pindex, pblock) pair into the trailing empty
 // slot, then append a fresh empty slot for the next block.
2406  std::shared_ptr<const CBlock> pblock) {
2407  assert(!blocksConnected.back().pindex);
2408  assert(pindex);
2409  assert(pblock);
2410  blocksConnected.back().pindex = pindex;
2411  blocksConnected.back().pblock = std::move(pblock);
2412  blocksConnected.emplace_back();
2413  }
2414 
2415  std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
2416  // We always keep one extra block at the end of our list because blocks
2417  // are added after all the conflicted transactions have been filled in.
2418  // Thus, the last entry should always be an empty one waiting for the
2419  // transactions from the next block. We pop the last entry here to make
2420  // sure the list we return is sane.
2421  assert(!blocksConnected.back().pindex);
2422  blocksConnected.pop_back();
2423  return blocksConnected;
2424  }
2425 };
2426 
// Try to make pindex the finalized block, i.e. the point behind which
// reorganizations are rejected. Fails when the block is invalid or when it
// lies on a different fork than an already-finalized block; succeeds
// trivially when pindex is already covered by the current finalization.
// NOTE(review): the function signature line and the `return state.Invalid(
// BlockValidationResult::..., ...)` openings in the two error branches were
// dropped by the doxygen extraction — confirm against the original file.
2428  const CBlockIndex *pindex) {
2429  AssertLockHeld(cs_main);
2430  if (pindex->nStatus.isInvalid()) {
2431  // We try to finalize an invalid block.
2432  LogPrintf("ERROR: %s: Trying to finalize invalid block %s\n", __func__,
2433  pindex->GetBlockHash().ToString());
2435  "finalize-invalid-block");
2436  }
2437 
2438  // Check that the request is consistent with current finalization.
2439  if (m_finalizedBlockIndex &&
2440  !AreOnTheSameFork(pindex, m_finalizedBlockIndex)) {
2441  LogPrintf("ERROR: %s: Trying to finalize block %s which conflicts with "
2442  "already finalized block\n",
2443  __func__, pindex->GetBlockHash().ToString());
2445  "bad-fork-prior-finalized");
2446  }
2447 
2448  if (IsBlockFinalized(pindex)) {
2449  // The block is already finalized.
2450  return true;
2451  }
2452 
2453  // We have a new block to finalize.
2454  m_finalizedBlockIndex = pindex;
2455  return true;
2456 }
2457 
// Find the block to auto-finalize for the chain ending in pindexNew: the
// ancestor -maxreorgdepth blocks back whose header has been known for at
// least -finalizationdelay seconds. Walks toward the genesis (stopping at
// the already-finalized block) until an eligible candidate is found.
// Returns nullptr when auto-finalization is disabled (maxreorgdepth < 0
// makes GetAncestor return null), when the startup delay has not expired,
// or when no candidate qualifies.
// NOTE(review): the function signature line (presumably returning
// `const CBlockIndex *`) was dropped by the doxygen extraction — confirm
// against the original file.
2459  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
2460  AssertLockHeld(cs_main);
2461 
2462  const int32_t maxreorgdepth =
2463  gArgs.GetArg("-maxreorgdepth", DEFAULT_MAX_REORG_DEPTH);
2464 
2465  const int64_t finalizationdelay =
2466  gArgs.GetArg("-finalizationdelay", DEFAULT_MIN_FINALIZATION_DELAY);
2467 
2468  // Find our candidate.
2469  // If maxreorgdepth is < 0 pindex will be null and auto finalization
2470  // disabled
2471  const CBlockIndex *pindex =
2472  pindexNew->GetAncestor(pindexNew->nHeight - maxreorgdepth);
2473 
2474  int64_t now = GetTime();
2475 
2476  // If the finalization delay is not expired since the startup time,
2477  // finalization should be avoided. Header receive time is not saved to disk
2478  // and so cannot be anterior to startup time.
2479  if (now < (GetStartupTime() + finalizationdelay)) {
2480  return nullptr;
2481  }
2482 
2483  // While our candidate is not eligible (finalization delay not expired), try
2484  // the previous one.
2485  while (pindex && (pindex != ::ChainstateActive().GetFinalizedBlock())) {
2486  // Check that the block to finalize is known for a long enough time.
2487  // This test will ensure that an attacker could not cause a block to
2488  // finalize by forking the chain with a depth > maxreorgdepth.
2489  // If the block is loaded from disk, header receive time is 0 and the
2490  // block will be finalized. This is safe because the delay since the
2491  // node startup is already expired.
2492  auto headerReceivedTime = pindex->GetHeaderReceivedTime();
2493 
2494  // If finalization delay is <= 0, finalization always occurs immediately
2495  if (now >= (headerReceivedTime + finalizationdelay)) {
2496  return pindex;
2497  }
2498 
2499  pindex = pindex->pprev;
2500  }
2501 
2502  return nullptr;
2503 }
2504 
// Connect a new block as the m_chain tip. pindexNew must be a direct child
// of the current tip. If pblock is null the block is read from disk;
// otherwise the supplied copy is used. Applies the block to the UTXO view
// via ConnectBlock, advances the auto-finalization point, flushes chain
// state if needed, removes confirmed/conflicting transactions from the
// mempool, and records the block in connectTrace for later notifications.
// If the block activates a script-flag upgrade, the whole mempool is moved
// into disconnectpool for revalidation under the new rules. Returns false
// (with state set) on any failure; per-phase timings are logged under
// BCLog::BENCH.
// NOTE(review): the function signature line and several statement openings
// (the FlushStateMode argument of the FlushStateToDisk call, a few
// `LogPrint(BCLog::BENCH,` openings) were dropped by the doxygen
// extraction — confirm against the original file.
2514  CBlockIndex *pindexNew,
2515  const std::shared_ptr<const CBlock> &pblock,
2516  ConnectTrace &connectTrace,
2517  DisconnectedBlockTransactions &disconnectpool) {
2518  AssertLockHeld(cs_main);
2519  AssertLockHeld(g_mempool.cs);
2520 
2521  const CChainParams &params = config.GetChainParams();
2522  const Consensus::Params &consensusParams = params.GetConsensus();
2523 
2524  assert(pindexNew->pprev == m_chain.Tip());
2525  // Read block from disk.
2526  int64_t nTime1 = GetTimeMicros();
2527  std::shared_ptr<const CBlock> pthisBlock;
2528  if (!pblock) {
2529  std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
2530  if (!ReadBlockFromDisk(*pblockNew, pindexNew, consensusParams)) {
2531  return AbortNode(state, "Failed to read block");
2532  }
2533  pthisBlock = pblockNew;
2534  } else {
2535  pthisBlock = pblock;
2536  }
2537 
2538  const CBlock &blockConnecting = *pthisBlock;
2539 
2540  // Apply the block atomically to the chain state.
2541  int64_t nTime2 = GetTimeMicros();
2542  nTimeReadFromDisk += nTime2 - nTime1;
2543  int64_t nTime3;
2544  LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n",
2545  (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
2546  {
2547  CCoinsViewCache view(&CoinsTip());
2548  bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, params,
2549  BlockValidationOptions(config));
 // BlockChecked fires regardless of success so listeners see failures.
2550  GetMainSignals().BlockChecked(blockConnecting, state);
2551  if (!rv) {
2552  if (state.IsInvalid()) {
2553  InvalidBlockFound(pindexNew, state);
2554  }
2555 
2556  return error("%s: ConnectBlock %s failed, %s", __func__,
2557  pindexNew->GetBlockHash().ToString(),
2558  state.ToString());
2559  }
2560 
2561  // Update the finalized block.
2562  const CBlockIndex *pindexToFinalize = FindBlockToFinalize(pindexNew);
2563  if (pindexToFinalize && !MarkBlockAsFinal(state, pindexToFinalize)) {
2564  return error("ConnectTip(): MarkBlockAsFinal %s failed (%s)",
2565  pindexNew->GetBlockHash().ToString(),
2566  state.ToString());
2567  }
2568 
2569  nTime3 = GetTimeMicros();
2570  nTimeConnectTotal += nTime3 - nTime2;
2571  assert(nBlocksTotal > 0);
2573  " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
2574  (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO,
2575  nTimeConnectTotal * MILLI / nBlocksTotal);
2576  bool flushed = view.Flush();
2577  assert(flushed);
2578  }
2579 
2580  int64_t nTime4 = GetTimeMicros();
2581  nTimeFlush += nTime4 - nTime3;
2582  LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
2583  (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO,
2584  nTimeFlush * MILLI / nBlocksTotal);
2585 
2586  // Write the chain state to disk, if necessary.
2587  if (!FlushStateToDisk(config.GetChainParams(), state,
2589  return false;
2590  }
2591 
2592  int64_t nTime5 = GetTimeMicros();
2593  nTimeChainState += nTime5 - nTime4;
2595  " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
2596  (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO,
2597  nTimeChainState * MILLI / nBlocksTotal);
2598 
2599  // Remove conflicting transactions from the mempool.;
2600  g_mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
2601  disconnectpool.removeForBlock(blockConnecting.vtx);
2602 
2603  // If this block is activating a fork, we move all mempool transactions
2604  // in front of disconnectpool for reprocessing in a future
2605  // updateMempoolForReorg call
2606  if (pindexNew->pprev != nullptr &&
2607  GetNextBlockScriptFlags(consensusParams, pindexNew) !=
2608  GetNextBlockScriptFlags(consensusParams, pindexNew->pprev)) {
2610  "Disconnecting mempool due to acceptance of upgrade block\n");
2611  disconnectpool.importMempool(g_mempool);
2612  }
2613 
2614  // Update m_chain & related variables.
2615  m_chain.SetTip(pindexNew);
2616  UpdateTip(params, pindexNew);
2617 
2618  int64_t nTime6 = GetTimeMicros();
2619  nTimePostConnect += nTime6 - nTime5;
2620  nTimeTotal += nTime6 - nTime1;
2622  " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
2623  (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO,
2624  nTimePostConnect * MILLI / nBlocksTotal);
2625  LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
2626  (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO,
2627  nTimeTotal * MILLI / nBlocksTotal);
2628 
2629  connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
2630  return true;
2631 }
2632 
// Return the most-work chain-tip candidate whose whole path back to the
// active chain's fork point is usable (valid, not parked, block data
// present), or nullptr if no candidate exists. Candidates that would reorg
// past the finalized block are marked failed. Parked chains may be
// automatically unparked (when -automaticunparking) once they accumulate
// roughly twice the proof-of-work the active chain added since the fork,
// with a reduced half-block penalty at reorg depths 1-3. Unusable
// candidates and their descendants are pruned from setBlockIndexCandidates
// (missing-data blocks are requeued via m_blocks_unlinked for when their
// data arrives). When avalanche is enabled, the chosen candidate is also
// submitted for reconciliation.
// NOTE(review): the function signature line was dropped by the doxygen
// extraction; the m_blocks_unlinked insert opening and a warning call in
// the invalid/parked branch also appear truncated — confirm against the
// original file.
2638  AssertLockHeld(cs_main);
2639  do {
2640  CBlockIndex *pindexNew = nullptr;
2641 
2642  // Find the best candidate header.
2643  {
2644  std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
2645  it = setBlockIndexCandidates.rbegin();
2646  if (it == setBlockIndexCandidates.rend()) {
2647  return nullptr;
2648  }
2649  pindexNew = *it;
2650  }
2651 
2652  // If this block will cause a finalized block to be reorged, then we
2653  // mark it as invalid.
2654  if (m_finalizedBlockIndex &&
2655  !AreOnTheSameFork(pindexNew, m_finalizedBlockIndex)) {
2656  LogPrintf("Mark block %s invalid because it forks prior to the "
2657  "finalization point %d.\n",
2658  pindexNew->GetBlockHash().ToString(),
2659  m_finalizedBlockIndex->nHeight);
2660  pindexNew->nStatus = pindexNew->nStatus.withFailed();
2661  InvalidChainFound(pindexNew);
2662  }
2663 
 // Auto-unparking defaults to off when avalanche is enabled, since
 // avalanche is expected to resolve parked chains itself.
2664  const bool fAvalancheEnabled =
2665  gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED);
2666  const bool fAutoUnpark =
2667  gArgs.GetBoolArg("-automaticunparking", !fAvalancheEnabled);
2668 
2669  const CBlockIndex *pindexFork = m_chain.FindFork(pindexNew);
2670 
2671  // Check whether all blocks on the path between the currently active
2672  // chain and the candidate are valid. Just going until the active chain
2673  // is an optimization, as we know all blocks in it are valid already.
2674  CBlockIndex *pindexTest = pindexNew;
2675  bool hasValidAncestor = true;
2676  while (hasValidAncestor && pindexTest && pindexTest != pindexFork) {
2677  assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
2678 
2679  // If this is a parked chain, but it has enough PoW, clear the park
2680  // state.
2681  bool fParkedChain = pindexTest->nStatus.isOnParkedChain();
2682  if (fAutoUnpark && fParkedChain) {
2683  const CBlockIndex *pindexTip = m_chain.Tip();
2684 
2685  // During initialization, pindexTip and/or pindexFork may be
2686  // null. In this case, we just ignore the fact that the chain is
2687  // parked.
2688  if (!pindexTip || !pindexFork) {
2689  UnparkBlock(pindexTest);
2690  continue;
2691  }
2692 
2693  // A parked chain can be unparked if it has twice as much PoW
2694  // accumulated as the main chain has since the fork block.
2695  CBlockIndex const *pindexExtraPow = pindexTip;
2696  arith_uint256 requiredWork = pindexTip->nChainWork;
2697  switch (pindexTip->nHeight - pindexFork->nHeight) {
2698  // Limit the penality for depth 1, 2 and 3 to half a block
2699  // worth of work to ensure we don't fork accidentally.
2700  case 3:
2701  case 2:
2702  pindexExtraPow = pindexExtraPow->pprev;
2703  // FALLTHROUGH
2704  case 1: {
2705  const arith_uint256 deltaWork =
2706  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2707  requiredWork += (deltaWork >> 1);
2708  break;
2709  }
2710  default:
2711  requiredWork +=
2712  pindexExtraPow->nChainWork - pindexFork->nChainWork;
2713  break;
2714  }
2715 
2716  if (pindexNew->nChainWork > requiredWork) {
2717  // We have enough, clear the parked state.
2718  LogPrintf("Unpark chain up to block %s as it has "
2719  "accumulated enough PoW.\n",
2720  pindexNew->GetBlockHash().ToString());
2721  fParkedChain = false;
2722  UnparkBlock(pindexTest);
2723  }
2724  }
2725 
2726  // Pruned nodes may have entries in setBlockIndexCandidates for
2727  // which block files have been deleted. Remove those as candidates
2728  // for the most work chain if we come across them; we can't switch
2729  // to a chain unless we have all the non-active-chain parent blocks.
2730  bool fInvalidChain = pindexTest->nStatus.isInvalid();
2731  bool fMissingData = !pindexTest->nStatus.hasData();
2732  if (!(fInvalidChain || fParkedChain || fMissingData)) {
2733  // The current block is acceptable, move to the parent, up to
2734  // the fork point.
2735  pindexTest = pindexTest->pprev;
2736  continue;
2737  }
2738 
2739  // Candidate chain is not usable (either invalid or parked or
2740  // missing data)
2741  hasValidAncestor = false;
2742  setBlockIndexCandidates.erase(pindexTest);
2743 
2744  if (fInvalidChain &&
2745  (pindexBestInvalid == nullptr ||
2746  pindexNew->nChainWork > pindexBestInvalid->nChainWork)) {
2747  pindexBestInvalid = pindexNew;
2748  }
2749 
2750  if (fParkedChain &&
2751  (pindexBestParked == nullptr ||
2752  pindexNew->nChainWork > pindexBestParked->nChainWork)) {
2753  pindexBestParked = pindexNew;
2754  }
2755 
2756  LogPrintf("Considered switching to better tip %s but that chain "
2757  "contains a%s%s%s block.\n",
2758  pindexNew->GetBlockHash().ToString(),
2759  fInvalidChain ? "n invalid" : "",
2760  fParkedChain ? " parked" : "",
2761  fMissingData ? " missing-data" : "");
2762 
2763  CBlockIndex *pindexFailed = pindexNew;
2764  // Remove the entire chain from the set.
2765  while (pindexTest != pindexFailed) {
2766  if (fInvalidChain || fParkedChain) {
2767  pindexFailed->nStatus =
2768  pindexFailed->nStatus.withFailedParent(fInvalidChain)
2769  .withParkedParent(fParkedChain);
2770  } else if (fMissingData) {
2771  // If we're missing data, then add back to
2772  // m_blocks_unlinked, so that if the block arrives in the
2773  // future we can try adding to setBlockIndexCandidates
2774  // again.
2776  std::make_pair(pindexFailed->pprev, pindexFailed));
2777  }
2778  setBlockIndexCandidates.erase(pindexFailed);
2779  pindexFailed = pindexFailed->pprev;
2780  }
2781 
2782  if (fInvalidChain || fParkedChain) {
2783  // We discovered a new chain tip that is either parked or
2784  // invalid, we may want to warn.
2786  }
2787  }
2788 
2789  if (fAvalancheEnabled && g_avalanche) {
2790  g_avalanche->addBlockToReconcile(pindexNew);
2791  }
2792 
2793  // We found a candidate that has valid ancestors. This is our guy.
2794  if (hasValidAncestor) {
2795  return pindexNew;
2796  }
2797  } while (true);
2798 }
2799 
// NOTE(review): doxygen-extraction artifact — the enclosing signature
// (presumably void CChainState::PruneBlockIndexCandidates(), rendered lines
// 2800-2804) is missing from this dump; the body below is intact.
// Purpose (from the visible code): delete from setBlockIndexCandidates every
// entry that sorts strictly below the current tip, since such entries can
// never become the best chain.
2805  // Note that we can't delete the current block itself, as we may need to
2806  // return to it later in case a reorganization to a better block fails.
2807  auto it = setBlockIndexCandidates.begin();
2808  while (it != setBlockIndexCandidates.end() &&
2809  setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
// erase(it++) keeps the iterator valid: it is advanced before the erase.
2810  setBlockIndexCandidates.erase(it++);
2811  }
2812 
2813  // Either the current tip or a successor of it we're working towards is left
2814  // in setBlockIndexCandidates.
2815  assert(!setBlockIndexCandidates.empty());
2816 }
2817 
// NOTE(review): extraction artifact — the function-name line (rendered 2825,
// presumably "bool CChainState::ActivateBestChainStep(") is missing; the
// parameter list below is the visible remainder of the signature.
// Purpose (from the visible code): try to make some progress towards
// pindexMostWork: disconnect blocks not on the target chain, then connect up
// to 32 blocks toward it. Sets fInvalidFound when a connected block fails
// consensus checks. Requires cs_main held (AssertLockHeld below).
2826  const Config &config, BlockValidationState &state,
2827  CBlockIndex *pindexMostWork, const std::shared_ptr<const CBlock> &pblock,
2828  bool &fInvalidFound, ConnectTrace &connectTrace) {
2829  AssertLockHeld(cs_main);
2830 
2831  const CBlockIndex *pindexOldTip = m_chain.Tip();
2832  const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
2833 
2834  // Disconnect active blocks which are no longer in the best chain.
2835  bool fBlocksDisconnected = false;
2836  DisconnectedBlockTransactions disconnectpool;
2837  while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
2838  if (!DisconnectTip(config.GetChainParams(), state, &disconnectpool)) {
2839  // This is likely a fatal error, but keep the mempool consistent,
2840  // just in case. Only remove from the mempool in this case.
2841  disconnectpool.updateMempoolForReorg(config, false, g_mempool);
2842 
2843  // If we're unable to disconnect a block during normal operation,
2844  // then that is a failure of our local system -- we should abort
2845  // rather than stay on a less work chain.
2846  AbortNode(state,
2847  "Failed to disconnect block; see debug.log for details");
2848  return false;
2849  }
2850 
2851  fBlocksDisconnected = true;
2852  }
2853 
2854  // Build list of new blocks to connect.
2855  std::vector<CBlockIndex *> vpindexToConnect;
2856  bool fContinue = true;
2857  int nHeight = pindexFork ? pindexFork->nHeight : -1;
2858  while (fContinue && nHeight != pindexMostWork->nHeight) {
2859  // Don't iterate the entire list of potential improvements toward the
2860  // best tip, as we likely only need a few blocks along the way.
2861  int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
2862  vpindexToConnect.clear();
2863  vpindexToConnect.reserve(nTargetHeight - nHeight);
2864  CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
2865  while (pindexIter && pindexIter->nHeight != nHeight) {
2866  vpindexToConnect.push_back(pindexIter);
2867  pindexIter = pindexIter->pprev;
2868  }
2869 
2870  nHeight = nTargetHeight;
2871 
2872  // Connect new blocks.
2873  for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
// pblock is only forwarded for the tip block itself; intermediate blocks
// are re-read from disk by ConnectTip (empty shared_ptr passed).
2874  if (!ConnectTip(config, state, pindexConnect,
2875  pindexConnect == pindexMostWork
2876  ? pblock
2877  : std::shared_ptr<const CBlock>(),
2878  connectTrace, disconnectpool)) {
2879  if (state.IsInvalid()) {
2880  // The block violates a consensus rule.
// NOTE(review): rendered line 2882 is missing here — presumably the
// right-hand side of the comparison (a BlockValidationResult enumerator,
// likely BLOCK_MUTATED) that exempts mutated blocks from
// InvalidChainFound. Verify against the original source.
2881  if (state.GetResult() !=
2883  InvalidChainFound(vpindexToConnect.back());
2884  }
2885  state = BlockValidationState();
2886  fInvalidFound = true;
2887  fContinue = false;
2888  break;
2889  }
2890 
2891  // A system error occurred (disk space, database error, ...).
2892  // Make the mempool consistent with the current tip, just in
2893  // case any observers try to use it before shutdown.
2894  disconnectpool.updateMempoolForReorg(config, false, g_mempool);
2895  return false;
2896  } else {
// NOTE(review): rendered line 2897 missing — presumably a call to
// PruneBlockIndexCandidates(); confirm against the original source.
2898  if (!pindexOldTip ||
2899  m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
2900  // We're in a better position than we were. Return
2901  // temporarily to release the lock.
2902  fContinue = false;
2903  break;
2904  }
2905  }
2906  }
2907  }
2908 
2909  if (fBlocksDisconnected || !disconnectpool.isEmpty()) {
2910  // If any blocks were disconnected, we need to update the mempool even
2911  // if disconnectpool is empty. The disconnectpool may also be non-empty
2912  // if the mempool was imported due to new validation rules being in
2913  // effect.
2914  LogPrint(BCLog::MEMPOOL, "Updating mempool due to reorganization or "
2915  "rules upgrade/downgrade\n");
2916  disconnectpool.updateMempoolForReorg(config, true, g_mempool);
2917  }
2918 
2919  g_mempool.check(&CoinsTip());
2920 
2921  // Callbacks/notifications for a new best chain.
2922  if (fInvalidFound) {
2923  CheckForkWarningConditionsOnNewFork(pindexMostWork);
2924  } else {
// NOTE(review): rendered line 2925 missing — presumably
// CheckForkWarningConditions(); confirm against the original source.
2926  }
2927 
2928  return true;
2929 }
2930 
// NOTE(review): extraction artifact — the signature (rendered line 2931) and
// all three return statements (rendered 2933, 2936, 2938) are missing. From
// the visible structure and the later calls to
// GetSynchronizationState(fInitialBlockDownload), this presumably is
// "static SynchronizationState GetSynchronizationState(bool init)" returning
// POST_INIT / INIT_REINDEX / INIT_DOWNLOAD — confirm against the original.
2932  if (!init) {
2934  }
2935  if (::fReindex) {
2937  }
2939 }
2940 
// Fire a UI notification when the best-known header (pindexBestHeader) has
// changed since the last call. Returns true when a notification was sent.
// The previous header tip is remembered in a function-local static, so this
// is stateful across calls; cs_main is taken only to read the tip, and the
// notification itself is deliberately sent without cs_main held.
2941 static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
2942  bool fNotify = false;
2943  bool fInitialBlockDownload = false;
2944  static CBlockIndex *pindexHeaderOld = nullptr;
2945  CBlockIndex *pindexHeader = nullptr;
2946  {
2947  LOCK(cs_main);
2948  pindexHeader = pindexBestHeader;
2949 
2950  if (pindexHeader != pindexHeaderOld) {
2951  fNotify = true;
// NOTE(review): rendered line 2953 missing — the right-hand side of this
// assignment (presumably ::ChainstateActive().IsInitialBlockDownload());
// confirm against the original source.
2952  fInitialBlockDownload =
2954  pindexHeaderOld = pindexHeader;
2955  }
2956  }
2957 
2958  // Send block tip changed notifications without cs_main
2959  if (fNotify) {
2960  uiInterface.NotifyHeaderTip(
2961  GetSynchronizationState(fInitialBlockDownload), pindexHeader);
2962  }
2963  return fNotify;
2964 }
2965 
// NOTE(review): extraction artifact — the signature (rendered line 2966,
// presumably "static void LimitValidationInterfaceQueue()") and the body of
// the if (rendered 2970, presumably SyncWithValidationInterfaceQueue()) are
// missing; confirm against the original source.
// Purpose (from the visible code): when more than 10 validation-interface
// callbacks are pending, block until the queue drains; must not hold cs_main.
2967  AssertLockNotHeld(cs_main);
2968 
2969  if (GetMainSignals().CallbacksPending() > 10) {
2971  }
2972 }
2973 
// NOTE(review): extraction artifact — the function-name line (rendered 2974,
// presumably "bool CChainState::ActivateBestChain(const Config &config,") is
// missing; the parameters below are the visible remainder of the signature.
// Purpose (from the visible code): repeatedly find the most-work candidate
// chain and step toward it via ActivateBestChainStep, releasing cs_main
// between batches, sending tip notifications, honoring -stopatheight and
// shutdown requests, and finally flushing state to disk.
2975  BlockValidationState &state,
2976  std::shared_ptr<const CBlock> pblock) {
2977  // Note that while we're often called here from ProcessNewBlock, this is
2978  // far from a guarantee. Things in the P2P/RPC will often end up calling
2979  // us in the middle of ProcessNewBlock - do not assume pblock is set
2980  // sanely for performance or correctness!
2981  AssertLockNotHeld(cs_main);
2982 
2983  const CChainParams &params = config.GetChainParams();
2984 
2985  // ABC maintains a fair degree of expensive-to-calculate internal state
2986  // because this function periodically releases cs_main so that it does not
2987  // lock up other threads for too long during large connects - and to allow
2988  // for e.g. the callback queue to drain we use m_cs_chainstate to enforce
2989  // mutual exclusion so that only one caller may execute this function at a
2990  // time
// NOTE(review): rendered line 2991 missing — presumably
// LOCK(m_cs_chainstate); (the mutual exclusion described above). Confirm
// against the original source.
2992 
2993  CBlockIndex *pindexMostWork = nullptr;
2994  CBlockIndex *pindexNewTip = nullptr;
2995  int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
2996  do {
2997  boost::this_thread::interruption_point();
2998 
2999  // Block until the validation queue drains. This should largely
3000  // never happen in normal operation, however may happen during
3001  // reindex, causing memory blowup if we run too far ahead.
3002  // Note that if a validationinterface callback ends up calling
3003  // ActivateBestChain this may lead to a deadlock! We should
3004  // probably have a DEBUG_LOCKORDER test for this in the future.
// NOTE(review): rendered line 3005 missing — presumably
// LimitValidationInterfaceQueue(); confirm against the original source.
3006 
3007  {
3008  // Lock transaction pool for at least as long as it takes for
3009  // connectTrace to be consumed
3010  LOCK2(cs_main, ::g_mempool.cs);
3011  CBlockIndex *starting_tip = m_chain.Tip();
3012  bool blocks_connected = false;
3013  do {
3014  // We absolutely may not unlock cs_main until we've made forward
3015  // progress (with the exception of shutdown due to hardware
3016  // issues, low disk space, etc).
3017 
3018  // Destructed before cs_main is unlocked
3019  ConnectTrace connectTrace;
3020 
3021  if (pindexMostWork == nullptr) {
3022  pindexMostWork = FindMostWorkChain();
3023  }
3024 
3025  // Whether we have anything to do at all.
3026  if (pindexMostWork == nullptr ||
3027  pindexMostWork == m_chain.Tip()) {
3028  break;
3029  }
3030 
3031  bool fInvalidFound = false;
3032  std::shared_ptr<const CBlock> nullBlockPtr;
3033  if (!ActivateBestChainStep(
3034  config, state, pindexMostWork,
3035  pblock && pblock->GetHash() ==
3036  pindexMostWork->GetBlockHash()
3037  ? pblock
3038  : nullBlockPtr,
3039  fInvalidFound, connectTrace)) {
3040  // A system error occurred
3041  return false;
3042  }
3043  blocks_connected = true;
3044 
3045  if (fInvalidFound) {
3046  // Wipe cache, we may need another branch now.
3047  pindexMostWork = nullptr;
3048  }
3049 
3050  pindexNewTip = m_chain.Tip();
3051  for (const PerBlockConnectTrace &trace :
3052  connectTrace.GetBlocksConnected()) {
3053  assert(trace.pblock && trace.pindex);
3054  GetMainSignals().BlockConnected(trace.pblock, trace.pindex);
3055  }
3056  } while (!m_chain.Tip() ||
3057  (starting_tip && CBlockIndexWorkComparator()(
3058  m_chain.Tip(), starting_tip)));
3059 
3060  // Check the index once we're done with the above loop, since
3061  // we're going to release cs_main soon. If the index is in a bad
3062  // state now, then it's better to know immediately rather than
3063  // randomly have it cause a problem in a race.
3064  CheckBlockIndex(params.GetConsensus());
3065 
3066  if (!blocks_connected) {
3067  return true;
3068  }
3069 
3070  const CBlockIndex *pindexFork = m_chain.FindFork(starting_tip);
3071  bool fInitialDownload = IsInitialBlockDownload();
3072 
3073  // Notify external listeners about the new tip.
3074  // Enqueue while holding cs_main to ensure that UpdatedBlockTip is
3075  // called in the order in which blocks are connected
3076  if (pindexFork != pindexNewTip) {
3077  // Notify ValidationInterface subscribers
3078  GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
3079  fInitialDownload);
3080 
3081  // Always notify the UI if a new block tip was connected
3082  uiInterface.NotifyBlockTip(
3083  GetSynchronizationState(fInitialDownload), pindexNewTip);
3084  }
3085  }
3086  // When we reach this point, we switched to a new tip (stored in
3087  // pindexNewTip).
3088 
3089  if (nStopAtHeight && pindexNewTip &&
3090  pindexNewTip->nHeight >= nStopAtHeight) {
3091  StartShutdown();
3092  }
3093 
3094  // We check shutdown only after giving ActivateBestChainStep a chance to
3095  // run once so that we never shutdown before connecting the genesis
3096  // block during LoadChainTip(). Previously this caused an assert()
3097  // failure during shutdown in such cases as the UTXO DB flushing checks
3098  // that the best block hash is non-null.
3099  if (ShutdownRequested()) {
3100  break;
3101  }
3102  } while (pindexNewTip != pindexMostWork);
3103 
3104  // Write changes periodically to disk, after relay.
3105  if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
3106  return false;
3107  }
3108 
3109  return true;
3110 }
3111 
// Free-function convenience wrapper: activate the best chain on the active
// chainstate.
// NOTE(review): rendered line 3114 is missing — presumably
// "return ::ChainstateActive().ActivateBestChain(config, state," to which the
// visible "std::move(pblock));" below belongs. Confirm against the original.
3112 bool ActivateBestChain(const Config &config, BlockValidationState &state,
3113  std::shared_ptr<const CBlock> pblock) {
3115  std::move(pblock));
3116 }
3117 
// NOTE(review): extraction artifact — the function-name line (rendered 3118,
// presumably "bool CChainState::PreciousBlock(const Config &config,") is
// missing; the parameters below are the visible remainder of the signature.
// Purpose (from the visible code): bias the fork-choice toward pindex by
// manipulating its sequence id, unpark it if needed, re-insert it as a tip
// candidate, then run ActivateBestChain.
3119  BlockValidationState &state,
3120  CBlockIndex *pindex) {
3121  {
3122  LOCK(cs_main);
3123  if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
3124  // Nothing to do, this block is not at the tip.
3125  return true;
3126  }
3127 
// NOTE(review): rendered line 3128 missing — presumably the condition
// comparing m_chain.Tip()->nChainWork with a saved nLastPreciousChainwork;
// confirm against the original source.
3129  // The chain has been extended since the last call, reset the
3130  // counter.
// NOTE(review): rendered line 3131 missing — presumably
// nBlockReverseSequenceId = -1; (the counter reset described above).
3132  }
3133 
// NOTE(review): rendered lines 3134 and 3136 missing — presumably the
// nLastPreciousChainwork update and a check that pindex is in
// setBlockIndexCandidates; confirm against the original source.
3135  setBlockIndexCandidates.erase(pindex);
3137  if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
3138  // We can't keep reducing the counter if somebody really wants to
3139  // call preciousblock 2**31-1 times on the same set of tips...
// NOTE(review): rendered line 3140 missing — presumably the assignment
// pindex->nSequenceId = nBlockReverseSequenceId--; confirm against the
// original source.
3141  }
3142 
3143  // In case this was parked, unpark it.
3144  UnparkBlock(pindex);
3145 
3146  // Make sure it is added to the candidate list if appropriate.
3147  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3148  pindex->HaveTxsDownloaded()) {
3149  setBlockIndexCandidates.insert(pindex);
// NOTE(review): rendered line 3150 missing — presumably
// PruneBlockIndexCandidates(); confirm against the original source.
3151  }
3152  }
3153 
3154  return ActivateBestChain(config, state);
3155 }
3156 
3157 bool PreciousBlock(const Config &config, BlockValidationState &state,
3158  CBlockIndex *pindex) {
3159  return ::ChainstateActive().PreciousBlock(config, state, pindex);
3160 }
3161 
// NOTE(review): extraction artifact — the function-name line (rendered 3162,
// presumably "bool CChainState::UnwindBlock(const Config &config,
// BlockValidationState &state,") is missing; the parameters below are the
// visible remainder of the signature.
// Purpose (from the visible code): disconnect the active chain back past
// pindex, marking the disconnected blocks either failed (invalidate == true)
// or parked, while keeping setBlockIndexCandidates and the mempool
// consistent, then notify the UI if the active chain changed.
3163  CBlockIndex *pindex, bool invalidate) {
3164  CBlockIndex *to_mark_failed_or_parked = pindex;
3165  bool pindex_was_in_chain = false;
3166  int disconnected = 0;
3167  const CChainParams &chainparams = config.GetChainParams();
3168 
3169  // We do not allow ActivateBestChain() to run while UnwindBlock() is
3170  // running, as that could cause the tip to change while we disconnect
3171  // blocks. (Note for backport of Core PR16849: we acquire
3172  // LOCK(m_cs_chainstate) in the Park, Invalidate and FinalizeBlock functions
3173  // due to differences in our code)
// NOTE(review): rendered line 3174 missing — presumably
// AssertLockHeld(m_cs_chainstate); confirm against the original source.
3175 
3176  // We'll be acquiring and releasing cs_main below, to allow the validation
3177  // callbacks to run. However, we should keep the block index in a
3178  // consistent state as we disconnect blocks -- in particular we need to
3179  // add equal-work blocks to setBlockIndexCandidates as we disconnect.
3180  // To avoid walking the block index repeatedly in search of candidates,
3181  // build a map once so that we can look up candidate blocks by chain
3182  // work as we go.
3183  std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
3184 
3185  {
3186  LOCK(cs_main);
3187  for (const auto &entry : m_blockman.m_block_index) {
3188  CBlockIndex *candidate = entry.second;
3189  // We don't need to put anything in our active chain into the
3190  // multimap, because those candidates will be found and considered
3191  // as we disconnect.
3192  // Instead, consider only non-active-chain blocks that have at
3193  // least as much work as where we expect the new tip to end up.
3194  if (!m_chain.Contains(candidate) &&
3195  !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
3196  candidate->IsValid(BlockValidity::TRANSACTIONS) &&
3197  candidate->HaveTxsDownloaded()) {
3198  candidate_blocks_by_work.insert(
3199  std::make_pair(candidate->nChainWork, candidate));
3200  }
3201  }
3202  }
3203 
3204  // Disconnect (descendants of) pindex, and mark them invalid.
3205  while (true) {
3206  if (ShutdownRequested()) {
3207  break;
3208  }
3209 
3210  // Make sure the queue of validation callbacks doesn't grow unboundedly.
// NOTE(review): rendered line 3211 missing — presumably
// LimitValidationInterfaceQueue(); confirm against the original source.
3212 
3213  LOCK(cs_main);
3214  // Lock for as long as disconnectpool is in scope to make sure
3215  // UpdateMempoolForReorg is called after DisconnectTip without unlocking
3216  // in between
3217  LOCK(::g_mempool.cs);
3218 
3219  if (!m_chain.Contains(pindex)) {
3220  break;
3221  }
3222 
3223  pindex_was_in_chain = true;
3224  CBlockIndex *invalid_walk_tip = m_chain.Tip();
3225 
3226  // ActivateBestChain considers blocks already in m_chain
3227  // unconditionally valid already, so force disconnect away from it.
3228 
3229  DisconnectedBlockTransactions disconnectpool;
3230 
3231  bool ret = DisconnectTip(chainparams, state, &disconnectpool);
3232 
3233  // DisconnectTip will add transactions to disconnectpool.
3234  // Adjust the mempool to be consistent with the new tip, adding
3235  // transactions back to the mempool if disconnecting was successful,
3236  // and we're not doing a very deep invalidation (in which case
3237  // keeping the mempool up to date is probably futile anyway).
3238  disconnectpool.updateMempoolForReorg(
3239  config, /* fAddToMempool = */ (++disconnected <= 10) && ret,
3240  ::g_mempool);
3241 
3242  if (!ret) {
3243  return false;
3244  }
3245 
3246  assert(invalid_walk_tip->pprev == m_chain.Tip());
3247 
3248  // We immediately mark the disconnected blocks as invalid.
3249  // This prevents a case where pruned nodes may fail to invalidateblock
3250  // and be left unable to start as they have no tip candidates (as there
3251  // are no blocks that meet the "have data and are not invalid per
3252  // nStatus" criteria for inclusion in setBlockIndexCandidates).
3253 
3254  invalid_walk_tip->nStatus =
3255  invalidate ? invalid_walk_tip->nStatus.withFailed()
3256  : invalid_walk_tip->nStatus.withParked();
3257 
3258  setDirtyBlockIndex.insert(invalid_walk_tip);
3259  setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
3260 
3261  if (invalid_walk_tip == to_mark_failed_or_parked->pprev &&
3262  (invalidate ? to_mark_failed_or_parked->nStatus.hasFailed()
3263  : to_mark_failed_or_parked->nStatus.isParked())) {
3264  // We only want to mark the last disconnected block as
3265  // Failed (or Parked); its children need to be FailedParent (or
3266  // ParkedParent) instead.
3267  to_mark_failed_or_parked->nStatus =
3268  (invalidate
3269  ? to_mark_failed_or_parked->nStatus.withFailed(false)
3270  .withFailedParent()
3271  : to_mark_failed_or_parked->nStatus.withParked(false)
3272  .withParkedParent());
3273 
3274  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3275  }
3276 
3277  // Add any equal or more work headers to setBlockIndexCandidates
3278  auto candidate_it = candidate_blocks_by_work.lower_bound(
3279  invalid_walk_tip->pprev->nChainWork);
3280  while (candidate_it != candidate_blocks_by_work.end()) {
3281  if (!CBlockIndexWorkComparator()(candidate_it->second,
3282  invalid_walk_tip->pprev)) {
3283  setBlockIndexCandidates.insert(candidate_it->second);
3284  candidate_it = candidate_blocks_by_work.erase(candidate_it);
3285  } else {
3286  ++candidate_it;
3287  }
3288  }
3289 
3290  // Track the last disconnected block, so we can correct its
3291  // FailedParent (or ParkedParent) status in future iterations, or, if
3292  // it's the last one, call InvalidChainFound on it.
3293  to_mark_failed_or_parked = invalid_walk_tip;
3294  }
3295 
3296  CheckBlockIndex(chainparams.GetConsensus());
3297 
3298  {
3299  LOCK(cs_main);
3300  if (m_chain.Contains(to_mark_failed_or_parked)) {
3301  // If the to-be-marked invalid block is in the active chain,
3302  // something is interfering and we can't proceed.
3303  return false;
3304  }
3305 
3306  // Mark pindex (or the last disconnected block) as invalid (or parked),
3307  // even when it never was in the main chain.
3308  to_mark_failed_or_parked->nStatus =
3309  invalidate ? to_mark_failed_or_parked->nStatus.withFailed()
3310  : to_mark_failed_or_parked->nStatus.withParked();
3311  setDirtyBlockIndex.insert(to_mark_failed_or_parked);
3312  if (invalidate) {
3313  m_blockman.m_failed_blocks.insert(to_mark_failed_or_parked);
3314  }
3315 
3316  // If any new blocks somehow arrived while we were disconnecting
3317  // (above), then the pre-calculation of what should go into
3318  // setBlockIndexCandidates may have missed entries. This would
3319  // technically be an inconsistency in the block index, but if we clean
3320  // it up here, this should be an essentially unobservable error.
3321  // Loop back over all block index entries and add any missing entries
3322  // to setBlockIndexCandidates.
3323  for (const std::pair<const BlockHash, CBlockIndex *> &it :
3324  m_blockman.m_block_index) {
3325  CBlockIndex *i = it.second;
// NOTE(review): rendered line 3326 missing — presumably the start of this
// condition, "if (i->IsValid(BlockValidity::TRANSACTIONS) &&"; confirm
// against the original source.
3327  i->HaveTxsDownloaded() &&
3328  !setBlockIndexCandidates.value_comp()(i, m_chain.Tip())) {
3329  setBlockIndexCandidates.insert(i);
3330  }
3331  }
3332 
3333  if (invalidate) {
3334  InvalidChainFound(to_mark_failed_or_parked);
3335  }
3336  }
3337 
3338  // Only notify about a new block tip if the active chain was modified.
3339  if (pindex_was_in_chain) {
3340  uiInterface.NotifyBlockTip(
// NOTE(review): rendered line 3341 missing — presumably the first argument,
// GetSynchronizationState(IsInitialBlockDownload()),; confirm against the
// original source.
3342  to_mark_failed_or_parked->pprev);
3343  }
3344  return true;
3345 }
3346 
// NOTE(review): extraction artifact — the function-name line (rendered 3347,
// presumably "bool CChainState::InvalidateBlock(const Config &config,") and
// rendered lines 3350/3352 (presumably AssertLockNotHeld(m_cs_chainstate)
// and LOCK(m_cs_chainstate)) are missing; confirm against the original.
// Invalidate = UnwindBlock with invalidate == true (mark Failed).
3348  BlockValidationState &state,
3349  CBlockIndex *pindex) {
3351  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3353 
3354  return UnwindBlock(config, state, pindex, true);
3355 }
3356 
// NOTE(review): extraction artifact — the start of the signature (rendered
// 3357, presumably "bool CChainState::ParkBlock(const Config &config,
// BlockValidationState &state,") and rendered lines 3359/3361 (presumably
// the m_cs_chainstate assertion and lock) are missing; confirm against the
// original. Park = UnwindBlock with invalidate == false (mark Parked).
3358  CBlockIndex *pindex) {
3360  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3362 
3363  return UnwindBlock(config, state, pindex, false);
3364 }
3365 
// NOTE(review): extraction artifact — the function-name line (rendered 3366,
// presumably "bool CChainState::FinalizeBlockAndInvalidate(const Config
// &config,") and rendered lines 3369/3371 (presumably the m_cs_chainstate
// assertion and lock) are missing; confirm against the original.
// Purpose (from the visible code): mark pindex as final, unpark it if
// needed, and if it is not on the active chain, invalidate the competing
// active-chain branch starting right after the fork point.
3367  BlockValidationState &state,
3368  CBlockIndex *pindex) {
3370  // See 'Note for backport of Core PR16849' in CChainState::UnwindBlock
3372 
3373  AssertLockNotHeld(cs_main);
3374  CBlockIndex *pindexToInvalidate = nullptr;
3375  {
3376  LOCK(cs_main);
3377  if (!MarkBlockAsFinal(state, pindex)) {
3378  // state is set by MarkBlockAsFinal.
3379  return false;
3380  }
3381 
3382  // We have a valid candidate, make sure it is not parked.
3383  if (pindex->nStatus.isOnParkedChain()) {
3384  UnparkBlock(pindex);
3385  }
3386 
3387  // If the finalized block is on the active chain, there is no need to
3388  // rewind.
3389  if (::ChainActive().Contains(pindex)) {
3390  return true;
3391  }
3392 
3393  // If the finalized block is not on the active chain, that chain is
3394  // invalid
3395  // ...
3396  const CBlockIndex *pindexFork = ::ChainActive().FindFork(pindex);
3397  pindexToInvalidate = ::ChainActive().Next(pindexFork);
3398  if (!pindexToInvalidate) {
3399  return false;
3400  }
3401  } // end of locked cs_main scope
3402 
3403  // ... therefore, we invalidate the block on the active chain that comes
3404  // immediately after it
3405  return UnwindBlock(config, state, pindexToInvalidate,
3406  true /* invalidating */);
3407 }
3408 
// NOTE(review): extraction artifact — the function-name line (rendered 3410,
// presumably "bool CChainState::UpdateFlagsForBlock(CBlockIndex *pindexBase,")
// is missing; the parameters below are the visible remainder.
// Purpose (from the visible code): apply the status-transforming functor f to
// pindex, but only when pindex descends from pindexBase (or pindexBase is
// null). On a status change, mark the index dirty, drop newly-valid blocks
// from m_failed_blocks, and re-add eligible blocks to
// setBlockIndexCandidates. Returns true iff the status was changed.
3409 template <typename F>
3411  CBlockIndex *pindex, F f) {
3412  BlockStatus newStatus = f(pindex->nStatus);
3413  if (pindex->nStatus != newStatus &&
3414  (!pindexBase ||
3415  pindex->GetAncestor(pindexBase->nHeight) == pindexBase)) {
3416  pindex->nStatus = newStatus;
3417  setDirtyBlockIndex.insert(pindex);
3418  if (newStatus.isValid()) {
3419  m_blockman.m_failed_blocks.erase(pindex);
3420  }
3421 
3422  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
3423  pindex->HaveTxsDownloaded() &&
3424  setBlockIndexCandidates.value_comp()(::ChainActive().Tip(),
3425  pindex)) {
3426  setBlockIndexCandidates.insert(pindex);
3427  }
3428  return true;
3429  }
3430  return false;
3431 }
3432 
// NOTE(review): extraction artifact — the function-name line (rendered 3434,
// presumably "void CChainState::UpdateFlags(CBlockIndex *pindex, CBlockIndex
// *&pindexReset,") is missing; the functor parameters below are the visible
// remainder.
// Purpose (from the visible code): apply f to pindex and every ancestor;
// remember the deepest ancestor actually changed; clear pindexReset (passed
// by reference, presumably a best-invalid/best-parked cache) when it descends
// from a changed ancestor; then sweep the whole block index applying fChild
// to descendants of pindex and fAncestorWasChanged to descendants of the
// deepest changed ancestor.
3433 template <typename F, typename C, typename AC>
3435  F f, C fChild, AC fAncestorWasChanged) {
3436  AssertLockHeld(cs_main);
3437 
3438  // Update the current block and ancestors; while we're doing this, identify
3439  // which was the deepest ancestor we changed.
3440  CBlockIndex *pindexDeepestChanged = pindex;
3441  for (auto pindexAncestor = pindex; pindexAncestor != nullptr;
3442  pindexAncestor = pindexAncestor->pprev) {
3443  if (UpdateFlagsForBlock(nullptr, pindexAncestor, f)) {
3444  pindexDeepestChanged = pindexAncestor;
3445  }
3446  }
3447 
3448  if (pindexReset &&
3449  pindexReset->GetAncestor(pindexDeepestChanged->nHeight) ==
3450  pindexDeepestChanged) {
3451  // reset pindexReset if it had a modified ancestor.
3452  pindexReset = nullptr;
3453  }
3454 
3455  // Update all blocks under modified blocks.
3456  BlockMap::iterator it = m_blockman.m_block_index.begin();
3457  while (it != m_blockman.m_block_index.end()) {
3458  UpdateFlagsForBlock(pindex, it->second, fChild);
3459  UpdateFlagsForBlock(pindexDeepestChanged, it->second,
3460  fAncestorWasChanged);
3461  it++;
3462  }
3463 }
3464 
// NOTE(review): extraction artifact — the signature (rendered line 3465,
// presumably "void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex)")
// is missing; the body below is intact.
// Purpose (from the visible code): clear Failed/FailedParent flags for
// pindex, its ancestors, and affected descendants via UpdateFlags, moving
// the finalization point back to the last common ancestor if needed, and
// resetting pindexBestInvalid when appropriate.
3466  AssertLockHeld(cs_main);
3467 
3468  // In case we are reconsidering something before the finalization point,
3469  // move the finalization point to the last common ancestor.
3470  if (m_finalizedBlockIndex) {
3471  m_finalizedBlockIndex =
3472  LastCommonAncestor(pindex, m_finalizedBlockIndex);
3473  }
3474 
3475  UpdateFlags(
3476  pindex, pindexBestInvalid,
3477  [](const BlockStatus status) {
3478  return status.withClearedFailureFlags();
3479  },
3480  [](const BlockStatus status) {
3481  return status.withClearedFailureFlags();
3482  },
3483  [](const BlockStatus status) {
3484  return status.withFailedParent(false);
3485  });
3486 }
3487 
3490 }
3491 
3492 void CChainState::UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren) {
3493  AssertLockHeld(cs_main);
3494 
3495  UpdateFlags(
3496  pindex, pindexBestParked,
3497  [](const BlockStatus status) {
3498  return status.withClearedParkedFlags();
3499  },
3500  [fClearChildren](const BlockStatus status) {
3501  return fClearChildren ? status.withClearedParkedFlags()
3502  : status.withParkedParent(false);
3503  },
3504  [](const BlockStatus status) {
3505  return status.withParkedParent(false);
3506  });
3507 }
3508 
3511 }
3512 
3513 void UnparkBlock(CBlockIndex *pindex) {
3514  return ::ChainstateActive().UnparkBlockImpl(pindex, false);
3515 }
3516 
3517 bool CChainState::IsBlockFinalized(const CBlockIndex *pindex) const {
3518  AssertLockHeld(cs_main);
3519  return m_finalizedBlockIndex &&
3520  m_finalizedBlockIndex->GetAncestor(pindex->nHeight) == pindex;
3521 }
3522 
// NOTE(review): extraction artifact — the signature (rendered lines
// 3523-3524, presumably "const CBlockIndex *CChainState::GetFinalizedBlock()
// const") is missing; the trivial accessor body below is intact.
3525  AssertLockHeld(cs_main);
3526  return m_finalizedBlockIndex;
3527 }
3528 
// NOTE(review): extraction artifact — the signature (rendered line 3529,
// presumably "CBlockIndex *BlockManager::AddToBlockIndex(const CBlockHeader
// &block)") is missing; the body below is intact.
// Purpose (from the visible code): insert a header into the block index (or
// return the existing entry), wire up pprev/height/skip pointers, compute
// cumulative chain work and nTimeMax, raise validity to TREE, update
// pindexBestHeader, and mark the new entry dirty for persistence.
3530  AssertLockHeld(cs_main);
3531 
3532  // Check for duplicate
3533  BlockHash hash = block.GetHash();
3534  BlockMap::iterator it = m_block_index.find(hash);
3535  if (it != m_block_index.end()) {
3536  return it->second;
3537  }
3538 
3539  // Construct new block index object
3540  CBlockIndex *pindexNew = new CBlockIndex(block);
3541  // We assign the sequence id to blocks only when the full data is available,
3542  // to avoid miners withholding blocks but broadcasting headers, to get a
3543  // competitive advantage.
3544  pindexNew->nSequenceId = 0;
3545  BlockMap::iterator mi =
3546  m_block_index.insert(std::make_pair(hash, pindexNew)).first;
// phashBlock points at the map key so the index entry doesn't store a second
// copy of the hash.
3547  pindexNew->phashBlock = &((*mi).first);
3548  BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
3549  if (miPrev != m_block_index.end()) {
3550  pindexNew->pprev = (*miPrev).second;
3551  pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
3552  pindexNew->BuildSkip();
3553  }
3554  pindexNew->nTimeReceived = GetTime();
3555  pindexNew->nTimeMax =
3556  (pindexNew->pprev
3557  ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
3558  : pindexNew->nTime);
3559  pindexNew->nChainWork =
3560  (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
3561  GetBlockProof(*pindexNew);
3562  pindexNew->RaiseValidity(BlockValidity::TREE);
3563  if (pindexBestHeader == nullptr ||
3564  pindexBestHeader->nChainWork < pindexNew->nChainWork) {
3565  pindexBestHeader = pindexNew;
3566  }
3567 
3568  setDirtyBlockIndex.insert(pindexNew);
3569  return pindexNew;
3570 }
3571 
// NOTE(review): extraction artifact — the function-name line (rendered
// 3572-3576, presumably "void CChainState::ReceivedBlockTransactions(const
// CBlock &block,") is missing; the parameters below are the visible
// remainder of the signature.
// Purpose (from the visible code): record on-disk position and tx count for
// a block whose data just arrived, then, if its whole ancestry has data,
// walk the unlinked-blocks map to promote it and its now-connectable
// descendants into setBlockIndexCandidates; otherwise queue it in
// m_blocks_unlinked until the parent's data arrives.
3577  CBlockIndex *pindexNew,
3578  const FlatFilePos &pos) {
3579  pindexNew->nTx = block.vtx.size();
3580  pindexNew->nSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3581  pindexNew->nFile = pos.nFile;
3582  pindexNew->nDataPos = pos.nPos;
3583  pindexNew->nUndoPos = 0;
3584  pindexNew->nStatus = pindexNew->nStatus.withData();
// NOTE(review): rendered line 3585 missing — presumably
// pindexNew->RaiseValidity(BlockValidity::TRANSACTIONS); confirm against the
// original source.
3586  setDirtyBlockIndex.insert(pindexNew);
3587 
3588  if (pindexNew->UpdateChainStats()) {
3589  // If pindexNew is the genesis block or all parents are
3590  // BLOCK_VALID_TRANSACTIONS.
3591  std::deque<CBlockIndex *> queue;
3592  queue.push_back(pindexNew);
3593 
3594  // Recursively process any descendant blocks that now may be eligible to
3595  // be connected.
3596  while (!queue.empty()) {
3597  CBlockIndex *pindex = queue.front();
3598  queue.pop_front();
3599  pindex->UpdateChainStats();
3600  if (pindex->nSequenceId == 0) {
3601  // We assign a sequence is when transaction are received to
3602  // prevent a miner from being able to broadcast a block but not
3603  // its content. However, a sequence id may have been set
3604  // manually, for instance via PreciousBlock, in which case, we
3605  // don't need to assign one.
3606  pindex->nSequenceId = nBlockSequenceId++;
3607  }
3608 
3609  if (m_chain.Tip() == nullptr ||
3610  !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
3611  setBlockIndexCandidates.insert(pindex);
3612  }
3613 
3614  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
3615  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
3616  range = m_blockman.m_blocks_unlinked.equal_range(pindex);
3617  while (range.first != range.second) {
3618  std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
3619  range.first;
3620  queue.push_back(it->second);
3621  range.first++;
3622  m_blockman.m_blocks_unlinked.erase(it);
3623  }
3624  }
3625  } else if (pindexNew->pprev &&
3626  pindexNew->pprev->IsValid(BlockValidity::TREE)) {
// NOTE(review): rendered line 3627 missing — presumably
// m_blockman.m_blocks_unlinked.insert( (the call this make_pair argument
// belongs to); confirm against the original source.
3628  std::make_pair(pindexNew->pprev, pindexNew));
3629  }
3630 }
3631 
3632 static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize,
3633  unsigned int nHeight, uint64_t nTime,
3634  bool fKnown = false) {
3635  LOCK(cs_LastBlockFile);
3636 
3637  unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
3638  if (vinfoBlockFile.size() <= nFile) {
3639  vinfoBlockFile.resize(nFile + 1);
3640  }
3641 
3642  if (!fKnown) {
3643  while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
3644  nFile++;
3645  if (vinfoBlockFile.size() <= nFile) {
3646  vinfoBlockFile.resize(nFile + 1);
3647  }
3648  }
3649  pos.nFile = nFile;
3650  pos.nPos = vinfoBlockFile[nFile].nSize;
3651  }
3652 
3653  if ((int)nFile != nLastBlockFile) {
3654  if (!fKnown) {
3655  LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
3656  vinfoBlockFile[nLastBlockFile].ToString());
3657  }
3658  FlushBlockFile(!fKnown);
3659  nLastBlockFile = nFile;
3660  }
3661 
3662  vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
3663  if (fKnown) {
3664  vinfoBlockFile[nFile].nSize =
3665  std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
3666  } else {
3667  vinfoBlockFile[nFile].nSize += nAddSize;
3668  }
3669 
3670  if (!fKnown) {
3671  bool out_of_space;
3672  size_t bytes_allocated =
3673  BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
3674  if (out_of_space) {
3675  return AbortNode("Disk space is too low!",
3676  _("Disk space is too low!"));
3677  }
3678  if (bytes_allocated != 0 && fPruneMode) {
3679  fCheckForPruning = true;
3680  }
3681  }
3682 
3683  setDirtyFileInfo.insert(nFile);
3684  return true;
3685 }
3686 
3687 static bool FindUndoPos(BlockValidationState &state, int nFile,
3688  FlatFilePos &pos, unsigned int nAddSize) {
3689  pos.nFile = nFile;
3690 
3691  LOCK(cs_LastBlockFile);
3692 
3693  pos.nPos = vinfoBlockFile[nFile].nUndoSize;
3694  vinfoBlockFile[nFile].nUndoSize += nAddSize;
3695  setDirtyFileInfo.insert(nFile);
3696 
3697  bool out_of_space;
3698  size_t bytes_allocated =
3699  UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
3700  if (out_of_space) {
3701  return AbortNode(state, "Disk space is too low!",
3702  _("Disk space is too low!"));
3703  }
3704  if (bytes_allocated != 0 && fPruneMode) {
3705  fCheckForPruning = true;
3706  }
3707 
3708  return true;
3709 }
3710 
3719 static bool CheckBlockHeader(const CBlockHeader &block,
3720  BlockValidationState &state,
3721  const Consensus::Params &params,
3722  BlockValidationOptions validationOptions) {
3723  // Check proof of work matches claimed amount
3724  if (validationOptions.shouldValidatePoW() &&
3725  !CheckProofOfWork(block.GetHash(), block.nBits, params)) {
3727  "high-hash", "proof of work failed");
3728  }
3729 
3730  return true;
3731 }
3732 
3733 bool CheckBlock(const CBlock &block, BlockValidationState &state,
3734  const Consensus::Params &params,
3735  BlockValidationOptions validationOptions) {
3736  // These are checks that are independent of context.
3737  if (block.fChecked) {
3738  return true;
3739  }
3740 
3741  // Check that the header is valid (particularly PoW). This is mostly
3742  // redundant with the call in AcceptBlockHeader.
3743  if (!CheckBlockHeader(block, state, params, validationOptions)) {
3744  return false;
3745  }
3746 
3747  // Check the merkle root.
3748  if (validationOptions.shouldValidateMerkleRoot()) {
3749  bool mutated;
3750  uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
3751  if (block.hashMerkleRoot != hashMerkleRoot2) {
3753  "bad-txnmrklroot", "hashMerkleRoot mismatch");
3754  }
3755 
3756  // Check for merkle tree malleability (CVE-2012-2459): repeating
3757  // sequences of transactions in a block without affecting the merkle
3758  // root of a block, while still invalidating it.
3759  if (mutated) {
3761  "bad-txns-duplicate", "duplicate transaction");
3762  }
3763  }
3764 
3765  // All potential-corruption validation must be done before we do any
3766  // transaction validation, as otherwise we may mark the header as invalid
3767  // because we receive the wrong transactions for it.
3768 
3769  // First transaction must be coinbase.
3770  if (block.vtx.empty()) {
3772  "bad-cb-missing", "first tx is not coinbase");
3773  }
3774 
3775  // Size limits.
3776  auto nMaxBlockSize = validationOptions.getExcessiveBlockSize();
3777 
3778  // Bail early if there is no way this block is of reasonable size.
3779  if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
3781  "bad-blk-length", "size limits failed");
3782  }
3783 
3784  auto currentBlockSize = ::GetSerializeSize(block, PROTOCOL_VERSION);
3785  if (currentBlockSize > nMaxBlockSize) {
3787  "bad-blk-length", "size limits failed");
3788  }
3789 
3790  // And a valid coinbase.
3791  TxValidationState tx_state;
3792  if (!CheckCoinbase(*block.vtx[0], tx_state)) {
3794  tx_state.GetRejectReason(),
3795  strprintf("Coinbase check failed (txid %s) %s",
3796  block.vtx[0]->GetId().ToString(),
3797  tx_state.GetDebugMessage()));
3798  }
3799 
3800  // Check transactions for regularity, skipping the first. Note that this
3801  // is the first time we check that all after the first are !IsCoinBase.
3802  for (size_t i = 1; i < block.vtx.size(); i++) {
3803  auto *tx = block.vtx[i].get();
3804  if (!CheckRegularTransaction(*tx, tx_state)) {
3805  return state.Invalid(
3807  tx_state.GetRejectReason(),
3808  strprintf("Transaction check failed (txid %s) %s",
3809  tx->GetId().ToString(), tx_state.GetDebugMessage()));
3810  }
3811  }
3812 
3813  if (validationOptions.shouldValidatePoW() &&
3814  validationOptions.shouldValidateMerkleRoot()) {
3815  block.fChecked = true;
3816  }
3817 
3818  return true;
3819 }
3820 
3831 static bool ContextualCheckBlockHeader(const CChainParams &params,
3832  const CBlockHeader &block,
3833  BlockValidationState &state,
3834  const CBlockIndex *pindexPrev,
3835  int64_t nAdjustedTime)
3836  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
3837  assert(pindexPrev != nullptr);
3838  const int nHeight = pindexPrev->nHeight + 1;
3839 
3840  // Check proof of work
3841  if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params)) {
3842  LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
3844  "bad-diffbits", "incorrect proof of work");
3845  }
3846 
3847  // Check against checkpoints
3848  if (fCheckpointsEnabled) {
3849  const CCheckpointData &checkpoints = params.Checkpoints();
3850 
3851  // Check that the block chain matches the known block chain up to a
3852  // checkpoint.
3853  if (!Checkpoints::CheckBlock(checkpoints, nHeight, block.GetHash())) {
3854  LogPrintf("ERROR: %s: rejected by checkpoint lock-in at %d\n",
3855  __func__, nHeight);
3857  "checkpoint mismatch");
3858  }
3859 
3860  // Don't accept any forks from the main chain prior to last checkpoint.
3861  // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's
3862  // in our BlockIndex().
3863  CBlockIndex *pcheckpoint = Checkpoints::GetLastCheckpoint(checkpoints);
3864  if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
3865  LogPrintf("ERROR: %s: forked chain older than last checkpoint "
3866  "(height %d)\n",
3867  __func__, nHeight);
3869  "bad-fork-prior-to-checkpoint");
3870  }
3871  }
3872 
3873  // Check timestamp against prev
3874  if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
3876  "time-too-old", "block's timestamp is too early");
3877  }
3878 
3879  // Check timestamp
3880  if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
3882  "time-too-new",
3883  "block timestamp too far in the future");
3884  }
3885 
3886  // Reject outdated version blocks when 95% (75% on testnet) of the network
3887  // has upgraded:
3888  // check for version 2, 3 and 4 upgrades
3889  const Consensus::Params &consensusParams = params.GetConsensus();
3890  if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
3891  (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
3892  (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
3893  return state.Invalid(
3895  strprintf("bad-version(0x%08x)", block.nVersion),
3896  strprintf("rejected nVersion=0x%08x block", block.nVersion));
3897  }
3898 
3899  return true;
3900 }
3901 
3903  const CTransaction &tx,
3904  TxValidationState &state,
3905  int flags) {
3906  AssertLockHeld(cs_main);
3907 
3908  // By convention a negative value for flags indicates that the current
3909  // network-enforced consensus rules should be used. In a future soft-fork
3910  // scenario that would mean checking which rules would be enforced for the
3911  // next block and setting the appropriate flags. At the present time no
3912  // soft-forks are scheduled, so no flags are set.
3913  flags = std::max(flags, 0);
3914 
3915  // ContextualCheckTransactionForCurrentBlock() uses
3916  // ::ChainActive().Height()+1 to evaluate nLockTime because when IsFinalTx()
3917  // is called within CBlock::AcceptBlock(), the height of the block *being*
3918  // evaluated is what is used. Thus if we want to know if a transaction can
3919  // be part of the *next* block, we need to call ContextualCheckTransaction()
3920  // with one more than ::ChainActive().Height().
3921  const int nBlockHeight = ::ChainActive().Height() + 1;
3922 
3923  // BIP113 will require that time-locked transactions have nLockTime set to
3924  // less than the median time of the previous block they're contained in.
3925  // When the next block is created its previous block will be the current
3926  // chain tip, so we use that to calculate the median time passed to
3927  // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
3928  const int64_t nMedianTimePast =
3929  ::ChainActive().Tip() == nullptr
3930  ? 0
3932  const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
3933  ? nMedianTimePast
3934  : GetAdjustedTime();
3935 
3936  return ContextualCheckTransaction(params, tx, state, nBlockHeight,
3937  nLockTimeCutoff, nMedianTimePast);
3938 }
3939 
3947 static bool ContextualCheckBlock(const CBlock &block,
3948  BlockValidationState &state,
3949  const Consensus::Params &params,
3950  const CBlockIndex *pindexPrev) {
3951  const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
3952 
3953  // Start enforcing BIP113 (Median Time Past).
3954  int nLockTimeFlags = 0;
3955  if (nHeight >= params.CSVHeight) {
3956  assert(pindexPrev != nullptr);
3957  nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
3958  }
3959 
3960  const int64_t nMedianTimePast =
3961  pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
3962 
3963  const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
3964  ? nMedianTimePast
3965  : block.GetBlockTime();
3966 
3967  const bool fIsMagneticAnomalyEnabled =
3968  IsMagneticAnomalyEnabled(params, pindexPrev);
3969 
3970  // Check transactions:
3971  // - canonical ordering
3972  // - ensure they are finalized
3973  // - perform a preliminary block-sigops count (they will be recounted more
3974  // strictly during ConnectBlock).
3975  // - perform a transaction-sigops check (again, a more strict check will
3976  // happen in ConnectBlock).
3977  const CTransaction *prevTx = nullptr;
3978  for (const auto &ptx : block.vtx) {
3979  const CTransaction &tx = *ptx;
3980  if (fIsMagneticAnomalyEnabled) {
3981  if (prevTx && (tx.GetId() <= prevTx->GetId())) {
3982  if (tx.GetId() == prevTx->GetId()) {
3984  "tx-duplicate",
3985  strprintf("Duplicated transaction %s",
3986  tx.GetId().ToString()));
3987  }
3988 
3989  return state.Invalid(
3991  strprintf("Transaction order is invalid (%s < %s)",
3992  tx.GetId().ToString(),
3993  prevTx->GetId().ToString()));
3994  }
3995 
3996  if (prevTx || !tx.IsCoinBase()) {
3997  prevTx = &tx;
3998  }
3999  }
4000 
4001  TxValidationState tx_state;
4002  if (!ContextualCheckTransaction(params, tx, tx_state, nHeight,
4003  nLockTimeCutoff, nMedianTimePast)) {
4005  tx_state.GetRejectReason(),
4006  tx_state.GetDebugMessage());
4007  }
4008  }
4009 
4010  // Enforce rule that the coinbase starts with serialized block height
4011  if (nHeight >= params.BIP34Height) {
4012  CScript expect = CScript() << nHeight;
4013  if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
4014  !std::equal(expect.begin(), expect.end(),
4015  block.vtx[0]->vin[0].scriptSig.begin())) {
4017  "bad-cb-height",
4018  "block height mismatch in coinbase");
4019  }
4020  }
4021 
4022  return true;
4023 }
4024 
4031  const CBlockHeader &block,
4032  BlockValidationState &state,
4033  CBlockIndex **ppindex) {
4034  AssertLockHeld(cs_main);
4035  const CChainParams &chainparams = config.GetChainParams();
4036 
4037  // Check for duplicate
4038  BlockHash hash = block.GetHash();
4039  BlockMap::iterator miSelf = m_block_index.find(hash);
4040  CBlockIndex *pindex = nullptr;
4041  if (hash != chainparams.GetConsensus().hashGenesisBlock) {
4042  if (miSelf != m_block_index.end()) {
4043  // Block header is already known.
4044  pindex = miSelf->second;
4045  if (ppindex) {
4046  *ppindex = pindex;
4047  }
4048 
4049  if (pindex->nStatus.isInvalid()) {
4050  LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__,
4051  hash.ToString());
4052  return state.Invalid(
4054  }
4055 
4056  return true;
4057  }
4058 
4059  if (!CheckBlockHeader(block, state, chainparams.GetConsensus(),
4060  BlockValidationOptions(config))) {
4061  return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__,
4062  hash.ToString(), state.ToString());
4063  }
4064 
4065  // Get prev block index
4066  BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
4067  if (mi == m_block_index.end()) {
4068  LogPrintf("ERROR: %s: prev block not found\n", __func__);
4070  "prev-blk-not-found");
4071  }
4072 
4073  CBlockIndex *pindexPrev = (*mi).second;
4074  assert(pindexPrev);
4075  if (pindexPrev->nStatus.isInvalid()) {
4076  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4078  "bad-prevblk");
4079  }
4080 
4081  if (!ContextualCheckBlockHeader(chainparams, block, state, pindexPrev,
4082  GetAdjustedTime())) {
4083  return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
4084  __func__, hash.ToString(), state.ToString());
4085  }
4086 
4087  /* Determine if this block descends from any block which has been found
4088  * invalid (m_failed_blocks), then mark pindexPrev and any blocks
4089  * between them as failed. For example:
4090  *
4091  * D3
4092  * /
4093  * B2 - C2
4094  * / \
4095  * A D2 - E2 - F2
4096  * \
4097  * B1 - C1 - D1 - E1
4098  *
4099  * In the case that we attempted to reorg from E1 to F2, only to find
4100  * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
4101  * but NOT D3 (it was not in any of our candidate sets at the time).
4102  *
4103  * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
4104  * in LoadBlockIndex.
4105  */
4106  if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
4107  // The above does not mean "invalid": it checks if the previous
4108  // block hasn't been validated up to BlockValidity::SCRIPTS. This is
4109  // a performance optimization, in the common case of adding a new
4110  // block to the tip, we don't need to iterate over the failed blocks
4111  // list.
4112  for (const CBlockIndex *failedit : m_failed_blocks) {
4113  if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
4114  assert(failedit->nStatus.hasFailed());
4115  CBlockIndex *invalid_walk = pindexPrev;
4116  while (invalid_walk != failedit) {
4117  invalid_walk->nStatus =
4118  invalid_walk->nStatus.withFailedParent();
4119  setDirtyBlockIndex.insert(invalid_walk);
4120  invalid_walk = invalid_walk->pprev;
4121  }
4122  LogPrintf("ERROR: %s: prev block invalid\n", __func__);
4123  return state.Invalid(
4125  "bad-prevblk");
4126  }
4127  }
4128  }
4129  }
4130 
4131  if (pindex == nullptr) {
4132  pindex = AddToBlockIndex(block);
4133  }
4134 
4135  if (ppindex) {
4136  *ppindex = pindex;
4137  }
4138 
4139  return true;
4140 }
4141 
4142 // Exposed wrapper for AcceptBlockHeader
4144  const Config &config, const std::vector<CBlockHeader> &headers,
4145  BlockValidationState &state, const CBlockIndex **ppindex) {
4146  AssertLockNotHeld(cs_main);
4147  {
4148  LOCK(cs_main);
4149  for (const CBlockHeader &header : headers) {
4150  // Use a temp pindex instead of ppindex to avoid a const_cast
4151  CBlockIndex *pindex = nullptr;
4152  bool accepted =
4153  m_blockman.AcceptBlockHeader(config, header, state, &pindex);
4155  config.GetChainParams().GetConsensus());
4156 
4157  if (!accepted) {
4158  return false;
4159  }
4160 
4161  if (ppindex) {
4162  *ppindex = pindex;
4163  }
4164  }
4165  }
4166 
4167  if (NotifyHeaderTip()) {
4168  if (::ChainstateActive().IsInitialBlockDownload() && ppindex &&
4169  *ppindex) {
4170  LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n",
4171  (*ppindex)->nHeight,
4172  100.0 /
4173  ((*ppindex)->nHeight +
4174  (GetAdjustedTime() - (*ppindex)->GetBlockTime()) /
4176  (*ppindex)->nHeight);
4177  }
4178  }
4179  return true;
4180 }
4181 
4186 static FlatFilePos SaveBlockToDisk(const CBlock &block, int nHeight,
4187  const CChainParams &chainparams,
4188  const FlatFilePos *dbp) {
4189  unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
4190  FlatFilePos blockPos;
4191  if (dbp != nullptr) {
4192  blockPos = *dbp;
4193  }
4194  if (!FindBlockPos(blockPos, nBlockSize + 8, nHeight, block.GetBlockTime(),
4195  dbp != nullptr)) {
4196  error("%s: FindBlockPos failed", __func__);
4197  return FlatFilePos();
4198  }
4199  if (dbp == nullptr) {
4200  if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
4201  AbortNode("Failed to write block");
4202  return FlatFilePos();
4203  }
4204  }
4205  return blockPos;
4206 }
4207 
4220  const std::shared_ptr<const CBlock> &pblock,
4221  BlockValidationState &state, bool fRequested,
4222  const FlatFilePos *dbp, bool *fNewBlock) {
4223  AssertLockHeld(cs_main);
4224 
4225  const CBlock &block = *pblock;
4226  if (fNewBlock) {
4227  *fNewBlock = false;
4228  }
4229 
4230  CBlockIndex *pindex = nullptr;
4231 
4232  bool accepted_header =
4233  m_blockman.AcceptBlockHeader(config, block, state, &pindex);
4235 
4236  if (!accepted_header) {
4237  return false;
4238  }
4239 
4240  // Try to process all requested blocks that we don't have, but only
4241  // process an unrequested block if it's new and has enough work to
4242  // advance our tip, and isn't too many blocks ahead.
4243  bool fAlreadyHave = pindex->nStatus.hasData();
4244 
4245  // TODO: deal better with return value and error conditions for duplicate
4246  // and unrequested blocks.
4247  if (fAlreadyHave) {
4248  return true;
4249  }
4250 
4251  // Compare block header timestamps and received times of the block and the
4252  // chaintip. If they have the same chain height, use these diffs as a
4253  // tie-breaker, attempting to pick the more honestly-mined block.
4254  int64_t newBlockTimeDiff = std::llabs(pindex->GetReceivedTimeDiff());
4255  int64_t chainTipTimeDiff =
4256  m_chain.Tip() ? std::llabs(m_chain.Tip()->GetReceivedTimeDiff()) : 0;
4257 
4258  bool isSameHeight =
4259  m_chain.Tip() && (pindex->nChainWork == m_chain.Tip()->nChainWork);
4260  if (isSameHeight) {
4261  LogPrintf("Chain tip timestamp-to-received-time difference: hash=%s, "
4262  "diff=%d\n",
4263  m_chain.Tip()->GetBlockHash().ToString(), chainTipTimeDiff);
4264  LogPrintf("New block timestamp-to-received-time difference: hash=%s, "
4265  "diff=%d\n",
4266  pindex->GetBlockHash().ToString(), newBlockTimeDiff);
4267  }
4268 
4269  bool fHasMoreOrSameWork =
4270  (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork
4271  : true);
4272 
4273  // Blocks that are too out-of-order needlessly limit the effectiveness of
4274  // pruning, because pruning will not delete block files that contain any
4275  // blocks which are too close in height to the tip. Apply this test
4276  // regardless of whether pruning is enabled; it should generally be safe to
4277  // not process unrequested blocks.
4278  bool fTooFarAhead =
4279  (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
4280 
4281  // TODO: Decouple this function from the block download logic by removing
4282  // fRequested
4283  // This requires some new chain data structure to efficiently look up if a
4284  // block is in a chain leading to a candidate for best tip, despite not
4285  // being such a candidate itself.
4286 
4287  // If we didn't ask for it:
4288  if (!fRequested) {
4289  // This is a previously-processed block that was pruned.
4290  if (pindex->nTx != 0) {
4291  return true;
4292  }
4293 
4294  // Don't process less-work chains.
4295  if (!fHasMoreOrSameWork) {
4296  return true;
4297  }
4298 
4299  // Block height is too high.
4300  if (fTooFarAhead) {
4301  return true;
4302  }
4303 
4304  // Protect against DoS attacks from low-work chains.
4305  // If our tip is behind, a peer could try to send us
4306  // low-work blocks on a fake chain that we would never
4307  // request; don't process these.
4308  if (pindex->nChainWork < nMinimumChainWork) {
4309  return true;
4310  }
4311  }
4312 
4313  const CChainParams &chainparams = config.GetChainParams();
4314  const Consensus::Params &consensusParams = chainparams.GetConsensus();
4315 
4316  if (!CheckBlock(block, state, consensusParams,
4317  BlockValidationOptions(config)) ||
4318  !ContextualCheckBlock(block, state, consensusParams, pindex->pprev)) {
4319  if (state.IsInvalid() &&
4321  pindex->nStatus = pindex->nStatus.withFailed();
4322  setDirtyBlockIndex.insert(pindex);
4323  }
4324 
4325  return error("%s: %s (block %s)", __func__, state.ToString(),
4326  block.GetHash().ToString());
4327  }
4328 
4329  // If connecting the new block would require rewinding more than one block
4330  // from the active chain (i.e., a "deep reorg"), then mark the new block as
4331  // parked. If it has enough work then it will be automatically unparked
4332  // later, during FindMostWorkChain. We mark the block as parked at the very
4333  // last minute so we can make sure everything is ready to be reorged if
4334  // needed.
4335  if (gArgs.GetBoolArg("-parkdeepreorg", true)) {
4336  const CBlockIndex *pindexFork = m_chain.FindFork(pindex);
4337  if (pindexFork && pindexFork->nHeight + 1 < m_chain.Height()) {
4338  LogPrintf("Park block %s as it would cause a deep reorg.\n",
4339  pindex->GetBlockHash().ToString());
4340  pindex->nStatus = pindex->nStatus.withParked();
4341  setDirtyBlockIndex.insert(pindex);
4342  }
4343  }
4344 
4345  // Header is valid/has work and the merkle tree is good.
4346  // Relay now, but if it does not build on our best tip, let the
4347  // SendMessages loop relay it.
4348  if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev) {
4349  GetMainSignals().NewPoWValidBlock(pindex, pblock);
4350  }
4351 
4352  // Write block to history file
4353  if (fNewBlock) {
4354  *fNewBlock = true;
4355  }
4356  try {
4357  FlatFilePos blockPos =
4358  SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
4359  if (blockPos.IsNull()) {
4360  state.Error(strprintf(
4361  "%s: Failed to find position to write new block to disk",
4362  __func__));
4363  return false;
4364  }
4365  ReceivedBlockTransactions(block, pindex, blockPos);
4366  } catch (const std::runtime_error &e) {
4367  return AbortNode(state, std::string("System error: ") + e.what());
4368  }
4369 
4370  FlushStateToDisk(chainparams, state, FlushStateMode::NONE);
4371 
4372  CheckBlockIndex(consensusParams);
4373 
4374  return true;
4375 }
4376 
4378  const Config &config, const std::shared_ptr<const CBlock> pblock,
4379  bool fForceProcessing, bool *fNewBlock) {
4380  AssertLockNotHeld(cs_main);
4381 
4382  {
4383  if (fNewBlock) {
4384  *fNewBlock = false;
4385  }
4386 
4387  BlockValidationState state;
4388 
4389  // CheckBlock() does not support multi-threaded block validation
4390  // because CBlock::fChecked can cause data race.
4391  // Therefore, the following critical section must include the
4392  // CheckBlock() call as well.
4393  LOCK(cs_main);
4394 
4395  // Ensure that CheckBlock() passes before calling AcceptBlock, as
4396  // belt-and-suspenders.
4397  bool ret =
4398  CheckBlock(*pblock, state, config.GetChainParams().GetConsensus(),
4399  BlockValidationOptions(config));
4400  if (ret) {
4401  // Store to disk
4403  config, pblock, state, fForceProcessing, nullptr, fNewBlock);
4404  }
4405 
4406  if (!ret) {
4407  GetMainSignals().BlockChecked(*pblock, state);
4408  return error("%s: AcceptBlock FAILED (%s)", __func__,
4409  state.ToString());
4410  }
4411  }
4412 
4413  NotifyHeaderTip();
4414 
4415  // Only used to report errors, not invalidity - ignore it
4416  BlockValidationState state;
4417  if (!::ChainstateActive().ActivateBestChain(config, state, pblock)) {
4418  return error("%s: ActivateBestChain failed (%s)", __func__,
4419  state.ToString());
4420  }
4421 
4422  return true;
4423 }
4424 
4426  const CBlock &block, CBlockIndex *pindexPrev,
4427  BlockValidationOptions validationOptions) {
4428  AssertLockHeld(cs_main);
4429  assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
4430  CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
4431  BlockHash block_hash(block.GetHash());
4432  CBlockIndex indexDummy(block);
4433  indexDummy.pprev = pindexPrev;
4434  indexDummy.nHeight = pindexPrev->nHeight + 1;
4435  indexDummy.phashBlock = &block_hash;
4436 
4437  // NOTE: CheckBlockHeader is called by CheckBlock
4438  if (!ContextualCheckBlockHeader(params, block, state, pindexPrev,
4439  GetAdjustedTime())) {
4440  return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
4441  state.ToString());
4442  }
4443 
4444  if (!CheckBlock(block, state, params.GetConsensus(), validationOptions)) {
4445  return error("%s: Consensus::CheckBlock: %s", __func__,
4446  state.ToString());
4447  }
4448 
4449  if (!ContextualCheckBlock(block, state, params.GetConsensus(),
4450  pindexPrev)) {
4451  return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
4452  state.ToString());
4453  }
4454 
4455  if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew,
4456  params, validationOptions, true)) {
4457  return false;
4458  }
4459 
4460  assert(state.IsValid());
4461  return true;
4462 }
4463 
4472  LOCK(cs_LastBlockFile);
4473 
4474  uint64_t retval = 0;
4475  for (const CBlockFileInfo &file : vinfoBlockFile) {
4476  retval += file.nSize + file.nUndoSize;
4477  }
4478 
4479  return retval;
4480 }
4481 
4482 void ChainstateManager::PruneOneBlockFile(const int fileNumber) {
4483  AssertLockHeld(cs_main);
4484  LOCK(cs_LastBlockFile);
4485 
4486  for (const auto &entry : m_blockman.m_block_index) {
4487  CBlockIndex *pindex = entry.second;
4488  if (pindex->nFile == fileNumber) {
4489  pindex->nStatus = pindex->nStatus.withData(false).withUndo(false);
4490  pindex->nFile = 0;
4491  pindex->nDataPos = 0;
4492  pindex->nUndoPos = 0;
4493  setDirtyBlockIndex.insert(pindex);
4494 
4495  // Prune from m_blocks_unlinked -- any block we prune would have
4496  // to be downloaded again in order to consider its chain, at which
4497  // point it would be considered as a candidate for
4498  // m_blocks_unlinked or setBlockIndexCandidates.
4499  auto range =
4500  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
4501  while (range.first != range.second) {
4502  std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
4503  range.first;
4504  range.first++;
4505  if (_it->second == pindex) {
4506  m_blockman.m_blocks_unlinked.erase(_it);
4507  }
4508  }
4509  }
4510  }
4511 
4512  vinfoBlockFile[fileNumber].SetNull();
4513  setDirtyFileInfo.insert(fileNumber);
4514 }
4515 
4516 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
4517  for (const int i : setFilesToPrune) {
4518  FlatFilePos pos(i, 0);
4519  fs::remove(BlockFileSeq().FileName(pos));
4520  fs::remove(UndoFileSeq().FileName(pos));
4521  LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, i);
4522  }
4523 }
4524 
4530  std::set<int> &setFilesToPrune,
4531  int nManualPruneHeight) {
4532  assert(fPruneMode && nManualPruneHeight > 0);
4533 
4534  LOCK2(cs_main, cs_LastBlockFile);
4535  if (::ChainActive().Tip() == nullptr) {
4536  return;
4537  }
4538 
4539  // last block to prune is the lesser of (user-specified height,
4540  // MIN_BLOCKS_TO_KEEP from the tip)
4541  unsigned int nLastBlockWeCanPrune =
4542  std::min((unsigned)nManualPruneHeight,
4543  ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
4544  int count = 0;
4545  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4546  if (vinfoBlockFile[fileNumber].nSize == 0 ||
4547  vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4548  continue;
4549  }
4550  chainman.PruneOneBlockFile(fileNumber);
4551  setFilesToPrune.insert(fileNumber);
4552  count++;
4553  }
4554  LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
4555  nLastBlockWeCanPrune, count);
4556 }
4557 
4558 /* This function is called from the RPC code for pruneblockchain */
4559 void PruneBlockFilesManual(int nManualPruneHeight) {
4560  BlockValidationState state;
4561  const CChainParams &chainparams = Params();
4563  chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
4564  LogPrintf("%s: failed to flush state (%s)\n", __func__,
4565  state.ToString());
4566  }
4567 }
4568 
4590 static void FindFilesToPrune(ChainstateManager &chainman,
4591  std::set<int> &setFilesToPrune,
4592  uint64_t nPruneAfterHeight) {
4593  LOCK2(cs_main, cs_LastBlockFile);
4594  if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
4595  return;
4596  }
4597  if (uint64_t(::ChainActive().Tip()->nHeight) <= nPruneAfterHeight) {
4598  return;
4599  }
4600 
4601  unsigned int nLastBlockWeCanPrune =
4603  uint64_t nCurrentUsage = CalculateCurrentUsage();
4604  // We don't check to prune until after we've allocated new space for files,
4605  // so we should leave a buffer under our target to account for another
4606  // allocation before the next pruning.
4607  uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
4608  uint64_t nBytesToPrune;
4609  int count = 0;
4610 
4611  if (nCurrentUsage + nBuffer >= nPruneTarget) {
4612  // On a prune event, the chainstate DB is flushed.
4613  // To avoid excessive prune events negating the benefit of high dbcache
4614  // values, we should not prune too rapidly.
4615  // So when pruning in IBD, increase the buffer a bit to avoid a re-prune
4616  // too soon.
4618  // Since this is only relevant during IBD, we use a fixed 10%
4619  nBuffer += nPruneTarget / 10;
4620  }
4621 
4622  for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
4623  nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
4624  vinfoBlockFile[fileNumber].nUndoSize;
4625 
4626  if (vinfoBlockFile[fileNumber].nSize == 0) {
4627  continue;
4628  }
4629 
4630  // are we below our target?
4631  if (nCurrentUsage + nBuffer < nPruneTarget) {
4632  break;
4633  }
4634 
4635  // don't prune files that could have a block within
4636  // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
4637  if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
4638  continue;
4639  }
4640 
4641  chainman.PruneOneBlockFile(fileNumber);
4642  // Queue up the files for removal
4643  setFilesToPrune.insert(fileNumber);
4644  nCurrentUsage -= nBytesToPrune;
4645  count++;
4646  }
4647  }
4648 
4650  "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
4651  "max_prune_height=%d removed %d blk/rev pairs\n",
4652  nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
4653  ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
4654  nLastBlockWeCanPrune, count);
4655 }
4656 
4658  AssertLockHeld(cs_main);
4659 
4660  if (hash.IsNull()) {
4661  return nullptr;
4662  }
4663 
4664  // Return existing
4665  BlockMap::iterator mi = m_block_index.find(hash);
4666  if (mi != m_block_index.end()) {
4667  return (*mi).second;
4668  }
4669 
4670  // Create new
4671  CBlockIndex *pindexNew = new CBlockIndex();
4672  mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
4673  pindexNew->phashBlock = &((*mi).first);
4674 
4675  return pindexNew;
4676 }
4677 
// BlockManager::LoadBlockIndex — NOTE(review): the function-name line
// (doxygen line 4678) is missing from this extraction; only the parameter
// list onward is visible. Loads every block-index entry from the block tree
// DB, then walks entries in height order to derive per-index fields
// (nChainWork, nTimeMax, chain stats), propagate failed-parent status,
// and populate the candidate set plus the best-invalid/best-parked/
// best-header globals. Returns false on DB failure or shutdown request.
4679  const Consensus::Params &params, CBlockTreeDB &blocktree,
4680  std::set<CBlockIndex *, CBlockIndexWorkComparator>
4681  &block_index_candidates) {
4682  AssertLockHeld(cs_main);
      // Deserialize the raw index entries; InsertBlockIndex creates a
      // CBlockIndex for each hash encountered (including parents).
4683  if (!blocktree.LoadBlockIndexGuts(
4684  params, [this](const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(
4685  cs_main) { return this->InsertBlockIndex(hash); })) {
4686  return false;
4687  }
4688 
4689  // Calculate nChainWork
      // Sort by height so every pprev is processed before its children,
      // making the nChainWork/nTimeMax accumulation below well-defined.
4690  std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
4691  vSortedByHeight.reserve(m_block_index.size());
4692  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4693  m_block_index) {
4694  CBlockIndex *pindex = item.second;
4695  vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
4696  }
4697 
4698  sort(vSortedByHeight.begin(), vSortedByHeight.end());
4699  for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
4700  if (ShutdownRequested()) {
4701  return false;
4702  }
4703  CBlockIndex *pindex = item.second;
      // Cumulative proof-of-work up to and including this block.
4704  pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
4705  GetBlockProof(*pindex);
      // Maximum timestamp seen on the chain up to this block.
4706  pindex->nTimeMax =
4707  (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
4708  : pindex->nTime);
4709  // We can link the chain of blocks for which we've received transactions
4710  // at some point. Pruned nodes may have deleted the block.
4711  if (pindex->nTx > 0) {
      // If the parent's stats aren't linkable yet, remember the gap so it
      // can be resolved later when the parent's data arrives.
4712  if (!pindex->UpdateChainStats() && pindex->pprev) {
4713  m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
4714  }
4715  }
4716 
      // Propagate invalidity: a child of a failed block is failed-parent.
4717  if (!pindex->nStatus.hasFailed() && pindex->pprev &&
4718  pindex->pprev->nStatus.hasFailed()) {
4719  pindex->nStatus = pindex->nStatus.withFailedParent();
4720  setDirtyBlockIndex.insert(pindex);
4721  }
4722  if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
4723  (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
4724  block_index_candidates.insert(pindex);
4725  }
4726 
      // Track the most-work invalid and parked tips for fork warnings.
4727  if (pindex->nStatus.isInvalid() &&
4728  (!pindexBestInvalid ||
4729  pindex->nChainWork > pindexBestInvalid->nChainWork)) {
4730  pindexBestInvalid = pindex;
4731  }
4732 
4733  if (pindex->nStatus.isOnParkedChain() &&
4734  (!pindexBestParked ||
4735  pindex->nChainWork > pindexBestParked->nChainWork)) {
4736  pindexBestParked = pindex;
4737  }
4738 
4739  if (pindex->pprev) {
4740  pindex->BuildSkip();
4741  }
4742 
      // NOTE(review): the second half of this condition (doxygen line 4745)
      // is missing from the extraction — presumably a comparison of
      // pindexBestHeader->nChainWork against pindex->nChainWork; confirm
      // against the upstream source.
4743  if (pindex->IsValid(BlockValidity::TREE) &&
4744  (pindexBestHeader == nullptr ||
4746  pindexBestHeader = pindex;
4747  }
4748  }
4749 
4750  return true;
4751 }
4752 
// Tail of BlockManager::Unload() — NOTE(review): the signature line
// (doxygen line 4753) is missing from this extraction. Clears all
// in-memory block-index bookkeeping and frees the heap-allocated
// CBlockIndex objects owned by m_block_index.
4754  m_failed_blocks.clear();
4755  m_blocks_unlinked.clear();
4756 
      // m_block_index owns its CBlockIndex pointers; delete them before
      // clearing the map to avoid leaking.
4757  for (const BlockMap::value_type &entry : m_block_index) {
4758  delete entry.second;
4759  }
4760 
4761  m_block_index.clear();
4762 }
4763 
4764 static bool LoadBlockIndexDB(ChainstateManager &chainman,
4765  const Consensus::Params &params)
4766  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
      // Load the block index from the on-disk block tree database, then
      // restore block-file metadata, verify blk file presence, and restore
      // the pruning/reindex flags. Returns false on any failure.
      // NOTE(review): the third argument to LoadBlockIndex (doxygen line
      // 4769) is missing from this extraction — presumably the active
      // chainstate's setBlockIndexCandidates; confirm against upstream.
4767  if (!chainman.m_blockman.LoadBlockIndex(
4768  params, *pblocktree,
4770  return false;
4771  }
4772 
4773  // Load block file info
4774  pblocktree->ReadLastBlockFile(nLastBlockFile);
4775  vinfoBlockFile.resize(nLastBlockFile + 1);
4776  LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
4777  for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
4778  pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
4779  }
4780  LogPrintf("%s: last block file info: %s\n", __func__,
4781  vinfoBlockFile[nLastBlockFile].ToString());
      // Pick up any block files written after the recorded last file
      // (e.g. left over from an interrupted run).
4782  for (int nFile = nLastBlockFile + 1; true; nFile++) {
4783  CBlockFileInfo info;
4784  if (pblocktree->ReadBlockFileInfo(nFile, info)) {
4785  vinfoBlockFile.push_back(info);
4786  } else {
4787  break;
4788  }
4789  }
4790 
4791  // Check presence of blk files
4792  LogPrintf("Checking all blk files are present...\n");
4793  std::set<int> setBlkDataFiles;
4794  for (const std::pair<const BlockHash, CBlockIndex *> &item :
4795  chainman.BlockIndex()) {
4796  CBlockIndex *pindex = item.second;
4797  if (pindex->nStatus.hasData()) {
4798  setBlkDataFiles.insert(pindex->nFile);
4799  }
4800  }
4801 
      // NOTE(review): the expression at doxygen line 4804 is missing from
      // this extraction — presumably an OpenBlockFile(pos, true) call whose
      // result's .IsNull() is being tested; confirm against upstream.
4802  for (const int i : setBlkDataFiles) {
4803  FlatFilePos pos(i, 0);
4805  .IsNull()) {
4806  return false;
4807  }
4808  }
4809 
4810  // Check whether we have ever pruned block & undo files
4811  pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
4812  if (fHavePruned) {
4813  LogPrintf(
4814  "LoadBlockIndexDB(): Block files have previously been pruned\n");
4815  }
4816 
4817  // Check whether we need to continue reindexing
4818  if (pblocktree->IsReindexing()) {
4819  fReindex = true;
4820  }
4821 
4822  return true;
4823 }
4824 
4825 bool CChainState::LoadChainTip(const CChainParams &chainparams) {
      // Point m_chain at the block index entry matching the coins
      // database's recorded best block. Returns false if that block is
      // unknown to the block index; true if the tip was already set or was
      // restored successfully.
4826  AssertLockHeld(cs_main);
4827  const CCoinsViewCache &coins_cache = CoinsTip();
4828  // Never called when the coins view is empty
4829  assert(!coins_cache.GetBestBlock().IsNull());
4830  const CBlockIndex *tip = m_chain.Tip();
4831 
      // Fast path: the in-memory tip already matches the coins DB.
4832  if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
4833  return true;
4834  }
4835 
4836  // Load pointer to end of best chain
4837  CBlockIndex *pindex = LookupBlockIndex(coins_cache.GetBestBlock());
4838  if (!pindex) {
4839  return false;
4840  }
4841  m_chain.SetTip(pindex);
      // NOTE(review): doxygen line 4842 is missing from this extraction —
      // presumably a PruneBlockIndexCandidates() call; confirm upstream.
4843 
4844  tip = m_chain.Tip();
4845  LogPrintf(
4846  "Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
4847  tip->GetBlockHash().ToString(), m_chain.Height(),
4848  FormatISO8601DateTime(tip->GetBlockTime()),
4849  GuessVerificationProgress(chainparams.TxData(), tip));
4850  return true;
4851 }
4852 
// Body of CVerifyDB's constructor — NOTE(review): the signature line
// (doxygen line 4853) is missing from this extraction. Signals the UI
// that block verification is starting (0% progress, not cancellable).
4854  uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
4855 }
4856 
// Body of CVerifyDB's destructor — NOTE(review): the signature line
// (doxygen line 4857) is missing from this extraction. Clears the UI
// progress indicator (empty title, 100%).
4858  uiInterface.ShowProgress("", 100, false);
4859 }
4860 
4861 bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
4862  int nCheckLevel, int nCheckDepth) {
      // Verify the last nCheckDepth blocks of the active chain at the given
      // thoroughness level (clamped to 0..4):
      //   0: read blocks from disk
      //   1: + CheckBlock validity
      //   2: + undo data readable
      //   3: + in-memory disconnect consistency (within cache budget)
      //   4: + reconnect the disconnected blocks via ConnectBlock
      // Returns true on success or early shutdown; false via error() on
      // any detected inconsistency.
4863  LOCK(cs_main);
4864 
4865  const CChainParams &params = config.GetChainParams();
4866  const Consensus::Params &consensusParams = params.GetConsensus();
4867 
      // Nothing to verify with an empty or single-block chain.
4868  if (::ChainActive().Tip() == nullptr ||
4869  ::ChainActive().Tip()->pprev == nullptr) {
4870  return true;
4871  }
4872 
4873  // Verify blocks in the best chain
      // NOTE(review): the condition at doxygen line 4874 is missing from
      // this extraction — presumably clamping nCheckDepth (e.g. when <= 0
      // or greater than the chain height); confirm against upstream.
4875  nCheckDepth = ::ChainActive().Height();
4876  }
4877 
4878  nCheckLevel = std::max(0, std::min(4, nCheckLevel));
4879  LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
4880  nCheckLevel);
4881 
4882  CCoinsViewCache coins(coinsview);
4883  CBlockIndex *pindex;
4884  CBlockIndex *pindexFailure = nullptr;
4885  int nGoodTransactions = 0;
4886  BlockValidationState state;
4887  int reportDone = 0;
4888  LogPrintfToBeContinued("[0%%]...");
      // Walk backwards from the tip; levels 0-3 are applied per block.
4889  for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev;
4890  pindex = pindex->pprev) {
4891  boost::this_thread::interruption_point();
      // Level >= 4 reserves the second half of the progress bar for the
      // reconnect pass, hence the 50-vs-100 scale factor.
4892  const int percentageDone =
4893  std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() -
4894  pindex->nHeight)) /
4895  (double)nCheckDepth *
4896  (nCheckLevel >= 4 ? 50 : 100))));
4897  if (reportDone < percentageDone / 10) {
4898  // report every 10% step
4899  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4900  reportDone = percentageDone / 10;
4901  }
4902 
4903  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4904  percentageDone, false);
4905  if (pindex->nHeight <= ::ChainActive().Height() - nCheckDepth) {
4906  break;
4907  }
4908 
4909  if (fPruneMode && !pindex->nStatus.hasData()) {
4910  // If pruning, only go back as far as we have data.
4911  LogPrintf("VerifyDB(): block verification stopping at height %d "
4912  "(pruning, no data)\n",
4913  pindex->nHeight);
4914  break;
4915  }
4916 
4917  CBlock block;
4918 
4919  // check level 0: read from disk
4920  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
4921  return error(
4922  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
4923  pindex->nHeight, pindex->GetBlockHash().ToString());
4924  }
4925 
4926  // check level 1: verify block validity
4927  if (nCheckLevel >= 1 && !CheckBlock(block, state, consensusParams,
4928  BlockValidationOptions(config))) {
4929  return error("%s: *** found bad block at %d, hash=%s (%s)\n",
4930  __func__, pindex->nHeight,
4931  pindex->GetBlockHash().ToString(), state.ToString());
4932  }
4933 
4934  // check level 2: verify undo validity
4935  if (nCheckLevel >= 2 && pindex) {
4936  CBlockUndo undo;
4937  if (!pindex->GetUndoPos().IsNull()) {
4938  if (!UndoReadFromDisk(undo, pindex)) {
4939  return error(
4940  "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
4941  pindex->nHeight, pindex->GetBlockHash().ToString());
4942  }
4943  }
4944  }
4945 
4946  // check level 3: check for inconsistencies during memory-only
4947  // disconnect of tip blocks
      // NOTE(review): the middle of this cache-budget expression (doxygen
      // line 4950) is missing from this extraction; the visible shape is
      // "DynamicMemoryUsage() + <missing term> <= nCoinCacheUsage" —
      // confirm the missing term against upstream.
4948  if (nCheckLevel >= 3 &&
4949  (coins.DynamicMemoryUsage() +
4951  nCoinCacheUsage) {
4952  assert(coins.GetBestBlock() == pindex->GetBlockHash());
4953  DisconnectResult res =
4954  ::ChainstateActive().DisconnectBlock(block, pindex, coins);
4955  if (res == DisconnectResult::FAILED) {
4956  return error("VerifyDB(): *** irrecoverable inconsistency in "
4957  "block data at %d, hash=%s",
4958  pindex->nHeight,
4959  pindex->GetBlockHash().ToString());
4960  }
4961 
      // UNCLEAN disconnect is not fatal here: remember where it happened
      // and reset the good-transaction counter.
4962  if (res == DisconnectResult::UNCLEAN) {
4963  nGoodTransactions = 0;
4964  pindexFailure = pindex;
4965  } else {
4966  nGoodTransactions += block.vtx.size();
4967  }
4968  }
4969 
4970  if (ShutdownRequested()) {
4971  return true;
4972  }
4973  }
4974 
4975  if (pindexFailure) {
4976  return error("VerifyDB(): *** coin database inconsistencies found "
4977  "(last %i blocks, %i good transactions before that)\n",
4978  ::ChainActive().Height() - pindexFailure->nHeight + 1,
4979  nGoodTransactions);
4980  }
4981 
4982  // store block count as we move pindex at check level >= 4
4983  int block_count = ::ChainActive().Height() - pindex->nHeight;
4984 
4985  // check level 4: try reconnecting blocks
4986  if (nCheckLevel >= 4) {
4987  while (pindex != ::ChainActive().Tip()) {
4988  boost::this_thread::interruption_point();
      // Second half of the progress bar: 50% -> 99% as we reconnect.
4989  const int percentageDone = std::max(
4990  1, std::min(99, 100 - int(double(::ChainActive().Height() -
4991  pindex->nHeight) /
4992  double(nCheckDepth) * 50)));
4993  if (reportDone < percentageDone / 10) {
4994  // report every 10% step
4995  LogPrintfToBeContinued("[%d%%]...", percentageDone);
4996  reportDone = percentageDone / 10;
4997  }
4998  uiInterface.ShowProgress(_("Verifying blocks...").translated,
4999  percentageDone, false);
5000  pindex = ::ChainActive().Next(pindex);
5001  CBlock block;
5002  if (!ReadBlockFromDisk(block, pindex, consensusParams)) {
5003  return error(
5004  "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
5005  pindex->nHeight, pindex->GetBlockHash().ToString());
5006  }
5007  if (!::ChainstateActive().ConnectBlock(
5008  block, state, pindex, coins, params,
5009  BlockValidationOptions(config))) {
5010  return error("VerifyDB(): *** found unconnectable block at %d, "
5011  "hash=%s (%s)",
5012  pindex->nHeight, pindex->GetBlockHash().ToString(),
5013  state.ToString());
5014  }
5015  }
5016  }
5017 
5018  LogPrintf("[DONE].\n");
5019  LogPrintf("No coin database inconsistencies in last %i blocks (%i "
5020  "transactions)\n",
5021  block_count, nGoodTransactions);
5022 
5023  return true;
5024 }
5025 
// RollforwardBlock — NOTE(review): the first signature line(s) (doxygen
// lines 5029-5030, presumably "static bool RollforwardBlock(const
// CBlockIndex *pindex,") are missing from this extraction. Re-applies a
// block's UTXO effects to `view` without full validation: adds all
// outputs (check=true since additions may overwrite), then spends all
// non-coinbase inputs. Used by ReplayBlocks to recover from an
// interrupted flush.
5031  CCoinsViewCache &view,
5032  const Consensus::Params &params) {
5033  // TODO: merge with ConnectBlock
5034  CBlock block;
5035  if (!ReadBlockFromDisk(block, pindex, params)) {
5036  return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s",
5037  pindex->nHeight, pindex->GetBlockHash().ToString());
5038  }
5039 
5040  for (const CTransactionRef &tx : block.vtx) {
5041  // Pass check = true as every addition may be an overwrite.
5042  AddCoins(view, *tx, pindex->nHeight, true);
5043  }
5044 
      // Spend inputs after all outputs are added, so intra-block spends of
      // freshly created outputs resolve correctly.
5045  for (const CTransactionRef &tx : block.vtx) {
5046  if (tx->IsCoinBase()) {
5047  continue;
5048  }
5049 
5050  for (const CTxIn &txin : tx->vin) {
5051  view.SpendCoin(txin.prevout);
5052  }
5053  }
5054 
5055  return true;
5056 }
5057 
// CChainState::ReplayBlocks — NOTE(review): the signature line (doxygen
// line 5058) is missing from this extraction. Recovers the coins
// database from an interrupted flush: the DB records the old and new tip
// hashes ("head blocks"); this rolls back the old branch to the fork
// point via DisconnectBlock, then rolls forward to the new tip via
// RollforwardBlock, and finally flushes a consistent UTXO set.
5059  LOCK(cs_main);
5060 
5061  CCoinsView &db = this->CoinsDB();
5062  CCoinsViewCache cache(&db);
5063 
5064  std::vector<BlockHash> hashHeads = db.GetHeadBlocks();
5065  if (hashHeads.empty()) {
5066  // We're already in a consistent state.
5067  return true;
5068  }
      // A valid interrupted-flush marker always records exactly two heads:
      // the new tip and the old tip (which may be null on first flush).
5069  if (hashHeads.size() != 2) {
5070  return error("ReplayBlocks(): unknown inconsistent state");
5071  }
5072 
5073  uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
5074  LogPrintf("Replaying blocks\n");
5075 
5076  // Old tip during the interrupted flush.
5077  const CBlockIndex *pindexOld = nullptr;
5078  // New tip during the interrupted flush.
5079  const CBlockIndex *pindexNew;
5080  // Latest block common to both the old and the new tip.
5081  const CBlockIndex *pindexFork = nullptr;
5082 
5083  if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
5084  return error(
5085  "ReplayBlocks(): reorganization to unknown block requested");
5086  }
5087 
5088  pindexNew = m_blockman.m_block_index[hashHeads[0]];
5089 
5090  if (!hashHeads[1].IsNull()) {
5091  // The old tip is allowed to be 0, indicating it's the first flush.
5092  if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
5093  return error(
5094  "ReplayBlocks(): reorganization from unknown block requested");
5095  }
5096 
5097  pindexOld = m_blockman.m_block_index[hashHeads[1]];
5098  pindexFork = LastCommonAncestor(pindexOld, pindexNew);
5099  assert(pindexFork != nullptr);
5100  }
5101 
5102  // Rollback along the old branch.
5103  while (pindexOld != pindexFork) {
5104  if (pindexOld->nHeight > 0) {
5105  // Never disconnect the genesis block.
5106  CBlock block;
5107  if (!ReadBlockFromDisk(block, pindexOld, params)) {
5108  return error("RollbackBlock(): ReadBlockFromDisk() failed at "
5109  "%d, hash=%s",
5110  pindexOld->nHeight,
5111  pindexOld->GetBlockHash().ToString());
5112  }
5113 
5114  LogPrintf("Rolling back %s (%i)\n",
5115  pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
5116  DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
5117  if (res == DisconnectResult::FAILED) {
5118  return error(
5119  "RollbackBlock(): DisconnectBlock failed at %d, hash=%s",
5120  pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
5121  }
5122 
5123  // If DisconnectResult::UNCLEAN is returned, it means a non-existing
5124  // UTXO was deleted, or an existing UTXO was overwritten. It
5125  // corresponds to cases where the block-to-be-disconnect never had
5126  // all its operations applied to the UTXO set. However, as both
5127  // writing a UTXO and deleting a UTXO are idempotent operations, the
5128  // result is still a version of the UTXO set with the effects of
5129  // that block undone.
5130  }
5131  pindexOld = pindexOld->pprev;
5132  }
5133 
5134  // Roll forward from the forking point to the new tip.
5135  int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
5136  for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight;
5137  ++nHeight) {
5138  const CBlockIndex *pindex = pindexNew->GetAncestor(nHeight);
5139  LogPrintf("Rolling forward %s (%i)\n",
5140  pindex->GetBlockHash().ToString(), nHeight);
5141  uiInterface.ShowProgress(_("Replaying blocks...").translated,
5142  (int)((nHeight - nForkHeight) * 100.0 /
5143  (pindexNew->nHeight - nForkHeight)),
5144  false);
5145  if (!RollforwardBlock(pindex, cache, params)) {
5146  return false;
5147  }
5148  }
5149 
      // Record the new tip and flush: this clears the head-blocks marker,
      // restoring the DB to a consistent state.
5150  cache.SetBestBlock(pindexNew->GetBlockHash());
5151  cache.Flush();
5152  uiInterface.ShowProgress("", 100, false);
5153  return true;
5154 }
5155 
5156 // May NOT be used after any connections are up as much of the peer-processing
5157 // logic assumes a consistent block index state
// NOTE(review): the signature line (doxygen line 5158, presumably
// CChainState::UnloadBlockIndex) is missing from this extraction.
// Resets this chainstate's per-chain bookkeeping.
5159  nBlockSequenceId = 1;
5160  setBlockIndexCandidates.clear();
5161 
5162  // Do not point to CBlockIndex that will be free'd
5163  m_finalizedBlockIndex = nullptr;
5164 }
5165 
5166 // May NOT be used after any connections are up as much
5167 // of the peer-processing logic assumes a consistent
5168 // block index state
// NOTE(review): the signature line (doxygen line 5169, the global
// UnloadBlockIndex()) is missing from this extraction. Tears down all
// global block-index state: the chainstate manager, best-block pointers,
// mempool, block-file metadata and dirty/pruned flags.
5170  LOCK(cs_main);
5171  g_chainman.Unload();
5172  pindexBestInvalid = nullptr;
5173  pindexBestParked = nullptr;
5174  pindexBestHeader = nullptr;
5175  pindexBestForkTip = nullptr;
5176  pindexBestForkBase = nullptr;
      // NOTE(review): doxygen line 5177 is missing from this extraction —
      // one statement between the fork-base reset and the mempool clear is
      // not visible; confirm against upstream.
5178  g_mempool.clear();
5179  vinfoBlockFile.clear();
5180  nLastBlockFile = 0;
5181  setDirtyBlockIndex.clear();
5182  setDirtyFileInfo.clear();
5183  fHavePruned = false;
5184 }
5185 
// ChainstateManager::LoadBlockIndex — NOTE(review): the signature line
// (doxygen line 5186) is missing from this extraction. Loads the block
// index from the databases unless a reindex is in progress; reports via
// needs_init whether fresh database initialization is required.
5187  AssertLockHeld(cs_main);
5188  // Load block index from databases
5189  bool needs_init = fReindex;
5190  if (!fReindex) {
5191  bool ret = LoadBlockIndexDB(*this, params);
5192  if (!ret) {
5193  return false;
5194  }
5195 
      // An empty index after a successful DB load means a brand-new
      // datadir: initialization is still needed.
5196  needs_init = m_blockman.m_block_index.empty();
5197  }
5198 
5199  if (needs_init) {
5200  // Everything here is for *new* reindex/DBs. Thus, though
5201  // LoadBlockIndexDB may have set fReindex if we shut down
5202  // mid-reindex previously, we don't check fReindex and
5203  // instead only check it prior to LoadBlockIndexDB to set
5204  // needs_init.
5205 
5206  LogPrintf("Initializing databases...\n");
5207  }
5208  return true;
5209 }
5210 
// CChainState::LoadGenesisBlock — NOTE(review): the signature line
// (doxygen line 5211) is missing from this extraction. Writes the
// chain's genesis block to disk and adds it to the block index, unless
// it is already present. Returns false on disk-write failure.
5212  LOCK(cs_main);
5213 
5214  // Check whether we're already initialized by checking for genesis in
5215  // m_blockman.m_block_index. Note that we can't use m_chain here, since it
5216  // is set based on the coins db, not the block index db, which is the only
5217  // thing loaded at this point.
5218  if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash())) {
5219  return true;
5220  }
5221 
5222  try {
5223  const CBlock &block = chainparams.GenesisBlock();
5224  FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
5225  if (blockPos.IsNull()) {
5226  return error("%s: writing genesis block to disk failed", __func__);
5227  }
5228  CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
      // Mark the genesis block's transactions as received so the chain
      // can start from it.
5229  ReceivedBlockTransactions(block, pindex, blockPos);
5230  } catch (const std::runtime_error &e) {
5231  return error("%s: failed to write genesis block: %s", __func__,
5232  e.what());
5233  }
5234 
5235  return true;
5236 }
5237 
5238 bool LoadGenesisBlock(const CChainParams &chainparams) {
// NOTE(review): the single body line (doxygen line 5239) is missing from
// this extraction — presumably forwarding to
// ::ChainstateActive().LoadGenesisBlock(chainparams); confirm upstream.
5240 }
5241 
5242 void LoadExternalBlockFile(const Config &config, FILE *fileIn,
5243  FlatFilePos *dbp) {
      // Import blocks from an external blk*.dat-style file (used by
      // -reindex and -loadblock). Scans for the network's disk magic,
      // deserializes each block, accepts it (or queues it if its parent is
      // unknown), and recursively processes queued children once their
      // parent arrives. `dbp` is non-null during reindex and carries the
      // on-disk position of each block.
5244  // Map of disk positions for blocks with unknown parent (only used for
5245  // reindex)
5246  static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
5247  int64_t nStart = GetTimeMillis();
5248 
5249  const CChainParams &chainparams = config.GetChainParams();
5250 
5251  int nLoaded = 0;
5252  try {
5253  // This takes over fileIn and calls fclose() on it in the CBufferedFile
5254  // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
5255  // so any transaction can fit in the buffer.
5256  CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
5257  CLIENT_VERSION);
5258  uint64_t nRewind = blkdat.GetPos();
5259  while (!blkdat.eof()) {
5260  if (ShutdownRequested()) {
5261  return;
5262  }
5263 
5264  blkdat.SetPos(nRewind);
5265  // Start one byte further next time, in case of failure.
5266  nRewind++;
5267  // Remove former limit.
5268  blkdat.SetLimit();
5269  unsigned int nSize = 0;
5270  try {
5271  // Locate a header.
      // NOTE(review): the declaration of `buf` (doxygen line 5272,
      // presumably a byte array of the disk-magic length) and the byte
      // count of the memcmp (line 5277) are missing from this extraction;
      // confirm against upstream.
5273  blkdat.FindByte(chainparams.DiskMagic()[0]);
5274  nRewind = blkdat.GetPos() + 1;
5275  blkdat >> buf;
5276  if (memcmp(buf, chainparams.DiskMagic().data(),
5278  continue;
5279  }
5280 
5281  // Read size.
5282  blkdat >> nSize;
      // 80 bytes is the serialized block-header size; anything smaller
      // cannot be a block.
5283  if (nSize < 80) {
5284  continue;
5285  }
5286  } catch (const std::exception &) {
5287  // No valid block header found; don't complain.
5288  break;
5289  }
5290 
5291  try {
5292  // read block
5293  uint64_t nBlockPos = blkdat.GetPos();
5294  if (dbp) {
5295  dbp->nPos = nBlockPos;
5296  }
5297  blkdat.SetLimit(nBlockPos + nSize);
5298  blkdat.SetPos(nBlockPos);
5299  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
5300  CBlock &block = *pblock;
5301  blkdat >> block;
5302  nRewind = blkdat.GetPos();
5303 
5304  const BlockHash hash = block.GetHash();
5305  {
5306  LOCK(cs_main);
5307  // detect out of order blocks, and store them for later
      // NOTE(review): the first LogPrint argument (doxygen line 5311,
      // presumably BCLog::REINDEX) is missing from this extraction;
      // likewise at lines 5337 and 5372 below.
5308  if (hash != chainparams.GetConsensus().hashGenesisBlock &&
5309  !LookupBlockIndex(block.hashPrevBlock)) {
5310  LogPrint(
5312  "%s: Out of order block %s, parent %s not known\n",
5313  __func__, hash.ToString(),
5314  block.hashPrevBlock.ToString());
5315  if (dbp) {
5316  mapBlocksUnknownParent.insert(
5317  std::make_pair(block.hashPrevBlock, *dbp));
5318  }
5319  continue;
5320  }
5321 
5322  // process in case the block isn't known yet
5323  CBlockIndex *pindex = LookupBlockIndex(hash);
5324  if (!pindex || !pindex->nStatus.hasData()) {
5325  BlockValidationState state;
5326  if (::ChainstateActive().AcceptBlock(
5327  config, pblock, state, true, dbp, nullptr)) {
5328  nLoaded++;
5329  }
5330  if (state.IsError()) {
5331  break;
5332  }
5333  } else if (hash != chainparams.GetConsensus()
5334  .hashGenesisBlock &&
5335  pindex->nHeight % 1000 == 0) {
5336  LogPrint(
5338  "Block Import: already had block %s at height %d\n",
5339  hash.ToString(), pindex->nHeight);
5340  }
5341  }
5342 
5343  // Activate the genesis block so normal node progress can
5344  // continue
5345  if (hash == chainparams.GetConsensus().hashGenesisBlock) {
5346  BlockValidationState state;
5347  if (!ActivateBestChain(config, state, nullptr)) {
5348  break;
5349  }
5350  }
5351 
5352  NotifyHeaderTip();
5353 
5354  // Recursively process earlier encountered successors of this
5355  // block
5356  std::deque<uint256> queue;
5357  queue.push_back(hash);
5358  while (!queue.empty()) {
5359  uint256 head = queue.front();
5360  queue.pop_front();
5361  std::pair<std::multimap<uint256, FlatFilePos>::iterator,
5362  std::multimap<uint256, FlatFilePos>::iterator>
5363  range = mapBlocksUnknownParent.equal_range(head);
5364  while (range.first != range.second) {
5365  std::multimap<uint256, FlatFilePos>::iterator it =
5366  range.first;
5367  std::shared_ptr<CBlock> pblockrecursive =
5368  std::make_shared<CBlock>();
5369  if (ReadBlockFromDisk(*pblockrecursive, it->second,
5370  chainparams.GetConsensus())) {
5371  LogPrint(
5373  "%s: Processing out of order child %s of %s\n",
5374  __func__, pblockrecursive->GetHash().ToString(),
5375  head.ToString());
5376  LOCK(cs_main);
5377  BlockValidationState dummy;
5378  if (::ChainstateActive().AcceptBlock(
5379  config, pblockrecursive, dummy, true,
5380  &it->second, nullptr)) {
5381  nLoaded++;
5382  queue.push_back(pblockrecursive->GetHash());
5383  }
5384  }
      // Advance before erasing: `it` is invalidated by erase().
5385  range.first++;
5386  mapBlocksUnknownParent.erase(it);
5387  NotifyHeaderTip();
5388  }
5389  }
5390  } catch (const std::exception &e) {
5391  LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
5392  e.what());
5393  }
5394  }
5395  } catch (const std::runtime_error &e) {
5396  AbortNode(std::string("System error: ") + e.what());
5397  }
5398 
5399  LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
5400  GetTimeMillis() - nStart);
5401 }
5402 
5403 void CChainState::CheckBlockIndex(const Consensus::Params &consensusParams) {
5404  if (!fCheckBlockIndex) {
5405  return;
5406  }
5407 
5408  LOCK(cs_main);
5409 
5410  // During a reindex, we read the genesis block and call CheckBlockIndex
5411  // before ActivateBestChain, so we have the genesis block in
5412  // m_blockman.m_block_index but no active chain. (A few of the tests when
5413  // iterating the block tree require that m_chain has been initialized.)
5414  if (m_chain.Height() < 0) {
5415  assert(m_blockman.m_block_index.size() <= 1);
5416  return;
5417  }
5418 
5419  // Build forward-pointing map of the entire block tree.
5420  std::multimap<CBlockIndex *, CBlockIndex *> forward;
5421  for (const auto &entry : m_blockman.m_block_index) {
5422  forward.emplace(entry.second->pprev, entry.second);
5423  }
5424 
5425  assert(forward.size() == m_blockman.m_block_index.size());
5426 
5427  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5428  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5429  rangeGenesis = forward.equal_range(nullptr);
5430  CBlockIndex *pindex = rangeGenesis.first->second;
5431  rangeGenesis.first++;
5432  // There is only one index entry with parent nullptr.
5433  assert(rangeGenesis.first == rangeGenesis.second);
5434 
5435  // Iterate over the entire block tree, using depth-first search.
5436  // Along the way, remember whether there are blocks on the path from genesis
5437  // block being explored which are the first to have certain properties.
5438  size_t nNodes = 0;
5439  int nHeight = 0;
5440  // Oldest ancestor of pindex which is invalid.
5441  CBlockIndex *pindexFirstInvalid = nullptr;
5442  // Oldest ancestor of pindex which is parked.
5443  CBlockIndex *pindexFirstParked = nullptr;
5444  // Oldest ancestor of pindex which does not have data available.
5445  CBlockIndex *pindexFirstMissing = nullptr;
5446  // Oldest ancestor of pindex for which nTx == 0.
5447  CBlockIndex *pindexFirstNeverProcessed = nullptr;
5448  // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
5449  // (regardless of being valid or not).
5450  CBlockIndex *pindexFirstNotTreeValid = nullptr;
5451  // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
5452  // (regardless of being valid or not).
5453  CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
5454  // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
5455  // (regardless of being valid or not).
5456  CBlockIndex *pindexFirstNotChainValid = nullptr;
5457  // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
5458  // (regardless of being valid or not).
5459  CBlockIndex *pindexFirstNotScriptsValid = nullptr;
5460  while (pindex != nullptr) {
5461  nNodes++;
5462  if (pindexFirstInvalid == nullptr && pindex->nStatus.hasFailed()) {
5463  pindexFirstInvalid = pindex;
5464  }
5465  if (pindexFirstParked == nullptr && pindex->nStatus.isParked()) {
5466  pindexFirstParked = pindex;
5467  }
5468  if (pindexFirstMissing == nullptr && !pindex->nStatus.hasData()) {
5469  pindexFirstMissing = pindex;
5470  }
5471  if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
5472  pindexFirstNeverProcessed = pindex;
5473  }
5474  if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
5475  pindex->nStatus.getValidity() < BlockValidity::TREE) {
5476  pindexFirstNotTreeValid = pindex;
5477  }
5478  if (pindex->pprev != nullptr &&
5479  pindexFirstNotTransactionsValid == nullptr &&
5481  pindexFirstNotTransactionsValid = pindex;
5482  }
5483  if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
5485  pindexFirstNotChainValid = pindex;
5486  }
5487  if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
5489  pindexFirstNotScriptsValid = pindex;
5490  }
5491 
5492  // Begin: actual consistency checks.
5493  if (pindex->pprev == nullptr) {
5494  // Genesis block checks.
5495  // Genesis block's hash must match.
5496  assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
5497  // The current active chain's genesis block must be this block.
5498  assert(pindex == m_chain.Genesis());
5499  }
5500  if (!pindex->HaveTxsDownloaded()) {
5501  // nSequenceId can't be set positive for blocks that aren't linked
5502  // (negative is used for preciousblock)
5503  assert(pindex->nSequenceId <= 0);
5504  }
5505  // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
5506  // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
5507  // (or VALID_TRANSACTIONS) if no pruning has occurred.
5508  if (!fHavePruned) {
5509  // If we've never pruned, then HAVE_DATA should be equivalent to nTx
5510  // > 0
5511  assert(pindex->nStatus.hasData() == (pindex->nTx > 0));
5512  assert(pindexFirstMissing == pindexFirstNeverProcessed);
5513  } else if (pindex->nStatus.hasData()) {
5514  // If we have pruned, then we can only say that HAVE_DATA implies
5515  // nTx > 0
5516  assert(pindex->nTx > 0);
5517  }
5518  if (pindex->nStatus.hasUndo()) {
5519  assert(pindex->nStatus.hasData());
5520  }
5521  // This is pruning-independent.
5522  assert((pindex->nStatus.getValidity() >= BlockValidity::TRANSACTIONS) ==
5523  (pindex->nTx > 0));
5524  // All parents having had data (at some point) is equivalent to all
5525  // parents being VALID_TRANSACTIONS, which is equivalent to
5526  // HaveTxsDownloaded(). All parents having had data (at some point) is
5527  // equivalent to all parents being VALID_TRANSACTIONS, which is
5528  // equivalent to HaveTxsDownloaded().
5529  assert((pindexFirstNeverProcessed == nullptr) ==
5530  (pindex->HaveTxsDownloaded()));
5531  assert((pindexFirstNotTransactionsValid == nullptr) ==
5532  (pindex->HaveTxsDownloaded()));
5533  // nHeight must be consistent.
5534  assert(pindex->nHeight == nHeight);
5535  // For every block except the genesis block, the chainwork must be
5536  // larger than the parent's.
5537  assert(pindex->pprev == nullptr ||
5538  pindex->nChainWork >= pindex->pprev->nChainWork);
5539  // The pskip pointer must point back for all but the first 2 blocks.
5540  assert(nHeight < 2 ||
5541  (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
5542  // All m_blockman.m_block_index entries must at least be TREE valid
5543  assert(pindexFirstNotTreeValid == nullptr);
5544  if (pindex->nStatus.getValidity() >= BlockValidity::TREE) {
5545  // TREE valid implies all parents are TREE valid
5546  assert(pindexFirstNotTreeValid == nullptr);
5547  }
5548  if (pindex->nStatus.getValidity() >= BlockValidity::CHAIN) {
5549  // CHAIN valid implies all parents are CHAIN valid
5550  assert(pindexFirstNotChainValid == nullptr);
5551  }
5552  if (pindex->nStatus.getValidity() >= BlockValidity::SCRIPTS) {
5553  // SCRIPTS valid implies all parents are SCRIPTS valid
5554  assert(pindexFirstNotScriptsValid == nullptr);
5555  }
5556  if (pindexFirstInvalid == nullptr) {
5557  // Checks for not-invalid blocks.
5558  // The failed mask cannot be set for blocks without invalid parents.
5559  assert(!pindex->nStatus.isInvalid());
5560  }
5561  if (pindexFirstParked == nullptr) {
5562  // Checks for not-parked blocks.
5563  // The parked mask cannot be set for blocks without parked parents.
5564  // (i.e., hasParkedParent only if an ancestor is properly parked).
5565  assert(!pindex->nStatus.isOnParkedChain());
5566  }
5567  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5568  pindexFirstNeverProcessed == nullptr) {
5569  if (pindexFirstInvalid == nullptr) {
5570  // If this block sorts at least as good as the current tip and
5571  // is valid and we have all data for its parents, it must be in
5572  // setBlockIndexCandidates or be parked.
5573  if (pindexFirstMissing == nullptr) {
5574  assert(pindex->nStatus.isOnParkedChain() ||
5575  setBlockIndexCandidates.count(pindex));
5576  }
5577  // m_chain.Tip() must also be there even if some data has
5578  // been pruned.
5579  if (pindex == m_chain.Tip()) {
5580  assert(setBlockIndexCandidates.count(pindex));
5581  }
5582  // If some parent is missing, then it could be that this block
5583  // was in setBlockIndexCandidates but had to be removed because
5584  // of the missing data. In this case it must be in
5585  // m_blocks_unlinked -- see test below.
5586  }
5587  } else {
5588  // If this block sorts worse than the current tip or some ancestor's
5589  // block has never been seen, it cannot be in
5590  // setBlockIndexCandidates.
5591  assert(setBlockIndexCandidates.count(pindex) == 0);
5592  }
5593  // Check whether this block is in m_blocks_unlinked.
5594  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5595  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5596  rangeUnlinked =
5597  m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
5598  bool foundInUnlinked = false;
5599  while (rangeUnlinked.first != rangeUnlinked.second) {
5600  assert(rangeUnlinked.first->first == pindex->pprev);
5601  if (rangeUnlinked.first->second == pindex) {
5602  foundInUnlinked = true;
5603  break;
5604  }
5605  rangeUnlinked.first++;
5606  }
5607  if (pindex->pprev && pindex->nStatus.hasData() &&
5608  pindexFirstNeverProcessed != nullptr &&
5609  pindexFirstInvalid == nullptr) {
5610  // If this block has block data available, some parent was never
5611  // received, and has no invalid parents, it must be in
5612  // m_blocks_unlinked.
5613  assert(foundInUnlinked);
5614  }
5615  if (!pindex->nStatus.hasData()) {
5616  // Can't be in m_blocks_unlinked if we don't HAVE_DATA
5617  assert(!foundInUnlinked);
5618  }
5619  if (pindexFirstMissing == nullptr) {
5620  // We aren't missing data for any parent -- cannot be in
5621  // m_blocks_unlinked.
5622  assert(!foundInUnlinked);
5623  }
5624  if (pindex->pprev && pindex->nStatus.hasData() &&
5625  pindexFirstNeverProcessed == nullptr &&
5626  pindexFirstMissing != nullptr) {
5627  // We HAVE_DATA for this block, have received data for all parents
5628  // at some point, but we're currently missing data for some parent.
5629  // We must have pruned.
5630  assert(fHavePruned);
5631  // This block may have entered m_blocks_unlinked if:
5632  // - it has a descendant that at some point had more work than the
5633  // tip, and
5634  // - we tried switching to that descendant but were missing
5635  // data for some intermediate block between m_chain and the
5636  // tip.
5637  // So if this block is itself better than m_chain.Tip() and it
5638  // wasn't in
5639  // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
5640  if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) &&
5641  setBlockIndexCandidates.count(pindex) == 0) {
5642  if (pindexFirstInvalid == nullptr) {
5643  assert(foundInUnlinked);
5644  }
5645  }
5646  }
5647  // Perhaps too slow
5648  // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
5649  // End: actual consistency checks.
5650 
5651  // Try descending into the first subnode.
5652  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5653  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5654  range = forward.equal_range(pindex);
5655  if (range.first != range.second) {
5656  // A subnode was found.
5657  pindex = range.first->second;
5658  nHeight++;
5659  continue;
5660  }
5661  // This is a leaf node. Move upwards until we reach a node of which we
5662  // have not yet visited the last child.
5663  while (pindex) {
5664  // We are going to either move to a parent or a sibling of pindex.
5665  // If pindex was the first with a certain property, unset the
5666  // corresponding variable.
5667  if (pindex == pindexFirstInvalid) {
5668  pindexFirstInvalid = nullptr;
5669  }
5670  if (pindex == pindexFirstParked) {
5671  pindexFirstParked = nullptr;
5672  }
5673  if (pindex == pindexFirstMissing) {
5674  pindexFirstMissing = nullptr;
5675  }
5676  if (pindex == pindexFirstNeverProcessed) {
5677  pindexFirstNeverProcessed = nullptr;
5678  }
5679  if (pindex == pindexFirstNotTreeValid) {
5680  pindexFirstNotTreeValid = nullptr;
5681  }
5682  if (pindex == pindexFirstNotTransactionsValid) {
5683  pindexFirstNotTransactionsValid = nullptr;
5684  }
5685  if (pindex == pindexFirstNotChainValid) {
5686  pindexFirstNotChainValid = nullptr;
5687  }
5688  if (pindex == pindexFirstNotScriptsValid) {
5689  pindexFirstNotScriptsValid = nullptr;
5690  }
5691  // Find our parent.
5692  CBlockIndex *pindexPar = pindex->pprev;
5693  // Find which child we just visited.
5694  std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
5695  std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
5696  rangePar = forward.equal_range(pindexPar);
5697  while (rangePar.first->second != pindex) {
5698  // Our parent must have at least the node we're coming from as
5699  // child.
5700  assert(rangePar.first != rangePar.second);
5701  rangePar.first++;
5702  }
5703  // Proceed to the next one.
5704  rangePar.first++;
5705  if (rangePar.first != rangePar.second) {
5706  // Move to the sibling.
5707  pindex = rangePar.first->second;
5708  break;
5709  } else {
5710  // Move up further.
5711  pindex = pindexPar;
5712  nHeight--;
5713  continue;
5714  }
5715  }
5716  }
5717 
5718  // Check that we actually traversed the entire map.
5719  assert(nNodes == forward.size());
5720 }
5721 
5722 std::string CChainState::ToString() {
5723  CBlockIndex *tip = m_chain.Tip();
5724  return strprintf("Chainstate [%s] @ height %d (%s)",
5725  m_from_snapshot_blockhash.IsNull() ? "ibd" : "snapshot",
5726  tip ? tip->nHeight : -1,
5727  tip ? tip->GetBlockHash().ToString() : "null");
5728 }
5729 
5730 std::string CBlockFileInfo::ToString() const {
5731  return strprintf(
5732  "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
5733  nBlocks, nSize, nHeightFirst, nHeightLast,
5734  FormatISO8601DateTime(nTimeFirst), FormatISO8601DateTime(nTimeLast));
5735 }
5736 
5738  LOCK(cs_LastBlockFile);
5739 
5740  return &vinfoBlockFile.at(n);
5741 }
5742 
5745  const CBlockIndex *pindex)
5746  EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
5747  return VersionBitsState(pindex, params, pos, versionbitscache);
5748 }
5749 
5752  LOCK(cs_main);
5753  return VersionBitsStateImpl(params, pos, ::ChainActive().Tip());
5754 }
5755