/Users/brunogarcia/projects/bitcoin-core-dev/src/headerssync.cpp
// Copyright (c) 2022-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <headerssync.h>

#include <logging.h>
#include <pow.h>
#include <util/check.h>
#include <util/time.h>
#include <util/vector.h>

// Our memory analysis in headerssync-params.py assumes this many bytes for a
// CompressedHeader (we should re-calculate parameters if we compress further).
static_assert(sizeof(CompressedHeader) == 48);
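// (A CompressedHeader is, as far as this analysis assumes, a CBlockHeader with
// hashPrevBlock omitted, since the prev-hash can be reconstructed from the
// preceding header: 4 + 32 + 4 + 4 + 4 = 48 bytes.)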

HeadersSyncState::HeadersSyncState(NodeId id, const Consensus::Params& consensus_params,
        const HeadersSyncParams& params, const CBlockIndex* chain_start,
        const arith_uint256& minimum_required_work) :
    m_commit_offset((assert(params.commitment_period > 0), // HeadersSyncParams field must be initialized to non-zero.
                     FastRandomContext().randrange(params.commitment_period))),
    m_id(id), m_consensus_params(consensus_params),
    m_params(params),
    m_chain_start(chain_start),
    m_minimum_required_work(minimum_required_work),
    m_current_chain_work(chain_start->nChainWork),
    m_last_header_received(m_chain_start->GetBlockHeader()),
    m_current_height(chain_start->nHeight)
{
    // Estimate the number of blocks that could possibly exist on the peer's
    // chain *right now* using 6 blocks/second (fastest blockrate given the MTP
    // rule) times the number of seconds from the last allowed block until
    // today. This serves as a memory bound on how many commitments we might
    // store from this peer, and we can safely give up syncing if the peer
    // exceeds this bound, because it's not possible for a consensus-valid
    // chain to be longer than this (at the current time -- in the future we
    // could try again, if necessary, to sync a longer chain).
    const auto max_seconds_since_start{(Ticks<std::chrono::seconds>(NodeClock::now() - NodeSeconds{std::chrono::seconds{chain_start->GetMedianTimePast()}}))
        + MAX_FUTURE_BLOCK_TIME};
    m_max_commitments = 6 * max_seconds_since_start / m_params.commitment_period;
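    // Illustration (hypothetical numbers): if chain_start's median-time-past is
    // one year in the past (~31.5 million seconds) and commitment_period is 600,
    // the cap is 6 * 31.5e6 / 600, i.e. roughly 315,000 commitments of one bit each.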

    LogDebug(BCLog::NET, "Initial headers sync started with peer=%d: height=%i, max_commitments=%i, min_work=%s\n", m_id, m_current_height, m_max_commitments, m_minimum_required_work.ToString());
}

/** Free any memory in use, and mark this object as no longer usable. This is
 * required to guarantee that we won't reuse this object with the same
 * SaltedUint256Hasher for another sync. */
void HeadersSyncState::Finalize()
{
    Assume(m_download_state != State::FINAL);

    ClearShrink(m_header_commitments);
    m_last_header_received.SetNull();
    ClearShrink(m_redownloaded_headers);
    m_redownload_buffer_last_hash.SetNull();
    m_redownload_buffer_first_prev_hash.SetNull();
    m_process_all_remaining_headers = false;
    m_current_height = 0;

    m_download_state = State::FINAL;
}

/** Process the next batch of headers received from our peer.
 * Validate and store commitments, and compare total chainwork to our target to
 * see if we can switch to REDOWNLOAD mode. */
HeadersSyncState::ProcessingResult HeadersSyncState::ProcessNextHeaders(
        std::span<const CBlockHeader> received_headers, const bool full_headers_message)
{
    ProcessingResult ret;

    Assume(!received_headers.empty());
    if (received_headers.empty()) return ret;

    Assume(m_download_state != State::FINAL);
    if (m_download_state == State::FINAL) return ret;

    if (m_download_state == State::PRESYNC) {
        // During PRESYNC, we minimally validate block headers and
        // occasionally add commitments to them, until we reach our work
        // threshold (at which point m_download_state is updated to REDOWNLOAD).
        ret.success = ValidateAndStoreHeadersCommitments(received_headers);
        if (ret.success) {
            if (full_headers_message || m_download_state == State::REDOWNLOAD) {
                // A full headers message means the peer may have more to give us;
                // also if we just switched to REDOWNLOAD then we need to re-request
                // headers from the beginning.
                ret.request_more = true;
            } else {
                Assume(m_download_state == State::PRESYNC);
                // If we're in PRESYNC and we get a non-full headers
                // message, then the peer's chain has ended and definitely doesn't
                // have enough work, so we can stop our sync.
                LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (presync phase)\n", m_id, m_current_height);
            }
        }
    } else if (m_download_state == State::REDOWNLOAD) {
        // During REDOWNLOAD, we compare our stored commitments to what we
        // receive, and add headers to our redownload buffer. When the buffer
        // gets big enough (meaning that we've checked enough commitments),
        // we'll return a batch of headers to the caller for processing.
        ret.success = true;
        for (const auto& hdr : received_headers) {
            if (!ValidateAndStoreRedownloadedHeader(hdr)) {
                // Something went wrong -- the peer gave us an unexpected chain.
                // We could consider looking at the reason for failure and
                // punishing the peer, but for now just give up on sync.
                ret.success = false;
                break;
            }
        }

        if (ret.success) {
            // Return any headers that are ready for acceptance.
            ret.pow_validated_headers = PopHeadersReadyForAcceptance();

            // If we hit our target blockhash, then all remaining headers will be
            // returned and we can clear any leftover internal state.
            if (m_redownloaded_headers.empty() && m_process_all_remaining_headers) {
                LogDebug(BCLog::NET, "Initial headers sync complete with peer=%d: releasing all at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
            } else if (full_headers_message) {
                // If the headers message is full, we need to request more.
                ret.request_more = true;
            } else {
                // For some reason our peer gave us a high-work chain, but is now
                // declining to serve us that full chain again. Give up.
                // Note that there's no more processing to be done with these
                // headers, so we can still return success.
                LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: incomplete headers message at height=%i (redownload phase)\n", m_id, m_redownload_buffer_last_height);
            }
        }
    }

    if (!(ret.success && ret.request_more)) Finalize();
    return ret;
}

bool HeadersSyncState::ValidateAndStoreHeadersCommitments(std::span<const CBlockHeader> headers)
{
    // The caller should not give us an empty set of headers.
    Assume(headers.size() > 0);
    if (headers.size() == 0) return true;

    Assume(m_download_state == State::PRESYNC);
    if (m_download_state != State::PRESYNC) return false;

    if (headers[0].hashPrevBlock != m_last_header_received.GetHash()) {
        // Somehow our peer gave us a header that doesn't connect.
        // This might be benign -- perhaps our peer reorged away from the chain
        // they were on. Give up on this sync for now (likely we will start a
        // new sync with a new starting point).
        LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (presync phase)\n", m_id, m_current_height);
        return false;
    }

    // If it does connect, (minimally) validate and occasionally store
    // commitments.
    for (const auto& hdr : headers) {
        if (!ValidateAndProcessSingleHeader(hdr)) {
            return false;
        }
    }

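    // Once the peer's chain has accumulated enough total work, switch to
    // REDOWNLOAD: reset the redownload bookkeeping back to the chain start so
    // the whole chain is requested a second time and checked against the
    // stored commitments before being handed back to the caller.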
    if (m_current_chain_work >= m_minimum_required_work) {
        m_redownloaded_headers.clear();
        m_redownload_buffer_last_height = m_chain_start->nHeight;
        m_redownload_buffer_first_prev_hash = m_chain_start->GetBlockHash();
        m_redownload_buffer_last_hash = m_chain_start->GetBlockHash();
        m_redownload_chain_work = m_chain_start->nChainWork;
        m_download_state = State::REDOWNLOAD;
        LogDebug(BCLog::NET, "Initial headers sync transition with peer=%d: reached sufficient work at height=%i, redownloading from height=%i\n", m_id, m_current_height, m_redownload_buffer_last_height);
    }
    return true;
}

bool HeadersSyncState::ValidateAndProcessSingleHeader(const CBlockHeader& current)
{
    Assume(m_download_state == State::PRESYNC);
    if (m_download_state != State::PRESYNC) return false;

    int next_height = m_current_height + 1;

    // Verify that the difficulty isn't growing too fast; an adversary with
    // limited hashing capability has a greater chance of producing a high
    // work chain if they compress the work into as few blocks as possible,
    // so don't let anyone give a chain that would violate the difficulty
    // adjustment maximum.
    if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                                       m_last_header_received.nBits, current.nBits)) {
        LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (presync phase)\n", m_id, next_height);
        return false;
    }

    if (next_height % m_params.commitment_period == m_commit_offset) {
        // Add a commitment.
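        // Only a single salted hash bit is kept per commitment, so PRESYNC
        // memory stays at roughly one bit per commitment_period headers; the
        // full headers are requested again during REDOWNLOAD and checked
        // against these bits before being returned to the caller.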
        m_header_commitments.push_back(m_hasher(current.GetHash()) & 1);
        if (m_header_commitments.size() > m_max_commitments) {
            // The peer's chain is too long; give up.
            // It's possible the chain grew since we started the sync; so
            // potentially we could succeed in syncing the peer's chain if we
            // try again later.
            LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: exceeded max commitments at height=%i (presync phase)\n", m_id, next_height);
            return false;
        }
    }

    m_current_chain_work += GetBlockProof(CBlockIndex(current));
    m_last_header_received = current;
    m_current_height = next_height;

    return true;
}

bool HeadersSyncState::ValidateAndStoreRedownloadedHeader(const CBlockHeader& header)
{
    Assume(m_download_state == State::REDOWNLOAD);
    if (m_download_state != State::REDOWNLOAD) return false;

    int64_t next_height = m_redownload_buffer_last_height + 1;

    // Ensure that we're working on a header that connects to the chain we're
    // downloading.
    if (header.hashPrevBlock != m_redownload_buffer_last_hash) {
        LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: non-continuous headers at height=%i (redownload phase)\n", m_id, next_height);
        return false;
    }

    // Check that the difficulty adjustments are within our tolerance:
    uint32_t previous_nBits{0};
    if (!m_redownloaded_headers.empty()) {
        previous_nBits = m_redownloaded_headers.back().nBits;
    } else {
        previous_nBits = m_chain_start->nBits;
    }

    if (!PermittedDifficultyTransition(m_consensus_params, next_height,
                                       previous_nBits, header.nBits)) {
        LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: invalid difficulty transition at height=%i (redownload phase)\n", m_id, next_height);
        return false;
    }

    // Track work on the redownloaded chain
    m_redownload_chain_work += GetBlockProof(CBlockIndex(header));

    if (m_redownload_chain_work >= m_minimum_required_work) {
        m_process_all_remaining_headers = true;
    }

    // If we're at a header for which we previously stored a commitment, verify
    // it is correct. Failure will result in aborting download.
    // Also, don't check commitments once we've gotten to our target blockhash;
    // it's possible our peer has extended its chain between our first sync and
    // our second, and we don't want to return failure after we've seen our
    // target blockhash just because we ran out of commitments.
    if (!m_process_all_remaining_headers && next_height % m_params.commitment_period == m_commit_offset) {
        if (m_header_commitments.size() == 0) {
            LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment overrun at height=%i (redownload phase)\n", m_id, next_height);
            // Somehow our peer managed to feed us a different chain and
            // we've run out of commitments.
            return false;
        }
        bool commitment = m_hasher(header.GetHash()) & 1;
        bool expected_commitment = m_header_commitments.front();
        m_header_commitments.pop_front();
        if (commitment != expected_commitment) {
            LogDebug(BCLog::NET, "Initial headers sync aborted with peer=%d: commitment mismatch at height=%i (redownload phase)\n", m_id, next_height);
            return false;
        }
    }

    // Store this header for later processing.
    m_redownloaded_headers.emplace_back(header);
    m_redownload_buffer_last_height = next_height;
    m_redownload_buffer_last_hash = header.GetHash();

    return true;
}

std::vector<CBlockHeader> HeadersSyncState::PopHeadersReadyForAcceptance()
{
    std::vector<CBlockHeader> ret;

    Assume(m_download_state == State::REDOWNLOAD);
    if (m_download_state != State::REDOWNLOAD) return ret;

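    // Release headers once the buffer has grown past redownload_buffer_size
    // (meaning enough later commitments have already been checked), or
    // unconditionally once the peer's chain has reached the required work.
    // Each full header is rebuilt by attaching the previous entry's hash as
    // its prev-hash.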
    while (m_redownloaded_headers.size() > m_params.redownload_buffer_size ||
            (m_redownloaded_headers.size() > 0 && m_process_all_remaining_headers)) {
        ret.emplace_back(m_redownloaded_headers.front().GetFullHeader(m_redownload_buffer_first_prev_hash));
        m_redownloaded_headers.pop_front();
        m_redownload_buffer_first_prev_hash = ret.back().GetHash();
    }
    return ret;
}

CBlockLocator HeadersSyncState::NextHeadersRequestLocator() const
{
    Assume(m_download_state != State::FINAL);
    if (m_download_state == State::FINAL) return {};

    auto chain_start_locator = LocatorEntries(m_chain_start);
    std::vector<uint256> locator;

    if (m_download_state == State::PRESYNC) {
        // During pre-synchronization, we continue from the last header received.
        locator.push_back(m_last_header_received.GetHash());
    }

    if (m_download_state == State::REDOWNLOAD) {
        // During redownload, we will download from the last received header that we stored.
        locator.push_back(m_redownload_buffer_last_hash);
    }

    locator.insert(locator.end(), chain_start_locator.begin(), chain_start_locator.end());
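    // The chain-start locator entries are appended so the peer can still find
    // a common fork point if it no longer recognizes the most recent hash
    // (e.g. after reorging to a different chain).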

    return CBlockLocator{std::move(locator)};
}