snapshotter.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the Apache 2.0 License.
#pragma once

#include "ccf/pal/locking.h"
#include "consensus/ledger_enclave_types.h"
#include "ds/ccf_assert.h"
#include "ds/internal_logger.h"
#include "kv/kv_types.h"
#include "kv/store.h"
#include "node/network_state.h"
#include "node/snapshot_serdes.h"
#include "service/tables/snapshot_evidence.h"
#include "service/tables/snapshot_status.h"
#include "tasks/task_system.h"

#include <chrono>
#include <deque>
#include <optional>
namespace ccf
{
  class Snapshotter : public std::enable_shared_from_this<Snapshotter>,
                      public ccf::kv::AbstractSnapshotter
  {
  private:
    static constexpr auto max_tx_interval = std::numeric_limits<size_t>::max();

    // Maximum number of pending snapshots allowed at a given time. No more
    // snapshots are emitted when this threshold is reached, until pending
    // snapshots are flushed on commit.
    static constexpr auto max_pending_snapshots_count = 5;

    ringbuffer::AbstractWriterFactory& writer_factory;

    ccf::pal::Mutex lock;

    std::shared_ptr<ccf::kv::Store> store;

    // Snapshots are never generated by default (e.g. during public recovery)
    size_t snapshot_tx_interval = max_tx_interval;

    // Minimum number of transactions before a time-based snapshot can trigger
    size_t min_snapshot_tx_interval = 0;

    // Time interval after which a snapshot should be triggered
    std::chrono::microseconds snapshot_time_interval =
      std::chrono::microseconds(0);
49
50 using Clock = std::chrono::system_clock;
51 using TimePoint = Clock::time_point;
52
53 struct SnapshotInfo
54 {
55 ccf::kv::Version version = 0;
56 ccf::crypto::Sha256Hash write_set_digest;
57 std::string commit_evidence;
58 ccf::crypto::Sha256Hash snapshot_digest;
59 std::vector<uint8_t> serialised_snapshot;
60
61 // Prevents the receipt from being passed to the host (on commit) in case
62 // host has not yet allocated memory for the snapshot.
63 bool is_stored = false;
64
65 std::optional<::consensus::Index> evidence_idx = std::nullopt;
66
67 std::optional<std::vector<uint8_t>> cose_sig = std::nullopt;
68 std::optional<std::vector<uint8_t>> tree = std::nullopt;
69
70 SnapshotInfo() = default;
71 };
72 // Queue of pending snapshots that have been generated, but are not yet
73 // committed
74 std::map<uint32_t, SnapshotInfo> pending_snapshots;
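    // Rough lifecycle of a pending_snapshots entry, as implemented by the
    // methods below: snapshot_() records the version and asks the host to
    // allocate a buffer; write_snapshot() copies the serialised snapshot into
    // that buffer and marks it as stored; record_snapshot_evidence_idx(),
    // record_cose_signature() and record_serialised_tree() attach commit
    // evidence; update_indices() then builds a receipt, emits snapshot_commit
    // to the host and erases the entry.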

    // Initial snapshot index
    static constexpr ::consensus::Index initial_snapshot_idx = 0;

    // Seqno of the latest globally committed snapshot baseline.
    ::consensus::Index last_snapshot_idx = 0;

    // Baseline time of the latest globally committed snapshot.
    TimePoint last_snapshot_time = Clock::now();
    // The times at which in-flight snapshots have been scheduled
    std::map<::consensus::Index, TimePoint> scheduled_snapshot_times;

    // Used to suspend snapshot generation during public recovery
    bool snapshot_generation_enabled = true;

    // Indices at which a snapshot will next be generated, with Booleans
    // indicating whether the snapshot was forced at the given index and
    // whether it has already been scheduled
    struct SnapshotEntry
    {
      ::consensus::Index idx;
      bool forced;
      bool done;
    };
    std::deque<SnapshotEntry> next_snapshot_indices;

    static TimePoint time_point_from_snapshot_status(uint64_t timestamp)
    {
      return TimePoint(std::chrono::duration_cast<TimePoint::duration>(
        std::chrono::nanoseconds(static_cast<int64_t>(timestamp))));
    }

    static uint64_t snapshot_status_timestamp_from_time_point(
      const TimePoint& timestamp)
    {
      const auto timestamp_ns =
        std::chrono::duration_cast<std::chrono::nanoseconds>(
          timestamp.time_since_epoch())
          .count();

      CCF_ASSERT_FMT(
        timestamp_ns >= 0,
        "Snapshot timestamp {} precedes the Unix epoch",
        timestamp_ns);

      return static_cast<uint64_t>(timestamp_ns);
    }
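    // Worked example: a SnapshotStatus timestamp of 1'700'000'000'000'000'000
    // (nanoseconds since the Unix epoch) converts to the corresponding
    // system_clock time point, and snapshot_status_timestamp_from_time_point()
    // maps that time point back to the same integer, assuming system_clock
    // measures time since the Unix epoch (guaranteed from C++20).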

    ::consensus::Index latest_scheduled_or_committed_snapshot_idx() const
    {
      auto latest_idx = last_snapshot_idx;
      for (const auto& entry : next_snapshot_indices)
      {
        latest_idx = std::max(latest_idx, entry.idx);
      }

      return latest_idx;
    }

    TimePoint latest_scheduled_or_committed_snapshot_time(
      ::consensus::Index latest_snapshot_idx) const
    {
      auto latest_time = last_snapshot_time;
      for (const auto& [idx, time] : scheduled_snapshot_times)
      {
        if (idx <= latest_snapshot_idx)
        {
          latest_time = std::max(latest_time, time);
        }
      }

      return latest_time;
    }

    void commit_snapshot(
      ::consensus::Index snapshot_idx,
      const std::vector<uint8_t>& serialised_receipt)
    {
      // The snapshot_idx is used to retrieve the correct snapshot file
      // previously generated.
      auto to_host = writer_factory.create_writer_to_outside();
      RINGBUFFER_WRITE_MESSAGE(
        ::consensus::snapshot_commit,
        to_host,
        snapshot_idx,
        serialised_receipt);
    }

    struct SnapshotTask : public ccf::tasks::BaseTask
    {
      std::shared_ptr<Snapshotter> self;
      std::unique_ptr<ccf::kv::AbstractStore::AbstractSnapshot> snapshot;
      uint32_t generation_count;
      TimePoint timestamp;

      const std::string name;

      SnapshotTask(
        std::shared_ptr<Snapshotter> _self,
        std::unique_ptr<ccf::kv::AbstractStore::AbstractSnapshot>&& _snapshot,
        uint32_t _generation_count,
        TimePoint _timestamp) :
        self(std::move(_self)),
        snapshot(std::move(_snapshot)),
        generation_count(_generation_count),
        timestamp(_timestamp),
        name(fmt::format(
          "snapshot@{}[{}]", snapshot->get_version(), generation_count))
      {}

      void do_task_implementation() override
      {
        self->snapshot_(std::move(snapshot), generation_count, timestamp);
      }

      [[nodiscard]] const std::string& get_name() const override
      {
        return name;
      }
    };

    void snapshot_(
      std::unique_ptr<ccf::kv::AbstractStore::AbstractSnapshot> snapshot,
      uint32_t generation_count,
      TimePoint timestamp)
    {
      auto snapshot_version = snapshot->get_version();

      {
        std::unique_lock<ccf::pal::Mutex> guard(lock);
        if (pending_snapshots.size() >= max_pending_snapshots_count)
        {
          LOG_FAIL_FMT(
            "Skipping new snapshot generation as {} snapshots are already "
            "pending",
            pending_snapshots.size());
          return;
        }

        // It is possible that the signature following the snapshot evidence
        // is scheduled by another thread while the below snapshot evidence
        // transaction is committed. To allow for such a scenario, the
        // evidence seqno is recorded via `record_snapshot_evidence_idx()` on
        // a hook rather than here.
        pending_snapshots[generation_count].version = snapshot_version;
      }

      auto serialised_snapshot = store->serialise_snapshot(std::move(snapshot));
      auto serialised_snapshot_size = serialised_snapshot.size();

      auto tx = store->create_tx();
      auto* evidence = tx.rw<SnapshotEvidence>(Tables::SNAPSHOT_EVIDENCE);
      auto snapshot_hash = ccf::crypto::Sha256Hash(serialised_snapshot);
      evidence->put({snapshot_hash, snapshot_version});

      auto* status = tx.rw<SnapshotStatusValue>(Tables::SNAPSHOT_STATUS);
      const auto timestamp_ns =
        snapshot_status_timestamp_from_time_point(timestamp);
      status->put({snapshot_version, timestamp_ns});

      ccf::ClaimsDigest cd;
      // NOLINTNEXTLINE(performance-move-const-arg)
      cd.set(std::move(snapshot_hash));

      ccf::crypto::Sha256Hash ws_digest;
      std::string commit_evidence;
      auto capture_ws_digest_and_commit_evidence =
        [&ws_digest, &commit_evidence](
          const ccf::crypto::Sha256Hash& write_set_digest,
          const std::string& commit_evidence_) {
          ws_digest = write_set_digest;
          commit_evidence = commit_evidence_;
        };

      auto rc = tx.commit(cd, nullptr, capture_ws_digest_and_commit_evidence);
      if (rc != ccf::kv::CommitResult::SUCCESS)
      {
        LOG_FAIL_FMT(
          "Could not commit snapshot evidence for seqno {}: {}",
          snapshot_version,
          rc);
        return;
      }

      auto evidence_version = tx.commit_version();

      {
        std::unique_lock<ccf::pal::Mutex> guard(lock);
        pending_snapshots[generation_count].commit_evidence = commit_evidence;
        pending_snapshots[generation_count].write_set_digest = ws_digest;
        pending_snapshots[generation_count].snapshot_digest = cd.value();
        pending_snapshots[generation_count].serialised_snapshot =
          std::move(serialised_snapshot);
      }

      auto to_host = writer_factory.create_writer_to_outside();
      RINGBUFFER_WRITE_MESSAGE(
        ::consensus::snapshot_allocate,
        to_host,
        snapshot_version,
        evidence_version,
        serialised_snapshot_size,
        generation_count);

      LOG_DEBUG_FMT(
        "Request to allocate snapshot [{} bytes] for seqno {}, with evidence "
        "seqno {}: {}, ws digest: {}",
        serialised_snapshot_size,
        snapshot_version,
        evidence_version,
        cd.value(),
        ws_digest);
    }
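    // Sketch of the host interaction started above (assuming the usual CCF
    // host behaviour): snapshot_allocate asks the host to reserve
    // serialised_snapshot_size bytes for this generation_count; the enclave
    // is then expected to be called back with that buffer via
    // write_snapshot(); once evidence and signature are globally committed,
    // update_indices() emits snapshot_commit with the serialised receipt.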

    void update_indices(::consensus::Index idx)
    {
      // Prune all but one of the requested snapshots below idx
      while ((next_snapshot_indices.size() > 1) &&
             (std::next(next_snapshot_indices.begin())->idx <= idx))
      {
        const auto coalesced_idx = next_snapshot_indices.front().idx;
        const auto has_pending_snapshot = std::any_of(
          pending_snapshots.begin(),
          pending_snapshots.end(),
          [coalesced_idx](const auto& entry) {
            return entry.second.version == coalesced_idx;
          });

        // Coalesced snapshots can be removed from the scheduling queue, but
        // their scheduled time must be retained until any already-generated
        // snapshot at that seqno has been durably released. Non-pending
        // entries still need to be erased here, otherwise stale scheduled
        // times survive and break the rollback bookkeeping.
        if (!has_pending_snapshot)
        {
          scheduled_snapshot_times.erase(coalesced_idx);
        }
        next_snapshot_indices.pop_front();
      }

      // Release any pending snapshots which now have commit evidence
      for (auto it = pending_snapshots.begin(); it != pending_snapshots.end();)
      {
        auto& snapshot_info = it->second;

        if (
          snapshot_info.is_stored && snapshot_info.evidence_idx.has_value() &&
          idx > snapshot_info.evidence_idx.value() &&
          snapshot_info.cose_sig.has_value() && snapshot_info.tree.has_value())
        {
          auto serialised_receipt = build_and_serialise_receipt(
            snapshot_info.cose_sig.value(),
            snapshot_info.tree.value(),
            snapshot_info.evidence_idx.value(),
            snapshot_info.write_set_digest,
            snapshot_info.commit_evidence,
            std::move(snapshot_info.snapshot_digest));

          commit_snapshot(snapshot_info.version, serialised_receipt);
          it = pending_snapshots.erase(it);
        }
        else
        {
          ++it;
        }
      }
    }

  public:
    Snapshotter(
      ringbuffer::AbstractWriterFactory& writer_factory_,
      std::shared_ptr<ccf::kv::Store>& store_,
      size_t snapshot_tx_interval_,
      size_t min_snapshot_tx_interval_ = 0,
      std::chrono::microseconds snapshot_time_interval_ =
        std::chrono::microseconds(0)) :
      writer_factory(writer_factory_),
      store(store_),
      snapshot_tx_interval(snapshot_tx_interval_),
      min_snapshot_tx_interval(min_snapshot_tx_interval_),
      snapshot_time_interval(snapshot_time_interval_)
    {
      next_snapshot_indices.push_back({initial_snapshot_idx, false, true});
    }
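    // Example construction (hypothetical values): given a
    // ringbuffer::AbstractWriterFactory `factory` and a
    // std::shared_ptr<ccf::kv::Store> `kv_store`,
    //   auto snapshotter = std::make_shared<Snapshotter>(
    //     factory, kv_store, 10'000, 100, std::chrono::minutes(1));
    // snapshots every 10'000 transactions, or after one minute once more
    // than 100 transactions have accumulated.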

    void init_after_public_recovery()
    {
      // After public recovery, the first node should have restored all
      // snapshot indices in next_snapshot_indices so that snapshot
      // generation can continue at the correct interval
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      last_snapshot_idx = next_snapshot_indices.back().idx;
      last_snapshot_time = Clock::now();
    }

    void set_snapshot_generation(bool enabled)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);
      snapshot_generation_enabled = enabled;
    }

    void init_from_snapshot_status(const SnapshotStatus& status)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      const auto timestamp = time_point_from_snapshot_status(status.timestamp);
      last_snapshot_idx = status.version;
      last_snapshot_time = timestamp;

      next_snapshot_indices.clear();
      next_snapshot_indices.push_back({last_snapshot_idx, false, true});
    }

    bool write_snapshot(
      std::span<uint8_t> snapshot_buf, uint32_t generation_count)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      auto search = pending_snapshots.find(generation_count);
      if (search == pending_snapshots.end())
      {
        LOG_FAIL_FMT(
          "Could not find pending snapshot to write for generation count {}",
          generation_count);
        return false;
      }

      auto& pending_snapshot = search->second;
      if (snapshot_buf.size() != pending_snapshot.serialised_snapshot.size())
      {
        // Unreliable host: the allocated snapshot buffer is not of the
        // expected size. The pending snapshot is discarded to reduce enclave
        // memory usage.
        LOG_FAIL_FMT(
          "Host allocated snapshot buffer [{} bytes] is not of expected "
          "size [{} bytes]. Discarding snapshot for seqno {}",
          snapshot_buf.size(),
          pending_snapshot.serialised_snapshot.size(),
          pending_snapshot.version);
        pending_snapshots.erase(search);
        return false;
      }

      std::copy(
        pending_snapshot.serialised_snapshot.begin(),
        pending_snapshot.serialised_snapshot.end(),
        snapshot_buf.begin());
      pending_snapshot.is_stored = true;

      LOG_DEBUG_FMT(
        "Successfully copied snapshot at seqno {} to host memory [{} "
        "bytes]",
        pending_snapshot.version,
        pending_snapshot.serialised_snapshot.size());
      return true;
    }

    bool should_schedule_snapshot_unsafe(::consensus::Index threshold_idx)
    {
      auto latest_snapshot_idx = latest_scheduled_or_committed_snapshot_idx();
      // Trigger if the tx count since that index exceeds the full interval,
      // or if the minimum tx threshold is met and the time interval has
      // elapsed.
      auto count = threshold_idx - latest_snapshot_idx;
      auto count_overdue = count >= snapshot_tx_interval;

      auto latest_scheduled_or_committed_time =
        latest_scheduled_or_committed_snapshot_time(latest_snapshot_idx);
      auto time_enabled = snapshot_time_interval.count() > 0;
      auto min_count_met = count > min_snapshot_tx_interval;
      const auto now = Clock::now();
      auto time_overdue = time_enabled && min_count_met &&
        (now - latest_scheduled_or_committed_time >= snapshot_time_interval);

      if (count_overdue || time_overdue)
      {
        LOG_TRACE_FMT(
          "Snapshot at seqno {} is due (c: {}, t: {}): count since last "
          "queued snapshot is {}, time since last queued snapshot is {}s",
          threshold_idx,
          count_overdue ? "overdue" : "not overdue",
          time_overdue ? "overdue" : "not overdue",
          count,
          std::chrono::duration_cast<std::chrono::seconds>(
            now - latest_scheduled_or_committed_time)
            .count());
      }

      return count_overdue || time_overdue;
    }
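    // Worked example (hypothetical intervals): with snapshot_tx_interval =
    // 10'000, min_snapshot_tx_interval = 100 and snapshot_time_interval =
    // 60s, a threshold_idx 10'050 transactions past the latest scheduled
    // snapshot is count-overdue regardless of elapsed time, whereas one only
    // 500 transactions past it becomes due only once 60 seconds have elapsed
    // (500 > 100, so the minimum count is met).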

    bool should_schedule_snapshot(::consensus::Index threshold_idx) override
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);
      return should_schedule_snapshot_unsafe(threshold_idx);
    }

    bool record_committable(::consensus::Index idx) override
    {
      // Returns true if the committable idx will require the generation of a
      // snapshot, and thus a new ledger chunk
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      CCF_ASSERT_FMT(
        idx >= next_snapshot_indices.back().idx,
        "Committable seqno {} < next snapshot seqno {}",
        idx,
        next_snapshot_indices.back().idx);

      bool forced = store->flag_enabled_unsafe(
        ccf::kv::AbstractStore::Flag::SNAPSHOT_AT_NEXT_SIGNATURE);

      auto due = should_schedule_snapshot_unsafe(idx);

      if (due || forced)
      {
        auto actually_forced = !due && forced;
        next_snapshot_indices.push_back({idx, actually_forced, false});
        scheduled_snapshot_times[idx] = Clock::now();
        LOG_TRACE_FMT(
          "{} {} as snapshot index",
          actually_forced ? "Forced" : "Recorded",
          idx);
        store->unset_flag_unsafe(
          ccf::kv::AbstractStore::Flag::SNAPSHOT_AT_NEXT_SIGNATURE);
        return true;
      }

      return false;
    }
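    // Note: a transaction can force an early snapshot by setting the store
    // flag checked via flag_enabled_unsafe() above; the next committable
    // index is then recorded as a snapshot index even when neither interval
    // is overdue, and the flag is cleared once it has been consumed.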

    void record_cose_signature(
      ::consensus::Index idx, const std::vector<uint8_t>& cose_sig)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      for (auto& [_, pending_snapshot] : pending_snapshots)
      {
        if (
          pending_snapshot.evidence_idx.has_value() &&
          idx > pending_snapshot.evidence_idx.value() &&
          !pending_snapshot.cose_sig.has_value())
        {
          LOG_TRACE_FMT(
            "Recording COSE signature at {} for snapshot {} with evidence at "
            "{}",
            idx,
            pending_snapshot.version,
            pending_snapshot.evidence_idx.value());

          pending_snapshot.cose_sig = cose_sig;
        }
      }
    }

    void record_serialised_tree(
      ::consensus::Index idx, const std::vector<uint8_t>& tree)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      for (auto& [_, pending_snapshot] : pending_snapshots)
      {
        if (
          pending_snapshot.evidence_idx.has_value() &&
          idx > pending_snapshot.evidence_idx.value() &&
          !pending_snapshot.tree.has_value())
        {
          LOG_TRACE_FMT(
            "Recording serialised tree at {} for snapshot {} with evidence at "
            "{}",
            idx,
            pending_snapshot.version,
            pending_snapshot.evidence_idx.value());

          pending_snapshot.tree = tree;
        }
      }
    }

    void record_snapshot_evidence_idx(
      ::consensus::Index idx, const SnapshotHash& snapshot)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      for (auto& [_, pending_snapshot] : pending_snapshots)
      {
        if (pending_snapshot.version == snapshot.version)
        {
          LOG_TRACE_FMT(
            "Recording evidence idx at {} for snapshot {}",
            idx,
            pending_snapshot.version);

          pending_snapshot.evidence_idx = idx;
        }
      }
    }

    // Called from globally committed snapshot status updates, to keep the
    // local globally committed baseline aligned with replicated state.
    void record_snapshot_status(const SnapshotStatus& status)
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      const auto timestamp = time_point_from_snapshot_status(status.timestamp);
      last_snapshot_idx = status.version;
      last_snapshot_time = timestamp;

      // The snapshot evidence write is now durable. Prune all in-flight
      // scheduled times up to this point; the globally committed baseline
      // itself is updated from the SNAPSHOT_STATUS table above.
      std::erase_if(scheduled_snapshot_times, [&status](const auto& entry) {
        return entry.first <= status.version;
      });
    }

    void schedule_snapshot(::consensus::Index idx, TimePoint timestamp)
    {
      static uint32_t generation_count = 0;

      auto task = std::make_shared<SnapshotTask>(
        shared_from_this(),
        store->snapshot_unsafe_maps(idx),
        generation_count++,
        timestamp);

      ccf::tasks::add_task(std::move(task));
    }

    void commit(::consensus::Index idx, bool generate_snapshot) override
    {
      // If generate_snapshot is true, takes a snapshot of the key-value store
      // at the last snapshottable index before idx, and schedules snapshot
      // serialisation on another thread (round-robin). Otherwise, only
      // records that a snapshot was generated.

      ccf::kv::ScopedStoreMapsLock maps_lock(store);
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      // Prune all but one of the requested snapshots below idx and also take
      // the opportunity to release any pending snapshots which now have
      // commit evidence
      update_indices(idx);

      if (idx < last_snapshot_idx)
      {
        throw std::logic_error(fmt::format(
          "Cannot snapshot at seqno {} which is earlier than last snapshot "
          "seqno {}",
          idx,
          last_snapshot_idx));
      }

      CCF_ASSERT_FMT(
        idx >= next_snapshot_indices.front().idx,
        "Cannot commit snapshotter at {}, which is before last snapshottable "
        "idx {}",
        idx,
        next_snapshot_indices.front().idx);

      auto& next = next_snapshot_indices.front();
      if (!next.done)
      {
        if (
          snapshot_generation_enabled && generate_snapshot && (next.idx != 0u))
        {
          auto snapshot_time = scheduled_snapshot_times.find(next.idx);
          if (snapshot_time == scheduled_snapshot_times.end())
          {
            const auto timestamp = Clock::now();
            LOG_FAIL_FMT(
              "Could not find scheduled snapshot time for idx {}", next.idx);
            scheduled_snapshot_times[next.idx] = timestamp;
            schedule_snapshot(next.idx, timestamp);
          }
          else
          {
            schedule_snapshot(next.idx, snapshot_time->second);
          }
          next.done = true;
        }
      }

      if (last_snapshot_idx != next.idx)
      {
        // Record the latest released snapshot index, including forced
        // snapshots, so rollback and replay continue from the same baseline
        // used by the primary.
        last_snapshot_idx = next.idx;
        LOG_TRACE_FMT("Recorded {} as last snapshot index", last_snapshot_idx);
      }
    }
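    // Putting commit() together with the rest of the class: a typical
    // sequence is record_committable(idx) returning true for a committable
    // index (recording idx as the next snapshot index), followed later by
    // commit(idx', generate_snapshot = true) once idx' is globally
    // committed, which dispatches a SnapshotTask for the recorded index and
    // advances last_snapshot_idx.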

    void rollback(::consensus::Index idx) override
    {
      std::lock_guard<ccf::pal::Mutex> guard(lock);

      while (!next_snapshot_indices.empty() &&
             (next_snapshot_indices.back().idx > idx))
      {
        next_snapshot_indices.pop_back();
      }

      std::erase_if(scheduled_snapshot_times, [idx](const auto& entry) {
        return entry.first > idx;
      });

      if (next_snapshot_indices.empty())
      {
        next_snapshot_indices.push_back({last_snapshot_idx, false, true});
      }

      LOG_TRACE_FMT(
        "Rolled back snapshotter: last snapshottable idx is now {}",
        next_snapshot_indices.front().idx);

      while (!pending_snapshots.empty())
      {
        const auto& last_snapshot = std::prev(pending_snapshots.end());
        if (auto evidence_opt = last_snapshot->second.evidence_idx;
            evidence_opt.has_value() && idx >= evidence_opt.value())
        {
          break;
        }

        pending_snapshots.erase(last_snapshot);
      }
    }
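    // Example: after recording snapshot indices at 100 and 200,
    // rollback(150) discards the entry at 200 and its scheduled time but
    // keeps 100; pending snapshots are dropped from the most recently
    // generated backwards until one is found whose evidence seqno is at or
    // below 150.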
  };
}