diff --git a/src/llfs/api_types.hpp b/src/llfs/api_types.hpp index f87f97a..502bf60 100644 --- a/src/llfs/api_types.hpp +++ b/src/llfs/api_types.hpp @@ -81,6 +81,10 @@ BATT_STRONG_TYPEDEF(usize, BufferCount); */ BATT_STRONG_TYPEDEF(usize, BufferSize); +/** \brief True if a page contains outgoing references to other pages. + */ +BATT_STRONG_TYPEDEF(bool, HasOutgoingRefs); + } // namespace llfs #endif // LLFS_API_TYPES_HPP diff --git a/src/llfs/committable_page_cache_job.cpp b/src/llfs/committable_page_cache_job.cpp index 7170943..c8b023a 100644 --- a/src/llfs/committable_page_cache_job.cpp +++ b/src/llfs/committable_page_cache_job.cpp @@ -88,11 +88,8 @@ usize CommittablePageCacheJob::new_page_count() const noexcept // BoxedSeq CommittablePageCacheJob::deleted_page_ids() const { - return as_seq(this->job_->get_deleted_pages().begin(), this->job_->get_deleted_pages().end()) // - | seq::map([](const auto& kv_pair) -> PageId { - return kv_pair.first; - }) // - | seq::boxed(); + return BoxedSeq{ + as_seq(this->job_->get_deleted_pages().begin(), this->job_->get_deleted_pages().end())}; } //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - @@ -482,7 +479,7 @@ Status CommittablePageCacheJob::await_ref_count_updates(const PageRefCountUpdate //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -auto CommittablePageCacheJob::get_page_ref_count_updates(u64 /*callers*/) const +auto CommittablePageCacheJob::get_page_ref_count_updates(u64 /*callers*/) -> StatusOr { std::unordered_map ref_count_delta = this->job_->get_root_set_delta(); @@ -512,19 +509,22 @@ auto CommittablePageCacheJob::get_page_ref_count_updates(u64 /*callers*/) const // Trace deleted pages non-recursively, decrementing the ref counts of all pages they directly // reference. // - for (const auto& p : this->job_->get_deleted_pages()) { - // Sanity check; deleted pages should have a ref_count_delta of kRefCount_1_to_0. 
+ LoadingPageTracer loading_tracer{loader, /*ok_if_not_found=*/true}; + CachingPageTracer caching_tracer{this->job_->cache().devices_by_id(), loading_tracer}; + for (const PageId& deleted_page_id : this->job_->get_deleted_pages()) { + // Decrement ref counts. // - const PageId deleted_page_id = p.first; - { - auto iter = ref_count_delta.find(deleted_page_id); - BATT_CHECK_NE(iter, ref_count_delta.end()); - BATT_CHECK_EQ(iter->second, kRefCount_1_to_0); + batt::StatusOr> outgoing_refs = + caching_tracer.trace_page_refs(deleted_page_id); + if (outgoing_refs.status() == batt::StatusCode::kNotFound) { + this->not_found_deleted_pages_.insert(deleted_page_id); + continue; } + BATT_REQUIRE_OK(outgoing_refs); - // Decrement ref counts. - // - p.second->trace_refs() | seq::for_each([&ref_count_delta, deleted_page_id](PageId id) { + ref_count_delta[deleted_page_id] = kRefCount_1_to_0; + + *outgoing_refs | seq::for_each([&ref_count_delta, deleted_page_id](PageId id) { if (id) { LLFS_VLOG(1) << " decrementing ref count for page " << id << " (because it was referenced from deleted page " << deleted_page_id << ")"; @@ -596,13 +596,13 @@ Status CommittablePageCacheJob::drop_deleted_pages(u64 callers) { LLFS_VLOG(1) << "commit(PageCacheJob): dropping deleted pages"; - const auto& deleted_pages = this->job_->get_deleted_pages(); - - return parallel_drop_pages(as_seq(deleted_pages.begin(), deleted_pages.end()) // - | seq::map([](const auto& kv_pair) -> PageId { - return kv_pair.first; - }) // - | seq::collect_vec(), + // From the set of all deleted pages, filter out those that were not found during the attempt to + // trace their outgoing refs. 
+ // + return parallel_drop_pages(this->deleted_page_ids() | seq::filter([this](const PageId& id) { + auto iter = this->not_found_deleted_pages_.find(id); + return iter == this->not_found_deleted_pages_.end(); + }) | seq::collect_vec(), this->job_->cache(), this->job_->job_id, callers); } diff --git a/src/llfs/committable_page_cache_job.hpp b/src/llfs/committable_page_cache_job.hpp index 4aeee5b..3e51503 100644 --- a/src/llfs/committable_page_cache_job.hpp +++ b/src/llfs/committable_page_cache_job.hpp @@ -193,7 +193,7 @@ class CommittablePageCacheJob Status write_new_pages(); - StatusOr get_page_ref_count_updates(u64 callers) const; + StatusOr get_page_ref_count_updates(u64 callers); StatusOr start_ref_count_updates(const JobCommitParams& params, PageRefCountUpdates& updates, u64 callers); @@ -212,6 +212,7 @@ class CommittablePageCacheJob boost::intrusive_ptr tracker_; PageRefCountUpdates ref_count_updates_; std::unique_ptr write_new_pages_context_; + std::unordered_set not_found_deleted_pages_; }; /** \brief Write all changes in `job` to durable storage. This is guaranteed to be atomic. diff --git a/src/llfs/no_outgoing_refs_cache.cpp b/src/llfs/no_outgoing_refs_cache.cpp new file mode 100644 index 0000000..9b0797c --- /dev/null +++ b/src/llfs/no_outgoing_refs_cache.cpp @@ -0,0 +1,99 @@ +//#=##=##=#==#=#==#===#+==#+==========+==+=+=+=+=+=++=+++=+++++=-++++=-+++++++++++ +// +// Part of the LLFS Project, under Apache License v2.0. +// See https://www.apache.org/licenses/LICENSE-2.0 for license information. 
+// SPDX short identifier: Apache-2.0
+//
+//+++++++++++-+-+--+----- --- -- - - - -
+#include <llfs/no_outgoing_refs_cache.hpp>
+
+namespace llfs {
+
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - -
+//
+NoOutgoingRefsCache::NoOutgoingRefsCache(const PageIdFactory& page_ids) noexcept
+    : page_ids_{page_ids}
+    , cache_(this->page_ids_.get_physical_page_count().value())
+{
+}
+
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - -
+//
+void NoOutgoingRefsCache::set_page_state(PageId page_id,
+                                         HasOutgoingRefs new_has_outgoing_refs_state) noexcept
+{
+  BATT_CHECK_EQ(PageIdFactory::get_device_id(page_id), this->page_ids_.get_device_id());
+  const u64 physical_page = this->page_ids_.get_physical_page(page_id);
+  const page_generation_int generation = this->page_ids_.get_generation(page_id);
+  BATT_CHECK_LT((usize)physical_page, this->cache_.size());
+
+  u64 new_cache_entry = generation << this->kGenerationShift;
+
+  // Set the "valid" bit to 1.
+  //
+  new_cache_entry |= this->kValidBitMask;
+
+  // If new_has_outgoing_refs_state has value false, page_id has no outgoing references.
+  //
+  if (!new_has_outgoing_refs_state) {
+    // Set the "has no outgoing references" bit to 1.
+    //
+    new_cache_entry |= this->kHasNoOutgoingRefsBitMask;
+  }
+
+  u64 old_cache_entry = this->cache_[physical_page].exchange(new_cache_entry);
+
+  // Two sanity checks:
+  // 1) We are not going backwards in generation.
+  // 2) If the cache entry is set for the same generation multiple times, the same value should be
+ // + page_generation_int old_generation = old_cache_entry >> this->kGenerationShift; + BATT_CHECK_GE(generation, old_generation); + if (generation == old_generation) { + BATT_CHECK_EQ(new_cache_entry, old_cache_entry); + } +} + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +// +batt::BoolStatus NoOutgoingRefsCache::has_outgoing_refs(PageId page_id) const noexcept +{ + BATT_CHECK_EQ(PageIdFactory::get_device_id(page_id), this->page_ids_.get_device_id()); + const u64 physical_page = this->page_ids_.get_physical_page(page_id); + const page_generation_int generation = this->page_ids_.get_generation(page_id); + BATT_CHECK_LT((usize)physical_page, this->cache_.size()); + + u64 current_cache_entry = this->cache_[physical_page].load(); + page_generation_int stored_generation = current_cache_entry >> this->kGenerationShift; + u64 outgoing_refs_status = current_cache_entry & this->kOutgoingRefsStatusBitsMask; + + // If the generation that is currently stored in the cache is not the same as the generation we + // are querying for, this cache entry is invalid. Thus, we return a "unknown" status. + // + if (stored_generation != generation) { + return batt::BoolStatus::kUnknown; + } + + switch (outgoing_refs_status) { + case 0: + // Bit status 00, not traced yet. + // + return batt::BoolStatus::kUnknown; + case 1: + BATT_PANIC() << "The lower two outgoing refs bits in a cache entry can never be 01!"; + BATT_UNREACHABLE(); + case 2: + // Bit status 10, has outgoing refs. + // + return batt::BoolStatus::kTrue; + case 3: + // Bit status 11, no outgoing refs. 
+ // + return batt::BoolStatus::kFalse; + default: + BATT_PANIC() << "Impossible outgoing refs bits state: " << outgoing_refs_status; + BATT_UNREACHABLE(); + } +} +} // namespace llfs \ No newline at end of file diff --git a/src/llfs/no_outgoing_refs_cache.hpp b/src/llfs/no_outgoing_refs_cache.hpp new file mode 100644 index 0000000..7d7410f --- /dev/null +++ b/src/llfs/no_outgoing_refs_cache.hpp @@ -0,0 +1,102 @@ +//#=##=##=#==#=#==#===#+==#+==========+==+=+=+=+=+=++=+++=+++++=-++++=-+++++++++++ +// +// Part of the LLFS Project, under Apache License v2.0. +// See https://www.apache.org/licenses/LICENSE-2.0 for license information. +// SPDX short identifier: Apache-2.0 +// +//+++++++++++-+-+--+----- --- -- - - - - + +#pragma once +#ifndef LLFS_NO_OUTGOING_REFS_CACHE_HPP +#define LLFS_NO_OUTGOING_REFS_CACHE_HPP + +#include +#include + +#include + +#include +#include + +namespace llfs { + +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- +/** \brief A cache to store information about a page's outgoing references to other pages. Can be + * used by an implementer of PageTracer as a way to organize and look up this information on the + * PageDevice level. This cache is implemented as a vector of unsigned 64-bit integers, where every + * element of the vector represents the outgoing refs "state" of a physical page in a PageDevice. + * The lowest bit in an element represents if the page has no outgoing refs. The second lowest bit + * represents the validity of the page's state to help determine if a page's outgoing refs have ever + * been traced. The remaining upper 62 bits are used to store the generation of the physical page. + * + * Example cache entry: 0000000000000000000000000000000000000000000000000000000000000110 + * The upper 62 bits represent generation, which in this case is 1. The second lowest bit is the + * validity bit, which is 1 here. 
This indicates that the page has been traced and therefore the + * cache entry contains valid information for the page of this generation. The lowest bit is 0, + * which indicates that the page has outgoing references to other pages. + */ +class NoOutgoingRefsCache +{ + public: + explicit NoOutgoingRefsCache(const PageIdFactory& page_ids) noexcept; + + NoOutgoingRefsCache(const NoOutgoingRefsCache&) = delete; + NoOutgoingRefsCache& operator=(const NoOutgoingRefsCache&) = delete; + + //----- --- -- - - - - + /** \brief Sets the two outgoing refs state bits for the given `page_id` based on + * whether the page has outgoing refs or not, as indicated by `new_has_outgoing_refs_state`. This + * function also updates the generation stored in the cache for the physical page associated with + * `page_id`. + * + * @param page_id The id of the page whose outgoing refs information we are storing. + * + * @param new_has_outgoing_refs_state The status to store, indicating whether or not the page has + * outgoing refs. A value of `false` indicates that the page does not have any outgoing refs, and + * a value of `true` indicates that a page does have outgoing refs. + */ + void set_page_state(PageId page_id, HasOutgoingRefs new_has_outgoing_refs_state) noexcept; + + //----- --- -- - - - - + /** \brief Queries for whether or not the page with id `page_id` contains outgoing references to + * other pages. + * + * \return Returns a `batt::BoolStatus` type, where `kFalse` indicates that the page has no + * outgoing refs, `kTrue` indicates that the page does have outgoing refs, and `kUnknown` + * indicates that the cache entry for the given page does not have any valid information stored in + * it currently. + */ + batt::BoolStatus has_outgoing_refs(PageId page_id) const noexcept; + + private: + /** \brief A mask to retrieve the lowest two outgoing refs state bits. 
+   */
+  static constexpr u64 kOutgoingRefsStatusBitsMask = 0b11;
+
+  /** \brief A mask to access the "valid" bit of the two outgoing refs state bits. This is the
+   * higher of the two bits.
+   */
+  static constexpr u64 kValidBitMask = 0b10;
+
+  /** \brief A mask to access the "has no outgoing references" bit of the two outgoing refs state
+   * bits. This is the lower of the two bits.
+   */
+  static constexpr u64 kHasNoOutgoingRefsBitMask = 0b01;
+
+  /** \brief A constant representing the number of bits to shift a cache entry in order to retrieve
+   * the generation stored.
+   */
+  static constexpr u8 kGenerationShift = 2;
+
+  /** \brief A PageIdFactory object to help resolve physical page and generation values from a
+   * PageId object.
+   */
+  const PageIdFactory page_ids_;
+
+  /** \brief The vector storing the cache entries, indexed by physical page id.
+   */
+  std::vector<std::atomic<u64>> cache_;
+};
+}  // namespace llfs
+
+#endif  // LLFS_NO_OUTGOING_REFS_CACHE_HPP
\ No newline at end of file
diff --git a/src/llfs/page_cache.cpp b/src/llfs/page_cache.cpp
index 39c6011..0d55ac3 100644
--- a/src/llfs/page_cache.cpp
+++ b/src/llfs/page_cache.cpp
@@ -22,7 +22,7 @@ namespace llfs {
 //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - -
 //
-usize get_page_size(const PageCache::PageDeviceEntry* entry)
+usize get_page_size(const PageDeviceEntry* entry)
 {
   return entry ?
get_page_size(entry->arena) : 0; } @@ -420,14 +420,21 @@ void PageCache::prefetch_hint(PageId page_id) //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -Slice PageCache::all_devices() const +const std::vector>& PageCache::devices_by_id() const +{ + return this->page_devices_; +} + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +// +Slice PageCache::all_devices() const { return as_slice(this->page_devices_by_page_size_); } //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -Slice PageCache::devices_with_page_size(usize size) const +Slice PageCache::devices_with_page_size(usize size) const { const usize size_log2 = batt::log2_ceil(size); BATT_CHECK_EQ(size, usize{1} << size_log2) << "page size must be a power of 2"; @@ -437,8 +444,7 @@ Slice PageCache::devices_with_page_size(usize //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -Slice PageCache::devices_with_page_size_log2( - usize size_log2) const +Slice PageCache::devices_with_page_size_log2(usize size_log2) const { BATT_CHECK_LT(size_log2, kMaxPageSizeLog2); @@ -533,7 +539,7 @@ void PageCache::purge(PageId page_id, u64 callers, u64 job_id) //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -PageCache::PageDeviceEntry* PageCache::get_device_for_page(PageId page_id) +PageDeviceEntry* PageCache::get_device_for_page(PageId page_id) { const page_device_id_int device_id = PageIdFactory::get_device_id(page_id); if (BATT_HINT_FALSE(device_id >= this->page_devices_.size())) { diff --git a/src/llfs/page_cache.hpp b/src/llfs/page_cache.hpp index da1f12e..ba41a43 100644 --- a/src/llfs/page_cache.hpp +++ b/src/llfs/page_cache.hpp @@ -19,12 +19,11 @@ #include #include #include -#include #include #include #include #include -#include +#include #include #include #include @@ -103,26 +102,6 @@ class PageCache : public PageLoader int line; }; - /** \brief All the per-PageDevice state for a single device in the storage pool. 
- */ - struct PageDeviceEntry { - explicit PageDeviceEntry(PageArena&& arena, - boost::intrusive_ptr&& slot_pool) noexcept - : arena{std::move(arena)} - , cache{this->arena.device().page_ids(), std::move(slot_pool)} - { - } - - /** \brief The PageDevice and PageAllocator. - */ - PageArena arena; - - /** \brief A per-device page cache; shares a PageCacheSlot::Pool with all other PageDeviceEntry - * objects that have the same page size. - */ - PageDeviceCache cache; - }; - class PageDeleterImpl : public PageDeleter { public: @@ -195,6 +174,8 @@ class PageCache : public PageLoader Slice all_devices() const; + const std::vector>& devices_by_id() const; + const PageArena& arena_for_page_id(PageId id_val) const; const PageArena& arena_for_device_id(page_device_id_int device_id_val) const; diff --git a/src/llfs/page_cache_job.cpp b/src/llfs/page_cache_job.cpp index 9eed0ce..4e1676c 100644 --- a/src/llfs/page_cache_job.cpp +++ b/src/llfs/page_cache_job.cpp @@ -356,17 +356,10 @@ Status PageCacheJob::delete_page(PageId page_id) if (!page_id) { return OkStatus(); } - StatusOr page_view = this->get_page(page_id, OkIfNotFound{true}); - if (page_view.ok()) { - this->pruned_ = false; - this->deleted_pages_.emplace(page_id, *page_view); - this->root_set_delta_[page_id] = kRefCount_1_to_0; - return OkStatus(); - } - if (page_view.status() == batt::StatusCode::kNotFound) { - return OkStatus(); - } - return page_view.status(); + + this->deleted_pages_.insert(page_id); + this->pruned_ = false; + return OkStatus(); } //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - diff --git a/src/llfs/page_cache_job.hpp b/src/llfs/page_cache_job.hpp index 480b25b..eecf06a 100644 --- a/src/llfs/page_cache_job.hpp +++ b/src/llfs/page_cache_job.hpp @@ -177,9 +177,7 @@ class PageCacheJob : public PageLoader Status recover_page(PageId page_id, const boost::uuids::uuid& caller_uuid, slot_offset_type caller_slot); - // Mark the page as deleted in this job. 
Will load the page as a side-effect, in order to access - // the set of pages referenced by the page contents. Returns `true` if the page is being deleted; - // `false` if it doesn't exist (i.e., it was already deleted). + // Mark the page as deleted in this job. // Status delete_page(PageId page_id); @@ -258,8 +256,9 @@ class PageCacheJob : public PageLoader Status trace_new_roots(PageLoader& page_loader, PageIdFn&& page_id_fn) const { LoadingPageTracer loading_tracer{page_loader}; + CachingPageTracer caching_tracer{this->cache().devices_by_id(), loading_tracer}; return trace_refs_recursive( - loading_tracer, + caching_tracer, // Trace all new pages in the root set. // @@ -303,7 +302,7 @@ class PageCacheJob : public PageLoader return this->new_pages_; } - const std::unordered_map& get_deleted_pages() const + const std::unordered_set& get_deleted_pages() const { return this->deleted_pages_; } @@ -327,7 +326,7 @@ class PageCacheJob : public PageLoader PageCache* const cache_; std::unordered_map pinned_; std::unordered_map new_pages_; - std::unordered_map deleted_pages_; + std::unordered_set deleted_pages_; std::unordered_map root_set_delta_; std::unordered_mapstd::shared_ptr>, PageId::Hash> deferred_new_pages_; diff --git a/src/llfs/page_device_entry.hpp b/src/llfs/page_device_entry.hpp new file mode 100644 index 0000000..e43eb7c --- /dev/null +++ b/src/llfs/page_device_entry.hpp @@ -0,0 +1,46 @@ +//#=##=##=#==#=#==#===#+==#+==========+==+=+=+=+=+=++=+++=+++++=-++++=-+++++++++++ +// +// Part of the LLFS Project, under Apache License v2.0. +// See https://www.apache.org/licenses/LICENSE-2.0 for license information. +// SPDX short identifier: Apache-2.0 +// +//+++++++++++-+-+--+----- --- -- - - - - + +#pragma once +#ifndef LLFS_PAGE_DEVICE_ENTRY_HPP +#define LLFS_PAGE_DEVICE_ENTRY_HPP + +#include +#include +#include + +namespace llfs { + +/** \brief All the per-PageDevice state for a single device in the storage pool. 
+ */ +struct PageDeviceEntry { + explicit PageDeviceEntry(PageArena&& arena, + boost::intrusive_ptr&& slot_pool) noexcept + : arena{std::move(arena)} + , cache{this->arena.device().page_ids(), std::move(slot_pool)} + , no_outgoing_refs_cache{this->arena.device().page_ids()} + { + } + + /** \brief The PageDevice and PageAllocator. + */ + PageArena arena; + + /** \brief A per-device page cache; shares a PageCacheSlot::Pool with all other PageDeviceEntry + * objects that have the same page size. + */ + PageDeviceCache cache; + + /** \brief A per-device tracker of the outgoing reference status of each physical page in the + * device. + */ + NoOutgoingRefsCache no_outgoing_refs_cache; +}; +} // namespace llfs + +#endif // LLFS_PAGE_DEVICE_ENTRY_HPP diff --git a/src/llfs/page_tracer.cpp b/src/llfs/page_tracer.cpp index 58bb9a6..aea58e0 100644 --- a/src/llfs/page_tracer.cpp +++ b/src/llfs/page_tracer.cpp @@ -10,9 +10,15 @@ namespace llfs { +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- +// class LoadingPageTracer +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- + //==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - // -LoadingPageTracer::LoadingPageTracer(PageLoader& page_loader) noexcept : page_loader_{page_loader} +LoadingPageTracer::LoadingPageTracer(PageLoader& page_loader, bool ok_if_not_found) noexcept + : page_loader_{page_loader} + , ok_if_not_found_{ok_if_not_found} { } @@ -28,7 +34,7 @@ batt::StatusOr> LoadingPageTracer::trace_page_refs( PageId from_page_id) noexcept { batt::StatusOr status_or_page = - this->page_loader_.get_page(from_page_id, OkIfNotFound{false}); + this->page_loader_.get_page(from_page_id, OkIfNotFound{this->ok_if_not_found_}); BATT_REQUIRE_OK(status_or_page); this->pinned_page_ = std::move(*status_or_page); @@ -37,4 +43,60 @@ batt::StatusOr> LoadingPageTracer::trace_page_refs( return this->pinned_page_->trace_refs(); } 
+//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- +// class CachingPageTracer +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +// +CachingPageTracer::CachingPageTracer( + const std::vector>& page_devices, PageTracer& loader) noexcept + : page_devices_{page_devices} + , loader_{loader} +{ +} + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +// +CachingPageTracer::~CachingPageTracer() +{ +} + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +// +batt::StatusOr> CachingPageTracer::trace_page_refs( + PageId from_page_id) noexcept +{ + const page_device_id_int device_id = PageIdFactory::get_device_id(from_page_id); + BATT_CHECK_LT(device_id, this->page_devices_.size()); + + const batt::BoolStatus has_outgoing_refs = + this->page_devices_[device_id]->no_outgoing_refs_cache.has_outgoing_refs(from_page_id); + + // If we already have information that this page has no outgoing refs, do not load the page and + // call trace refs; there's nothing we need to do. + // + if (has_outgoing_refs == batt::BoolStatus::kFalse) { + return batt::seq::Empty{} | batt::seq::boxed(); + } + + // If has_outgoing_refs has any other value, we must call trace_refs and set + // the outgoing refs information if it hasn't ever been traced before. 
+ // + batt::StatusOr> outgoing_refs = + this->loader_.trace_page_refs(from_page_id); + BATT_REQUIRE_OK(outgoing_refs); + + if (has_outgoing_refs == batt::BoolStatus::kUnknown) { + bool new_status_has_refs = false; + if ((*outgoing_refs).peek()) { + new_status_has_refs = true; + } + this->page_devices_[device_id]->no_outgoing_refs_cache.set_page_state( + from_page_id, HasOutgoingRefs{new_status_has_refs}); + } + + return outgoing_refs; +} + } // namespace llfs diff --git a/src/llfs/page_tracer.hpp b/src/llfs/page_tracer.hpp index 4c3e0af..5a17038 100644 --- a/src/llfs/page_tracer.hpp +++ b/src/llfs/page_tracer.hpp @@ -10,6 +10,7 @@ #ifndef LLFS_PAGE_TRACER_HPP #define LLFS_PAGE_TRACER_HPP +#include #include #include @@ -30,10 +31,11 @@ class PageTracer virtual ~PageTracer() = default; /** \brief Traces the outgoing references of the page with id `from_page_id`. - * - * The returned `BoxedSeq` will remain valid until the `PageTracer` object calling `trace_page_refs` - * goes out of scope, or until the next call to this function (whichever comes first). - * + * + * The returned `BoxedSeq` will remain valid until the `PageTracer` object calling + * `trace_page_refs` goes out of scope, or until the next call to this function (whichever comes + * first). + * * \return A sequence of page ids for the pages referenced from the page `from_page_id`. */ virtual batt::StatusOr> trace_page_refs(PageId from_page_id) noexcept = 0; @@ -49,9 +51,9 @@ class PageTracer class LoadingPageTracer : public PageTracer { public: - explicit LoadingPageTracer(PageLoader& page_loader) noexcept; + explicit LoadingPageTracer(PageLoader& page_loader, bool ok_if_not_found = false) noexcept; - ~LoadingPageTracer(); + ~LoadingPageTracer() noexcept; LoadingPageTracer(const LoadingPageTracer&) = delete; LoadingPageTracer& operator=(const LoadingPageTracer&) = delete; @@ -70,6 +72,49 @@ class LoadingPageTracer : public PageTracer * `trace_page_refs` function. 
*/ PinnedPage pinned_page_; + + /** \brief A boolean to pass into the PageLoader's `get_page` function. + */ + bool ok_if_not_found_; +}; + +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- +/** \brief A PageTracer with the ability to load a page and trace its outgoing references to other + * pages, as well as look up cached information about a page's outgoing references to other pages. + * If this PageTracer can find valid cached information stating that a page does not have any + * outgoing refs, it will not need to load the page like LoadingPageTracer always does. The + * accessibility of any cached information occurs through a PageDeviceEntry object. + */ +class CachingPageTracer : public PageTracer +{ + public: + explicit CachingPageTracer(const std::vector>& page_devices, + PageTracer& loader) noexcept; + + CachingPageTracer(const CachingPageTracer&) = delete; + CachingPageTracer operator=(const CachingPageTracer&) = delete; + + ~CachingPageTracer() noexcept; + + /** \brief Traces the outgoing references of the page with page id `from_page_id`. First, this + * function will attempt to see if there exists any cached information about the page's outgoing + * refs status. If it cannot find any valid information, or if it finds that there are outgoing + * refs, it will fall back on its wrapped PageTracer to load and trace the outgoing refs. + * + * \return A sequence of page ids for the pages referenced from the page `from_page_id`. + */ + batt::StatusOr> trace_page_refs(PageId from_page_id) noexcept override; + + private: + /** \brief A vector of page device entries used to access cached information about a page's + * outgoing refs status. + */ + const std::vector>& page_devices_; + + /** \brief The "wrapped" PageTracer that this CachingPageTracer instance falls back on in the + * event that no cached information is found about a page. 
+ */ + PageTracer& loader_; }; } // namespace llfs diff --git a/src/llfs/page_tracer.test.cpp b/src/llfs/page_tracer.test.cpp index 64e49a1..6c53493 100644 --- a/src/llfs/page_tracer.test.cpp +++ b/src/llfs/page_tracer.test.cpp @@ -7,16 +7,26 @@ //+++++++++++-+-+--+----- --- -- - - - - #include + +#include +#include +#include #include +#include + +#include +#include #include #include +#include #include #include namespace { -using namespace batt::int_types; +using namespace llfs::constants; +using namespace llfs::int_types; class MockPageTracer : public llfs::PageTracer { @@ -30,7 +40,7 @@ class MockPageTracer : public llfs::PageTracer // 1. Mock a PageTracer and test its integration with trace_refs_recursive by creating a DAG of // PageIds. -TEST(PageTracerTest, TraceRefsRecursiveTest) +TEST(PageTracerMockTest, TraceRefsRecursiveTest) { MockPageTracer mock_tracer; @@ -42,7 +52,7 @@ TEST(PageTracerTest, TraceRefsRecursiveTest) usize num_nodes = 100; std::mt19937 rng{num_nodes}; - for (u16 u = 1; u <= num_nodes; ++u) { + for (usize u = 1; u <= num_nodes; ++u) { roots.insert(llfs::PageId{u}); } @@ -86,4 +96,445 @@ TEST(PageTracerTest, TraceRefsRecursiveTest) // EXPECT_EQ(expected_total_num_edges, calculated_num_edges); } + +class PageTracerTest : public ::testing::Test +{ + public: + using PageTracerTestEvent = llfs::PackedVariant; + + void SetUp() override + { + this->reset_cache(); + this->reset_logs(); + } + + void TearDown() override + { + } + + //+++++++++++-+-+--+----- --- -- - - - - + + void reset_cache() + { + llfs::StatusOr> page_cache_created = + llfs::make_memory_page_cache( + batt::Runtime::instance().default_scheduler(), + /*arena_sizes=*/ + { + {llfs::PageCount{this->page_device_capacity}, llfs::PageSize{256}}, + }, + this->max_refs_per_page); + + ASSERT_TRUE(page_cache_created.ok()); + + this->page_cache = std::move(*page_cache_created); + batt::Status register_reader_status = + 
this->page_cache->register_page_reader(llfs::PageGraphNodeView::page_layout_id(), __FILE__, + __LINE__, llfs::PageGraphNodeView::page_reader()); + ASSERT_TRUE(register_reader_status.ok()); + } + + void reset_logs() + { + this->root_log.emplace(2 * kMiB); + + const auto recycler_options = + llfs::PageRecyclerOptions{}.set_max_refs_per_page(this->max_refs_per_page); + + const u64 recycler_log_size = llfs::PageRecycler::calculate_log_size(recycler_options); + + EXPECT_GE(::llfs::PageRecycler::calculate_max_buffered_page_count(recycler_options, + recycler_log_size), + llfs::PageRecycler::default_max_buffered_page_count(recycler_options)); + + this->recycler_log.emplace(recycler_log_size); + } + + template + std::unique_ptr open_volume_or_die(llfs::LogDeviceFactory& root_log, + llfs::LogDeviceFactory& recycler_log, + SlotVisitorFn&& slot_visitor_fn) + { + llfs::StatusOr> test_volume_recovered = llfs::Volume::recover( + llfs::VolumeRecoverParams{ + &batt::Runtime::instance().default_scheduler(), + llfs::VolumeOptions{ + .name = "test_volume", + .uuid = llfs::None, + .max_refs_per_page = this->max_refs_per_page, + .trim_lock_update_interval = llfs::TrimLockUpdateInterval{0u}, + .trim_delay_byte_count = this->trim_delay, + }, + this->page_cache, + /*root_log=*/&root_log, + /*recycler_log=*/&recycler_log, + nullptr, + }, // + BATT_FORWARD(slot_visitor_fn)); + + BATT_CHECK(test_volume_recovered.ok()) << BATT_INSPECT(test_volume_recovered.status()); + + return std::move(*test_volume_recovered); + } + + batt::StatusOr build_page_with_refs_to( + const std::vector& referenced_page_ids, llfs::PageSize page_size, + llfs::PageCacheJob& job) + { + batt::StatusOr page_builder = + llfs::PageGraphNodeBuilder::from_new_page(job.new_page( + page_size, batt::WaitForResource::kFalse, llfs::PageGraphNodeView::page_layout_id(), + /*callers=*/0, /*cancel_token=*/llfs::None)); + + BATT_REQUIRE_OK(page_builder); + + for (llfs::PageId page_id : referenced_page_ids) { + 
page_builder->add_page(page_id); + } + + batt::StatusOr pinned_page = std::move(*page_builder).build(job); + BATT_REQUIRE_OK(pinned_page); + + return pinned_page->page_id(); + } + + batt::StatusOr commit_job(llfs::Volume& test_volume, + std::unique_ptr job, + llfs::PageId page_id) + { + auto event = llfs::pack_as_variant(llfs::PackedPageId::from(page_id)); + + llfs::StatusOr appendable_job = + llfs::make_appendable_job(std::move(job), llfs::PackableRef{event}); + + BATT_REQUIRE_OK(appendable_job); + + const usize required_size = test_volume.calculate_grant_size(*appendable_job); + + LLFS_VLOG(1) << BATT_INSPECT(required_size); + + llfs::StatusOr grant = + test_volume.reserve(required_size, batt::WaitForResource::kFalse); + + BATT_REQUIRE_OK(grant); + + EXPECT_EQ(grant->size(), required_size); + + return test_volume.append(std::move(*appendable_job), *grant); + } + + /** \brief Get the generation of the given page with id `page_id`. + */ + llfs::page_generation_int get_generation(llfs::PageId page_id) + { + const std::vector>& page_devices = + this->page_cache->devices_by_id(); + llfs::page_device_id_int device = llfs::PageIdFactory::get_device_id(page_id); + llfs::page_generation_int generation = + page_devices[device]->arena.device().page_ids().get_generation(page_id); + return generation; + } + + /** \brief Get the outgoing reference status of the given page with id `page_id`. + * + * \return An u64 value telling the caller if the page has outgoing references to + * other pages, does not have any outgoing references, or has never been traced yet. 
+ */ + batt::BoolStatus get_outgoing_refs_status(llfs::PageId page_id) + { + const std::vector>& page_devices = + this->page_cache->devices_by_id(); + llfs::page_device_id_int device = llfs::PageIdFactory::get_device_id(page_id); + + batt::BoolStatus has_outgoing_refs = + page_devices[device]->no_outgoing_refs_cache.has_outgoing_refs(page_id); + return has_outgoing_refs; + } + + /** \brief A utility function that triggers a call to `trace_refs_recursive` with the PageCache as + * the PageLoader being used. This way, a caller can test how many calls to PageCache's `get_page` + * method occurred. + * + * \return The difference between the number of `get_page` calls before and after the execution of + * `trace_refs_recursive`. + */ + batt::StatusOr test_cache_get_count_delta(llfs::PageCacheJob& job) + { + int initial_get_count = this->page_cache->metrics().get_count.load(); + batt::Status trace_status = + job.trace_new_roots(/*page_loader=*/*(this->page_cache), /*page_id_fn=*/ + []([[maybe_unused]] llfs::PageId page_id) { + + }); + BATT_REQUIRE_OK(trace_status); + return this->page_cache->metrics().get_count.load() - initial_get_count; + } + + const u64 page_device_capacity{16}; + + const llfs::MaxRefsPerPage max_refs_per_page{8}; + + llfs::TrimDelayByteCount trim_delay{0}; + + batt::SharedPtr page_cache; + + llfs::Optional root_log; + + llfs::Optional recycler_log; +}; + +//=#=#==#==#===============+=+=+=+=++=++++++++++++++-++-+--+-+----+--------------- +// Test Plan: +// 1. Test the basic operations on an instance of a NoOutgoingRefsCache using the PageCache in the +// test fixture. +// +// 2. Test using just a PageCache and a PageCacheJob (without committing the job) to +// test CachingPageTracer's usage in the `trace_refs_recursive` function . This will make sure we +// are getting and setting the OutgoingRefsStatus correctly, as well as ensure that we are +// performing loads from the PageCache only when need be. +// +// 3. 
Create a more complete set up with a Volume (which will also include its PageRecycler and the +// PageCache) to test the full workflow of setting, getting, and invalidating the +// OutgoingRefsStatus. + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +TEST_F(PageTracerTest, NoOutgoingRefsCacheDeath) +{ + const std::unique_ptr& page_device1 = this->page_cache->devices_by_id()[0]; + + llfs::HasOutgoingRefs no_outgoing_refs{false}; + llfs::PageId page = page_device1->arena.device().page_ids().make_page_id(1, 1); + + // Create a page with physical page id 1 and generation 1, then set and get its page bits in the + // PageDevice's NoOutgoingRefsCache. The first get will return an unknown status. + // + EXPECT_EQ(page_device1->no_outgoing_refs_cache.has_outgoing_refs(page), + batt::BoolStatus::kUnknown); + page_device1->no_outgoing_refs_cache.set_page_state(page, no_outgoing_refs); + batt::BoolStatus has_outgoing_refs = page_device1->no_outgoing_refs_cache.has_outgoing_refs(page); + + // When a page has no outgoing refs, its BoolStatus will be kFalse. + // + EXPECT_EQ(has_outgoing_refs, batt::BoolStatus::kFalse); + + // We can't set different outgoing refs bits for the same generation of a page! + // + EXPECT_DEATH( + page_device1->no_outgoing_refs_cache.set_page_state(page, llfs::HasOutgoingRefs{true}), + "Assertion failed: new_cache_entry == old_cache_entry"); + + page = page_device1->arena.device().page_ids().make_page_id(2, 1); + page_device1->no_outgoing_refs_cache.set_page_state(page, no_outgoing_refs); + + llfs::PageId new_generation_page = + page_device1->arena.device().page_ids().advance_generation(page); + page_device1->no_outgoing_refs_cache.set_page_state(new_generation_page, no_outgoing_refs); + has_outgoing_refs = page_device1->no_outgoing_refs_cache.has_outgoing_refs(page); + + // Since we just queried for a previous generation's page state bits, we will be returned a + // BoolStatus of value kUnknown. 
+  //
+  EXPECT_EQ(has_outgoing_refs, batt::BoolStatus::kUnknown);
+}
+
+//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - -
+TEST_F(PageTracerTest, CachingPageTracerTest)
+{
+  std::unique_ptr job = this->page_cache->new_job();
+
+  // Maintain a set of PageIds for pages that won't be traced when `trace_refs_recursive` is called,
+  // since it has no incoming refs.
+  //
+  std::unordered_set not_traced_set;
+  std::vector leaves;
+
+  // BoolStatus kFalse: page has no outgoing refs.
+  // BoolStatus kTrue: page has outgoing refs.
+  //
+  std::unordered_map
+      expected_outgoing_refs_status;
+
+  // Use half of the PageDevice's capacity to make leaf pages, i.e. pages with no outgoing refs.
+  //
+  for (usize i = 0; i < (this->page_device_capacity / 2); ++i) {
+    batt::StatusOr leaf =
+        this->build_page_with_refs_to({}, llfs::PageSize{256}, *job);
+    ASSERT_TRUE(leaf.ok()) << BATT_INSPECT(leaf.status());
+    not_traced_set.insert(*leaf);
+    leaves.emplace_back(*leaf);
+    expected_outgoing_refs_status[*leaf] = batt::BoolStatus::kFalse;
+  }
+
+  // Use (half of PageDevice's capacity - 1) number of pages for pages with outgoing refs.
+  //
+  u64 num_pages_with_outgoing_refs = 0;
+  for (usize i = 1; i < (this->page_device_capacity / 2); ++i) {
+    std::mt19937 rng{i};
+    std::vector outgoing_refs;
+    // Randomly generate outgoing refs to the leaves we created above.
+    //
+    std::sample(leaves.begin(), leaves.end(), std::back_inserter(outgoing_refs), i, rng);
+    batt::StatusOr root =
+        this->build_page_with_refs_to(outgoing_refs, llfs::PageSize{256}, *job);
+    job->new_root(*root);
+    expected_outgoing_refs_status[*root] = batt::BoolStatus::kTrue;
+    ++num_pages_with_outgoing_refs;
+
+    // Those leaves that were initially placed in the not_traced set are now traceable since they
+    // now have incoming refs.
+    //
+    for (usize j = 0; j < outgoing_refs.size(); ++j) {
+      not_traced_set.erase(outgoing_refs[j]);
+    }
+  }
+
+  // Since we have 1 page left to allocate in the PageDevice, use that to guarantee we have at least
+  // one untraceable page.
+  //
+  batt::StatusOr island =
+      this->build_page_with_refs_to({}, llfs::PageSize{256}, *job);
+  not_traced_set.insert(*island);
+  for (const llfs::PageId& id : not_traced_set) {
+    // All non-traceable pages will never have their outgoing refs information set, so their status
+    // will be kUnknown.
+    //
+    expected_outgoing_refs_status[id] = batt::BoolStatus::kUnknown;
+  }
+
+  batt::Status trace_status = job->trace_new_roots(/*page_loader=*/*job, /*page_id_fn=*/
+                                                   []([[maybe_unused]] llfs::PageId page_id) {
+
+                                                   });
+  ASSERT_TRUE(trace_status.ok());
+
+  // Verify the OutgoingRefsStatus for each page.
+  //
+  for (const auto& [page_id, expected_status] : expected_outgoing_refs_status) {
+    batt::BoolStatus actual_status = this->get_outgoing_refs_status(page_id);
+    EXPECT_EQ(actual_status, expected_status);
+  }
+
+  // Perform another call to trace_new_roots, this time with the PageCache itself as the
+  // PageLoader being passed in. This way, we can compare the get_count metric of PageCache to make
+  // sure we aren't doing unnecessary loads in this function.
+ // + batt::StatusOr get_count_delta = this->test_cache_get_count_delta(*job); + ASSERT_TRUE(get_count_delta.ok()) << BATT_INSPECT(get_count_delta.status()); + EXPECT_EQ(*get_count_delta, num_pages_with_outgoing_refs); +} + +//==#==========+==+=+=++=+++++++++++-+-+--+----- --- -- - - - - +TEST_F(PageTracerTest, VolumeAndRecyclerTest) +{ + auto fake_root_log = llfs::testing::make_fake_log_device_factory(*this->root_log); + auto fake_recycler_log = llfs::testing::make_fake_log_device_factory(*this->recycler_log); + + std::unique_ptr test_volume = this->open_volume_or_die( + fake_root_log, fake_recycler_log, + /*slot_visitor_fn=*/[](const llfs::SlotParse&, const auto& /*payload*/) { + return llfs::OkStatus(); + }); + + // BoolStatus kFalse: page has no outgoing refs. + // BoolStatus kTrue: page has outgoing refs. + // + std::unordered_map expected_outgoing_refs; + u64 num_pages_created = 0; + + // Create the first job with some new pages and commit it. + // + std::unique_ptr job1 = test_volume->new_job(); + + batt::StatusOr leaf1 = + this->build_page_with_refs_to({}, llfs::PageSize{256}, *job1); + ASSERT_TRUE(leaf1.ok()) << BATT_INSPECT(leaf1.status()); + ++num_pages_created; + + batt::StatusOr leaf2 = + this->build_page_with_refs_to({}, llfs::PageSize{256}, *job1); + ASSERT_TRUE(leaf2.ok()) << BATT_INSPECT(leaf2.status()); + ++num_pages_created; + + batt::StatusOr root1 = + this->build_page_with_refs_to({*leaf1, *leaf2}, llfs::PageSize{256}, *job1); + ASSERT_TRUE(root1.ok()) << BATT_INSPECT(root1.status()); + ++num_pages_created; + + llfs::StatusOr commit_root1 = + this->commit_job(*test_volume, std::move(job1), *root1); + ASSERT_TRUE(commit_root1.ok()) << BATT_INSPECT(commit_root1.status()); + + // Create a second job and commit it. This gives the first leaf page two incoming references from + // other pages, so a Volume trim wouldn't recycle it right away. 
+ // + std::unique_ptr job2 = test_volume->new_job(); + batt::StatusOr root2 = + this->build_page_with_refs_to({*leaf1}, llfs::PageSize{256}, *job2); + ASSERT_TRUE(root2.ok()) << BATT_INSPECT(root2.status()); + ++num_pages_created; + + llfs::StatusOr commit_root2 = + this->commit_job(*test_volume, std::move(job2), *root2); + ASSERT_TRUE(commit_root2.ok()) << BATT_INSPECT(commit_root2.status()); + + expected_outgoing_refs[*leaf1] = batt::BoolStatus::kFalse; + expected_outgoing_refs[*leaf2] = batt::BoolStatus::kFalse; + expected_outgoing_refs[*root1] = batt::BoolStatus::kTrue; + expected_outgoing_refs[*root2] = batt::BoolStatus::kTrue; + + // Create the last job to fill up the PageDevice. + // + std::unique_ptr job3 = test_volume->new_job(); + batt::StatusOr page = this->build_page_with_refs_to({}, llfs::PageSize{256}, *job3); + ++num_pages_created; + expected_outgoing_refs[*page] = batt::BoolStatus::kFalse; + for (usize i = 0; i < (this->page_device_capacity - num_pages_created); ++i) { + llfs::PageId prev_page{*page}; + page = this->build_page_with_refs_to({prev_page}, llfs::PageSize{256}, *job3); + expected_outgoing_refs[*page] = batt::BoolStatus::kTrue; + } + llfs::StatusOr commit_remaining = + this->commit_job(*test_volume, std::move(job3), *page); + ASSERT_TRUE(commit_remaining.ok()) << BATT_INSPECT(commit_remaining.status()); + + llfs::StatusOr flushed = test_volume->sync( + llfs::LogReadMode::kDurable, llfs::SlotUpperBoundAt{commit_remaining->upper_bound}); + ASSERT_TRUE(flushed.ok()); + + // Verify the outgoing refs status. + // + for (const auto& [page_id, expected_status] : expected_outgoing_refs) { + batt::BoolStatus actual_status = this->get_outgoing_refs_status(page_id); + EXPECT_EQ(actual_status, expected_status); + } + + // Trim the Volume. This should cause root1 and leaf2 to be recycled. 
+ // + llfs::Status trim_set = test_volume->trim(commit_root1->upper_bound); + ASSERT_TRUE(trim_set.ok()); + llfs::Status trimmed = test_volume->await_trim(commit_root1->upper_bound); + ASSERT_TRUE(trimmed.ok()); + + ASSERT_TRUE(this->page_cache->arena_for_page_id(*root1).allocator().await_ref_count(*root1, 0)); + ASSERT_TRUE(this->page_cache->arena_for_page_id(*leaf2).allocator().await_ref_count(*leaf2, 0)); + + // Now, create a new job to create a new page. This new page should be the second generation of a + // physical page due to the recycling that just happened. + // + std::unique_ptr job4 = test_volume->new_job(); + batt::StatusOr new_leaf = + this->build_page_with_refs_to({}, llfs::PageSize{256}, *job4); + ASSERT_TRUE(new_leaf.ok()) << BATT_INSPECT(new_leaf.status()); + EXPECT_GT(this->get_generation(*new_leaf), 1); + job4->new_root(*new_leaf); + + // Test to make sure we DON'T use the existing NoOutgoingRefsCache cache entry for this physical + // page since we are tracing a new generation of the physical page, and that we fall back on our + // wrapped loading tracer to trace the outgoing refs for the page. 
+ // + batt::StatusOr get_count_delta = this->test_cache_get_count_delta(*job4); + ASSERT_TRUE(get_count_delta.ok()) << BATT_INSPECT(get_count_delta.status()); + EXPECT_EQ(*get_count_delta, 1); +} + } // namespace diff --git a/src/llfs/storage_context.test.cpp b/src/llfs/storage_context.test.cpp index 2918d1d..777feca 100644 --- a/src/llfs/storage_context.test.cpp +++ b/src/llfs/storage_context.test.cpp @@ -130,11 +130,11 @@ TEST(StorageContextTest, GetPageCache) ASSERT_TRUE(cache.ok()) << BATT_INSPECT(cache.status()); ASSERT_NE(*cache, nullptr); - llfs::Slice devices_4kb = + llfs::Slice devices_4kb = (*cache)->devices_with_page_size(4 * kKiB); EXPECT_EQ(devices_4kb.size(), 1u); - llfs::Slice devices_2mb = + llfs::Slice devices_2mb = (*cache)->devices_with_page_size(2 * kMiB); EXPECT_EQ(devices_2mb.size(), 1u); } diff --git a/src/llfs/volume.cpp b/src/llfs/volume.cpp index c4bd46f..2d95452 100644 --- a/src/llfs/volume.cpp +++ b/src/llfs/volume.cpp @@ -168,7 +168,7 @@ u64 Volume::calculate_grant_size(const AppendableJob& appendable) const metadata.ids->recycler_uuid, metadata.ids->trimmer_uuid, }) { - for (PageCache::PageDeviceEntry* entry : cache->all_devices()) { + for (PageDeviceEntry* entry : cache->all_devices()) { BATT_CHECK_NOT_NULLPTR(entry); const PageArena& arena = entry->arena; Optional attachment = @@ -280,7 +280,7 @@ u64 Volume::calculate_grant_size(const AppendableJob& appendable) const metadata.ids->recycler_uuid, metadata.ids->trimmer_uuid, }) { - for (PageCache::PageDeviceEntry* entry : cache->all_devices()) { + for (PageDeviceEntry* entry : cache->all_devices()) { BATT_CHECK_NOT_NULLPTR(entry); BATT_REQUIRE_OK(entry->arena.allocator().notify_user_recovered(uuid)); } diff --git a/src/llfs/volume.test.cpp b/src/llfs/volume.test.cpp index 628190a..dea3e4c 100644 --- a/src/llfs/volume.test.cpp +++ b/src/llfs/volume.test.cpp @@ -1599,8 +1599,7 @@ TEST_F(VolumeSimTest, ConcurrentAppendJobs) LLFS_VLOG(1) << "checking ref counts..."; - for 
(llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(1 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(1 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); for (llfs::PageId page_id : page_ids) { EXPECT_EQ(entry->arena.allocator().get_ref_count(page_id).first, kExpectedRefCount); @@ -1888,24 +1887,21 @@ void VolumeSimTest::verify_post_recovery_expectations(RecoverySimState& state, if (state.recovered_second_page) { EXPECT_FALSE(state.second_job_will_not_commit); - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(1 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(1 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device - 1); EXPECT_EQ(entry->arena.allocator().get_ref_count(state.first_page_id).first, 3); ASSERT_TRUE(sim.has_data_for_page_id(state.first_page_id).ok()); EXPECT_TRUE(*sim.has_data_for_page_id(state.first_page_id)); } - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(2 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(2 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device - 1); EXPECT_EQ(entry->arena.allocator().get_ref_count(state.second_root_page_id).first, 2); ASSERT_TRUE(sim.has_data_for_page_id(state.second_root_page_id).ok()); EXPECT_TRUE(*sim.has_data_for_page_id(state.second_root_page_id)); } - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(4 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(4 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device - 1); EXPECT_EQ(entry->arena.allocator().get_ref_count(state.third_page_id).first, 2); @@ -1913,16 +1909,14 @@ void 
VolumeSimTest::verify_post_recovery_expectations(RecoverySimState& state, EXPECT_TRUE(*sim.has_data_for_page_id(state.third_page_id)); } } else { - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(1 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(1 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device - 1); EXPECT_EQ(entry->arena.allocator().get_ref_count(state.first_page_id).first, 2); ASSERT_TRUE(sim.has_data_for_page_id(state.first_page_id).ok()); EXPECT_TRUE(*sim.has_data_for_page_id(state.first_page_id)); } - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(2 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(2 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device); if (state.second_root_page_id.is_valid()) { @@ -1933,8 +1927,7 @@ void VolumeSimTest::verify_post_recovery_expectations(RecoverySimState& state, } } } - for (llfs::PageCache::PageDeviceEntry* entry : - sim.cache()->devices_with_page_size(4 * kKiB)) { + for (llfs::PageDeviceEntry* entry : sim.cache()->devices_with_page_size(4 * kKiB)) { BATT_CHECK_NOT_NULLPTR(entry); EXPECT_EQ(entry->arena.allocator().free_pool_size(), this->pages_per_device); if (state.third_page_id.is_valid()) {