diff --git a/cloud/filestore/config/storage.proto b/cloud/filestore/config/storage.proto index 109d98ea65d..7838ae1ca7b 100644 --- a/cloud/filestore/config/storage.proto +++ b/cloud/filestore/config/storage.proto @@ -505,4 +505,11 @@ message TStorageConfig // Enables directory creation in shards (by default directories are created // only in the main tablet). optional bool DirectoryCreationInShardsEnabled = 414; + + // Mixed blocks maps are stored in memory only for actively used ranges. + // Additionally MixedBlocksOffloadedRangesCapacity ranges that are not + // actively used are also stored in memory. Their memory footprint is + // proportional to the aforementioned value multiplied by the size of + // TRange + optional uint64 MixedBlocksOffloadedRangesCapacity = 415; } diff --git a/cloud/filestore/libs/storage/core/config.cpp b/cloud/filestore/libs/storage/core/config.cpp index bd908877114..d35f7db474c 100644 --- a/cloud/filestore/libs/storage/core/config.cpp +++ b/cloud/filestore/libs/storage/core/config.cpp @@ -241,6 +241,8 @@ using TAliases = NProto::TStorageConfig::TFilestoreAliases; xxx(DestroyFilestoreDenyList, TVector, {} )\ \ xxx(SSProxyFallbackMode, bool, false )\ + \ + xxx(MixedBlocksOffloadedRangesCapacity, ui64, 0 )\ // FILESTORE_STORAGE_CONFIG #define FILESTORE_STORAGE_CONFIG_REF(xxx) \ diff --git a/cloud/filestore/libs/storage/core/config.h b/cloud/filestore/libs/storage/core/config.h index bf62371b029..5030d3c6a34 100644 --- a/cloud/filestore/libs/storage/core/config.h +++ b/cloud/filestore/libs/storage/core/config.h @@ -301,6 +301,7 @@ class TStorageConfig bool GetDirectoryCreationInShardsEnabled() const; bool GetGuestWritebackCacheEnabled() const; + ui64 GetMixedBlocksOffloadedRangesCapacity() const; }; } // namespace NCloud::NFileStore::NStorage diff --git a/cloud/filestore/libs/storage/tablet/model/deletion_markers.cpp b/cloud/filestore/libs/storage/tablet/model/deletion_markers.cpp index 2628e923ad0..75218dd32e9 --- 
a/cloud/filestore/libs/storage/tablet/model/deletion_markers.cpp +++ b/cloud/filestore/libs/storage/tablet/model/deletion_markers.cpp @@ -201,4 +201,9 @@ TVector TDeletionMarkers::Extract() return Impl->Extract(); } +void TDeletionMarkers::Swap(TDeletionMarkers& other) +{ + Impl.swap(other.Impl); +} + } // namespace NCloud::NFileStore::NStorage diff --git a/cloud/filestore/libs/storage/tablet/model/deletion_markers.h b/cloud/filestore/libs/storage/tablet/model/deletion_markers.h index 6e94d4dc770..662bdbf87f2 100644 --- a/cloud/filestore/libs/storage/tablet/model/deletion_markers.h +++ b/cloud/filestore/libs/storage/tablet/model/deletion_markers.h @@ -60,6 +60,7 @@ class TDeletionMarkers ui32 Apply(TBlock& block) const; ui32 Apply(TArrayRef blocks) const; TVector Extract(); + void Swap(TDeletionMarkers& other); }; } // namespace NCloud::NFileStore::NStorage diff --git a/cloud/filestore/libs/storage/tablet/model/mixed_blocks.cpp b/cloud/filestore/libs/storage/tablet/model/mixed_blocks.cpp index 8f4e97b7752..14fc943825f 100644 --- a/cloud/filestore/libs/storage/tablet/model/mixed_blocks.cpp +++ b/cloud/filestore/libs/storage/tablet/model/mixed_blocks.cpp @@ -3,6 +3,8 @@ #include "alloc.h" #include "deletion_markers.h" +#include + #include #include @@ -97,7 +99,15 @@ struct TRange ui64 RefCount = 1; - TRange(IAllocator* alloc) + void Swap(TRange& other) + { + Blobs.swap(other.Blobs); + DeletionMarkers.Swap(other.DeletionMarkers); + Levels.swap(other.Levels); + std::swap(RefCount, other.RefCount); + } + + explicit TRange(IAllocator* alloc) : Blobs{alloc} , DeletionMarkers(alloc) {} @@ -109,32 +119,70 @@ struct TRange struct TMixedBlocks::TImpl { + // Holds all ranges that are actively used, i.e. have ref count > 0 using TRangeMap = THashMap; + // Is used to store ranges that are not actively used, i.e. have + // ref count == 0. 
May be useful for caching + using TOffloadedRangeMap = NCloud::TLRUCache; + + void SetOffloadedRangesCapacity(ui64 offloadedRangesCapacity) + { + OffloadedRanges.SetCapacity(offloadedRangesCapacity); + } + + explicit TImpl(IAllocator* alloc) + : Alloc(alloc) + , OffloadedRanges(alloc) + {} + + TRange* FindRange(ui32 rangeId) + { + auto* it = Ranges.FindPtr(rangeId); + if (it) { + return it; + } + return OffloadedRanges.FindPtr(rangeId); + } IAllocator* Alloc; TRangeMap Ranges; + TOffloadedRangeMap OffloadedRanges; }; //////////////////////////////////////////////////////////////////////////////// TMixedBlocks::TMixedBlocks(IAllocator* alloc) - : Impl(new TImpl{alloc, {}}) + : Impl(new TImpl(alloc)) {} TMixedBlocks::~TMixedBlocks() {} +void TMixedBlocks::Reset(ui64 offloadedRangesCapacity) +{ + Impl->SetOffloadedRangesCapacity(offloadedRangesCapacity); +} + bool TMixedBlocks::IsLoaded(ui32 rangeId) const { - auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); return range; } void TMixedBlocks::RefRange(ui32 rangeId) { - TImpl::TRangeMap::insert_ctx ctx; + TImpl::TRangeMap::insert_ctx ctx = nullptr; if (auto it = Impl->Ranges.find(rangeId, ctx); it == Impl->Ranges.end()) { - Impl->Ranges.emplace_direct(ctx, rangeId, Impl->Alloc); + // If the range is offloaded, move it to active ranges. 
Otherwise, + // create a new range + if (auto offloadedIt = Impl->OffloadedRanges.find(rangeId)) { + Impl->Ranges.emplace_direct(ctx, rangeId, Impl->Alloc); + Impl->Ranges.at(rangeId).Swap(offloadedIt->second); + Impl->Ranges.at(rangeId).RefCount = 1; + Impl->OffloadedRanges.erase(offloadedIt); + } else { + Impl->Ranges.emplace_direct(ctx, rangeId, Impl->Alloc); + } } else { it->second.RefCount++; } @@ -147,7 +195,16 @@ void TMixedBlocks::UnRefRange(ui32 rangeId) Y_ABORT_UNLESS(it->second.RefCount, "invalid ref count for range: %u", rangeId); it->second.RefCount--; - if (!it->second.RefCount) { + + // If ref count drops to 0, move the range to offloaded ranges. No need to + // offload the range if the capacity is 0 + if (it->second.RefCount == 0) { + if (Impl->OffloadedRanges.capacity() != 0) { + auto [_, inserted] = + Impl->OffloadedRanges.emplace(rangeId, Impl->Alloc); + Y_DEBUG_ABORT_UNLESS(inserted); + Impl->OffloadedRanges.at(rangeId).Swap(it->second); + } Impl->Ranges.erase(it); } } @@ -158,7 +215,7 @@ bool TMixedBlocks::AddBlocks( TBlockList blockList, const TMixedBlobStats& stats) { - auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); // TODO: pick level @@ -186,7 +243,7 @@ bool TMixedBlocks::RemoveBlocks( const TPartialBlobId& blobId, TMixedBlobStats* stats) { - auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); auto it = range->Blobs.find(blobId); @@ -216,7 +273,7 @@ void TMixedBlocks::FindBlocks( ui32 blockIndex, ui32 blocksCount) const { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); // TODO: limit range scan @@ -246,7 +303,7 @@ void TMixedBlocks::AddDeletionMarker( ui32 rangeId, TDeletionMarker deletionMarker) { - auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); range->DeletionMarkers.Add(deletionMarker); 
@@ -254,7 +311,7 @@ void TMixedBlocks::AddDeletionMarker( TVector TMixedBlocks::ExtractDeletionMarkers(ui32 rangeId) { - auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); return range->DeletionMarkers.Extract(); @@ -266,7 +323,7 @@ void TMixedBlocks::ApplyDeletionMarkers( { const auto rangeId = GetMixedRangeIndex(hasher, blocks); - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); range->DeletionMarkers.Apply(MakeArrayRef(blocks)); @@ -274,7 +331,7 @@ void TMixedBlocks::ApplyDeletionMarkers( TVector TMixedBlocks::ApplyDeletionMarkers(ui32 rangeId) const { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); TVector result; @@ -293,7 +350,7 @@ TVector TMixedBlocks::ApplyDeletionMarkers(ui32 rangeId) const auto TMixedBlocks::ApplyDeletionMarkersAndGetMetas(ui32 rangeId) const -> TVector { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); TVector result; @@ -313,7 +370,7 @@ auto TMixedBlocks::ApplyDeletionMarkersAndGetMetas(ui32 rangeId) const TVector TMixedBlocks::GetBlobsForCompaction(ui32 rangeId) const { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); TVector result; @@ -331,7 +388,7 @@ TVector TMixedBlocks::GetBlobsForCompaction(ui32 rangeId) const TMixedBlobMeta TMixedBlocks::FindBlob(ui32 rangeId, TPartialBlobId blobId) const { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); Y_ABORT_UNLESS(range); TVector result; @@ -347,7 +404,7 @@ TMixedBlobMeta TMixedBlocks::FindBlob(ui32 rangeId, TPartialBlobId blobId) const ui32 TMixedBlocks::CalculateGarbageBlockCount(ui32 rangeId) const { - const auto* range = Impl->Ranges.FindPtr(rangeId); + auto* range = Impl->FindRange(rangeId); 
Y_DEBUG_ABORT_UNLESS(range); if (!range) { return 0; diff --git a/cloud/filestore/libs/storage/tablet/model/mixed_blocks.h b/cloud/filestore/libs/storage/tablet/model/mixed_blocks.h index d70c86c9cac..6acd2ab5248 100644 --- a/cloud/filestore/libs/storage/tablet/model/mixed_blocks.h +++ b/cloud/filestore/libs/storage/tablet/model/mixed_blocks.h @@ -24,6 +24,8 @@ class TMixedBlocks TMixedBlocks(IAllocator* allocator); ~TMixedBlocks(); + void Reset(ui64 offloadedRangesCapacity); + bool IsLoaded(ui32 rangeId) const; void RefRange(ui32 rangeId); diff --git a/cloud/filestore/libs/storage/tablet/model/mixed_blocks_ut.cpp b/cloud/filestore/libs/storage/tablet/model/mixed_blocks_ut.cpp index 03b59d88101..e68711059f7 100644 --- a/cloud/filestore/libs/storage/tablet/model/mixed_blocks_ut.cpp +++ b/cloud/filestore/libs/storage/tablet/model/mixed_blocks_ut.cpp @@ -153,6 +153,93 @@ Y_UNIT_TEST_SUITE(TMixedBlocksTest) mixedBlocks.UnRefRange(rangeId); UNIT_ASSERT(!mixedBlocks.IsLoaded(rangeId)); } + + Y_UNIT_TEST(ShouldEvictLeastRecentlyUsedRanges) + { + constexpr ui32 rangeId1 = 0; + constexpr ui32 rangeId2 = 1; + constexpr ui32 rangeId3 = 2; + + constexpr ui64 nodeId = 1; + constexpr ui64 minCommitId = MakeCommitId(12, 345); + constexpr ui64 maxCommitId = InvalidCommitId; + + constexpr ui32 blockIndex = 123456; + constexpr size_t blocksCount = 100; + + TBlock block(nodeId, blockIndex, minCommitId, maxCommitId); + + auto list = TBlockList::EncodeBlocks( + block, + blocksCount, + TDefaultAllocator::Instance()); + + TMixedBlocks mixedBlocks(TDefaultAllocator::Instance()); + mixedBlocks.Reset(1); + mixedBlocks.RefRange(rangeId1); + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId1)); + + mixedBlocks.RefRange(rangeId2); + mixedBlocks.AddBlocks(rangeId2, TPartialBlobId(), list); + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId1)); + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId2)); + + mixedBlocks.AddBlocks(rangeId1, TPartialBlobId(), list); + mixedBlocks.UnRefRange(rangeId2); + // So now the least 
recently used range is rangeId2. It should be added + // to the offloaded list + + // The rangeId2 is not evicted, because it fits into the capacity of + // offloaded ranges + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId2)); + mixedBlocks.RefRange(rangeId3); + mixedBlocks.UnRefRange(rangeId3); + + // Now the least recently used range is rangeId2, and it is evicted from + // the offloaded ranges. It is replaced by rangeId3 + UNIT_ASSERT(!mixedBlocks.IsLoaded(rangeId2)); + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId3)); + + mixedBlocks.UnRefRange(rangeId1); + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId1)); + + mixedBlocks.RefRange(rangeId1); + // The range is moved from offloaded ranges to active ranges and its + // data should be preserved + UNIT_ASSERT(mixedBlocks.IsLoaded(rangeId1)); + UNIT_ASSERT_VALUES_EQUAL( + blocksCount, + mixedBlocks.FindBlob(rangeId1, TPartialBlobId()).Blocks.size()); + { + TMixedBlockVisitor visitor; + mixedBlocks.FindBlocks( + visitor, + rangeId1, + nodeId, + minCommitId + 1, + blockIndex, + blocksCount); + + auto blocks = visitor.Finish(); + UNIT_ASSERT_VALUES_EQUAL(blocksCount, blocks.size()); + } + + // And this can not be said about rangeId2 + mixedBlocks.RefRange(rangeId2); + { + TMixedBlockVisitor visitor; + mixedBlocks.FindBlocks( + visitor, + rangeId2, + nodeId, + minCommitId + 1, + blockIndex, + blocksCount); + + auto blocks = visitor.Finish(); + UNIT_ASSERT_VALUES_EQUAL(0, blocks.size()); + } + } } } // namespace NCloud::NFileStore::NStorage diff --git a/cloud/filestore/libs/storage/tablet/tablet_state.cpp b/cloud/filestore/libs/storage/tablet/tablet_state.cpp index 9a4a047d91e..8ed011c0802 100644 --- a/cloud/filestore/libs/storage/tablet/tablet_state.cpp +++ b/cloud/filestore/libs/storage/tablet/tablet_state.cpp @@ -150,6 +150,7 @@ void TIndexTabletState::LoadState( config.GetInMemoryIndexCacheNodeRefsCapacity(), GetNodesCount(), config.GetInMemoryIndexCacheNodesToNodeRefsCapacityRatio())); + 
Impl->MixedBlocks.Reset(config.GetMixedBlocksOffloadedRangesCapacity()); for (const auto& deletionMarker: largeDeletionMarkers) { Impl->LargeBlocks.AddDeletionMarker(deletionMarker); diff --git a/cloud/filestore/tests/loadtest/service-kikimr-newfeatures-test/nfs-storage.txt b/cloud/filestore/tests/loadtest/service-kikimr-newfeatures-test/nfs-storage.txt index 209156ad525..17c391a9630 100644 --- a/cloud/filestore/tests/loadtest/service-kikimr-newfeatures-test/nfs-storage.txt +++ b/cloud/filestore/tests/loadtest/service-kikimr-newfeatures-test/nfs-storage.txt @@ -13,3 +13,4 @@ InMemoryIndexCacheNodesCapacity: 10 InMemoryIndexCacheNodeRefsCapacity: 10 InMemoryIndexCacheNodeAttrsCapacity: 10 UseMixedBlocksInsteadOfAliveBlocksInCompaction: true +MixedBlocksOffloadedRangesCapacity: 1024 diff --git a/cloud/storage/core/libs/common/lru_cache.cpp b/cloud/storage/core/libs/common/lru_cache.cpp new file mode 100644 index 00000000000..a2e5d28182f --- /dev/null +++ b/cloud/storage/core/libs/common/lru_cache.cpp @@ -0,0 +1 @@ +#include "lru_cache.h" diff --git a/cloud/storage/core/libs/common/lru_cache.h b/cloud/storage/core/libs/common/lru_cache.h new file mode 100644 index 00000000000..d6d0c25e667 --- /dev/null +++ b/cloud/storage/core/libs/common/lru_cache.h @@ -0,0 +1,144 @@ +#pragma once + +#include "alloc.h" + +#include +#include +#include + +namespace NCloud { + +//////////////////////////////////////////////////////////////////////////////// + +// A simple wrapper around THashMap that also evicts the least recently used +// elements when the capacity is reached. 
It keeps track of the order in which +// keys are accessed +template +class TLRUCache: public TMapOps> +{ + using TBase = + THashMap, TEqualTo, TStlAllocator>; + using TOrderList = TList; + using TOrderPositions = THashMap< + TKey, + typename TOrderList::iterator, + THash, + TEqualTo, + TStlAllocator>; + + // Contains the actual mapping key -> value + TBase Base; + // Contains the keys in order of access, from most to least recently + // accessed (the last element is the oldest one to be accessed) + TOrderList OrderList; + // Contains the position of each key in OrderList, needed to quickly find + // and update the order when accessing a key + TOrderPositions OrderPositions; + + IAllocator* Alloc; + + size_t Capacity = 0; + +private: + // Bumps the key to the front of the order list, used upon any access + void UpdateOrder(const TKey& key) + { + auto it = OrderPositions.find(key); + if (it != OrderPositions.end()) { + OrderList.splice(OrderList.begin(), OrderList, it->second); + // splice keeps it->second valid; it now points to the front + } else { + OrderPositions.emplace( + key, + OrderList.insert(OrderList.begin(), key)); + } + } + + inline void RemoveFromOrder(const TKey& key) + { + auto it = OrderPositions.find(key); + if (it != OrderPositions.end()) { + OrderList.erase(it->second); + OrderPositions.erase(it); + } + } + + void CleanupIfNeeded() + { + while (Base.size() > Capacity) { + auto& key = OrderList.back(); + Base.erase(key); + OrderPositions.erase(key); + OrderList.pop_back(); + } + } + +public: + using iterator = typename TBase::iterator; + + explicit TLRUCache(IAllocator* alloc) + : Base(alloc) + , OrderList(alloc) + , OrderPositions(alloc) + , Alloc(alloc) + {} + + void SetCapacity(size_t capacity) + { + Capacity = capacity; + CleanupIfNeeded(); + Base.reserve(Capacity); + OrderPositions.reserve(Capacity); + } + + iterator end() + { + return Base.end(); + } + + iterator find(const TKey& key) + { + auto result = Base.find(key); + if (result != Base.end()) 
{ + UpdateOrder(key); + } + return result; + } + + void erase(iterator it) + { + RemoveFromOrder(it->first); + Base.erase(it); + } + + template + std::pair emplace(Args&&... args) + { + if (Capacity == 0) { + return {Base.end(), false}; + } + auto result = Base.emplace(std::forward(args)...); + UpdateOrder(result.first->first); + CleanupIfNeeded(); + return result; + } + + template + TValue& at(const T& key) { + auto& result = Base.at(key); + UpdateOrder(key); + return result; + } + + [[nodiscard]] size_t size() const + { + return Base.size(); + } + + [[nodiscard]] size_t capacity() const + { + return Capacity; + } +}; + +} // namespace NCloud diff --git a/cloud/storage/core/libs/common/lru_cache_ut.cpp b/cloud/storage/core/libs/common/lru_cache_ut.cpp new file mode 100644 index 00000000000..65c3d6b9372 --- /dev/null +++ b/cloud/storage/core/libs/common/lru_cache_ut.cpp @@ -0,0 +1,123 @@ +#include "lru_cache.h" + +#include + +namespace NCloud { + +//////////////////////////////////////////////////////////////////////////////// + +Y_UNIT_TEST_SUITE(TLRUCache) +{ + Y_UNIT_TEST(ShouldEnforceCapacity) + { + TLRUCache hashMap(TDefaultAllocator::Instance()); + hashMap.SetCapacity(2); + + UNIT_ASSERT_VALUES_EQUAL(0, hashMap.size()); + UNIT_ASSERT_VALUES_EQUAL(2, hashMap.capacity()); + + hashMap.emplace("key1", "value1"); + hashMap.emplace("key2", "value2"); + + UNIT_ASSERT_VALUES_EQUAL(2, hashMap.size()); + UNIT_ASSERT_VALUES_EQUAL("value1", hashMap.find("key1")->second); + UNIT_ASSERT_VALUES_EQUAL("value2", hashMap.find("key2")->second); + + hashMap.emplace("key3", "value3"); // Should evict "key1" + + UNIT_ASSERT_VALUES_EQUAL(2, hashMap.size()); + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key1")); + UNIT_ASSERT_VALUES_EQUAL("value2", hashMap.find("key2")->second); + UNIT_ASSERT_VALUES_EQUAL("value3", hashMap.find("key3")->second); + } + + Y_UNIT_TEST(ShouldHandleAccessOrder) + { + TLRUCache hashMap(TDefaultAllocator::Instance()); + hashMap.SetCapacity(3); + + 
hashMap.emplace("key1", "value1"); + hashMap.emplace("key2", "value2"); + hashMap.emplace("key3", "value3"); + + // Access key2 to make it most recently used + hashMap.find("key2"); + + // Insert a new key, evicting the least recently used (key1) + hashMap.emplace("key4", "value4"); + + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key1")); + UNIT_ASSERT_VALUES_EQUAL("value2", hashMap.find("key2")->second); + UNIT_ASSERT_VALUES_EQUAL("value3", hashMap.find("key3")->second); + UNIT_ASSERT_VALUES_EQUAL("value4", hashMap.find("key4")->second); + } + + Y_UNIT_TEST(ShouldHandleErase) + { + TLRUCache hashMap(TDefaultAllocator::Instance()); + hashMap.SetCapacity(3); + + hashMap.emplace("key1", "value1"); + hashMap.emplace("key2", "value2"); + hashMap.emplace("key3", "value3"); + + UNIT_ASSERT_VALUES_EQUAL(3, hashMap.size()); + + // Erase key2 and ensure order is preserved + hashMap.erase(hashMap.find("key2")); + + UNIT_ASSERT_VALUES_EQUAL(2, hashMap.size()); + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key2")); + UNIT_ASSERT_VALUES_EQUAL("value1", hashMap.find("key1")->second); + UNIT_ASSERT_VALUES_EQUAL("value3", hashMap.find("key3")->second); + + // Erase remaining keys + hashMap.erase(hashMap.find("key1")); + hashMap.erase(hashMap.find("key3")); + + UNIT_ASSERT_VALUES_EQUAL(0, hashMap.size()); + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key1")); + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key3")); + } + + Y_UNIT_TEST(ShouldThrowOnAtForNonExistentKey) + { + TLRUCache hashMap(TDefaultAllocator::Instance()); + hashMap.SetCapacity(2); + + hashMap.emplace("key1", "value1"); + + UNIT_ASSERT_VALUES_EQUAL("value1", hashMap.at("key1")); + + UNIT_ASSERT_EXCEPTION(hashMap.at("key2"), yexception); + } + + Y_UNIT_TEST(ShouldHandleEdgeCases) + { + TLRUCache hashMap(TDefaultAllocator::Instance()); + hashMap.SetCapacity(0); + + // Test capacity 0 + auto [it, inserted1] = hashMap.emplace("key1", "value1"); + UNIT_ASSERT_VALUES_EQUAL(false, inserted1); + 
UNIT_ASSERT_EQUAL(hashMap.end(), it); + UNIT_ASSERT_VALUES_EQUAL(0, hashMap.size()); + UNIT_ASSERT_EQUAL(hashMap.end(), hashMap.find("key1")); + + hashMap.SetCapacity(2); + UNIT_ASSERT_VALUES_EQUAL(0, hashMap.size()); + UNIT_ASSERT_VALUES_EQUAL(2, hashMap.capacity()); + + // Test inserting duplicate keys - emplace should not overwrite the + // value + auto [it1, inserted2] = hashMap.emplace("key1", "value1"); + UNIT_ASSERT_VALUES_EQUAL(true, inserted2); + auto [it2, inserted3] = hashMap.emplace("key1", "value2"); + UNIT_ASSERT_VALUES_EQUAL(false, inserted3); + + UNIT_ASSERT_VALUES_EQUAL(1, hashMap.size()); + UNIT_ASSERT_VALUES_EQUAL("value1", hashMap.find("key1")->second); + } +} + +} // namespace NCloud diff --git a/cloud/storage/core/libs/common/ut/ya.make b/cloud/storage/core/libs/common/ut/ya.make index 1c47576f093..e0a6042e385 100644 --- a/cloud/storage/core/libs/common/ut/ya.make +++ b/cloud/storage/core/libs/common/ut/ya.make @@ -25,6 +25,7 @@ SRCS( file_io_service_ut.cpp guarded_sglist_ut.cpp history_ut.cpp + lru_cache_ut.cpp persistent_table_ut.cpp ring_buffer_ut.cpp scheduler_ut.cpp diff --git a/cloud/storage/core/libs/common/ya.make b/cloud/storage/core/libs/common/ya.make index 7800448c0dc..c36942ea105 100644 --- a/cloud/storage/core/libs/common/ya.make +++ b/cloud/storage/core/libs/common/ya.make @@ -19,6 +19,7 @@ SRCS( guarded_sglist.cpp helpers.cpp history.cpp + lru_cache.cpp media.cpp persistent_table.cpp proto_helpers.cpp