diff --git a/Directory.Packages.props b/Directory.Packages.props index daf5a8f300..411e403aed 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -5,6 +5,7 @@ + diff --git a/benchmark/BDN.benchmark/Resp/RespTsavoriteStress.cs b/benchmark/BDN.benchmark/Resp/RespTsavoriteStress.cs new file mode 100644 index 0000000000..e11e4133e5 --- /dev/null +++ b/benchmark/BDN.benchmark/Resp/RespTsavoriteStress.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System.Runtime.CompilerServices; +using BenchmarkDotNet.Attributes; +using Embedded.perftest; +using Garnet.server; + +namespace BDN.benchmark.Resp +{ + public unsafe class RespTsavoriteStress + { + EmbeddedRespServer server; + RespServerSession session; + + const int batchSize = 128; + + static ReadOnlySpan GET => "*2\r\n$3\r\nGET\r\n$1\r\nx\r\n"u8; + byte[] getRequestBuffer; + byte* getRequestBufferPointer; + + static ReadOnlySpan SET => "*3\r\n$3\r\nSET\r\n$1\r\nx\r\n$1\r\n1\r\n"u8; + byte[] setRequestBuffer; + byte* setRequestBufferPointer; + + static ReadOnlySpan INCR => "*2\r\n$4\r\nINCR\r\n$1\r\nx\r\n"u8; + byte[] incrRequestBuffer; + byte* incrRequestBufferPointer; + + [GlobalSetup] + public void GlobalSetup() + { + var opt = new GarnetServerOptions + { + QuietMode = true + }; + server = new EmbeddedRespServer(opt); + session = server.GetRespSession(); + + CreateBuffer(GET, out getRequestBuffer, out getRequestBufferPointer); + CreateBuffer(SET, out setRequestBuffer, out setRequestBufferPointer); + CreateBuffer(INCR, out incrRequestBuffer, out incrRequestBufferPointer); + + // Set the initial value (needed for GET) + _ = session.TryConsumeMessages(setRequestBufferPointer, setRequestBuffer.Length); + } + + unsafe void CreateBuffer(ReadOnlySpan cmd, out byte[] buffer, out byte* bufferPointer) + { + buffer = GC.AllocateArray(cmd.Length * batchSize, pinned: true); + bufferPointer = (byte*)Unsafe.AsPointer(ref buffer[0]); + for (int i = 0; i < 
batchSize; i++) + cmd.CopyTo(new Span(buffer).Slice(i * cmd.Length)); + } + + [GlobalCleanup] + public void GlobalCleanup() + { + session.Dispose(); + server.Dispose(); + } + + [Benchmark] + public void Get() + { + _ = session.TryConsumeMessages(getRequestBufferPointer, getRequestBuffer.Length); + } + + [Benchmark] + public void Set() + { + _ = session.TryConsumeMessages(setRequestBufferPointer, setRequestBuffer.Length); + } + + [Benchmark] + public void Incr() + { + _ = session.TryConsumeMessages(incrRequestBufferPointer, incrRequestBuffer.Length); + } + } +} \ No newline at end of file diff --git a/libs/cluster/Server/ClusterManagerSlotState.cs b/libs/cluster/Server/ClusterManagerSlotState.cs index bee3da807b..243207ea71 100644 --- a/libs/cluster/Server/ClusterManagerSlotState.cs +++ b/libs/cluster/Server/ClusterManagerSlotState.cs @@ -13,7 +13,12 @@ namespace Garnet.cluster { - using BasicGarnetApi = GarnetApi, BasicContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; /// /// Cluster manager diff --git a/libs/cluster/Server/ClusterProvider.cs b/libs/cluster/Server/ClusterProvider.cs index 7cf5b6f787..2f8218c1a8 100644 --- a/libs/cluster/Server/ClusterProvider.cs +++ b/libs/cluster/Server/ClusterProvider.cs @@ -15,7 +15,12 @@ namespace Garnet.cluster { - using BasicGarnetApi = GarnetApi, BasicContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; /// /// Cluster provider diff --git a/libs/cluster/Session/ClusterSession.cs b/libs/cluster/Session/ClusterSession.cs index 3d24efd9b8..ce7da413d7 100644 --- a/libs/cluster/Session/ClusterSession.cs +++ b/libs/cluster/Session/ClusterSession.cs @@ -14,7 +14,12 @@ namespace Garnet.cluster { - using BasicGarnetApi = GarnetApi, BasicContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; internal sealed unsafe partial class ClusterSession : IClusterSession { 
diff --git a/libs/host/GarnetServer.cs b/libs/host/GarnetServer.cs index 60c7e60e01..fb0b064679 100644 --- a/libs/host/GarnetServer.cs +++ b/libs/host/GarnetServer.cs @@ -14,6 +14,12 @@ namespace Garnet { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Implementation Garnet server /// @@ -23,13 +29,14 @@ public class GarnetServer : IDisposable private readonly GarnetServerOptions opts; private IGarnetServer server; - private TsavoriteKV store; - private TsavoriteKV objectStore; + private TsavoriteKV store; + private TsavoriteKV objectStore; private IDevice aofDevice; private TsavoriteLog appendOnlyFile; - private SubscribeBroker> broker; + private SubscribeBroker> subscribeBroker; private CollectionItemBroker itemBroker; - private LogSettings logSettings, objLogSettings; + private KVSettings kvSettings; + private KVSettings objKvSettings; private INamedDeviceFactory logFactory; private MemoryLogger initLogger; private ILogger logger; @@ -157,8 +164,7 @@ private void InitializeServer() {normal}"); } - IClusterFactory clusterFactory = null; - if (opts.EnableCluster) clusterFactory = new ClusterFactory(); + var clusterFactory = opts.EnableCluster ? new ClusterFactory() : null; this.logger = this.loggerFactory?.CreateLogger("GarnetServer"); logger?.LogInformation("Garnet {version} {bits} bit; {clusterMode} mode; Port: {port}", version, IntPtr.Size == 8 ? "64" : "32", opts.EnableCluster ? 
"cluster" : "standalone", opts.Port); @@ -168,128 +174,117 @@ private void InitializeServer() var customCommandManager = new CustomCommandManager(); - bool setMax = true; - if (opts.ThreadPoolMaxThreads > 0) - setMax = ThreadPool.SetMaxThreads(opts.ThreadPoolMaxThreads, opts.ThreadPoolMaxThreads); + var setMax = opts.ThreadPoolMaxThreads <= 0 || ThreadPool.SetMaxThreads(opts.ThreadPoolMaxThreads, opts.ThreadPoolMaxThreads); - if (opts.ThreadPoolMinThreads > 0) - { - if (!ThreadPool.SetMinThreads(opts.ThreadPoolMinThreads, opts.ThreadPoolMinThreads)) - throw new Exception($"Unable to call ThreadPool.SetMinThreads with {opts.ThreadPoolMinThreads}"); - } + if (opts.ThreadPoolMinThreads > 0 && !ThreadPool.SetMinThreads(opts.ThreadPoolMinThreads, opts.ThreadPoolMinThreads)) + throw new Exception($"Unable to call ThreadPool.SetMinThreads with {opts.ThreadPoolMinThreads}"); // Retry to set max threads if it wasn't set in the previous step if (!setMax && !ThreadPool.SetMaxThreads(opts.ThreadPoolMaxThreads, opts.ThreadPoolMaxThreads)) throw new Exception($"Unable to call ThreadPool.SetMaxThreads with {opts.ThreadPoolMaxThreads}"); - opts.GetSettings(out logSettings, out var indexSize, out var revivSettings, out logFactory); + CreateMainStore(clusterFactory, out var checkpointDir); + CreateObjectStore(clusterFactory, customCommandManager, checkpointDir, out var objectStoreSizeTracker, out itemBroker); - var CheckpointDir = opts.CheckpointDir; - if (CheckpointDir == null) CheckpointDir = opts.LogDir; + if (!opts.DisablePubSub) + subscribeBroker = new SubscribeBroker>(new SpanByteKeySerializer(), null, opts.PubSubPageSizeBytes(), true); + + CreateAOF(); + + logger?.LogTrace("TLS is {tlsEnabled}", opts.TlsOptions == null ? "disabled" : "enabled"); + + // Create Garnet TCP server if none was provided. 
+ this.server ??= new GarnetServerTcp(opts.Address, opts.Port, 0, opts.TlsOptions, opts.NetworkSendThrottleMax, logger); + + storeWrapper = new StoreWrapper(version, redisProtocolVersion, server, store, objectStore, objectStoreSizeTracker, + customCommandManager, appendOnlyFile, opts, clusterFactory: clusterFactory, loggerFactory: loggerFactory); + + // Create session provider for Garnet + Provider = new GarnetProvider(storeWrapper, subscribeBroker, itemBroker); + + // Create user facing API endpoints + Metrics = new MetricsApi(Provider); + Register = new RegisterApi(Provider); + Store = new StoreApi(storeWrapper); + + server.Register(WireFormat.ASCII, Provider); + } + + private void CreateMainStore(IClusterFactory clusterFactory, out string checkpointDir) + { + kvSettings = opts.GetSettings(this.loggerFactory?.CreateLogger("TsavoriteKV [main]"), out logFactory); + + checkpointDir = opts.CheckpointDir ?? opts.LogDir; + + // Run checkpoint on its own thread to control p99 + kvSettings.ThrottleCheckpointFlushDelayMs = opts.CheckpointThrottleFlushDelayMs; + kvSettings.CheckpointVersionSwitchBarrier = opts.EnableCluster; - var checkpointSettings = new CheckpointSettings - { - // Run checkpoint on its own thread to control p99 - ThrottleCheckpointFlushDelayMs = opts.CheckpointThrottleFlushDelayMs, - CheckpointVersionSwitchBarrier = opts.EnableCluster - }; var checkpointFactory = opts.DeviceFactoryCreator(); if (opts.EnableCluster) { - checkpointSettings.CheckpointManager = clusterFactory.CreateCheckpointManager(checkpointFactory, new DefaultCheckpointNamingScheme(CheckpointDir + "/Store/checkpoints"), true, logger); + kvSettings.CheckpointManager = clusterFactory.CreateCheckpointManager(checkpointFactory, + new DefaultCheckpointNamingScheme(checkpointDir + "/Store/checkpoints"), isMainStore: true, logger); } else { - checkpointSettings.CheckpointManager = new DeviceLogCommitCheckpointManager(checkpointFactory, - new DefaultCheckpointNamingScheme(CheckpointDir + 
"/Store/checkpoints"), - removeOutdated: true); + kvSettings.CheckpointManager = new DeviceLogCommitCheckpointManager(checkpointFactory, + new DefaultCheckpointNamingScheme(checkpointDir + "/Store/checkpoints"), removeOutdated: true); } + store = new(kvSettings + , StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions)); + } - store = new TsavoriteKV(indexSize, logSettings, checkpointSettings, revivificationSettings: revivSettings, logger: this.loggerFactory?.CreateLogger("TsavoriteKV [main]")); - - CacheSizeTracker objectStoreSizeTracker = null; + private void CreateObjectStore(IClusterFactory clusterFactory, CustomCommandManager customCommandManager, string CheckpointDir, out CacheSizeTracker objectStoreSizeTracker, out CollectionItemBroker itemBroker) + { + objectStoreSizeTracker = null; + itemBroker = null; if (!opts.DisableObjects) { - opts.GetObjectStoreSettings(out objLogSettings, out var objRevivSettings, out var objIndexSize, out var objTotalMemorySize); + objKvSettings = opts.GetObjectStoreSettings(this.loggerFactory?.CreateLogger("TsavoriteKV [obj]"), out var objTotalMemorySize); + + // Run checkpoint on its own thread to control p99 + objKvSettings.ThrottleCheckpointFlushDelayMs = opts.CheckpointThrottleFlushDelayMs; + objKvSettings.CheckpointVersionSwitchBarrier = opts.EnableCluster; - var objCheckpointSettings = new CheckpointSettings - { - ThrottleCheckpointFlushDelayMs = opts.CheckpointThrottleFlushDelayMs, - CheckpointVersionSwitchBarrier = opts.EnableCluster - }; - var objectCheckpointFactory = opts.DeviceFactoryCreator(); if (opts.EnableCluster) - { - objCheckpointSettings.CheckpointManager = clusterFactory.CreateCheckpointManager(opts.DeviceFactoryCreator(), new DefaultCheckpointNamingScheme(CheckpointDir + "/ObjectStore/checkpoints"), false, logger); - } + objKvSettings.CheckpointManager = clusterFactory.CreateCheckpointManager(opts.DeviceFactoryCreator(), + new 
DefaultCheckpointNamingScheme(CheckpointDir + "/ObjectStore/checkpoints"), isMainStore: false, logger); else - { - objCheckpointSettings.CheckpointManager = new DeviceLogCommitCheckpointManager(opts.DeviceFactoryCreator(), - new DefaultCheckpointNamingScheme(CheckpointDir + "/ObjectStore/checkpoints"), - removeOutdated: true); - } + objKvSettings.CheckpointManager = new DeviceLogCommitCheckpointManager(opts.DeviceFactoryCreator(), + new DefaultCheckpointNamingScheme(CheckpointDir + "/ObjectStore/checkpoints"), removeOutdated: true); + + objectStore = new(objKvSettings + , StoreFunctions.Create(new ByteArrayKeyComparer(), () => new ByteArrayBinaryObjectSerializer(), () => new GarnetObjectSerializer(customCommandManager)) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions)); - objectStore = new TsavoriteKV(objIndexSize, objLogSettings, objCheckpointSettings, - new SerializerSettings { keySerializer = () => new ByteArrayBinaryObjectSerializer(), valueSerializer = () => new GarnetObjectSerializer(customCommandManager) }, - revivificationSettings: objRevivSettings, logger: this.loggerFactory?.CreateLogger("TsavoriteKV [obj]")); if (objTotalMemorySize > 0) - objectStoreSizeTracker = new CacheSizeTracker(objectStore, objLogSettings, objTotalMemorySize, this.loggerFactory); + objectStoreSizeTracker = new CacheSizeTracker(objectStore, objKvSettings, objTotalMemorySize, this.loggerFactory); itemBroker = new CollectionItemBroker(); } + } - if (!opts.DisablePubSub) - { - broker = new SubscribeBroker>(new SpanByteKeySerializer(), null, opts.PubSubPageSizeBytes(), true); - } - + private void CreateAOF() + { if (opts.EnableAOF) { - if (opts.MainMemoryReplication) - { - if (opts.CommitFrequencyMs != -1) - throw new Exception("Need to set CommitFrequencyMs to -1 (manual commits) with MainMemoryReplication"); - } + if (opts.MainMemoryReplication && opts.CommitFrequencyMs != -1) + throw new Exception("Need to set CommitFrequencyMs to -1 (manual commits) 
with MainMemoryReplication"); opts.GetAofSettings(out var aofSettings); aofDevice = aofSettings.LogDevice; appendOnlyFile = new TsavoriteLog(aofSettings, logger: this.loggerFactory?.CreateLogger("TsavoriteLog [aof]")); if (opts.CommitFrequencyMs < 0 && opts.WaitForCommit) - { throw new Exception("Cannot use CommitWait with manual commits"); - } - } - else - { - if (opts.CommitFrequencyMs != 0 || opts.WaitForCommit) - { - throw new Exception("Cannot use CommitFrequencyMs or CommitWait without EnableAOF"); - } + return; } - - logger?.LogTrace("TLS is {tlsEnabled}", (opts.TlsOptions == null ? "disabled" : "enabled")); - - - // Create Garnet TCP server if none was provided. - if (this.server == null) - { - server = new GarnetServerTcp(opts.Address, opts.Port, 0, opts.TlsOptions, opts.NetworkSendThrottleMax, logger); - } - - storeWrapper = new StoreWrapper(version, redisProtocolVersion, server, store, objectStore, objectStoreSizeTracker, customCommandManager, appendOnlyFile, opts, clusterFactory: clusterFactory, loggerFactory: loggerFactory); - - // Create session provider for Garnet - Provider = new GarnetProvider(storeWrapper, broker, itemBroker); - - // Create user facing API endpoints - Metrics = new MetricsApi(Provider); - Register = new RegisterApi(Provider); - Store = new StoreApi(storeWrapper); - - server.Register(WireFormat.ASCII, Provider); + if (opts.CommitFrequencyMs != 0 || opts.WaitForCommit) + throw new Exception("Cannot use CommitFrequencyMs or CommitWait without EnableAOF"); } /// @@ -335,16 +330,17 @@ private void InternalDispose() { Provider?.Dispose(); server.Dispose(); - broker?.Dispose(); + subscribeBroker?.Dispose(); + itemBroker?.Dispose(); store.Dispose(); appendOnlyFile?.Dispose(); aofDevice?.Dispose(); - logSettings.LogDevice?.Dispose(); + kvSettings.LogDevice?.Dispose(); if (!opts.DisableObjects) { objectStore.Dispose(); - objLogSettings.LogDevice?.Dispose(); - objLogSettings.ObjectLogDevice?.Dispose(); + objKvSettings.LogDevice?.Dispose(); + 
objKvSettings.ObjectLogDevice?.Dispose(); } opts.AuthSettings?.Dispose(); if (disposeLoggerFactory) diff --git a/libs/server/AOF/AofProcessor.cs b/libs/server/AOF/AofProcessor.cs index a0bdb0ce93..b9e8e46b40 100644 --- a/libs/server/AOF/AofProcessor.cs +++ b/libs/server/AOF/AofProcessor.cs @@ -13,6 +13,12 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Wrapper for store and store-specific information /// @@ -31,12 +37,12 @@ public sealed unsafe partial class AofProcessor /// /// Session for main store /// - readonly BasicContext basicContext; + readonly BasicContext basicContext; /// /// Session for object store /// - readonly BasicContext objectStoreBasicContext; + readonly BasicContext objectStoreBasicContext; readonly Dictionary> inflightTxns; readonly byte[] buffer; @@ -262,7 +268,7 @@ private unsafe bool ReplayOp(byte* entryPtr) return true; } - static unsafe void StoreUpsert(BasicContext basicContext, byte* ptr) + static unsafe void StoreUpsert(BasicContext basicContext, byte* ptr) { ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); ref var input = ref Unsafe.AsRef(ptr + sizeof(AofHeader) + key.TotalSize); @@ -274,7 +280,7 @@ static unsafe void StoreUpsert(BasicContext basicContext, byte* ptr) + static unsafe void StoreRMW(BasicContext basicContext, byte* ptr) { byte* pbOutput = stackalloc byte[32]; ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); @@ -286,13 +292,14 @@ static unsafe void StoreRMW(BasicContext basicContext, byte* ptr) + static unsafe void StoreDelete(BasicContext basicContext, byte* ptr) { ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); basicContext.Delete(ref key); } - static unsafe void ObjectStoreUpsert(BasicContext basicContext, GarnetObjectSerializer garnetObjectSerializer, byte* ptr, byte* outputPtr, int outputLength) + static 
unsafe void ObjectStoreUpsert(BasicContext basicContext, + GarnetObjectSerializer garnetObjectSerializer, byte* ptr, byte* outputPtr, int outputLength) { ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); var keyB = key.ToByteArray(); @@ -306,7 +313,8 @@ static unsafe void ObjectStoreUpsert(BasicContext basicContext, byte* ptr, byte* outputPtr, int outputLength) + static unsafe void ObjectStoreRMW(BasicContext basicContext, + byte* ptr, byte* outputPtr, int outputLength) { ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); var keyB = key.ToByteArray(); @@ -324,7 +332,7 @@ static unsafe void ObjectStoreRMW(BasicContext basicContext, byte* ptr) + static unsafe void ObjectStoreDelete(BasicContext basicContext, byte* ptr) { ref var key = ref Unsafe.AsRef(ptr + sizeof(AofHeader)); var keyB = key.ToByteArray(); diff --git a/libs/server/API/GarnetApi.cs b/libs/server/API/GarnetApi.cs index ece745115d..4aec2f705d 100644 --- a/libs/server/API/GarnetApi.cs +++ b/libs/server/API/GarnetApi.cs @@ -8,16 +8,22 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + // Example aliases: - // using BasicGarnetApi = GarnetApi, BasicContext>; - // using LockableGarnetApi = GarnetApi, LockableContext>; + // using BasicGarnetApi = GarnetApi, BasicContext>; + // using LockableGarnetApi = GarnetApi, LockableContext>; /// /// Garnet API implementation /// public partial struct GarnetApi : IGarnetApi, IGarnetWatchApi - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { readonly StorageSession storageSession; TContext context; diff --git a/libs/server/API/GarnetApiObjectCommands.cs b/libs/server/API/GarnetApiObjectCommands.cs index bfa448788a..34501e55c8 100644 --- 
a/libs/server/API/GarnetApiObjectCommands.cs +++ b/libs/server/API/GarnetApiObjectCommands.cs @@ -6,13 +6,18 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; /// /// Garnet API implementation /// public partial struct GarnetApi : IGarnetApi, IGarnetWatchApi - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { #region SortedSet Methods diff --git a/libs/server/Cluster/IClusterProvider.cs b/libs/server/Cluster/IClusterProvider.cs index 086a937a43..210fdc429c 100644 --- a/libs/server/Cluster/IClusterProvider.cs +++ b/libs/server/Cluster/IClusterProvider.cs @@ -11,7 +11,12 @@ namespace Garnet.server { - using BasicGarnetApi = GarnetApi, BasicContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; /// /// Cluster provider diff --git a/libs/server/Providers/GarnetProvider.cs b/libs/server/Providers/GarnetProvider.cs index 527d9d474e..554bd76bfa 100644 --- a/libs/server/Providers/GarnetProvider.cs +++ b/libs/server/Providers/GarnetProvider.cs @@ -7,11 +7,14 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + /// /// Session provider for Garnet, based on /// [K, V, I, O, C] = [SpanByte, SpanByte, SpanByte, SpanByteAndMemory, long] /// - public sealed class GarnetProvider : TsavoriteKVProviderBase, SpanByteServerSerializer> + public sealed class GarnetProvider : TsavoriteKVProviderBase, MainStoreFunctions, MainStoreAllocator, SpanByteServerSerializer> { readonly StoreWrapper storeWrapper; diff --git a/libs/server/Providers/TsavoriteKVProviderBase.cs b/libs/server/Providers/TsavoriteKVProviderBase.cs index 2966a51a06..c451580648 100644 --- 
a/libs/server/Providers/TsavoriteKVProviderBase.cs +++ b/libs/server/Providers/TsavoriteKVProviderBase.cs @@ -11,14 +11,16 @@ namespace Garnet.server /// Abstract session provider for TsavoriteKV store based on /// [K, V, I, O, F, P] /// - public abstract class TsavoriteKVProviderBase : ISessionProvider - where Functions : ISessionFunctions + public abstract class TsavoriteKVProviderBase : ISessionProvider + where TSessionFunctions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator where ParameterSerializer : IServerSerializer { /// /// Store /// - protected readonly TsavoriteKV store; + protected readonly TsavoriteKV store; /// /// Serializer @@ -43,7 +45,8 @@ public abstract class TsavoriteKVProviderBase /// /// - public TsavoriteKVProviderBase(TsavoriteKV store, ParameterSerializer serializer, SubscribeBroker> broker = null, bool recoverStore = false, MaxSizeSettings maxSizeSettings = default) + public TsavoriteKVProviderBase(TsavoriteKV store, ParameterSerializer serializer, + SubscribeBroker> broker = null, bool recoverStore = false, MaxSizeSettings maxSizeSettings = default) { this.store = store; if (recoverStore) @@ -69,7 +72,7 @@ public TsavoriteKVProviderBase(TsavoriteKV store, ParameterSerialize /// GetFunctions() for custom functions provided by the client /// /// - public abstract Functions GetFunctions(); + public abstract TSessionFunctions GetFunctions(); /// public abstract IMessageConsumer GetSession(WireFormat wireFormat, INetworkSender networkSender); diff --git a/libs/server/Resp/LocalServerSession.cs b/libs/server/Resp/LocalServerSession.cs index ce1f725b0e..79f670f99a 100644 --- a/libs/server/Resp/LocalServerSession.cs +++ b/libs/server/Resp/LocalServerSession.cs @@ -7,7 +7,12 @@ namespace Garnet.server { - using BasicGarnetApi = GarnetApi, BasicContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; /// /// Local server session diff --git 
a/libs/server/Resp/RespServerSession.cs b/libs/server/Resp/RespServerSession.cs index b42c7afaad..fafee08a7a 100644 --- a/libs/server/Resp/RespServerSession.cs +++ b/libs/server/Resp/RespServerSession.cs @@ -18,8 +18,18 @@ namespace Garnet.server { - using BasicGarnetApi = GarnetApi, BasicContext>; - using LockableGarnetApi = GarnetApi, LockableContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; + using LockableGarnetApi = GarnetApi, + SpanByteAllocator>>, + LockableContext>, + GenericAllocator>>>>; /// /// RESP server session diff --git a/libs/server/Servers/GarnetServerOptions.cs b/libs/server/Servers/GarnetServerOptions.cs index 814535c46f..bd937261e5 100644 --- a/libs/server/Servers/GarnetServerOptions.cs +++ b/libs/server/Servers/GarnetServerOptions.cs @@ -332,10 +332,10 @@ public class GarnetServerOptions : ServerOptions public int RevivObjBinRecordCount; /// Max size of hash index (cache lines) after rounding down size in bytes to power of 2. - public int AdjustedIndexMaxSize; + public int AdjustedIndexMaxCacheLines; /// Max size of object store hash index (cache lines) after rounding down size in bytes to power of 2. 
- public int AdjustedObjectStoreIndexMaxSize; + public int AdjustedObjectStoreIndexMaxCacheLines; /// /// Directories on server from which custom command binaries can be loaded by admin users @@ -356,56 +356,54 @@ public GarnetServerOptions(ILogger logger = null) : base(logger) } /// - /// Get log settings + /// Get KVSettings for the main store log /// - /// - /// - /// - /// - public void GetSettings(out LogSettings logSettings, out int indexSize, out RevivificationSettings revivSettings, out INamedDeviceFactory logFactory) + public KVSettings GetSettings(ILogger logger, out INamedDeviceFactory logFactory) { - if (MutablePercent < 10 || MutablePercent > 95) + if (MutablePercent is < 10 or > 95) throw new Exception("MutablePercent must be between 10 and 95"); - logSettings = new LogSettings + KVSettings kvSettings = new(baseDir: null, logger: logger); + + var indexCacheLines = IndexSizeCachelines("hash index size", IndexSize); + kvSettings = new() { + IndexSize = indexCacheLines * 64L, PreallocateLog = false, MutableFraction = MutablePercent / 100.0, - PageSizeBits = PageSizeBits() + PageSize = 1L << PageSizeBits() }; - logger?.LogInformation($"[Store] Using page size of {PrettySize((long)Math.Pow(2, logSettings.PageSizeBits))}"); + logger?.LogInformation($"[Store] Using page size of {PrettySize(kvSettings.PageSize)}"); - logSettings.MemorySizeBits = MemorySizeBits(MemorySize, PageSize, out var storeEmptyPageCount); - logSettings.MinEmptyPageCount = storeEmptyPageCount; + kvSettings.MemorySize = 1L << MemorySizeBits(MemorySize, PageSize, out var storeEmptyPageCount); + kvSettings.MinEmptyPageCount = storeEmptyPageCount; - long effectiveSize = (1L << logSettings.MemorySizeBits) - storeEmptyPageCount * (1L << logSettings.PageSizeBits); + long effectiveSize = kvSettings.MemorySize - storeEmptyPageCount * kvSettings.MemorySize; if (storeEmptyPageCount == 0) - logger?.LogInformation($"[Store] Using log memory size of {PrettySize((long)Math.Pow(2, 
logSettings.MemorySizeBits))}"); + logger?.LogInformation($"[Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}"); else - logger?.LogInformation($"[Store] Using log memory size of {PrettySize((long)Math.Pow(2, logSettings.MemorySizeBits))}, with {storeEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); + logger?.LogInformation($"[Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}, with {storeEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); - logger?.LogInformation($"[Store] There are {PrettySize(1 << (logSettings.MemorySizeBits - logSettings.PageSizeBits))} log pages in memory"); + logger?.LogInformation($"[Store] There are {PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); - logSettings.SegmentSizeBits = SegmentSizeBits(); - logger?.LogInformation($"[Store] Using disk segment size of {PrettySize((long)Math.Pow(2, logSettings.SegmentSizeBits))}"); + kvSettings.SegmentSize = 1L << SegmentSizeBits(); + logger?.LogInformation($"[Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); - indexSize = IndexSizeCachelines("hash index size", IndexSize); - logger?.LogInformation($"[Store] Using hash index size of {PrettySize(indexSize * 64L)} ({PrettySize(indexSize)} cache lines)"); - logger?.LogInformation($"[Store] Hash index size is optimized for up to ~{PrettySize(indexSize * 4L)} distinct keys"); + logger?.LogInformation($"[Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); + logger?.LogInformation($"[Store] Hash index size is optimized for up to ~{PrettySize(indexCacheLines * 4L)} distinct keys"); - AdjustedIndexMaxSize = IndexMaxSize == string.Empty ? 0 : IndexSizeCachelines("hash index max size", IndexMaxSize); - if (AdjustedIndexMaxSize != 0 && AdjustedIndexMaxSize < indexSize) + AdjustedIndexMaxCacheLines = IndexMaxSize == string.Empty ? 
0 : IndexSizeCachelines("hash index max size", IndexMaxSize); + if (AdjustedIndexMaxCacheLines != 0 && AdjustedIndexMaxCacheLines < indexCacheLines) throw new Exception($"Index size {IndexSize} should not be less than index max size {IndexMaxSize}"); - if (AdjustedIndexMaxSize > 0) + if (AdjustedIndexMaxCacheLines > 0) { - logger?.LogInformation($"[Store] Using hash index max size of {PrettySize(AdjustedIndexMaxSize * 64L)}, ({PrettySize(AdjustedIndexMaxSize)} cache lines)"); - logger?.LogInformation($"[Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedIndexMaxSize * 4L)} distinct keys"); + logger?.LogInformation($"[Store] Using hash index max size of {PrettySize(AdjustedIndexMaxCacheLines * 64L)}, ({PrettySize(AdjustedIndexMaxCacheLines)} cache lines)"); + logger?.LogInformation($"[Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedIndexMaxCacheLines * 4L)} distinct keys"); } logger?.LogInformation($"[Store] Using log mutable percentage of {MutablePercent}%"); - if (DeviceFactoryCreator == null) - DeviceFactoryCreator = () => new LocalStorageNamedDeviceFactory(useNativeDeviceLinux: UseNativeDeviceLinux, logger: logger); + DeviceFactoryCreator ??= () => new LocalStorageNamedDeviceFactory(useNativeDeviceLinux: UseNativeDeviceLinux, logger: logger); if (LatencyMonitor && MetricsSamplingFrequency == 0) throw new Exception("LatencyMonitor requires MetricsSamplingFrequency to be set"); @@ -415,30 +413,30 @@ public void GetSettings(out LogSettings logSettings, out int indexSize, out Revi if (LogDir is null or "") LogDir = Directory.GetCurrentDirectory(); logFactory = GetInitializedDeviceFactory(LogDir); - logSettings.LogDevice = logFactory.Get(new FileDescriptor("Store", "hlog")); + kvSettings.LogDevice = logFactory.Get(new FileDescriptor("Store", "hlog")); } else { if (LogDir != null) throw new Exception("LogDir specified without enabling tiered storage (UseStorage)"); - logSettings.LogDevice = new NullDevice(); + 
kvSettings.LogDevice = new NullDevice(); logFactory = null; } if (CopyReadsToTail) - logSettings.ReadCopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog); + kvSettings.ReadCopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog); if (RevivInChainOnly) { logger?.LogInformation($"[Store] Using Revivification in-chain only"); - revivSettings = RevivificationSettings.InChainOnly.Clone(); + kvSettings.RevivificationSettings = RevivificationSettings.InChainOnly.Clone(); } else if (UseRevivBinsPowerOf2) { logger?.LogInformation($"[Store] Using Revivification with power-of-2 bins"); - revivSettings = RevivificationSettings.PowerOf2Bins.Clone(); - revivSettings.NumberOfBinsToSearch = RevivNumberOfBinsToSearch; - revivSettings.RevivifiableFraction = RevivifiableFraction; + kvSettings.RevivificationSettings = RevivificationSettings.PowerOf2Bins.Clone(); + kvSettings.RevivificationSettings.NumberOfBinsToSearch = RevivNumberOfBinsToSearch; + kvSettings.RevivificationSettings.RevivifiableFraction = RevivifiableFraction; } else if (RevivBinRecordSizes?.Length > 0) { @@ -446,7 +444,7 @@ public void GetSettings(out LogSettings logSettings, out int indexSize, out Revi // We use this in the RevivBinRecordCounts and RevivObjBinRecordCount Options help text, so assert it here because we can't use an interpolated string there. 
System.Diagnostics.Debug.Assert(RevivificationBin.DefaultRecordsPerBin == 256); - revivSettings = new() + kvSettings.RevivificationSettings = new() { NumberOfBinsToSearch = RevivNumberOfBinsToSearch, FreeRecordBins = new RevivificationBin[RevivBinRecordSizes.Length], @@ -460,7 +458,7 @@ public void GetSettings(out LogSettings logSettings, out int indexSize, out Revi 1 => RevivBinRecordCounts[0], _ => RevivBinRecordCounts[ii] }; - revivSettings.FreeRecordBins[ii] = new() + kvSettings.RevivificationSettings.FreeRecordBins[ii] = new() { RecordSize = RevivBinRecordSizes[ii], NumberOfRecords = recordCount, @@ -471,8 +469,9 @@ public void GetSettings(out LogSettings logSettings, out int indexSize, out Revi else { logger?.LogInformation($"[Store] Not using Revivification"); - revivSettings = default; } + + return kvSettings; } /// @@ -495,55 +494,53 @@ public static int MemorySizeBits(string memorySize, string storePageSize, out in } /// - /// Get object store settings + /// Get KVSettings for the object store log /// - /// - /// - /// - /// - - public void GetObjectStoreSettings(out LogSettings objLogSettings, out RevivificationSettings objRevivSettings, out int objIndexSize, out long objTotalMemorySize) + public KVSettings GetObjectStoreSettings(ILogger logger, out long objTotalMemorySize) { - if (ObjectStoreMutablePercent < 10 || ObjectStoreMutablePercent > 95) + if (ObjectStoreMutablePercent is < 10 or > 95) throw new Exception("ObjectStoreMutablePercent must be between 10 and 95"); - objLogSettings = new LogSettings + KVSettings kvSettings = new(baseDir: null, logger: logger); + + var indexCacheLines = IndexSizeCachelines("object store hash index size", ObjectStoreIndexSize); + kvSettings = new() { + IndexSize = indexCacheLines * 64L, PreallocateLog = false, MutableFraction = ObjectStoreMutablePercent / 100.0, - PageSizeBits = ObjectStorePageSizeBits() + PageSize = 1L << ObjectStorePageSizeBits() }; - logger?.LogInformation($"[Object Store] Using page size of 
{PrettySize((long)Math.Pow(2, objLogSettings.PageSizeBits))}"); - logger?.LogInformation($"[Object Store] Each page can hold ~{(long)(Math.Pow(2, objLogSettings.PageSizeBits) / 24)} key-value pairs of objects"); + logger?.LogInformation($"[Object Store] Using page size of {PrettySize(kvSettings.PageSize)}"); + logger?.LogInformation($"[Object Store] Each page can hold ~{kvSettings.PageSize / 24} key-value pairs of objects"); - objLogSettings.MemorySizeBits = MemorySizeBits(ObjectStoreLogMemorySize, ObjectStorePageSize, out var objectStoreEmptyPageCount); - objLogSettings.MinEmptyPageCount = objectStoreEmptyPageCount; + kvSettings.MemorySize = 1L << MemorySizeBits(ObjectStoreLogMemorySize, ObjectStorePageSize, out var objectStoreEmptyPageCount); + kvSettings.MinEmptyPageCount = objectStoreEmptyPageCount; - long effectiveSize = (1L << objLogSettings.MemorySizeBits) - objectStoreEmptyPageCount * (1L << objLogSettings.PageSizeBits); + long effectiveSize = kvSettings.MemorySize - objectStoreEmptyPageCount * kvSettings.PageSize; if (objectStoreEmptyPageCount == 0) - logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize((long)Math.Pow(2, objLogSettings.MemorySizeBits))}"); + logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}"); else - logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize((long)Math.Pow(2, objLogSettings.MemorySizeBits))}, with {objectStoreEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); + logger?.LogInformation($"[Object Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}, with {objectStoreEmptyPageCount} empty pages, for effective size of {PrettySize(effectiveSize)}"); logger?.LogInformation($"[Object Store] This can hold ~{effectiveSize / 24} key-value pairs of objects in memory total"); - logger?.LogInformation($"[Object Store] There are {PrettySize(1 << (objLogSettings.MemorySizeBits - 
objLogSettings.PageSizeBits))} log pages in memory"); + logger?.LogInformation($"[Object Store] There are {PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); - objLogSettings.SegmentSizeBits = ObjectStoreSegmentSizeBits(); - logger?.LogInformation($"[Object Store] Using disk segment size of {PrettySize((long)Math.Pow(2, objLogSettings.SegmentSizeBits))}"); + kvSettings.SegmentSize = 1L << ObjectStoreSegmentSizeBits(); + logger?.LogInformation($"[Object Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); - objIndexSize = IndexSizeCachelines("object store hash index size", ObjectStoreIndexSize); - logger?.LogInformation($"[Object Store] Using hash index size of {PrettySize(objIndexSize * 64L)} ({PrettySize(objIndexSize)} cache lines)"); - logger?.LogInformation($"[Object Store] Hash index size is optimized for up to ~{PrettySize(objIndexSize * 4L)} distinct keys"); + logger?.LogInformation($"[Object Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); + logger?.LogInformation($"[Object Store] Hash index size is optimized for up to ~{PrettySize(indexCacheLines * 4L)} distinct keys"); - AdjustedObjectStoreIndexMaxSize = ObjectStoreIndexMaxSize == string.Empty ? 0 : IndexSizeCachelines("hash index max size", ObjectStoreIndexMaxSize); - if (AdjustedObjectStoreIndexMaxSize != 0 && AdjustedObjectStoreIndexMaxSize < objIndexSize) + AdjustedObjectStoreIndexMaxCacheLines = ObjectStoreIndexMaxSize == string.Empty ? 
0 : IndexSizeCachelines("hash index max size", ObjectStoreIndexMaxSize); + if (AdjustedObjectStoreIndexMaxCacheLines != 0 && AdjustedObjectStoreIndexMaxCacheLines < indexCacheLines) throw new Exception($"Index size {IndexSize} should not be less than index max size {IndexMaxSize}"); - if (AdjustedObjectStoreIndexMaxSize > 0) + if (AdjustedObjectStoreIndexMaxCacheLines > 0) { - logger?.LogInformation($"[Object Store] Using hash index max size of {PrettySize(AdjustedObjectStoreIndexMaxSize * 64L)}, ({PrettySize(AdjustedObjectStoreIndexMaxSize)} cache lines)"); - logger?.LogInformation($"[Object Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedObjectStoreIndexMaxSize * 4L)} distinct keys"); + logger?.LogInformation($"[Object Store] Using hash index max size of {PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 64L)}, ({PrettySize(AdjustedObjectStoreIndexMaxCacheLines)} cache lines)"); + logger?.LogInformation($"[Object Store] Hash index max size is optimized for up to ~{PrettySize(AdjustedObjectStoreIndexMaxCacheLines * 4L)} distinct keys"); } logger?.LogInformation($"[Object Store] Using log mutable percentage of {ObjectStoreMutablePercent}%"); @@ -554,37 +551,38 @@ public void GetObjectStoreSettings(out LogSettings objLogSettings, out Revivific { if (LogDir is null or "") LogDir = Directory.GetCurrentDirectory(); - objLogSettings.LogDevice = GetInitializedDeviceFactory(LogDir).Get(new FileDescriptor("ObjectStore", "hlog")); - objLogSettings.ObjectLogDevice = GetInitializedDeviceFactory(LogDir).Get(new FileDescriptor("ObjectStore", "hlog.obj")); + kvSettings.LogDevice = GetInitializedDeviceFactory(LogDir).Get(new FileDescriptor("ObjectStore", "hlog")); + kvSettings.ObjectLogDevice = GetInitializedDeviceFactory(LogDir).Get(new FileDescriptor("ObjectStore", "hlog.obj")); } else { if (LogDir != null) throw new Exception("LogDir specified without enabling tiered storage (UseStorage)"); - objLogSettings.LogDevice = objLogSettings.ObjectLogDevice = 
new NullDevice(); + kvSettings.LogDevice = kvSettings.ObjectLogDevice = new NullDevice(); } if (ObjectStoreCopyReadsToTail) - objLogSettings.ReadCopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog); + kvSettings.ReadCopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog); if (RevivInChainOnly) { logger?.LogInformation($"[Object Store] Using Revivification in-chain only"); - objRevivSettings = RevivificationSettings.InChainOnly.Clone(); + kvSettings.RevivificationSettings = RevivificationSettings.InChainOnly.Clone(); } else if (UseRevivBinsPowerOf2 || RevivBinRecordSizes?.Length > 0) { logger?.LogInformation($"[Object Store] Using Revivification with a single fixed-size bin"); - objRevivSettings = RevivificationSettings.DefaultFixedLength.Clone(); - objRevivSettings.RevivifiableFraction = RevivifiableFraction; - objRevivSettings.FreeRecordBins[0].NumberOfRecords = RevivObjBinRecordCount; - objRevivSettings.FreeRecordBins[0].BestFitScanLimit = RevivBinBestFitScanLimit; + kvSettings.RevivificationSettings = RevivificationSettings.DefaultFixedLength.Clone(); + kvSettings.RevivificationSettings.RevivifiableFraction = RevivifiableFraction; + kvSettings.RevivificationSettings.FreeRecordBins[0].NumberOfRecords = RevivObjBinRecordCount; + kvSettings.RevivificationSettings.FreeRecordBins[0].BestFitScanLimit = RevivBinBestFitScanLimit; } else { logger?.LogInformation($"[Object Store] Not using Revivification"); - objRevivSettings = default; } + + return kvSettings; } /// diff --git a/libs/server/Servers/ServerOptions.cs b/libs/server/Servers/ServerOptions.cs index 820b76eafb..91e0212c98 100644 --- a/libs/server/Servers/ServerOptions.cs +++ b/libs/server/Servers/ServerOptions.cs @@ -173,42 +173,40 @@ public int IndexSizeCachelines(string name, string indexSize) } /// - /// Get log settings + /// Get KVSettings /// - /// - /// - /// - public void GetSettings(out LogSettings logSettings, out CheckpointSettings checkpointSettings, out int indexSize) + public 
void GetSettings() { - logSettings = new LogSettings + var indexCacheLines = IndexSizeCachelines("hash index size", IndexSize); + var kvSettings = new KVSettings() { + IndexSize = indexCacheLines * 64L, PreallocateLog = false, - PageSizeBits = PageSizeBits() + PageSize = 1L << PageSizeBits() }; - logger?.LogInformation($"[Store] Using page size of {PrettySize((long)Math.Pow(2, logSettings.PageSizeBits))}"); + logger?.LogInformation($"[Store] Using page size of {PrettySize(kvSettings.PageSize)}"); - logSettings.MemorySizeBits = MemorySizeBits(); - logger?.LogInformation($"[Store] Using log memory size of {PrettySize((long)Math.Pow(2, logSettings.MemorySizeBits))}"); + kvSettings.MemorySize = 1L << MemorySizeBits(); + logger?.LogInformation($"[Store] Using log memory size of {PrettySize(kvSettings.MemorySize)}"); - logger?.LogInformation($"[Store] There are {PrettySize(1 << (logSettings.MemorySizeBits - logSettings.PageSizeBits))} log pages in memory"); + logger?.LogInformation($"[Store] There are {PrettySize(kvSettings.MemorySize / kvSettings.PageSize)} log pages in memory"); - logSettings.SegmentSizeBits = SegmentSizeBits(); - logger?.LogInformation($"[Store] Using disk segment size of {PrettySize((long)Math.Pow(2, logSettings.SegmentSizeBits))}"); + kvSettings.SegmentSize = 1L << SegmentSizeBits(); + logger?.LogInformation($"[Store] Using disk segment size of {PrettySize(kvSettings.SegmentSize)}"); - indexSize = IndexSizeCachelines("hash index size", IndexSize); - logger?.LogInformation($"[Store] Using hash index size of {PrettySize(indexSize * 64L)} ({PrettySize(indexSize)} cache lines)"); + logger?.LogInformation($"[Store] Using hash index size of {PrettySize(kvSettings.IndexSize)} ({PrettySize(indexCacheLines)} cache lines)"); if (EnableStorageTier) { if (LogDir is null or "") LogDir = Directory.GetCurrentDirectory(); - logSettings.LogDevice = Devices.CreateLogDevice(LogDir + "/Store/hlog", logger: logger); + kvSettings.LogDevice = 
Devices.CreateLogDevice(LogDir + "/Store/hlog", logger: logger); } else { if (LogDir != null) throw new Exception("LogDir specified without enabling tiered storage (UseStorage)"); - logSettings.LogDevice = new NullDevice(); + kvSettings.LogDevice = new NullDevice(); } if (CheckpointDir == null) CheckpointDir = LogDir; @@ -216,11 +214,8 @@ public void GetSettings(out LogSettings logSettings, out CheckpointSettings chec if (CheckpointDir is null or "") CheckpointDir = Directory.GetCurrentDirectory(); - checkpointSettings = new CheckpointSettings - { - CheckpointDir = CheckpointDir + "/Store/checkpoints", - RemoveOutdated = true, - }; + kvSettings.CheckpointDir = CheckpointDir + "/Store/checkpoints"; + kvSettings.RemoveOutdatedCheckpoints = true; } /// diff --git a/libs/server/Storage/Functions/MainStore/CallbackMethods.cs b/libs/server/Storage/Functions/MainStore/CallbackMethods.cs index 0de37bd162..73d8bdebfb 100644 --- a/libs/server/Storage/Functions/MainStore/CallbackMethods.cs +++ b/libs/server/Storage/Functions/MainStore/CallbackMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { /// public void ReadCompletionCallback(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, long ctx, Status status, RecordMetadata recordMetadata) diff --git a/libs/server/Storage/Functions/MainStore/DeleteMethods.cs b/libs/server/Storage/Functions/MainStore/DeleteMethods.cs index a6ec4a05d9..4afb7064b3 100644 --- a/libs/server/Storage/Functions/MainStore/DeleteMethods.cs +++ b/libs/server/Storage/Functions/MainStore/DeleteMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : 
ISessionFunctions { /// public bool SingleDeleter(ref SpanByte key, ref SpanByte value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo) diff --git a/libs/server/Storage/Functions/MainStore/DisposeMethods.cs b/libs/server/Storage/Functions/MainStore/DisposeMethods.cs deleted file mode 100644 index 6575389a17..0000000000 --- a/libs/server/Storage/Functions/MainStore/DisposeMethods.cs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -using Tsavorite.core; - -namespace Garnet.server -{ - /// - /// Callback functions for main store - /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions - { - /// - public void DisposeSingleWriter(ref SpanByte key, ref SpanByte input, ref SpanByte src, ref SpanByte dst, ref SpanByteAndMemory output, ref UpsertInfo upsertInfo, WriteReason reason) - { - } - - /// - public void DisposeCopyUpdater(ref SpanByte key, ref SpanByte input, ref SpanByte oldValue, ref SpanByte newValue, ref SpanByteAndMemory output, ref RMWInfo rmwInfo) - { - } - - /// - public void DisposeInitialUpdater(ref SpanByte key, ref SpanByte input, ref SpanByte value, ref SpanByteAndMemory output, ref RMWInfo rmwInfo) - { - } - - /// - public void DisposeSingleDeleter(ref SpanByte key, ref SpanByte value, ref DeleteInfo deleteInfo) - { - } - - /// - public void DisposeDeserializedFromDisk(ref SpanByte key, ref SpanByte value) - { - } - - /// - public void DisposeForRevivification(ref SpanByte key, ref SpanByte value, int newKeySize) - { - } - } -} \ No newline at end of file diff --git a/libs/server/Storage/Functions/MainStore/MainStoreFunctions.cs b/libs/server/Storage/Functions/MainStore/MainSessionFunctions.cs similarity index 78% rename from libs/server/Storage/Functions/MainStore/MainStoreFunctions.cs rename to libs/server/Storage/Functions/MainStore/MainSessionFunctions.cs index 9ed485cc4f..7d9c1af22c 100644 --- 
a/libs/server/Storage/Functions/MainStore/MainStoreFunctions.cs +++ b/libs/server/Storage/Functions/MainStore/MainSessionFunctions.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { readonly FunctionsState functionsState; @@ -16,7 +16,7 @@ namespace Garnet.server /// Constructor /// /// - internal MainStoreFunctions(FunctionsState functionsState) + internal MainSessionFunctions(FunctionsState functionsState) { this.functionsState = functionsState; } diff --git a/libs/server/Storage/Functions/MainStore/PrivateMethods.cs b/libs/server/Storage/Functions/MainStore/PrivateMethods.cs index 5d7b13a6ea..888c8ed745 100644 --- a/libs/server/Storage/Functions/MainStore/PrivateMethods.cs +++ b/libs/server/Storage/Functions/MainStore/PrivateMethods.cs @@ -12,7 +12,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { static void CopyTo(ref SpanByte src, ref SpanByteAndMemory dst, MemoryPool memoryPool) { @@ -359,7 +359,7 @@ static bool InPlaceUpdateNumber(long val, ref SpanByte value, ref SpanByteAndMem return false; rmwInfo.ClearExtraValueLength(ref recordInfo, ref value, value.TotalSize); - _ = value.ShrinkSerializedLength(ndigits + value.MetadataSize); + value.ShrinkSerializedLength(ndigits + value.MetadataSize); _ = NumUtils.LongToSpanByte(val, value.AsSpan()); rmwInfo.SetUsedValueLength(ref recordInfo, ref value, value.TotalSize); diff --git a/libs/server/Storage/Functions/MainStore/RMWMethods.cs b/libs/server/Storage/Functions/MainStore/RMWMethods.cs index 6c3b5d572d..f05a01c617 100644 --- a/libs/server/Storage/Functions/MainStore/RMWMethods.cs +++ 
b/libs/server/Storage/Functions/MainStore/RMWMethods.cs @@ -12,7 +12,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { /// public bool NeedInitialUpdate(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref RMWInfo rmwInfo) diff --git a/libs/server/Storage/Functions/MainStore/ReadMethods.cs b/libs/server/Storage/Functions/MainStore/ReadMethods.cs index c8f7e21c3a..36b74eb0f1 100644 --- a/libs/server/Storage/Functions/MainStore/ReadMethods.cs +++ b/libs/server/Storage/Functions/MainStore/ReadMethods.cs @@ -10,7 +10,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { /// public bool SingleReader(ref SpanByte key, ref SpanByte input, ref SpanByte value, ref SpanByteAndMemory dst, ref ReadInfo readInfo) diff --git a/libs/server/Storage/Functions/MainStore/UpsertMethods.cs b/libs/server/Storage/Functions/MainStore/UpsertMethods.cs index 7e25c2104f..d46ef4a9cd 100644 --- a/libs/server/Storage/Functions/MainStore/UpsertMethods.cs +++ b/libs/server/Storage/Functions/MainStore/UpsertMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { /// public bool SingleWriter(ref SpanByte key, ref SpanByte input, ref SpanByte src, ref SpanByte dst, ref SpanByteAndMemory output, ref UpsertInfo upsertInfo, WriteReason reason, ref RecordInfo recordInfo) diff --git a/libs/server/Storage/Functions/MainStore/VarLenInputMethods.cs b/libs/server/Storage/Functions/MainStore/VarLenInputMethods.cs index 
eb23d7e5b3..6c9d812d87 100644 --- a/libs/server/Storage/Functions/MainStore/VarLenInputMethods.cs +++ b/libs/server/Storage/Functions/MainStore/VarLenInputMethods.cs @@ -9,7 +9,7 @@ namespace Garnet.server /// /// Callback functions for main store /// - public readonly unsafe partial struct MainStoreFunctions : ISessionFunctions + public readonly unsafe partial struct MainSessionFunctions : ISessionFunctions { /// /// Parse ASCII byte array into long and validate that only contains ASCII decimal characters diff --git a/libs/server/Storage/Functions/ObjectStore/CallbackMethods.cs b/libs/server/Storage/Functions/ObjectStore/CallbackMethods.cs index 1e0756b702..3765fb0bdb 100644 --- a/libs/server/Storage/Functions/ObjectStore/CallbackMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/CallbackMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public void ReadCompletionCallback(ref byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput output, long ctx, Status status, RecordMetadata recordMetadata) diff --git a/libs/server/Storage/Functions/ObjectStore/DeleteMethods.cs b/libs/server/Storage/Functions/ObjectStore/DeleteMethods.cs index 32e3bbe927..05bb422b47 100644 --- a/libs/server/Storage/Functions/ObjectStore/DeleteMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/DeleteMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public bool SingleDeleter(ref byte[] key, ref IGarnetObject value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo) diff --git a/libs/server/Storage/Functions/ObjectStore/DisposeMethods.cs 
b/libs/server/Storage/Functions/ObjectStore/DisposeMethods.cs deleted file mode 100644 index 016994822f..0000000000 --- a/libs/server/Storage/Functions/ObjectStore/DisposeMethods.cs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. - -using Tsavorite.core; - -namespace Garnet.server -{ - /// - /// Object store functions - /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions - { - /// - public void DisposeSingleWriter(ref byte[] key, ref ObjectInput input, ref IGarnetObject src, ref IGarnetObject dst, ref GarnetObjectStoreOutput output, ref UpsertInfo upsertInfo, WriteReason reason) - { - } - - /// - public void DisposeCopyUpdater(ref byte[] key, ref ObjectInput input, ref IGarnetObject oldValue, ref IGarnetObject newValue, ref GarnetObjectStoreOutput output, ref RMWInfo rmwInfo) - { - } - - /// - public void DisposeInitialUpdater(ref byte[] key, ref ObjectInput input, ref IGarnetObject value, ref GarnetObjectStoreOutput output, ref RMWInfo rmwInfo) - { - } - - /// - public void DisposeSingleDeleter(ref byte[] key, ref IGarnetObject value, ref DeleteInfo deleteInfo) - { - } - - /// - public void DisposeDeserializedFromDisk(ref byte[] key, ref IGarnetObject value) - { - } - - /// - public void DisposeForRevivification(ref byte[] key, ref IGarnetObject value, int newKeySize) - { - } - } -} \ No newline at end of file diff --git a/libs/server/Storage/Functions/ObjectStore/ObjectStoreFunctions.cs b/libs/server/Storage/Functions/ObjectStore/ObjectSessionFunctions.cs similarity index 75% rename from libs/server/Storage/Functions/ObjectStore/ObjectStoreFunctions.cs rename to libs/server/Storage/Functions/ObjectStore/ObjectSessionFunctions.cs index 2e21c619ce..2295eac2a7 100644 --- a/libs/server/Storage/Functions/ObjectStore/ObjectStoreFunctions.cs +++ b/libs/server/Storage/Functions/ObjectStore/ObjectSessionFunctions.cs @@ -8,14 +8,14 @@ namespace Garnet.server /// /// Object store 
functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { readonly FunctionsState functionsState; /// /// Constructor /// - internal ObjectStoreFunctions(FunctionsState functionsState) + internal ObjectSessionFunctions(FunctionsState functionsState) { this.functionsState = functionsState; } diff --git a/libs/server/Storage/Functions/ObjectStore/PrivateMethods.cs b/libs/server/Storage/Functions/ObjectStore/PrivateMethods.cs index c9d6d67855..39c1a2d13e 100644 --- a/libs/server/Storage/Functions/ObjectStore/PrivateMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/PrivateMethods.cs @@ -13,7 +13,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// /// Logging upsert from diff --git a/libs/server/Storage/Functions/ObjectStore/RMWMethods.cs b/libs/server/Storage/Functions/ObjectStore/RMWMethods.cs index 57dc5fb3e3..5cde80723a 100644 --- a/libs/server/Storage/Functions/ObjectStore/RMWMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/RMWMethods.cs @@ -10,7 +10,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public bool NeedInitialUpdate(ref byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput output, ref RMWInfo rmwInfo) diff --git a/libs/server/Storage/Functions/ObjectStore/ReadMethods.cs b/libs/server/Storage/Functions/ObjectStore/ReadMethods.cs index aa78d21a67..5dd899b76e 100644 --- a/libs/server/Storage/Functions/ObjectStore/ReadMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/ReadMethods.cs @@ -11,7 +11,7 @@ namespace Garnet.server /// /// Object 
store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public bool SingleReader(ref byte[] key, ref ObjectInput input, ref IGarnetObject value, ref GarnetObjectStoreOutput dst, ref ReadInfo readInfo) diff --git a/libs/server/Storage/Functions/ObjectStore/UpsertMethods.cs b/libs/server/Storage/Functions/ObjectStore/UpsertMethods.cs index 0c283d9baf..0d150e7cef 100644 --- a/libs/server/Storage/Functions/ObjectStore/UpsertMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/UpsertMethods.cs @@ -8,7 +8,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public bool SingleWriter(ref byte[] key, ref ObjectInput input, ref IGarnetObject src, ref IGarnetObject dst, ref GarnetObjectStoreOutput output, ref UpsertInfo upsertInfo, WriteReason reason, ref RecordInfo recordInfo) diff --git a/libs/server/Storage/Functions/ObjectStore/VarLenInputMethods.cs b/libs/server/Storage/Functions/ObjectStore/VarLenInputMethods.cs index ab88504609..dff1d58284 100644 --- a/libs/server/Storage/Functions/ObjectStore/VarLenInputMethods.cs +++ b/libs/server/Storage/Functions/ObjectStore/VarLenInputMethods.cs @@ -9,7 +9,7 @@ namespace Garnet.server /// /// Object store functions /// - public readonly unsafe partial struct ObjectStoreFunctions : ISessionFunctions + public readonly unsafe partial struct ObjectSessionFunctions : ISessionFunctions { /// public int GetRMWModifiedValueLength(ref IGarnetObject value, ref ObjectInput input) diff --git a/libs/server/Storage/Session/Common/ArrayKeyIterationFunctions.cs b/libs/server/Storage/Session/Common/ArrayKeyIterationFunctions.cs index d3e88dc1b9..41b767255c 100644 --- a/libs/server/Storage/Session/Common/ArrayKeyIterationFunctions.cs 
+++ b/libs/server/Storage/Session/Common/ArrayKeyIterationFunctions.cs @@ -214,7 +214,7 @@ public bool SingleReader(ref SpanByte key, ref SpanByte value, RecordMetadata re public bool ConcurrentReader(ref SpanByte key, ref SpanByte value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) { if ((patternB != null && !GlobUtils.Match(patternB, patternLength, key.ToPointer(), key.Length, true)) - || (value.MetadataSize != 0 && MainStoreFunctions.CheckExpiry(ref value))) + || (value.MetadataSize != 0 && MainSessionFunctions.CheckExpiry(ref value))) { cursorRecordResult = CursorRecordResult.Skip; } @@ -251,7 +251,7 @@ public bool SingleReader(ref byte[] key, ref IGarnetObject value, RecordMetadata public bool ConcurrentReader(ref byte[] key, ref IGarnetObject value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) { - if (value.Expiration > 0 && ObjectStoreFunctions.CheckExpiry(value)) + if (value.Expiration > 0 && ObjectSessionFunctions.CheckExpiry(value)) { cursorRecordResult = CursorRecordResult.Skip; return true; @@ -294,7 +294,7 @@ internal sealed class MainStoreGetDBSize : IScanIteratorFunctions 0 && ObjectStoreFunctions.CheckExpiry(value)) + if (value.Expiration > 0 && ObjectSessionFunctions.CheckExpiry(value)) cursorRecordResult = CursorRecordResult.Skip; else { diff --git a/libs/server/Storage/Session/MainStore/AdvancedOps.cs b/libs/server/Storage/Session/MainStore/AdvancedOps.cs index 1befbcdf64..259330d94f 100644 --- a/libs/server/Storage/Session/MainStore/AdvancedOps.cs +++ b/libs/server/Storage/Session/MainStore/AdvancedOps.cs @@ -8,10 +8,13 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + sealed partial class StorageSession : IDisposable { public GarnetStatus GET_WithPending(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, long ctx, out bool pending, ref TContext context) 
- where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var status = context.Read(ref key, ref input, ref output, ctx); @@ -36,7 +39,7 @@ public GarnetStatus GET_WithPending(ref SpanByte key, ref SpanByte inp } public bool GET_CompletePending((GarnetStatus, SpanByteAndMemory)[] outputArr, bool wait, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { Debug.Assert(outputArr != null); @@ -60,7 +63,7 @@ public bool GET_CompletePending((GarnetStatus, SpanByteAndMemory)[] ou } public bool GET_CompletePending(out CompletedOutputIterator completedOutputs, bool wait, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { latencyMetrics?.Start(LatencyMetricsType.PENDING_LAT); var ret = context.CompletePendingWithOutputs(out completedOutputs, wait); @@ -69,7 +72,7 @@ public bool GET_CompletePending(out CompletedOutputIterator(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var status = context.RMW(ref key, ref input, ref output); @@ -83,7 +86,7 @@ public GarnetStatus RMW_MainStore(ref SpanByte key, ref SpanByte input } public GarnetStatus Read_MainStore(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var status = context.Read(ref key, ref input, ref output); diff --git a/libs/server/Storage/Session/MainStore/BitmapOps.cs b/libs/server/Storage/Session/MainStore/BitmapOps.cs index 25dd38ed6e..f2e0093e5f 100644 --- a/libs/server/Storage/Session/MainStore/BitmapOps.cs +++ b/libs/server/Storage/Session/MainStore/BitmapOps.cs @@ -6,15 +6,17 @@ using System.Diagnostics; using System.Runtime.CompilerServices; using Garnet.common; -using Microsoft.Extensions.Logging; using Tsavorite.core; namespace Garnet.server { + using MainStoreAllocator = 
SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + sealed partial class StorageSession : IDisposable { public unsafe GarnetStatus StringSetBit(ArgSlice key, ArgSlice offset, bool bit, out bool previous, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { previous = false; @@ -48,7 +50,7 @@ public unsafe GarnetStatus StringSetBit(ArgSlice key, ArgSlice offset, } public unsafe GarnetStatus StringGetBit(ArgSlice key, ArgSlice offset, out bool bValue, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { bValue = false; @@ -227,7 +229,7 @@ public GarnetStatus StringBitOperation(BitmapOperation bitop, ArgSlice destinati } public unsafe GarnetStatus StringBitCount(ArgSlice key, long start, long end, bool useBitInterval, out long result, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { result = 0; @@ -273,7 +275,7 @@ public unsafe GarnetStatus StringBitCount(ArgSlice key, long start, lo } public unsafe GarnetStatus StringBitField(ArgSlice key, List commandArguments, out List result, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size + sizeof(byte) + sizeof(byte) + sizeof(long) + sizeof(long) + sizeof(byte); byte* input = scratchBufferManager.CreateArgSlice(inputSize).ptr; @@ -345,23 +347,23 @@ public unsafe GarnetStatus StringBitField(ArgSlice key, List(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => RMW_MainStore(ref key, ref input, ref output, ref context); public GarnetStatus StringGetBit(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => Read_MainStore(ref key, ref input, ref output, ref 
context); public unsafe GarnetStatus StringBitCount(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => Read_MainStore(ref key, ref input, ref output, ref context); public unsafe GarnetStatus StringBitPosition(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => Read_MainStore(ref key, ref input, ref output, ref context); public unsafe GarnetStatus StringBitField(ref SpanByte key, ref SpanByte input, byte secondaryCommand, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { GarnetStatus status; if (secondaryCommand == (byte)RespCommand.GET) @@ -372,7 +374,7 @@ public unsafe GarnetStatus StringBitField(ref SpanByte key, ref SpanBy } public unsafe GarnetStatus StringBitFieldReadOnly(ref SpanByte key, ref SpanByte input, byte secondaryCommand, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { GarnetStatus status = GarnetStatus.NOTFOUND; diff --git a/libs/server/Storage/Session/MainStore/CompletePending.cs b/libs/server/Storage/Session/MainStore/CompletePending.cs index 1ffde7ef47..e83d3a8677 100644 --- a/libs/server/Storage/Session/MainStore/CompletePending.cs +++ b/libs/server/Storage/Session/MainStore/CompletePending.cs @@ -6,6 +6,9 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + sealed partial class StorageSession { /// @@ -15,7 +18,7 @@ sealed partial class StorageSession /// /// static void CompletePendingForSession(ref Status status, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { context.CompletePendingWithOutputs(out var 
completedOutputs, wait: true); var more = completedOutputs.Next(); diff --git a/libs/server/Storage/Session/MainStore/HyperLogLogOps.cs b/libs/server/Storage/Session/MainStore/HyperLogLogOps.cs index 6db648cc3a..655b494482 100644 --- a/libs/server/Storage/Session/MainStore/HyperLogLogOps.cs +++ b/libs/server/Storage/Session/MainStore/HyperLogLogOps.cs @@ -10,14 +10,16 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + sealed partial class StorageSession : IDisposable { - /// /// Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as key. /// public unsafe GarnetStatus HyperLogLogAdd(ArgSlice key, string[] elements, out bool updated, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { updated = false; int inputSize = sizeof(int) + RespInputHeader.Size + sizeof(int) + sizeof(long); @@ -70,7 +72,7 @@ public unsafe GarnetStatus HyperLogLogAdd(ArgSlice key, string[] eleme /// /// public GarnetStatus HyperLogLogAdd(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => RMW_MainStore(ref key, ref input, ref output, ref context); /// @@ -84,7 +86,7 @@ public GarnetStatus HyperLogLogAdd(ref SpanByte key, ref SpanByte inpu /// /// public unsafe GarnetStatus HyperLogLogLength(Span keys, ref SpanByte input, out long count, out bool error, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { count = 0; error = false; @@ -114,7 +116,7 @@ public unsafe GarnetStatus HyperLogLogLength(Span keys, ref } public unsafe GarnetStatus HyperLogLogLength(Span keys, out long count, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { //4 byte length of input //1 byte RespCommand diff --git 
a/libs/server/Storage/Session/MainStore/MainStoreOps.cs b/libs/server/Storage/Session/MainStore/MainStoreOps.cs index 6d4f79d09d..dc5aac7a3a 100644 --- a/libs/server/Storage/Session/MainStore/MainStoreOps.cs +++ b/libs/server/Storage/Session/MainStore/MainStoreOps.cs @@ -9,10 +9,16 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { public GarnetStatus GET(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { long ctx = default; var status = context.Read(ref key, ref input, ref output, ctx); @@ -37,7 +43,7 @@ public GarnetStatus GET(ref SpanByte key, ref SpanByte input, ref Span } public unsafe GarnetStatus ReadWithUnsafeContext(ArgSlice key, ref SpanByte input, ref SpanByteAndMemory output, long localHeadAddress, out bool epochChanged, ref TContext context) - where TContext : ITsavoriteContext, IUnsafeContext + where TContext : ITsavoriteContext, IUnsafeContext { var _key = key.SpanByte; @@ -74,7 +80,7 @@ public unsafe GarnetStatus ReadWithUnsafeContext(ArgSlice key, ref Spa } public unsafe GarnetStatus GET(ArgSlice key, out ArgSlice value, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size; @@ -105,7 +111,7 @@ public unsafe GarnetStatus GET(ArgSlice key, out ArgSlice value, ref T } public unsafe GarnetStatus GET(ArgSlice key, out MemoryResult value, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size; @@ -124,7 +130,7 @@ public unsafe GarnetStatus GET(ArgSlice key, out MemoryResult va } public GarnetStatus GET(byte[] key, out 
GarnetObjectStoreOutput output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { long ctx = default; var status = objectContext.Read(key, out output, ctx); @@ -156,7 +162,7 @@ public GarnetStatus GET(byte[] key, out GarnetObjectStoreOutput /// Basic Context of the store /// Operation status public unsafe GarnetStatus GETDEL(ArgSlice key, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; return GETDEL(ref _key, ref output, ref context); @@ -170,7 +176,7 @@ public unsafe GarnetStatus GETDEL(ArgSlice key, ref SpanByteAndMemory /// Basic Context of the store /// Operation status public unsafe GarnetStatus GETDEL(ref SpanByte key, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { // size data + header int inputSize = sizeof(int) + RespInputHeader.Size; @@ -194,7 +200,7 @@ public unsafe GarnetStatus GETDEL(ref SpanByte key, ref SpanByteAndMem } public unsafe GarnetStatus GETRANGE(ref SpanByte key, int sliceStart, int sliceLength, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size + sizeof(int) * 2; byte* pbCmdInput = stackalloc byte[inputSize]; @@ -243,8 +249,8 @@ public unsafe GarnetStatus GETRANGE(ref SpanByte key, int sliceStart, /// when true the command to execute is PTTL. 
/// public unsafe GarnetStatus TTL(ref SpanByte key, StoreType storeType, ref SpanByteAndMemory output, ref TContext context, ref TObjectContext objectContext, bool milliseconds = false) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size; byte* pbCmdInput = stackalloc byte[inputSize]; @@ -300,14 +306,14 @@ public unsafe GarnetStatus TTL(ref SpanByte key, Store } public GarnetStatus SET(ref SpanByte key, ref SpanByte value, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { context.Upsert(ref key, ref value); return GarnetStatus.OK; } public unsafe GarnetStatus SET_Conditional(ref SpanByte key, ref SpanByte input, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { byte* pbOutput = stackalloc byte[8]; var o = new SpanByteAndMemory(pbOutput, 8); @@ -334,7 +340,7 @@ public unsafe GarnetStatus SET_Conditional(ref SpanByte key, ref SpanB } public unsafe GarnetStatus SET_Conditional(ref SpanByte key, ref SpanByte input, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var status = context.RMW(ref key, ref input, ref output); @@ -358,7 +364,7 @@ public unsafe GarnetStatus SET_Conditional(ref SpanByte key, ref SpanB } public GarnetStatus SET(ArgSlice key, ArgSlice value, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; var _value = value.SpanByte; @@ -366,14 +372,14 @@ public GarnetStatus SET(ArgSlice key, ArgSlice value, ref TContext con } public GarnetStatus SET(byte[] key, IGarnetObject value, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { objectContext.Upsert(key, value); return GarnetStatus.OK; 
} public GarnetStatus SET(ArgSlice key, Memory value, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; unsafe @@ -387,11 +393,11 @@ public GarnetStatus SET(ArgSlice key, Memory value, ref TContext } public unsafe GarnetStatus SETEX(ArgSlice key, ArgSlice value, ArgSlice expiryMs, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext => SETEX(key, value, TimeSpan.FromMilliseconds(NumUtils.BytesToLong(expiryMs.Length, expiryMs.ptr)), ref context); public GarnetStatus SETEX(ArgSlice key, ArgSlice value, TimeSpan expiry, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; var valueSB = scratchBufferManager.FormatScratch(sizeof(long), value).SpanByte; @@ -409,7 +415,7 @@ public GarnetStatus SETEX(ArgSlice key, ArgSlice value, TimeSpan expir /// Store context /// Operation status public unsafe GarnetStatus APPEND(ArgSlice key, ArgSlice value, ref ArgSlice output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; var _value = value.SpanByte; @@ -428,7 +434,7 @@ public unsafe GarnetStatus APPEND(ArgSlice key, ArgSlice value, ref Ar /// Store context /// Operation status public unsafe GarnetStatus APPEND(ref SpanByte key, ref SpanByte value, ref SpanByteAndMemory output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { int inputSize = sizeof(int) + RespInputHeader.Size + sizeof(int) + sizeof(long); byte* pbCmdInput = stackalloc byte[inputSize]; @@ -458,16 +464,16 @@ public unsafe GarnetStatus APPEND(ref SpanByte key, ref SpanByte value } public GarnetStatus DELETE(ArgSlice key, StoreType storeType, ref TContext context, ref TObjectContext objectContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + 
where TObjectContext : ITsavoriteContext { var _key = key.SpanByte; return DELETE(ref _key, storeType, ref context, ref objectContext); } public GarnetStatus DELETE(ref SpanByte key, StoreType storeType, ref TContext context, ref TObjectContext objectContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var found = false; @@ -489,8 +495,8 @@ public GarnetStatus DELETE(ref SpanByte key, StoreType } public GarnetStatus DELETE(byte[] key, StoreType storeType, ref TContext context, ref TObjectContext objectContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { bool found = false; @@ -624,8 +630,8 @@ public unsafe GarnetStatus RENAME(ArgSlice oldKeySlice, ArgSlice newKeySlice, St /// Object context for the object store. /// public GarnetStatus EXISTS(ArgSlice key, StoreType storeType, ref TContext context, ref TObjectContext objectContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { GarnetStatus status = GarnetStatus.NOTFOUND; @@ -666,8 +672,8 @@ public GarnetStatus EXISTS(ArgSlice key, StoreType sto /// Object context for the object store. 
/// public unsafe GarnetStatus EXPIRE(ArgSlice key, ArgSlice expiryMs, out bool timeoutSet, StoreType storeType, ExpireOption expireOption, ref TContext context, ref TObjectContext objectStoreContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => EXPIRE(key, TimeSpan.FromMilliseconds(NumUtils.BytesToLong(expiryMs.Length, expiryMs.ptr)), out timeoutSet, storeType, expireOption, ref context, ref objectStoreContext); /// @@ -685,8 +691,8 @@ public unsafe GarnetStatus EXPIRE(ArgSlice key, ArgSli /// When true the command executed is PEXPIRE, expire by default. /// public unsafe GarnetStatus EXPIRE(ArgSlice key, TimeSpan expiry, out bool timeoutSet, StoreType storeType, ExpireOption expireOption, ref TContext context, ref TObjectContext objectStoreContext, bool milliseconds = false) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { byte* pbCmdInput = stackalloc byte[sizeof(int) + sizeof(long) + RespInputHeader.Size + sizeof(byte)]; *(int*)pbCmdInput = sizeof(long) + RespInputHeader.Size; @@ -749,8 +755,8 @@ public unsafe GarnetStatus EXPIRE(ArgSlice key, TimeSp } public unsafe GarnetStatus PERSIST(ArgSlice key, StoreType storeType, ref TContext context, ref TObjectContext objectStoreContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { GarnetStatus status = GarnetStatus.NOTFOUND; @@ -821,7 +827,7 @@ public unsafe GarnetStatus PERSIST(ArgSlice key, Store /// Basic context for the main store /// public unsafe GarnetStatus SETRANGE(ArgSlice key, ArgSlice value, int offset, ref ArgSlice output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var sbKey = key.SpanByte; SpanByteAndMemory 
sbmOut = new(output.SpanByte); @@ -853,7 +859,7 @@ public unsafe GarnetStatus SETRANGE(ArgSlice key, ArgSlice value, int } public GarnetStatus Increment(ArgSlice key, ArgSlice input, ref ArgSlice output, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var _key = key.SpanByte; var _input = input.SpanByte; @@ -868,7 +874,7 @@ public GarnetStatus Increment(ArgSlice key, ArgSlice input, ref ArgSli } public unsafe GarnetStatus Increment(ArgSlice key, out long output, long increment, ref TContext context) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { var cmd = RespCommand.INCRBY; if (increment < 0) @@ -922,8 +928,8 @@ public unsafe GarnetStatus SCAN(long cursor, ArgSlice match, long coun } public GarnetStatus GetKeyType(ArgSlice key, out string keyType, ref TContext context, ref TObjectContext objectContext) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { keyType = "string"; // Check if key exists in Main store @@ -962,8 +968,8 @@ public GarnetStatus GetKeyType(ArgSlice key, out strin } public GarnetStatus MemoryUsageForKey(ArgSlice key, out long memoryUsage, ref TContext context, ref TObjectContext objectContext, int samples = 0) - where TContext : ITsavoriteContext - where TObjectContext : ITsavoriteContext + where TContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { memoryUsage = -1; diff --git a/libs/server/Storage/Session/ObjectStore/AdvancedOps.cs b/libs/server/Storage/Session/ObjectStore/AdvancedOps.cs index a97f9c4b20..d7a7cb4d65 100644 --- a/libs/server/Storage/Session/ObjectStore/AdvancedOps.cs +++ b/libs/server/Storage/Session/ObjectStore/AdvancedOps.cs @@ -6,10 +6,13 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { 
public GarnetStatus RMW_ObjectStore(ref byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = objectStoreContext.RMW(ref key, ref input, ref output); @@ -28,7 +31,7 @@ public GarnetStatus RMW_ObjectStore(ref byte[] key, ref ObjectIn } public GarnetStatus Read_ObjectStore(ref byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = objectStoreContext.Read(ref key, ref input, ref output); diff --git a/libs/server/Storage/Session/ObjectStore/Common.cs b/libs/server/Storage/Session/ObjectStore/Common.cs index 2e0bd8486c..214233b669 100644 --- a/libs/server/Storage/Session/ObjectStore/Common.cs +++ b/libs/server/Storage/Session/ObjectStore/Common.cs @@ -9,13 +9,15 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { #region Common ObjectStore Methods - unsafe GarnetStatus RMWObjectStoreOperation(byte[] key, ref ObjectInput input, - out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + unsafe GarnetStatus RMWObjectStoreOperation(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -35,7 +37,7 @@ unsafe GarnetStatus RMWObjectStoreOperation(byte[] key, ref Obje unsafe GarnetStatus RMWObjectStoreOperation(byte[] key, ArgSlice input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { if 
(objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -55,9 +57,8 @@ unsafe GarnetStatus RMWObjectStoreOperation(byte[] key, ArgSlice /// /// /// - unsafe GarnetStatus RMWObjectStoreOperationWithOutput(byte[] key, ref ObjectInput input, - ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) - where TObjectContext : ITsavoriteContext + GarnetStatus RMWObjectStoreOperationWithOutput(byte[] key, ref ObjectInput input, ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -78,9 +79,8 @@ unsafe GarnetStatus RMWObjectStoreOperationWithOutput(byte[] key /// /// /// - unsafe GarnetStatus ReadObjectStoreOperationWithOutput(byte[] key, ref ObjectInput input, - ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) - where TObjectContext : ITsavoriteContext + GarnetStatus ReadObjectStoreOperationWithOutput(byte[] key, ref ObjectInput input, ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -103,7 +103,7 @@ unsafe GarnetStatus ReadObjectStoreOperationWithOutput(byte[] ke /// unsafe GarnetStatus ReadObjectStoreOperationWithOutput(byte[] key, ArgSlice input, ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -299,7 +299,7 @@ unsafe ArgSlice ProcessRespSingleTokenOutput(GarnetObjectStoreOutput outputFoote /// /// unsafe GarnetStatus ReadObjectStoreOperation(byte[] key, ArgSlice input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : 
ITsavoriteContext + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -335,7 +335,7 @@ unsafe GarnetStatus ReadObjectStoreOperation(byte[] key, ArgSlic /// /// unsafe GarnetStatus ReadObjectStoreOperation(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { if (objectStoreContext.Session is null) ThrowObjectStoreUninitializedException(); @@ -368,7 +368,7 @@ unsafe GarnetStatus ReadObjectStoreOperation(byte[] key, ref Obj /// /// public GarnetStatus ObjectScan(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); [MethodImpl(MethodImplOptions.NoInlining)] @@ -386,7 +386,7 @@ static void ThrowObjectStoreUninitializedException() /// /// private GarnetStatus CompletePendingAndGetGarnetStatus(Status status, ref TObjectContext objectStoreContext, ref GarnetObjectStoreOutput outputFooter) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { if (status.IsPending) CompletePendingForObjectStoreSession(ref status, ref outputFooter, ref objectStoreContext); diff --git a/libs/server/Storage/Session/ObjectStore/CompletePending.cs b/libs/server/Storage/Session/ObjectStore/CompletePending.cs index ae90c009e8..3529766112 100644 --- a/libs/server/Storage/Session/ObjectStore/CompletePending.cs +++ b/libs/server/Storage/Session/ObjectStore/CompletePending.cs @@ -6,6 +6,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession { /// @@ -15,7 +18,7 @@ sealed partial class StorageSession /// 
/// static void CompletePendingForObjectStoreSession(ref Status status, ref GarnetObjectStoreOutput output, ref TContext objectContext) - where TContext : ITsavoriteContext + where TContext : ITsavoriteContext { objectContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); var more = completedOutputs.Next(); diff --git a/libs/server/Storage/Session/ObjectStore/HashOps.cs b/libs/server/Storage/Session/ObjectStore/HashOps.cs index 867c30f1e1..fa051e9af9 100644 --- a/libs/server/Storage/Session/ObjectStore/HashOps.cs +++ b/libs/server/Storage/Session/ObjectStore/HashOps.cs @@ -7,6 +7,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Server API methods - HASH /// @@ -28,7 +31,7 @@ sealed partial class StorageSession : IDisposable /// /// public unsafe GarnetStatus HashSet(ArgSlice key, ArgSlice field, ArgSlice value, out int itemsDoneCount, ref TObjectContext objectStoreContext, bool nx = false) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { itemsDoneCount = 0; @@ -68,7 +71,7 @@ public unsafe GarnetStatus HashSet(ArgSlice key, ArgSlice field, /// /// public unsafe GarnetStatus HashSet(ArgSlice key, (ArgSlice field, ArgSlice value)[] elements, out int itemsDoneCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { itemsDoneCount = 0; @@ -114,7 +117,7 @@ public unsafe GarnetStatus HashSet(ArgSlice key, (ArgSlice field /// /// public GarnetStatus HashDelete(ArgSlice key, ArgSlice field, out int itemsDoneCount, ref TObjectContext objectStoreContext, bool nx = false) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => HashDelete(key, new ArgSlice[] { field }, out itemsDoneCount, ref objectStoreContext); /// @@ -127,7 +130,7 @@ public GarnetStatus HashDelete(ArgSlice key, ArgSlice field, out /// /// public unsafe 
GarnetStatus HashDelete(ArgSlice key, ArgSlice[] fields, out int itemsDoneCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { itemsDoneCount = 0; @@ -172,7 +175,7 @@ public unsafe GarnetStatus HashDelete(ArgSlice key, ArgSlice[] f /// /// public unsafe GarnetStatus HashGet(ArgSlice key, ArgSlice field, out ArgSlice value, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { value = default; @@ -219,7 +222,7 @@ public unsafe GarnetStatus HashGet(ArgSlice key, ArgSlice field, /// /// public unsafe GarnetStatus HashGetMultiple(ArgSlice key, ArgSlice[] fields, out ArgSlice[] values, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { values = default; @@ -269,7 +272,7 @@ public unsafe GarnetStatus HashGetMultiple(ArgSlice key, ArgSlic /// /// public unsafe GarnetStatus HashGetAll(ArgSlice key, out ArgSlice[] values, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { values = default; @@ -312,7 +315,7 @@ public unsafe GarnetStatus HashGetAll(ArgSlice key, out ArgSlice /// /// public unsafe GarnetStatus HashLength(ArgSlice key, out int items, ref TObjectContext objectStoreContext, bool nx = false) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { items = 0; @@ -351,7 +354,7 @@ public unsafe GarnetStatus HashLength(ArgSlice key, out int item /// /// public unsafe GarnetStatus HashExists(ArgSlice key, ArgSlice field, out bool exists, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { exists = false; if (key.Length == 0) @@ -387,7 +390,7 @@ public unsafe GarnetStatus HashExists(ArgSlice key, ArgSlice fie /// /// public unsafe GarnetStatus 
HashRandomField(ArgSlice key, out ArgSlice field, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { field = default; @@ -439,7 +442,7 @@ public unsafe GarnetStatus HashRandomField(ArgSlice key, out Arg /// /// public unsafe GarnetStatus HashRandomField(ArgSlice key, int count, bool withValues, out ArgSlice[] fields, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { fields = default; @@ -489,7 +492,7 @@ public unsafe GarnetStatus HashRandomField(ArgSlice key, int cou /// /// public unsafe GarnetStatus HashScan(ArgSlice key, long cursor, string match, long count, out ArgSlice[] items, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { items = default; @@ -561,7 +564,7 @@ public unsafe GarnetStatus HashScan(ArgSlice key, long cursor, s /// /// public GarnetStatus HashSet(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -577,7 +580,7 @@ public GarnetStatus HashSet(byte[] key, ref ObjectInput input, o /// /// public GarnetStatus HashGet(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -590,7 +593,7 @@ public GarnetStatus HashGet(byte[] key, ref ObjectInput input, r /// /// public GarnetStatus HashGetAll(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where 
TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -603,7 +606,7 @@ public GarnetStatus HashGetAll(byte[] key, ref ObjectInput input /// /// public GarnetStatus HashGetMultiple(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -616,7 +619,7 @@ public GarnetStatus HashGetMultiple(byte[] key, ref ObjectInput /// /// public GarnetStatus HashRandomField(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -629,7 +632,7 @@ public GarnetStatus HashRandomField(byte[] key, ref ObjectInput /// /// public GarnetStatus HashLength(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -642,7 +645,7 @@ public GarnetStatus HashLength(byte[] key, ref ObjectInput input /// /// public GarnetStatus HashStrLength(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -655,7 +658,7 @@ public GarnetStatus HashStrLength(byte[] key, ref ObjectInput in /// /// public GarnetStatus HashDelete(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref 
TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -668,7 +671,7 @@ public GarnetStatus HashDelete(byte[] key, ref ObjectInput input /// /// public GarnetStatus HashExists(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -681,7 +684,7 @@ public GarnetStatus HashExists(byte[] key, ref ObjectInput input /// /// public GarnetStatus HashKeys(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -694,7 +697,7 @@ public GarnetStatus HashKeys(byte[] key, ref ObjectInput input, /// /// public GarnetStatus HashVals(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -707,7 +710,7 @@ public GarnetStatus HashVals(byte[] key, ref ObjectInput input, /// /// public GarnetStatus HashIncrement(byte[] key, ArgSlice input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, input, out output, ref objectContext); /// @@ -721,7 +724,7 @@ public GarnetStatus HashIncrement(byte[] key, ArgSlice input, ou /// /// public GarnetStatus HashIncrement(byte[] key, ref ObjectInput input, ref 
GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); } } \ No newline at end of file diff --git a/libs/server/Storage/Session/ObjectStore/ListOps.cs b/libs/server/Storage/Session/ObjectStore/ListOps.cs index 27d1c46f06..aa89ddc392 100644 --- a/libs/server/Storage/Session/ObjectStore/ListOps.cs +++ b/libs/server/Storage/Session/ObjectStore/ListOps.cs @@ -7,6 +7,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { /// @@ -23,7 +26,7 @@ sealed partial class StorageSession : IDisposable /// /// public unsafe GarnetStatus ListPush(ArgSlice key, ArgSlice[] elements, ListOperation lop, out int itemsDoneCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { itemsDoneCount = 0; @@ -74,7 +77,7 @@ public unsafe GarnetStatus ListPush(ArgSlice key, ArgSlice[] ele /// /// public unsafe GarnetStatus ListPush(ArgSlice key, ArgSlice element, ListOperation lop, out int itemsDoneCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { itemsDoneCount = 0; @@ -110,7 +113,7 @@ public unsafe GarnetStatus ListPush(ArgSlice key, ArgSlice eleme /// /// The popped element public GarnetStatus ListPop(ArgSlice key, ListOperation lop, ref TObjectContext objectStoreContext, out ArgSlice element) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = ListPop(key, 1, lop, ref objectStoreContext, out var elements); element = elements.FirstOrDefault(); @@ -129,7 +132,7 @@ public GarnetStatus ListPop(ArgSlice key, ListOperation lop, ref /// /// The count elements popped from the 
list public unsafe GarnetStatus ListPop(ArgSlice key, int count, ListOperation lop, ref TObjectContext objectStoreContext, out ArgSlice[] elements) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { // Prepare the payload var inputPayload = scratchBufferManager.CreateArgSlice(0); @@ -171,7 +174,7 @@ public unsafe GarnetStatus ListPop(ArgSlice key, int count, List /// /// The count elements popped from the list public unsafe GarnetStatus ListPopMultiple(ArgSlice[] keys, OperationDirection direction, int count, ref TObjectContext objectContext, out ArgSlice key, out ArgSlice[] elements) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { foreach (var k in keys) { @@ -206,7 +209,7 @@ public unsafe GarnetStatus ListPopMultiple(ArgSlice[] keys, Oper /// /// public unsafe GarnetStatus ListLength(ArgSlice key, ref TObjectContext objectStoreContext, out int count) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { count = 0; @@ -366,7 +369,7 @@ public GarnetStatus ListMove(ArgSlice sourceKey, ArgSlice destinationKey, Operat /// /// true when successful public unsafe bool ListTrim(ArgSlice key, int start, int stop, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { // Prepare the payload var inputLength = 0; @@ -400,7 +403,7 @@ public unsafe bool ListTrim(ArgSlice key, int start, int stop, r /// /// public GarnetStatus ListPush(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); itemBroker.HandleCollectionUpdate(key); @@ -416,7 +419,7 @@ public GarnetStatus ListPush(byte[] key, ref ObjectInput input, /// /// public GarnetStatus ListTrim(byte[] key, ref ObjectInput 
input, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out _, ref objectStoreContext); /// @@ -429,7 +432,7 @@ public GarnetStatus ListTrim(byte[] key, ref ObjectInput input, /// /// public GarnetStatus ListRange(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -442,7 +445,7 @@ public GarnetStatus ListRange(byte[] key, ref ObjectInput input, /// /// public GarnetStatus ListInsert(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); itemBroker.HandleCollectionUpdate(key); @@ -459,7 +462,7 @@ public GarnetStatus ListInsert(byte[] key, ref ObjectInput input /// /// public GarnetStatus ListIndex(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -473,7 +476,7 @@ public GarnetStatus ListIndex(byte[] key, ref ObjectInput input, /// /// public GarnetStatus ListRemove(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -487,7 +490,7 @@ public GarnetStatus ListRemove(byte[] key, ref ObjectInput input /// /// 
public unsafe GarnetStatus ListPop(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -501,7 +504,7 @@ public unsafe GarnetStatus ListPop(byte[] key, ref ObjectInput i /// /// public unsafe GarnetStatus ListLength(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -514,7 +517,7 @@ public unsafe GarnetStatus ListLength(byte[] key, ref ObjectInpu /// /// public unsafe GarnetStatus ListSet(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); } } \ No newline at end of file diff --git a/libs/server/Storage/Session/ObjectStore/SetOps.cs b/libs/server/Storage/Session/ObjectStore/SetOps.cs index 1ef8dd0b0c..7fbe1205bf 100644 --- a/libs/server/Storage/Session/ObjectStore/SetOps.cs +++ b/libs/server/Storage/Session/ObjectStore/SetOps.cs @@ -10,6 +10,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Server session for RESP protocol - SET /// @@ -27,7 +30,7 @@ sealed partial class StorageSession : IDisposable /// /// internal unsafe GarnetStatus SetAdd(ArgSlice key, ArgSlice member, out int saddCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { saddCount = 0; @@ -64,7 +67,7 @@ internal unsafe 
GarnetStatus SetAdd(ArgSlice key, ArgSlice membe /// /// internal unsafe GarnetStatus SetAdd(ArgSlice key, ArgSlice[] members, out int saddCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { saddCount = 0; @@ -113,7 +116,7 @@ internal unsafe GarnetStatus SetAdd(ArgSlice key, ArgSlice[] mem /// /// internal unsafe GarnetStatus SetRemove(ArgSlice key, ArgSlice member, out int sremCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { sremCount = 0; @@ -151,7 +154,7 @@ internal unsafe GarnetStatus SetRemove(ArgSlice key, ArgSlice me /// /// internal unsafe GarnetStatus SetRemove(ArgSlice key, ArgSlice[] members, out int sremCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { sremCount = 0; @@ -195,7 +198,7 @@ internal unsafe GarnetStatus SetRemove(ArgSlice key, ArgSlice[] /// /// internal unsafe GarnetStatus SetLength(ArgSlice key, out int count, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { count = 0; @@ -232,7 +235,7 @@ internal unsafe GarnetStatus SetLength(ArgSlice key, out int cou /// /// internal unsafe GarnetStatus SetMembers(ArgSlice key, out ArgSlice[] members, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { members = default; @@ -273,7 +276,7 @@ internal unsafe GarnetStatus SetMembers(ArgSlice key, out ArgSli /// /// internal GarnetStatus SetPop(ArgSlice key, out ArgSlice element, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { var status = SetPop(key, int.MinValue, out var elements, ref objectStoreContext); element = default; @@ -293,7 +296,7 @@ internal GarnetStatus SetPop(ArgSlice 
key, out ArgSlice element, /// /// internal unsafe GarnetStatus SetPop(ArgSlice key, int count, out ArgSlice[] elements, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { elements = default; @@ -339,7 +342,7 @@ internal unsafe GarnetStatus SetPop(ArgSlice key, int count, out /// The list of items for the response /// public unsafe GarnetStatus SetScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { items = default; @@ -608,7 +611,7 @@ public GarnetStatus SetIntersectStore(byte[] key, ArgSlice[] keys, out int count private GarnetStatus SetIntersect(ArgSlice[] keys, ref TObjectContext objectContext, out HashSet output) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { output = new HashSet(ByteArrayComparer.Instance); @@ -771,7 +774,7 @@ public GarnetStatus SetUnionStore(byte[] key, ArgSlice[] keys, out int count) } private GarnetStatus SetUnion(ArgSlice[] keys, ref TObjectContext objectContext, out HashSet output) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { output = new HashSet(ByteArrayComparer.Instance); if (keys.Length == 0) @@ -808,8 +811,8 @@ private GarnetStatus SetUnion(ArgSlice[] keys, ref TObjectContex /// /// public GarnetStatus SetAdd(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext - => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); + where TObjectContext : ITsavoriteContext + => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); /// /// Removes the specified members from the set. 
@@ -823,7 +826,7 @@ public GarnetStatus SetAdd(byte[] key, ref ObjectInput input, ou /// /// public GarnetStatus SetRemove(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -836,7 +839,7 @@ public GarnetStatus SetRemove(byte[] key, ref ObjectInput input, /// /// public GarnetStatus SetLength(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -849,7 +852,7 @@ public GarnetStatus SetLength(byte[] key, ref ObjectInput input, /// /// public GarnetStatus SetMembers(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -862,7 +865,7 @@ public GarnetStatus SetMembers(byte[] key, ref ObjectInput input /// /// public GarnetStatus SetIsMember(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -875,7 +878,7 @@ public GarnetStatus SetIsMember(byte[] key, ref ObjectInput inpu /// /// public GarnetStatus SetPop(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref 
objectContext, ref outputFooter); /// @@ -892,7 +895,7 @@ public GarnetStatus SetPop(byte[] key, ref ObjectInput input, re /// /// public GarnetStatus SetRandomMember(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -1000,7 +1003,7 @@ public GarnetStatus SetDiffStore(byte[] key, ArgSlice[] keys, out int count) } private GarnetStatus SetDiff(ArgSlice[] keys, ref TObjectContext objectContext, out HashSet output) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { output = new HashSet(); if (keys.Length == 0) diff --git a/libs/server/Storage/Session/ObjectStore/SortedSetGeoOps.cs b/libs/server/Storage/Session/ObjectStore/SortedSetGeoOps.cs index 35b216cc22..c040dbef1f 100644 --- a/libs/server/Storage/Session/ObjectStore/SortedSetGeoOps.cs +++ b/libs/server/Storage/Session/ObjectStore/SortedSetGeoOps.cs @@ -6,6 +6,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { /// @@ -19,7 +22,7 @@ sealed partial class StorageSession : IDisposable /// /// public GarnetStatus GeoAdd(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -35,7 +38,7 @@ public GarnetStatus GeoAdd(byte[] key, ref ObjectInput input, ou /// /// public GarnetStatus GeoCommands(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => 
ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); } diff --git a/libs/server/Storage/Session/ObjectStore/SortedSetOps.cs b/libs/server/Storage/Session/ObjectStore/SortedSetOps.cs index afdef18e4b..878afcc0ce 100644 --- a/libs/server/Storage/Session/ObjectStore/SortedSetOps.cs +++ b/libs/server/Storage/Session/ObjectStore/SortedSetOps.cs @@ -11,6 +11,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + sealed partial class StorageSession : IDisposable { @@ -25,7 +28,7 @@ sealed partial class StorageSession : IDisposable /// /// public unsafe GarnetStatus SortedSetAdd(ArgSlice key, ArgSlice score, ArgSlice member, out int zaddCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { zaddCount = 0; if (key.Length == 0) @@ -63,7 +66,7 @@ public unsafe GarnetStatus SortedSetAdd(ArgSlice key, ArgSlice s /// /// public unsafe GarnetStatus SortedSetAdd(ArgSlice key, (ArgSlice score, ArgSlice member)[] inputs, out int zaddCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { zaddCount = 0; @@ -108,7 +111,7 @@ public unsafe GarnetStatus SortedSetAdd(ArgSlice key, (ArgSlice /// /// public unsafe GarnetStatus SortedSetRemove(byte[] key, ArgSlice member, out int zremCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { zremCount = 0; @@ -147,7 +150,7 @@ public unsafe GarnetStatus SortedSetRemove(byte[] key, ArgSlice /// /// public unsafe GarnetStatus SortedSetRemove(byte[] key, ArgSlice[] members, out int zremCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { zremCount = 0; @@ -192,7 +195,7 @@ public unsafe GarnetStatus SortedSetRemove(byte[] key, 
ArgSlice[ /// /// public unsafe GarnetStatus SortedSetRemoveRangeByLex(ArgSlice key, string min, string max, out int countRemoved, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { countRemoved = 0; @@ -242,7 +245,7 @@ public unsafe GarnetStatus SortedSetRemoveRangeByLex(ArgSlice ke /// /// public unsafe GarnetStatus SortedSetRemoveRangeByScore(ArgSlice key, string min, string max, out int countRemoved, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { countRemoved = 0; @@ -292,7 +295,7 @@ public unsafe GarnetStatus SortedSetRemoveRangeByScore(ArgSlice /// /// public unsafe GarnetStatus SortedSetRemoveRangeByRank(ArgSlice key, int start, int stop, out int countRemoved, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { countRemoved = 0; @@ -342,7 +345,7 @@ public unsafe GarnetStatus SortedSetRemoveRangeByRank(ArgSlice k /// /// public unsafe GarnetStatus SortedSetPop(ArgSlice key, int count, bool lowScoresFirst, out (ArgSlice member, ArgSlice score)[] pairs, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { pairs = default; if (key.Length == 0) @@ -388,7 +391,7 @@ public unsafe GarnetStatus SortedSetPop(ArgSlice key, int count, /// /// public unsafe GarnetStatus SortedSetIncrement(ArgSlice key, double increment, ArgSlice member, out double newScore, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { newScore = 0; @@ -442,7 +445,7 @@ public unsafe GarnetStatus SortedSetIncrement(ArgSlice key, doub /// /// public unsafe GarnetStatus SortedSetLength(ArgSlice key, out int zcardCount, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : 
ITsavoriteContext { zcardCount = 0; @@ -488,7 +491,7 @@ public unsafe GarnetStatus SortedSetLength(ArgSlice key, out int /// /// public unsafe GarnetStatus SortedSetRange(ArgSlice key, ArgSlice min, ArgSlice max, SortedSetOrderOperation sortedSetOrderOperation, ref TObjectContext objectContext, out ArgSlice[] elements, out string error, bool withScores = false, bool reverse = false, (string, int) limit = default) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { elements = default; error = default; @@ -686,7 +689,7 @@ public unsafe GarnetStatus SortedSetDifference(ArgSlice[] keys, out DictionaryThe list of items for the response /// public unsafe GarnetStatus SortedSetScan(ArgSlice key, long cursor, string match, int count, out ArgSlice[] items, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { items = default; @@ -761,7 +764,7 @@ public unsafe GarnetStatus SortedSetScan(ArgSlice key, long curs /// /// public unsafe GarnetStatus SortedSetRank(ArgSlice key, ArgSlice member, bool reverse, out long? 
rank, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext { rank = null; if (key.Length == 0) @@ -813,7 +816,7 @@ public unsafe GarnetStatus SortedSetRank(ArgSlice key, ArgSlice /// /// public GarnetStatus SortedSetAdd(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -827,7 +830,7 @@ public GarnetStatus SortedSetAdd(byte[] key, ref ObjectInput inp /// /// public GarnetStatus SortedSetRemove(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -840,7 +843,7 @@ public GarnetStatus SortedSetRemove(byte[] key, ref ObjectInput /// /// public GarnetStatus SortedSetLength(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -855,7 +858,7 @@ public GarnetStatus SortedSetLength(byte[] key, ref ObjectInput /// /// public GarnetStatus SortedSetRange(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -869,7 +872,7 @@ public GarnetStatus SortedSetRange(byte[] key, ref ObjectInput i /// /// public GarnetStatus SortedSetScore(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput 
outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -883,7 +886,7 @@ public GarnetStatus SortedSetScore(byte[] key, ref ObjectInput i /// /// public GarnetStatus SortedSetScores(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -897,7 +900,7 @@ public GarnetStatus SortedSetScores(byte[] key, ref ObjectInput /// /// public GarnetStatus SortedSetPop(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -910,7 +913,7 @@ public GarnetStatus SortedSetPop(byte[] key, ref ObjectInput inp /// /// public GarnetStatus SortedSetCount(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -924,7 +927,7 @@ public GarnetStatus SortedSetCount(byte[] key, ref ObjectInput i /// /// public GarnetStatus SortedSetRemoveRangeByLex(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -939,7 +942,7 @@ public GarnetStatus SortedSetRemoveRangeByLex(byte[] key, ref Ob /// /// public 
GarnetStatus SortedSetLengthByValue(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperation(key, ref input, out output, ref objectStoreContext); /// @@ -953,7 +956,7 @@ public GarnetStatus SortedSetLengthByValue(byte[] key, ref Objec /// /// public GarnetStatus SortedSetIncrement(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); /// @@ -968,7 +971,7 @@ public GarnetStatus SortedSetIncrement(byte[] key, ref ObjectInp /// /// public GarnetStatus SortedSetRemoveRange(byte[] key, ref ObjectInput input, out ObjectOutputHeader output, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => RMWObjectStoreOperation(key, ref input, out output, ref objectContext); /// @@ -981,7 +984,7 @@ public GarnetStatus SortedSetRemoveRange(byte[] key, ref ObjectI /// /// public GarnetStatus SortedSetRank(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ -994,7 +997,7 @@ public GarnetStatus SortedSetRank(byte[] key, ref ObjectInput in /// /// public GarnetStatus SortedSetRandomMember(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectContext) - where TObjectContext : ITsavoriteContext + where TObjectContext : ITsavoriteContext => ReadObjectStoreOperationWithOutput(key, ref input, ref objectContext, ref outputFooter); /// @@ 
-1007,8 +1010,8 @@ public GarnetStatus SortedSetRandomMember(byte[] key, ref Object /// /// /// - public GarnetStatus SortedSetScan(byte[] key, ArgSlice input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) - where TObjectContext : ITsavoriteContext - => ReadObjectStoreOperationWithOutput(key, input, ref objectStoreContext, ref outputFooter); + public GarnetStatus SortedSetScan(byte[] key, ref ObjectInput input, ref GarnetObjectStoreOutput outputFooter, ref TObjectContext objectStoreContext) + where TObjectContext : ITsavoriteContext + => ReadObjectStoreOperationWithOutput(key, ref input, ref objectStoreContext, ref outputFooter); } } \ No newline at end of file diff --git a/libs/server/Storage/Session/StorageSession.cs b/libs/server/Storage/Session/StorageSession.cs index fa3cc03973..5e2e563b7e 100644 --- a/libs/server/Storage/Session/StorageSession.cs +++ b/libs/server/Storage/Session/StorageSession.cs @@ -7,6 +7,12 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Storage Session - the internal layer that Garnet uses to perform storage operations /// @@ -19,8 +25,8 @@ sealed partial class StorageSession : IDisposable /// /// Session Contexts for main store /// - public BasicContext basicContext; - public LockableContext lockableContext; + public BasicContext basicContext; + public LockableContext lockableContext; SectorAlignedMemory sectorAlignedMemoryHll; readonly int hllBufferSize = HyperLogLog.DefaultHLL.DenseBytes; @@ -29,8 +35,8 @@ sealed partial class StorageSession : IDisposable /// /// Session Contexts for object store /// - public BasicContext objectStoreBasicContext; - public LockableContext objectStoreLockableContext; + public BasicContext objectStoreBasicContext; + public LockableContext objectStoreLockableContext; public readonly 
ScratchBufferManager scratchBufferManager; public readonly FunctionsState functionsState; @@ -61,11 +67,11 @@ public StorageSession(StoreWrapper storeWrapper, functionsState = storeWrapper.CreateFunctionsState(); - var functions = new MainStoreFunctions(functionsState); - var session = storeWrapper.store.NewSession(functions); + var functions = new MainSessionFunctions(functionsState); + var session = storeWrapper.store.NewSession(functions); - var objstorefunctions = new ObjectStoreFunctions(functionsState); - var objectStoreSession = storeWrapper.objectStore?.NewSession(objstorefunctions); + var objstorefunctions = new ObjectSessionFunctions(functionsState); + var objectStoreSession = storeWrapper.objectStore?.NewSession(objstorefunctions); basicContext = session.BasicContext; lockableContext = session.LockableContext; diff --git a/libs/server/Storage/SizeTracker/CacheSizeTracker.cs b/libs/server/Storage/SizeTracker/CacheSizeTracker.cs index 5e1e255b2e..b319ef59eb 100644 --- a/libs/server/Storage/SizeTracker/CacheSizeTracker.cs +++ b/libs/server/Storage/SizeTracker/CacheSizeTracker.cs @@ -10,6 +10,9 @@ namespace Garnet.server { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Tracks the size of the main log and read cache. 
/// Based on the current size and the target size, it uses the corresponding LogSizeTracker objects to increase @@ -17,12 +20,12 @@ namespace Garnet.server /// public class CacheSizeTracker { - internal readonly LogSizeTracker mainLogTracker; - internal readonly LogSizeTracker readCacheTracker; + internal readonly LogSizeTracker mainLogTracker; + internal readonly LogSizeTracker readCacheTracker; internal long targetSize; private const int deltaFraction = 10; // 10% of target size - private TsavoriteKV store; + private TsavoriteKV store; internal long IndexSizeBytes => store.IndexSize * 64 + store.OverflowBucketCount * 64; @@ -37,7 +40,7 @@ internal struct LogSizeCalculator : ILogSizeCalculator /// The record's value /// The size of the record [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long CalculateRecordSize(RecordInfo recordInfo, byte[] key, IGarnetObject value) + public readonly long CalculateRecordSize(RecordInfo recordInfo, byte[] key, IGarnetObject value) { long size = Utility.RoundUp(key.Length, IntPtr.Size) + MemoryUtils.ByteArrayOverhead; @@ -53,7 +56,8 @@ public long CalculateRecordSize(RecordInfo recordInfo, byte[] key, IGarnetObject /// Hybrid log settings /// Total memory size target /// - public CacheSizeTracker(TsavoriteKV store, LogSettings logSettings, long targetSize, ILoggerFactory loggerFactory = null) + public CacheSizeTracker(TsavoriteKV store, KVSettings logSettings, + long targetSize, ILoggerFactory loggerFactory = null) { Debug.Assert(store != null); Debug.Assert(logSettings != null); @@ -64,15 +68,15 @@ public CacheSizeTracker(TsavoriteKV store, LogSettings lo var (mainLogTargetSizeBytes, readCacheTargetSizeBytes) = CalculateLogTargetSizeBytes(targetSize); - this.mainLogTracker = new LogSizeTracker(store.Log, logSizeCalculator, + this.mainLogTracker = new LogSizeTracker(store.Log, logSizeCalculator, mainLogTargetSizeBytes, mainLogTargetSizeBytes / deltaFraction, loggerFactory?.CreateLogger("ObjSizeTracker")); 
store.Log.SubscribeEvictions(mainLogTracker); - store.Log.SubscribeDeserializations(new LogOperationObserver(mainLogTracker, LogOperationType.Deserialize)); + store.Log.SubscribeDeserializations(new LogOperationObserver(mainLogTracker, LogOperationType.Deserialize)); store.Log.IsSizeBeyondLimit = () => mainLogTracker.IsSizeBeyondLimit; if (store.ReadCache != null) { - this.readCacheTracker = new LogSizeTracker(store.ReadCache, logSizeCalculator, + this.readCacheTracker = new LogSizeTracker(store.ReadCache, logSizeCalculator, readCacheTargetSizeBytes, readCacheTargetSizeBytes / deltaFraction, loggerFactory?.CreateLogger("ObjReadCacheSizeTracker")); store.ReadCache.SubscribeEvictions(readCacheTracker); } diff --git a/libs/server/StoreWrapper.cs b/libs/server/StoreWrapper.cs index a711e32c7d..a0e67899b6 100644 --- a/libs/server/StoreWrapper.cs +++ b/libs/server/StoreWrapper.cs @@ -15,6 +15,12 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Wrapper for store and store-specific information /// @@ -28,12 +34,12 @@ public sealed class StoreWrapper /// /// Store /// - public readonly TsavoriteKV store; + public readonly TsavoriteKV store; /// /// Object store /// - public readonly TsavoriteKV objectStore; + public readonly TsavoriteKV objectStore; /// /// Server options @@ -99,8 +105,8 @@ public StoreWrapper( string version, string redisProtocolVersion, IGarnetServer server, - TsavoriteKV store, - TsavoriteKV objectStore, + TsavoriteKV store, + TsavoriteKV objectStore, CacheSizeTracker objectStoreSizeTracker, CustomCommandManager customCommandManager, TsavoriteLog appendOnlyFile, @@ -511,7 +517,7 @@ internal void Start() Task.Run(async () => await CompactionTask(serverOptions.CompactionFrequencySecs, ctsCommit.Token)); } - if (serverOptions.AdjustedIndexMaxSize > 0 || 
serverOptions.AdjustedObjectStoreIndexMaxSize > 0) + if (serverOptions.AdjustedIndexMaxCacheLines > 0 || serverOptions.AdjustedObjectStoreIndexMaxCacheLines > 0) { Task.Run(() => IndexAutoGrowTask(ctsCommit.Token)); } @@ -525,8 +531,8 @@ private async void IndexAutoGrowTask(CancellationToken token) { try { - bool indexMaxedOut = serverOptions.AdjustedIndexMaxSize == 0; - bool objectStoreIndexMaxedOut = serverOptions.AdjustedObjectStoreIndexMaxSize == 0; + bool indexMaxedOut = serverOptions.AdjustedIndexMaxCacheLines == 0; + bool objectStoreIndexMaxedOut = serverOptions.AdjustedObjectStoreIndexMaxCacheLines == 0; while (!indexMaxedOut || !objectStoreIndexMaxedOut) { if (token.IsCancellationRequested) break; @@ -534,11 +540,11 @@ private async void IndexAutoGrowTask(CancellationToken token) await Task.Delay(TimeSpan.FromSeconds(serverOptions.IndexResizeFrequencySecs), token); if (!indexMaxedOut) - indexMaxedOut = GrowIndexIfNeeded(StoreType.Main, serverOptions.AdjustedIndexMaxSize, store.OverflowBucketAllocations, + indexMaxedOut = GrowIndexIfNeeded(StoreType.Main, serverOptions.AdjustedIndexMaxCacheLines, store.OverflowBucketAllocations, () => store.IndexSize, () => store.GrowIndex()); if (!objectStoreIndexMaxedOut) - objectStoreIndexMaxedOut = GrowIndexIfNeeded(StoreType.Object, serverOptions.AdjustedObjectStoreIndexMaxSize, objectStore.OverflowBucketAllocations, + objectStoreIndexMaxedOut = GrowIndexIfNeeded(StoreType.Object, serverOptions.AdjustedObjectStoreIndexMaxCacheLines, objectStore.OverflowBucketAllocations, () => objectStore.IndexSize, () => objectStore.GrowIndex()); } } diff --git a/libs/server/Transaction/TransactionManager.cs b/libs/server/Transaction/TransactionManager.cs index cf9db7f5db..bce85f8592 100644 --- a/libs/server/Transaction/TransactionManager.cs +++ b/libs/server/Transaction/TransactionManager.cs @@ -10,8 +10,24 @@ namespace Garnet.server { - using BasicGarnetApi = GarnetApi, BasicContext>; - using LockableGarnetApi = GarnetApi, 
LockableContext>; + using BasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; + using LockableGarnetApi = GarnetApi, + SpanByteAllocator>>, + LockableContext>, + GenericAllocator>>>>; + + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; /// /// Transaction manager @@ -21,22 +37,22 @@ public sealed unsafe partial class TransactionManager /// /// Basic context for main store /// - readonly BasicContext basicContext; + readonly BasicContext basicContext; /// /// Lockable context for main store /// - readonly LockableContext lockableContext; + readonly LockableContext lockableContext; /// /// Basic context for object store /// - readonly BasicContext objectStoreBasicContext; + readonly BasicContext objectStoreBasicContext; /// /// Lockable context for object store /// - readonly LockableContext objectStoreLockableContext; + readonly LockableContext objectStoreLockableContext; // Not readonly to avoid defensive copy GarnetWatchApi garnetTxPrepareApi; @@ -67,11 +83,11 @@ public sealed unsafe partial class TransactionManager StoreType transactionStoreType; readonly ILogger logger; - internal LockableContext LockableContext + internal LockableContext LockableContext => lockableContext; - internal LockableUnsafeContext LockableUnsafeContext + internal LockableUnsafeContext LockableUnsafeContext => basicContext.Session.LockableUnsafeContext; - internal LockableContext ObjectStoreLockableContext + internal LockableContext ObjectStoreLockableContext => objectStoreLockableContext; /// diff --git a/libs/server/Transaction/TxnKeyEntry.cs b/libs/server/Transaction/TxnKeyEntry.cs index b986360250..dd5316e5a7 100644 --- a/libs/server/Transaction/TxnKeyEntry.cs +++ b/libs/server/Transaction/TxnKeyEntry.cs @@ -8,6 +8,12 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using 
MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + /// /// Entry for a key to lock and unlock in transactions /// @@ -55,7 +61,8 @@ internal sealed class TxnKeyEntries public int phase; - internal TxnKeyEntries(int initialCount, LockableContext lockableContext, LockableContext objectStoreLockableContext) + internal TxnKeyEntries(int initialCount, LockableContext lockableContext, + LockableContext objectStoreLockableContext) { keys = new TxnKeyEntry[initialCount]; // We sort a single array for speed, and the sessions use the same sorting logic, diff --git a/libs/server/Transaction/TxnKeyEntryComparer.cs b/libs/server/Transaction/TxnKeyEntryComparer.cs index 25145b9eb2..894298aed0 100644 --- a/libs/server/Transaction/TxnKeyEntryComparer.cs +++ b/libs/server/Transaction/TxnKeyEntryComparer.cs @@ -6,12 +6,19 @@ namespace Garnet.server { + using MainStoreAllocator = SpanByteAllocator>; + using MainStoreFunctions = StoreFunctions; + + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + internal sealed class TxnKeyEntryComparer : IComparer { - public LockableContext lockableContext; - public LockableContext objectStoreLockableContext; + public LockableContext lockableContext; + public LockableContext objectStoreLockableContext; - internal TxnKeyEntryComparer(LockableContext lockableContext, LockableContext objectStoreLockableContext) + internal TxnKeyEntryComparer(LockableContext lockableContext, + LockableContext objectStoreLockableContext) { this.lockableContext = lockableContext; this.objectStoreLockableContext = objectStoreLockableContext; diff --git a/libs/storage/Tsavorite/cs/Tsavorite.sln b/libs/storage/Tsavorite/cs/Tsavorite.sln index 8aa440d2d3..cc68559985 100644 --- a/libs/storage/Tsavorite/cs/Tsavorite.sln +++ b/libs/storage/Tsavorite/cs/Tsavorite.sln @@ -3,7 +3,7 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # 
Visual Studio Version 17 VisualStudioVersion = 17.2.32526.322 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tsavorite.benchmark", "benchmark\Tsavorite.benchmark.csproj", "{33A732D1-2B58-4FEE-9696-B9483496229F}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "YCSB.benchmark", "benchmark\YCSB.benchmark\YCSB.benchmark.csproj", "{33A732D1-2B58-4FEE-9696-B9483496229F}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tsavorite.test", "test\Tsavorite.test.csproj", "{0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}" EndProject @@ -21,6 +21,8 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "devices", "devices", "{A6B1 EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tsavorite.devices.AzureStorageDevice", "src\devices\AzureStorageDevice\Tsavorite.devices.AzureStorageDevice.csproj", "{E571E686-01A0-44D5-BFF5-B7678284258B}" EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "BDN-Tsavorite.benchmark", "benchmark\BDN-Tsavorite.Benchmark\BDN-Tsavorite.benchmark.csproj", "{1C3591CE-7521-4BE4-8B24-626AD92F7B19}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -61,6 +63,14 @@ Global {E571E686-01A0-44D5-BFF5-B7678284258B}.Release|Any CPU.Build.0 = Release|Any CPU {E571E686-01A0-44D5-BFF5-B7678284258B}.Release|x64.ActiveCfg = Release|x64 {E571E686-01A0-44D5-BFF5-B7678284258B}.Release|x64.Build.0 = Release|x64 + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Debug|x64.ActiveCfg = Debug|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Debug|x64.Build.0 = Debug|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Release|Any CPU.Build.0 = Release|Any CPU + 
{1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Release|x64.ActiveCfg = Release|Any CPU + {1C3591CE-7521-4BE4-8B24-626AD92F7B19}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -72,6 +82,7 @@ Global {F947BC6A-2943-4AC7-ACA7-F17351E25FE7} = {9531E3D2-217B-4446-98E8-E48F0FDD1452} {A6B14415-D316-4955-BE5F-725BB2DEBEBE} = {28800357-C8CE-4CD0-A2AD-D4A910ABB496} {E571E686-01A0-44D5-BFF5-B7678284258B} = {A6B14415-D316-4955-BE5F-725BB2DEBEBE} + {1C3591CE-7521-4BE4-8B24-626AD92F7B19} = {CA6AB459-A31A-4C15-B1A6-A82C349B54B4} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {A0750637-2CCB-4139-B25E-F2CE740DCFAC} diff --git a/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BDN-Tsavorite.benchmark.csproj b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BDN-Tsavorite.benchmark.csproj new file mode 100644 index 0000000000..84cce29a6b --- /dev/null +++ b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BDN-Tsavorite.benchmark.csproj @@ -0,0 +1,20 @@ + + + + Exe + enable + true + ../../../../../../Garnet.snk + false + + + + + + + + + + + + diff --git a/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BenchmarkDotNetTestsApp.cs b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BenchmarkDotNetTestsApp.cs new file mode 100644 index 0000000000..85baa0ed05 --- /dev/null +++ b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/BenchmarkDotNetTestsApp.cs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +using BenchmarkDotNet.Running; + +namespace BenchmarkDotNetTests +{ + public class BenchmarkDotNetTestsApp + { + public static string TestDirectory => Path.Combine(Path.GetDirectoryName(typeof(BenchmarkDotNetTestsApp).Assembly.Location), "Tests"); + + public static void Main(string[] args) + { + BenchmarkSwitcher.FromAssembly(typeof(BenchmarkDotNetTestsApp).Assembly).Run(args); + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/InliningTests.cs b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/InliningTests.cs new file mode 100644 index 0000000000..9efa88bdb4 --- /dev/null +++ b/libs/storage/Tsavorite/cs/benchmark/BDN-Tsavorite.Benchmark/InliningTests.cs @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Configs; +using BenchmarkDotNet.Diagnostics.Windows.Configs; +using Tsavorite.core; + +#pragma warning disable 0649 // Field 'field' is never assigned to, and will always have its default value 'value'; happens due to [Params(..)] +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member +#pragma warning disable IDE0048 // Add parentheses for clarity +#pragma warning disable IDE0130 // Namespace does not match folder structure + +namespace BenchmarkDotNetTests +{ +#pragma warning disable IDE0065 // Misplaced using directive + using SpanByteStoreFunctions = StoreFunctions; + + [InliningDiagnoser(logFailuresOnly: true, allowedNamespaces: ["Tsavorite.core"])] + [GroupBenchmarksBy(BenchmarkLogicalGroupRule.ByCategory, BenchmarkLogicalGroupRule.ByParams)] + public class InliningTests + { + [Params(1_000_000)] + public int NumRecords; + + TsavoriteKV> store; + IDevice logDevice; + string logDirectory; + + void SetupStore() + { + logDirectory = BenchmarkDotNetTestsApp.TestDirectory; + var logFilename = Path.Combine(logDirectory, 
$"{nameof(InliningTests)}_{Guid.NewGuid()}.log"); + logDevice = Devices.CreateLogDevice(logFilename, preallocateFile: true, deleteOnClose: true, useIoCompletionPort: true); + + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = logDevice + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + } + + unsafe void PopulateStore() + { + using var session = store.NewSession>(new()); + var bContext = session.BasicContext; + + Span keyVec = stackalloc byte[sizeof(long)]; + var keySpanByte = SpanByte.FromPinnedSpan(keyVec); + + Span valueVec = stackalloc byte[sizeof(long)]; + var valueSpanByte = SpanByte.FromPinnedSpan(valueVec); + + for (long ii = 0; ii < NumRecords; ++ii) + { + *(long*)keySpanByte.ToPointer() = ii; + *(long*)valueSpanByte.ToPointer() = ii + NumRecords; + _ = bContext.Upsert(keySpanByte, valueSpanByte); + } + } + + [GlobalSetup] + public void SetupPopulatedStore() + { + SetupStore(); + PopulateStore(); + } + + [GlobalCleanup] + public void TearDown() + { + store?.Dispose(); + store = null; + logDevice?.Dispose(); + logDevice = null; + try + { + Directory.Delete(logDirectory); + } + catch { } + } + + [BenchmarkCategory("Upsert"), Benchmark] + public unsafe void Upsert() + { + using var session = store.NewSession>(new()); + var bContext = session.BasicContext; + + Span keyVec = stackalloc byte[sizeof(long)]; + var keySpanByte = SpanByte.FromPinnedSpan(keyVec); + + Span valueVec = stackalloc byte[sizeof(long)]; + var valueSpanByte = SpanByte.FromPinnedSpan(valueVec); + + for (long ii = 0; ii < NumRecords; ++ii) + { + *(long*)keySpanByte.ToPointer() = ii; + *(long*)valueSpanByte.ToPointer() = ii + NumRecords * 2; + _ = bContext.Upsert(keySpanByte, valueSpanByte); + } + } + + [BenchmarkCategory("RMW"), Benchmark] + public unsafe void RMW() + { + using var session = store.NewSession>(new()); + var bContext = session.BasicContext; + + Span keyVec = stackalloc byte[sizeof(long)]; + var 
keySpanByte = SpanByte.FromPinnedSpan(keyVec); + + Span inputVec = stackalloc byte[sizeof(long)]; + var inputSpanByte = SpanByte.FromPinnedSpan(inputVec); + + for (long ii = 0; ii < NumRecords; ++ii) + { + *(long*)keySpanByte.ToPointer() = ii; + *(long*)inputSpanByte.ToPointer() = ii + NumRecords * 3; + _ = bContext.RMW(keySpanByte, inputSpanByte); + } + + _ = bContext.CompletePending(); + } + + [BenchmarkCategory("Read"), Benchmark] + public unsafe void Read() + { + using var session = store.NewSession>(new()); + var bContext = session.BasicContext; + + Span keyVec = stackalloc byte[sizeof(long)]; + var keySpanByte = SpanByte.FromPinnedSpan(keyVec); + + for (long ii = 0; ii < NumRecords; ++ii) + { + *(long*)keySpanByte.ToPointer() = ii; + _ = bContext.Read(keySpanByte); + } + _ = bContext.CompletePending(); + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/Key.cs b/libs/storage/Tsavorite/cs/benchmark/Key.cs deleted file mode 100644 index 895635573c..0000000000 --- a/libs/storage/Tsavorite/cs/benchmark/Key.cs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT license. 
- -using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; -using Tsavorite.core; - -namespace Tsavorite.benchmark -{ - [StructLayout(LayoutKind.Explicit, Size = 8)] - public struct Key : ITsavoriteEqualityComparer - { - [FieldOffset(0)] - public long value; - - - public override string ToString() - { - return "{ " + value + " }"; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetHashCode64(ref Key k) - { - return Utility.GetHashCode(k.value); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool Equals(ref Key k1, ref Key k2) - { - return k1.value == k2.value; - } - - } -} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/ConcurrentDictionaryBenchmark.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/ConcurrentDictionaryBenchmark.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/ConcurrentDictionaryBenchmark.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/ConcurrentDictionaryBenchmark.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/FixedLenYcsbBenchmark.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/FixedLenYcsbBenchmark.cs similarity index 86% rename from libs/storage/Tsavorite/cs/benchmark/FixedLenYcsbBenchmark.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/FixedLenYcsbBenchmark.cs index 40d4a0662f..c74ad0335f 100644 --- a/libs/storage/Tsavorite/cs/benchmark/FixedLenYcsbBenchmark.cs +++ b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/FixedLenYcsbBenchmark.cs @@ -7,8 +7,13 @@ using System.Threading; using Tsavorite.core; +#pragma warning disable IDE0007 // Use implicit type + namespace Tsavorite.benchmark { +#pragma warning disable IDE0065 // Misplaced using directive + using StructStoreFunctions = StoreFunctions>; + internal class Tsavorite_YcsbBenchmark { // Ensure sizes are aligned to chunk sizes @@ -26,7 +31,7 @@ internal class Tsavorite_YcsbBenchmark readonly Key[] txn_keys_; readonly 
IDevice device; - readonly TsavoriteKV store; + readonly TsavoriteKV> store; long idx_ = 0; long total_ops_done = 0; @@ -76,14 +81,27 @@ internal Tsavorite_YcsbBenchmark(Key[] i_keys_, Key[] t_keys_, TestLoader testLo if (testLoader.Options.ThreadCount >= 16) device.ThrottleLimit = testLoader.Options.ThreadCount * 12; + var kvSettings = new KVSettings() + { + IndexSize = testLoader.GetHashTableSize(), + LogDevice = device, + PreallocateLog = true, + MemorySize = 1L << 34, + RevivificationSettings = revivificationSettings, + CheckpointDir = testLoader.BackupPath + }; + if (testLoader.Options.UseSmallMemoryLog) - store = new TsavoriteKV - (testLoader.GetHashTableSize(), new LogSettings { LogDevice = device, PreallocateLog = true, PageSizeBits = 25, SegmentSizeBits = 30, MemorySizeBits = 28 }, - new CheckpointSettings { CheckpointDir = testLoader.BackupPath }, revivificationSettings: revivificationSettings); - else - store = new TsavoriteKV - (testLoader.GetHashTableSize(), new LogSettings { LogDevice = device, PreallocateLog = true }, - new CheckpointSettings { CheckpointDir = testLoader.BackupPath }, revivificationSettings: revivificationSettings); + { + kvSettings.PageSize = 1L << 25; + kvSettings.SegmentSize = 1L << 30; + kvSettings.MemorySize = 1L << 28; + } + + store = new(kvSettings + , StoreFunctions.Create(new Key.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } internal void Dispose() @@ -136,34 +154,34 @@ private void RunYcsbUnsafeContext(int thread_idx) if (idx % 512 == 0) { uContext.Refresh(); - uContext.CompletePending(false); + _ = uContext.CompletePending(false); } int r = (int)rng.Generate(100); // rng.Next() is not inclusive of the upper bound so this will be <= 99 if (r < readPercent) { - uContext.Read(ref txn_keys_[idx], ref input, ref output, Empty.Default); + _ = uContext.Read(ref txn_keys_[idx], ref input, ref output, Empty.Default); ++reads_done; continue; } if (r < upsertPercent) { - 
uContext.Upsert(ref txn_keys_[idx], ref value, Empty.Default); + _ = uContext.Upsert(ref txn_keys_[idx], ref value, Empty.Default); ++writes_done; continue; } if (r < rmwPercent) { - uContext.RMW(ref txn_keys_[idx], ref input_[idx & 0x7], Empty.Default); + _ = uContext.RMW(ref txn_keys_[idx], ref input_[idx & 0x7], Empty.Default); ++writes_done; continue; } - uContext.Delete(ref txn_keys_[idx], Empty.Default); + _ = uContext.Delete(ref txn_keys_[idx], Empty.Default); ++deletes_done; } } - uContext.CompletePending(true); + _ = uContext.CompletePending(true); } finally { @@ -173,7 +191,7 @@ private void RunYcsbUnsafeContext(int thread_idx) sw.Stop(); Console.WriteLine($"Thread {thread_idx} done; {reads_done} reads, {writes_done} writes, {deletes_done} deletes in {sw.ElapsedMilliseconds} ms."); - Interlocked.Add(ref total_ops_done, reads_done + writes_done + deletes_done); + _ = Interlocked.Add(ref total_ops_done, reads_done + writes_done + deletes_done); } private void RunYcsbSafeContext(int thread_idx) @@ -215,38 +233,38 @@ private void RunYcsbSafeContext(int thread_idx) for (long idx = chunk_idx; idx < chunk_idx + YcsbConstants.kChunkSize && !done; ++idx) { if (idx % 512 == 0) - bContext.CompletePending(false); + _ = bContext.CompletePending(false); int r = (int)rng.Generate(100); // rng.Next() is not inclusive of the upper bound so this will be <= 99 if (r < readPercent) { - bContext.Read(ref txn_keys_[idx], ref input, ref output, Empty.Default); + _ = bContext.Read(ref txn_keys_[idx], ref input, ref output, Empty.Default); ++reads_done; continue; } if (r < upsertPercent) { - bContext.Upsert(ref txn_keys_[idx], ref value, Empty.Default); + _ = bContext.Upsert(ref txn_keys_[idx], ref value, Empty.Default); ++writes_done; continue; } if (r < rmwPercent) { - bContext.RMW(ref txn_keys_[idx], ref input_[idx & 0x7], Empty.Default); + _ = bContext.RMW(ref txn_keys_[idx], ref input_[idx & 0x7], Empty.Default); ++writes_done; continue; } - bContext.Delete(ref 
txn_keys_[idx], Empty.Default); + _ = bContext.Delete(ref txn_keys_[idx], Empty.Default); ++deletes_done; } } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); sw.Stop(); Console.WriteLine($"Thread {thread_idx} done; {reads_done} reads, {writes_done} writes, {deletes_done} deletes in {sw.ElapsedMilliseconds} ms."); - Interlocked.Add(ref total_ops_done, reads_done + writes_done); + _ = Interlocked.Add(ref total_ops_done, reads_done + writes_done); } internal unsafe (double insPerSec, double opsPerSec, long tailAddress) Run(TestLoader testLoader) @@ -388,17 +406,14 @@ private void SetupYcsbUnsafeContext(int thread_idx) if (idx % 256 == 0) { uContext.Refresh(); - if (idx % 65536 == 0) - { - uContext.CompletePending(false); - } + _ = uContext.CompletePending(false); } - uContext.Upsert(ref init_keys_[idx], ref value, Empty.Default); + _ = uContext.Upsert(ref init_keys_[idx], ref value, Empty.Default); } } - uContext.CompletePending(true); + _ = uContext.CompletePending(true); } finally { @@ -432,18 +447,15 @@ private void SetupYcsbSafeContext(int thread_idx) if (idx % 256 == 0) { bContext.Refresh(); - if (idx % 65536 == 0) - { - bContext.CompletePending(false); - } + _ = bContext.CompletePending(false); } - bContext.Upsert(ref init_keys_[idx], ref value, Empty.Default); + _ = bContext.Upsert(ref init_keys_[idx], ref value, Empty.Default); } } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } #region Load Data diff --git a/libs/storage/Tsavorite/cs/benchmark/Input.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Input.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/Input.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Input.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Key.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Key.cs new file mode 100644 index 0000000000..708026612c --- /dev/null +++ 
b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Key.cs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using Tsavorite.core; + +namespace Tsavorite.benchmark +{ + [StructLayout(LayoutKind.Explicit, Size = 8)] + public struct Key + { + [FieldOffset(0)] + public long value; + + public override string ToString() => "{ " + value + " }"; + + public struct Comparer : IKeyComparer + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetHashCode64(ref Key key) => Utility.GetHashCode(key.value); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool Equals(ref Key key1, ref Key key2) => key1.value == key2.value; + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/KeySpanByte.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/KeySpanByte.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/KeySpanByte.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/KeySpanByte.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/Options.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Options.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/Options.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Options.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/Output.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Output.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/Output.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Output.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/Program.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Program.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/Program.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Program.cs diff --git 
a/libs/storage/Tsavorite/cs/benchmark/RandomGenerator.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/RandomGenerator.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/RandomGenerator.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/RandomGenerator.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/SessionFunctions.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SessionFunctions.cs similarity index 84% rename from libs/storage/Tsavorite/cs/benchmark/SessionFunctions.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SessionFunctions.cs index dab6881dc2..cc4991ce9f 100644 --- a/libs/storage/Tsavorite/cs/benchmark/SessionFunctions.cs +++ b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SessionFunctions.cs @@ -87,13 +87,6 @@ public void PostSingleDeleter(ref Key key, ref DeleteInfo deleteInfo) { } public void PostSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - public void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - public void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo) { } - public void DisposeDeserializedFromDisk(ref Key key, ref Value value) { } - public void DisposeForRevivification(ref Key key, ref Value value, int newKeySize) { } - public void ConvertOutputToHeap(ref Input input, ref Output output) { } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/SessionSpanByteFunctions.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SessionSpanByteFunctions.cs 
similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/SessionSpanByteFunctions.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SessionSpanByteFunctions.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/SpanByteYcsbBenchmark.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SpanByteYcsbBenchmark.cs similarity index 94% rename from libs/storage/Tsavorite/cs/benchmark/SpanByteYcsbBenchmark.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SpanByteYcsbBenchmark.cs index f56a423f81..1064a611c7 100644 --- a/libs/storage/Tsavorite/cs/benchmark/SpanByteYcsbBenchmark.cs +++ b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/SpanByteYcsbBenchmark.cs @@ -7,8 +7,13 @@ using System.Threading; using Tsavorite.core; +#pragma warning disable IDE0007 // Use implicit type + namespace Tsavorite.benchmark { +#pragma warning disable IDE0065 // Misplaced using directive + using SpanByteStoreFunctions = StoreFunctions; + internal class SpanByteYcsbBenchmark { // Ensure sizes are aligned to chunk sizes @@ -26,7 +31,7 @@ internal class SpanByteYcsbBenchmark readonly KeySpanByte[] txn_keys_; readonly IDevice device; - readonly TsavoriteKV store; + readonly TsavoriteKV> store; long idx_ = 0; long total_ops_done = 0; @@ -85,14 +90,27 @@ internal SpanByteYcsbBenchmark(KeySpanByte[] i_keys_, KeySpanByte[] t_keys_, Tes device = Devices.CreateLogDevice(TestLoader.DevicePath, preallocateFile: true, deleteOnClose: !testLoader.RecoverMode, useIoCompletionPort: true); + var kvSettings = new KVSettings() + { + IndexSize = testLoader.GetHashTableSize(), + LogDevice = device, + PreallocateLog = true, + MemorySize = 1L << 35, + RevivificationSettings = revivificationSettings, + CheckpointDir = testLoader.BackupPath + }; + if (testLoader.Options.UseSmallMemoryLog) - store = new TsavoriteKV - (testLoader.GetHashTableSize(), new LogSettings { LogDevice = device, PreallocateLog = true, PageSizeBits = 22, SegmentSizeBits = 26, MemorySizeBits = 26 }, - 
new CheckpointSettings { CheckpointDir = testLoader.BackupPath }, revivificationSettings: revivificationSettings); - else - store = new TsavoriteKV - (testLoader.GetHashTableSize(), new LogSettings { LogDevice = device, PreallocateLog = true, MemorySizeBits = 35 }, - new CheckpointSettings { CheckpointDir = testLoader.BackupPath }, revivificationSettings: revivificationSettings); + { + kvSettings.PageSize = 1L << 22; + kvSettings.SegmentSize = 1L << 26; + kvSettings.MemorySize = 1L << 26; + } + + store = new(kvSettings + , StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } internal void Dispose() diff --git a/libs/storage/Tsavorite/cs/benchmark/TestLoader.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/TestLoader.cs similarity index 89% rename from libs/storage/Tsavorite/cs/benchmark/TestLoader.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/TestLoader.cs index de13cf694d..166c338d02 100644 --- a/libs/storage/Tsavorite/cs/benchmark/TestLoader.cs +++ b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/TestLoader.cs @@ -90,7 +90,7 @@ static bool verifyOption(bool isValid, string name, string info = null) error = false; } - internal long GetHashTableSize() => (long)(MaxKey / Options.HashPacking); + internal long GetHashTableSize() => (long)(MaxKey / Options.HashPacking) << 6; // << 6 for consistency with pre-StoreFunctions (because it will be converted to cache lines) internal void LoadData() { @@ -167,12 +167,12 @@ private unsafe void LoadDataFromFile(string filePath, string d Console.WriteLine($"loading subset of keys and txns from {txn_filename} into memory..."); using FileStream stream = File.Open(txn_filename, FileMode.Open, FileAccess.Read, FileShare.Read); - var initValueSet = new HashSet(init_keys.Length); + var initKeySet = new HashSet(init_keys.Length); + long[] initKeyArray = null; - long init_count = 0; long txn_count = 0; - long offset = 0; + RandomGenerator rng = 
new((uint)Options.RandomSeed); byte[] chunk = new byte[YcsbConstants.kFileChunkSize]; fixed (byte* chunk_ptr = chunk) @@ -183,25 +183,27 @@ private unsafe void LoadDataFromFile(string filePath, string d int size = stream.Read(chunk, 0, YcsbConstants.kFileChunkSize); for (int idx = 0; idx < size && txn_count < txn_keys.Length; idx += 8) { - var value = *(long*)(chunk_ptr + idx); - if (!initValueSet.Contains(value)) + var key = *(long*)(chunk_ptr + idx); + if (!initKeySet.Contains(key)) { - if (init_count >= init_keys.Length) + if (initKeySet.Count >= init_keys.Length) { + // Zipf txn has a high hit rate in the init array, so we'll fill up the small-txn count by just skipping out-of-range txn keys. if (distribution == YcsbConstants.ZipfDist) continue; - // Uniform distribution at current small-data counts is about a 1% hit rate, which is too slow here, so just modulo. - value %= init_keys.Length; + // Uniform txn at current small-data counts has about a 1% hit rate in the init array, too low to fill the small-txn count, + // so convert the init_key set to an array for random indexing get a random key from init_keys. + initKeyArray ??= initKeySet.ToArray(); + key = initKeyArray[rng.Generate((uint)initKeySet.Count)]; } else { - initValueSet.Add(value); - keySetter.Set(init_keys, init_count, value); - ++init_count; + keySetter.Set(init_keys, initKeySet.Count, key); + _ = initKeySet.Add(key); } } - keySetter.Set(txn_keys, txn_count, value); + keySetter.Set(txn_keys, txn_count, key); ++txn_count; } if (size == YcsbConstants.kFileChunkSize) @@ -216,8 +218,8 @@ private unsafe void LoadDataFromFile(string filePath, string d sw.Stop(); - if (init_count != init_keys.Length) - throw new InvalidDataException($"Init file subset load fail! Expected {init_keys.Length} keys; found {init_count}"); + if (initKeySet.Count != init_keys.Length) + throw new InvalidDataException($"Init file subset load fail! 
Expected {init_keys.Length} keys; found {initKeySet.Count}"); if (txn_count != txn_keys.Length) throw new InvalidDataException($"Txn file subset load fail! Expected {txn_keys.Length} keys; found {txn_count}"); @@ -338,7 +340,9 @@ private static void LoadSyntheticData(string distribution, uin internal string BackupPath => $"{DataPath}/{Distribution}_{(Options.UseSyntheticData ? "synthetic" : "ycsb")}_{(Options.UseSmallData ? "2.5M_10M" : "250M_1000M")}"; - internal bool MaybeRecoverStore(TsavoriteKV store) + internal bool MaybeRecoverStore(TsavoriteKV store) + where SF : IStoreFunctions + where A : IAllocator { // Recover database for fast benchmark repeat runs. if (RecoverMode) @@ -367,7 +371,9 @@ internal bool MaybeRecoverStore(TsavoriteKV store) return false; } - internal void MaybeCheckpointStore(TsavoriteKV store) + internal void MaybeCheckpointStore(TsavoriteKV store) + where SF : IStoreFunctions + where A : IAllocator { // Checkpoint database for fast benchmark repeat runs. if (RecoverMode) diff --git a/libs/storage/Tsavorite/cs/benchmark/TestStats.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/TestStats.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/TestStats.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/TestStats.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/Value.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Value.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/Value.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/Value.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/Tsavorite.benchmark.csproj b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/YCSB.benchmark.csproj similarity index 65% rename from libs/storage/Tsavorite/cs/benchmark/Tsavorite.benchmark.csproj rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/YCSB.benchmark.csproj index 44739e64c3..b7c72d0a07 100644 --- 
a/libs/storage/Tsavorite/cs/benchmark/Tsavorite.benchmark.csproj +++ b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/YCSB.benchmark.csproj @@ -2,7 +2,7 @@ Exe - ../../../../../Garnet.snk + ../../../../../../Garnet.snk false true @@ -12,6 +12,6 @@ - + \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/benchmark/YcsbConstants.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/YcsbConstants.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/YcsbConstants.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/YcsbConstants.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/ZipfGenerator.cs b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/ZipfGenerator.cs similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/ZipfGenerator.cs rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/ZipfGenerator.cs diff --git a/libs/storage/Tsavorite/cs/benchmark/scripts/compare_runs.ps1 b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/scripts/compare_runs.ps1 similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/scripts/compare_runs.ps1 rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/scripts/compare_runs.ps1 diff --git a/libs/storage/Tsavorite/cs/benchmark/scripts/run_benchmark.ps1 b/libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/scripts/run_benchmark.ps1 similarity index 100% rename from libs/storage/Tsavorite/cs/benchmark/scripts/run_benchmark.ps1 rename to libs/storage/Tsavorite/cs/benchmark/YCSB.benchmark/scripts/run_benchmark.ps1 diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorBase.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorBase.cs index 98cd1ddbc3..e9e1f5ee03 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorBase.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorBase.cs @@ -5,139 +5,89 @@ using System.Diagnostics; using System.IO; using System.Runtime.CompilerServices; -using 
System.Runtime.InteropServices; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; namespace Tsavorite.core { - internal enum PMMFlushStatus : int { Flushed, InProgress }; - - internal enum PMMCloseStatus : int { Closed, Open }; - - [StructLayout(LayoutKind.Explicit)] - internal struct FullPageStatus - { - [FieldOffset(0)] - public long LastFlushedUntilAddress; - [FieldOffset(8)] - public long Dirty; - } - - [StructLayout(LayoutKind.Explicit)] - internal struct PageOffset - { - [FieldOffset(0)] - public int Offset; - [FieldOffset(4)] - public int Page; - [FieldOffset(0)] - public long PageAndOffset; - } - /// - /// Base class for hybrid log memory allocator + /// Base class for hybrid log memory allocator. Contains utility methods, some of which are not performance-critical so can be virtual. /// - /// - /// - internal abstract partial class AllocatorBase : IDisposable + public abstract partial class AllocatorBase : IDisposable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - /// - /// Epoch information - /// + /// The epoch we are operating with protected readonly LightEpoch epoch; + /// Whether we own (and thus must dispose) private readonly bool ownedEpoch; - /// - /// Comparer - /// - protected readonly ITsavoriteEqualityComparer comparer; + /// The store functions for this instance of TsavoriteKV + internal readonly TStoreFunctions _storeFunctions; + + /// The fully-derived allocator struct wrapper (so calls on it are inlined rather than virtual) for this log. 
+ internal readonly TAllocator _wrapper; #region Protected size definitions - /// - /// Buffer size - /// + /// Buffer size internal readonly int BufferSize; - /// - /// Log page size - /// + + /// Log page size internal readonly int LogPageSizeBits; - /// - /// Page size - /// + /// Page size internal readonly int PageSize; - /// - /// Page size mask - /// + + /// Page size mask internal readonly int PageSizeMask; - /// - /// Buffer size mask - /// + + /// Buffer size mask protected readonly int BufferSizeMask; - /// - /// Aligned page size in bytes - /// + + /// Aligned page size in bytes protected readonly int AlignedPageSizeBytes; - /// - /// Total hybrid log size (bits) - /// + /// Total hybrid log size (bits) protected readonly int LogTotalSizeBits; - /// - /// Total hybrid log size (bytes) - /// + + /// Total hybrid log size (bytes) protected readonly long LogTotalSizeBytes; - /// - /// Segment size in bits - /// + /// Segment size in bits protected readonly int LogSegmentSizeBits; - /// - /// Segment size - /// + + /// Segment size protected readonly long SegmentSize; - /// - /// Segment buffer size - /// + + /// Segment buffer size protected readonly int SegmentBufferSize; - /// - /// How many pages do we leave empty in the in-memory buffer (between 0 and BufferSize-1) - /// + /// How many pages do we leave empty in the in-memory buffer (between 0 and BufferSize-1) private int emptyPageCount; - /// - /// HeadOFfset lag address - /// + /// HeadOFfset lag address internal long HeadOffsetLagAddress; /// - /// Number of or instances active. + /// Number of or + /// instances active. 
/// internal long NumActiveLockingSessions = 0; - /// - /// Log mutable fraction - /// + /// Log mutable fraction protected readonly double LogMutableFraction; - /// - /// ReadOnlyOffset lag (from tail) - /// + + /// ReadOnlyOffset lag (from tail) protected long ReadOnlyLagAddress; #endregion #region Public addresses - /// - /// Read-only address - /// + /// The maximum address of the immutable in-memory log region public long ReadOnlyAddress; - /// - /// Safe read-only address - /// + /// Safe read-only address public long SafeReadOnlyAddress; /// @@ -152,9 +102,7 @@ internal abstract partial class AllocatorBase : IDisposable /// public long SafeHeadAddress; - /// - /// Flushed until address - /// + /// Flushed until address public long FlushedUntilAddress; /// @@ -163,14 +111,10 @@ internal abstract partial class AllocatorBase : IDisposable /// public long ClosedUntilAddress; - /// - /// The lowest valid address in the log - /// + /// The lowest valid address in the log public long BeginAddress; - /// - /// The lowest valid address on disk - updated when truncating log - /// + /// The lowest valid address on disk - updated when truncating log public long PersistedBeginAddress; /// @@ -182,17 +126,13 @@ internal abstract partial class AllocatorBase : IDisposable /// public override string ToString() => $"TA {GetTailAddress()}, ROA {ReadOnlyAddress}, SafeROA {SafeReadOnlyAddress}, HA {HeadAddress}, SafeHA {SafeHeadAddress}, CUA {ClosedUntilAddress}, FUA {FlushedUntilAddress}, BA {BeginAddress}"; - #endregion #region Protected device info - /// - /// Device - /// + /// Log Device protected readonly IDevice device; - /// - /// Sector size - /// + + /// Sector size protected readonly int sectorSize; #endregion @@ -202,224 +142,57 @@ public override string ToString() internal readonly FullPageStatus[] PageStatusIndicator; internal readonly PendingFlushList[] PendingFlush; - /// - /// Global address of the current tail (next element to be allocated from the circular 
buffer) - /// + /// Global address of the current tail (next element to be allocated from the circular buffer) private PageOffset TailPageOffset; - /// - /// Whether log is disposed - /// + /// Whether log is disposed private bool disposed = false; - /// - /// Whether device is a null device - /// + /// Whether device is a null device internal readonly bool IsNullDevice; #endregion - /// - /// Buffer pool - /// + #region Contained classes and related + /// Buffer pool internal SectorAlignedBufferPool bufferPool; - /// - /// Read cache - /// - protected readonly bool ReadCache = false; + /// This hlog is an instance of a Read cache + protected readonly bool IsReadCache = false; - /// - /// Read cache eviction callback - /// + /// Read cache eviction callback protected readonly Action EvictCallback = null; - /// - /// Flush callback - /// + /// Flush callback protected readonly Action FlushCallback = null; - /// - /// Whether to preallocate log on initialization - /// + /// Whether to preallocate log on initialization private readonly bool PreallocateLog = false; - /// - /// Error handling - /// + /// Error handling private readonly ErrorList errorList = new(); - /// - /// Observer for records entering read-only region - /// + /// Observer for records entering read-only region internal IObserver> OnReadOnlyObserver; - /// - /// Observer for records getting evicted from memory (page closed) - /// + /// Observer for records getting evicted from memory (page closed) internal IObserver> OnEvictionObserver; - /// - /// Observer for records brought into memory by deserializing pages - /// + /// Observer for records brought into memory by deserializing pages internal IObserver> OnDeserializationObserver; - /// - /// The "event" to be waited on for flush completion by the initiator of an operation - /// + /// The "event" to be waited on for flush completion by the initiator of an operation internal CompletionEvent FlushEvent; + /// If set, this is a function to call to 
determine whether the object size tracker reports maximum memory size has been exceeded. public Func IsSizeBeyondLimit; + #endregion - #region Abstract methods - /// - /// Initialize - /// + #region Abstract and virtual methods + /// Initialize fully derived allocator public abstract void Initialize(); - /// - /// Get start logical address - /// - /// - /// - public abstract long GetStartLogicalAddress(long page); - /// - /// Get first valid logical address - /// - /// - /// - public abstract long GetFirstValidLogicalAddress(long page); - /// - /// Get physical address - /// - /// - /// - public abstract long GetPhysicalAddress(long newLogicalAddress); - /// - /// Get address info - /// - /// - /// - public abstract ref RecordInfo GetInfo(long physicalAddress); - - /// - /// Get info from byte pointer - /// - /// - /// - public abstract unsafe ref RecordInfo GetInfoFromBytePointer(byte* ptr); - - /// - /// Get key - /// - /// - /// - public abstract ref Key GetKey(long physicalAddress); - /// - /// Get value - /// - /// - /// - public abstract ref Value GetValue(long physicalAddress); - /// - /// Get value from address range. For this will also initialize the value. 
- /// - /// - /// - /// - public virtual ref Value GetAndInitializeValue(long physicalAddress, long endPhysicalAddress) => ref GetValue(physicalAddress); - /// - /// Get address info for key - /// - /// - /// - public abstract unsafe AddressInfo* GetKeyAddressInfo(long physicalAddress); - /// - /// Get address info for value - /// - /// - /// - public abstract unsafe AddressInfo* GetValueAddressInfo(long physicalAddress); - - /// - /// Get record size - /// - /// - /// - public abstract (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress); - - /// - /// Get copy destination size for RMW, taking Input into account - /// - /// - public abstract (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) - where TVariableLengthInput : IVariableLengthInput; - - /// - /// Get number of bytes required - /// - /// - /// - /// - public virtual int GetRequiredRecordSize(long physicalAddress, int availableBytes) => GetAverageRecordSize(); - - /// - /// Get average record size - /// - /// - public abstract int GetAverageRecordSize(); - - /// - /// Get size of fixed (known) part of record on the main log - /// - /// - public abstract int GetFixedRecordSize(); - - /// - /// Get initial record size - /// - /// - /// - /// - /// - public abstract (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : IVariableLengthInput; - - /// - /// Get record size - /// - /// - /// - /// - public abstract (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value); - - /// - /// Get value size - /// - /// - public abstract int GetValueLength(ref Value value); - - /// - /// Allocate page - /// - /// - internal abstract void AllocatePage(int index); - /// - /// Whether page is allocated - /// 
- /// - /// - internal abstract bool IsAllocated(int pageIndex); - /// - /// Populate page - /// - /// - /// - /// - internal abstract unsafe void PopulatePage(byte* src, int required_bytes, long destinationPage); - /// - /// Write async to device - /// + /// Write async to device /// /// /// @@ -432,24 +205,16 @@ public abstract (int actualSize, int allocatedSize, int keySize) GetRMWInitialRe /// Start address of fuzzy region, which contains old and new version records (we use this to selectively flush only old-version records during snapshot checkpoint) protected abstract void WriteAsyncToDevice(long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, PageAsyncFlushResult result, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress); - private protected void VerifyCompatibleSectorSize(IDevice device) - { - if (sectorSize % device.SectorSize != 0) - throw new TsavoriteException($"Allocator with sector size {sectorSize} cannot flush to device with sector size {device.SectorSize}"); - } + /// Read objects to memory (async) + protected abstract unsafe void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default); - internal long GetReadOnlyLagAddress() => ReadOnlyLagAddress; + /// Read page from device (async) + protected abstract void ReadAsync(ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice); - /// - /// Delta flush - /// - /// - /// - /// - /// - /// - /// - /// + /// Write page to device (async) + protected abstract void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult); + + /// Flush checkpoint Delta to the Device internal virtual unsafe void AsyncFlushDeltaToDevice(long startAddress, long endAddress, 
long prevEndAddress, long version, DeltaLog deltaLog, out SemaphoreSlim completedSemaphore, int throttleCheckpointFlushDelayMs) { logger?.LogTrace("Starting async delta log flush with throttling {throttlingEnabled}", throttleCheckpointFlushDelayMs >= 0 ? $"enabled ({throttleCheckpointFlushDelayMs}ms)" : "disabled"); @@ -458,7 +223,7 @@ internal virtual unsafe void AsyncFlushDeltaToDevice(long startAddress, long end completedSemaphore = _completedSemaphore; if (throttleCheckpointFlushDelayMs >= 0) - Task.Run(FlushRunner); + _ = Task.Run(FlushRunner); else FlushRunner(); @@ -466,7 +231,7 @@ void FlushRunner() { long startPage = GetPage(startAddress); long endPage = GetPage(endAddress); - if (endAddress > GetStartLogicalAddress(endPage)) + if (endAddress > _wrapper.GetStartLogicalAddress(endPage)) endPage++; long prevEndPage = GetPage(prevEndAddress); @@ -495,7 +260,7 @@ void FlushRunner() continue; var logicalAddress = p << LogPageSizeBits; - var physicalAddress = GetPhysicalAddress(logicalAddress); + var physicalAddress = _wrapper.GetPhysicalAddress(logicalAddress); var endLogicalAddress = logicalAddress + PageSize; if (endAddress < endLogicalAddress) endLogicalAddress = endAddress; @@ -510,8 +275,8 @@ void FlushRunner() while (physicalAddress < endPhysicalAddress) { - ref var info = ref GetInfo(physicalAddress); - var (_, alignedRecordSize) = GetRecordSize(physicalAddress); + ref var info = ref _wrapper.GetInfo(physicalAddress); + var (_, alignedRecordSize) = _wrapper.GetRecordSize(physicalAddress); if (info.Dirty) { info.ClearDirtyAtomic(); // there may be read locks being taken, hence atomic @@ -554,12 +319,91 @@ void FlushRunner() } } + /// Delete in-memory portion of the log + internal abstract void DeleteFromMemory(); + + /// Reset the hybrid log. WARNING: assumes that threads have drained out at this point. 
+ public virtual void Reset() + { + var newBeginAddress = GetTailAddress(); + + // Shift read-only addresses to tail without flushing + _ = Utility.MonotonicUpdate(ref ReadOnlyAddress, newBeginAddress, out _); + _ = Utility.MonotonicUpdate(ref SafeReadOnlyAddress, newBeginAddress, out _); + + // Shift head address to tail + if (Utility.MonotonicUpdate(ref HeadAddress, newBeginAddress, out _)) + { + // Close addresses + OnPagesClosed(newBeginAddress); + + // Wait for pages to get closed + while (ClosedUntilAddress < newBeginAddress) + { + _ = Thread.Yield(); + if (epoch.ThisInstanceProtected()) + epoch.ProtectAndDrain(); + } + } + + // Update begin address to tail + _ = Utility.MonotonicUpdate(ref BeginAddress, newBeginAddress, out _); + + FlushEvent.Initialize(); + Array.Clear(PageStatusIndicator, 0, BufferSize); + if (PendingFlush != null) + { + for (int i = 0; i < BufferSize; i++) + PendingFlush[i]?.list?.Clear(); + } + device.Reset(); + } + + /// Wraps when an allocator potentially has to interact with multiple devices + protected virtual void TruncateUntilAddress(long toAddress) + { + PersistedBeginAddress = toAddress; + _ = Task.Run(() => device.TruncateUntilAddress(toAddress)); + } + + /// Wraps when an allocator potentially has to interact with multiple devices + protected virtual void TruncateUntilAddressBlocking(long toAddress) => device.TruncateUntilAddress(toAddress); + + /// Remove disk segment + protected virtual void RemoveSegment(int segment) => device.RemoveSegment(segment); + + internal virtual bool TryComplete() => device.TryComplete(); + + /// Dispose allocator + public virtual void Dispose() + { + disposed = true; + + if (ownedEpoch) + epoch.Dispose(); + bufferPool.Free(); + + FlushEvent.Dispose(); + notifyFlushedUntilAddressSemaphore?.Dispose(); + + OnReadOnlyObserver?.OnCompleted(); + OnEvictionObserver?.OnCompleted(); + } + + #endregion abstract and virtual methods + + private protected void VerifyCompatibleSectorSize(IDevice device) + { + if 
(sectorSize % device.SectorSize != 0) + throw new TsavoriteException($"Allocator with sector size {sectorSize} cannot flush to device with sector size {device.SectorSize}"); + } + internal unsafe void ApplyDelta(DeltaLog log, long startPage, long endPage, long recoverTo) { if (log == null) return; - long startLogicalAddress = GetStartLogicalAddress(startPage); - long endLogicalAddress = GetStartLogicalAddress(endPage); + long startLogicalAddress = _wrapper.GetStartLogicalAddress(startPage); + long endLogicalAddress = _wrapper.GetStartLogicalAddress(endPage); log.Reset(); while (log.GetNext(out long physicalAddress, out int entryLength, out var type)) @@ -571,16 +415,16 @@ internal unsafe void ApplyDelta(DeltaLog log, long startPage, long endPage, long long endAddress = physicalAddress + entryLength; while (physicalAddress < endAddress) { - long address = *(long*)physicalAddress; + var address = *(long*)physicalAddress; physicalAddress += sizeof(long); - int size = *(int*)physicalAddress; + var size = *(int*)physicalAddress; physicalAddress += sizeof(int); if (address >= startLogicalAddress && address < endLogicalAddress) { - var destination = GetPhysicalAddress(address); + var destination = _wrapper.GetPhysicalAddress(address); // Clear extra space (if any) in old record - var oldSize = GetRecordSize(destination).Item2; + var oldSize = _wrapper.GetRecordSize(destination).Item2; if (oldSize > size) new Span((byte*)(destination + size), oldSize - size).Clear(); @@ -588,7 +432,7 @@ internal unsafe void ApplyDelta(DeltaLog log, long startPage, long endPage, long Buffer.MemoryCopy((void*)physicalAddress, (void*)destination, size, size); // Clean up temporary bits when applying the delta log - ref var destInfo = ref GetInfo(destination); + ref var destInfo = ref _wrapper.GetInfo(destination); destInfo.ClearBitsForDiskImages(); } physicalAddress += size; @@ -635,168 +479,39 @@ internal void MarkPageAtomic(long logicalAddress, long version) Utility.MonotonicUpdate(ref 
PageStatusIndicator[offset].Dirty, version, out _); } - internal void WriteAsync(IntPtr alignedSourceAddress, ulong alignedDestinationAddress, uint numBytesToWrite, - DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult, - IDevice device) - { - if (asyncResult.partial) - { - // Write only required bytes within the page - int aligned_start = (int)((asyncResult.fromAddress - (asyncResult.page << LogPageSizeBits))); - aligned_start = (aligned_start / sectorSize) * sectorSize; - - int aligned_end = (int)((asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits))); - aligned_end = ((aligned_end + (sectorSize - 1)) & ~(sectorSize - 1)); - - numBytesToWrite = (uint)(aligned_end - aligned_start); - device.WriteAsync(alignedSourceAddress + aligned_start, alignedDestinationAddress + (ulong)aligned_start, numBytesToWrite, callback, asyncResult); - } - else - { - device.WriteAsync(alignedSourceAddress, alignedDestinationAddress, - numBytesToWrite, callback, asyncResult); - } - } - - private unsafe void VerifyPage(long page) - { - var startLogicalAddress = GetStartLogicalAddress(page); - var physicalAddress = GetPhysicalAddress(startLogicalAddress); - - long untilLogicalAddressInPage = GetPageSize(); - long pointer = 0; - - while (pointer < untilLogicalAddressInPage) - { - long recordStart = physicalAddress + pointer; - ref RecordInfo info = ref GetInfo(recordStart); - - if (info.IsNull()) - pointer += RecordInfo.GetLength(); - else - { - int size = GetRecordSize(recordStart).Item2; - Debug.Assert(size < 50); - Debug.Assert(size <= GetPageSize()); - pointer += size; - } - } - } - - /// - /// Read objects to memory (async) - /// - /// - /// - /// - /// - /// - protected abstract unsafe void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default); - /// - /// Read page (async) - /// - /// - /// - /// - /// - /// - /// - /// - /// - protected abstract 
void ReadAsync(ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice); - /// - /// Clear page - /// - /// Page number to be cleared - /// Offset to clear from (if partial clear) - internal abstract void ClearPage(long page, int offset = 0); - - internal abstract void FreePage(long page); - - /// - /// Write page (async) - /// - /// - /// - /// - /// - protected abstract void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult); - /// - /// Retrieve full record - /// - /// - /// - /// - protected abstract unsafe bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx); - - /// - /// Retrieve value from context - /// - /// - /// - public virtual ref Key GetContextRecordKey(ref AsyncIOContext ctx) => ref ctx.key; - - /// - /// Retrieve value from context - /// - /// - /// - public virtual ref Value GetContextRecordValue(ref AsyncIOContext ctx) => ref ctx.value; - - /// - /// Get heap container for pending key - /// - /// - /// - public abstract IHeapContainer GetKeyContainer(ref Key key); - - /// - /// Get heap container for pending value - /// - /// - /// - public abstract IHeapContainer GetValueContainer(ref Value value); - - /// - /// Copy value to context - /// - /// - /// - public virtual void PutContext(ref AsyncIOContext ctx, ref Value value) => ctx.value = value; - - /// - /// Whether key has objects - /// - /// - public abstract bool KeyHasObjects(); + internal void WriteAsync(IntPtr alignedSourceAddress, ulong alignedDestinationAddress, uint numBytesToWrite, + DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult, + IDevice device) + { + if (asyncResult.partial) + { + // Write only required bytes within the page + int aligned_start = (int)((asyncResult.fromAddress - (asyncResult.page << LogPageSizeBits))); + aligned_start = (aligned_start / sectorSize) * 
sectorSize; - /// - /// Whether value has objects - /// - /// - public abstract bool ValueHasObjects(); + int aligned_end = (int)(asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits)); + aligned_end = (aligned_end + (sectorSize - 1)) & ~(sectorSize - 1); - /// - /// Get segment offsets - /// - /// - public abstract long[] GetSegmentOffsets(); + numBytesToWrite = (uint)(aligned_end - aligned_start); + device.WriteAsync(alignedSourceAddress + aligned_start, alignedDestinationAddress + (ulong)aligned_start, numBytesToWrite, callback, asyncResult); + } + else + { + device.WriteAsync(alignedSourceAddress, alignedDestinationAddress, + numBytesToWrite, callback, asyncResult); + } + } - #endregion + internal long GetReadOnlyLagAddress() => ReadOnlyLagAddress; protected readonly ILogger logger; - /// - /// Instantiate base allocator - /// - /// - /// - /// - /// - /// - /// - public AllocatorBase(LogSettings settings, ITsavoriteEqualityComparer comparer, Action evictCallback, LightEpoch epoch, Action flushCallback, ILogger logger = null) + /// Instantiate base allocator implementation + private protected AllocatorBase(LogSettings settings, TStoreFunctions storeFunctions, Func wrapperCreator, Action evictCallback, LightEpoch epoch, Action flushCallback, ILogger logger = null) { + _storeFunctions = storeFunctions; + _wrapper = wrapperCreator(this); + // Validation if (settings.PageSizeBits < LogSettings.kMinPageSizeBits || settings.PageSizeBits > LogSettings.kMaxPageSizeBits) throw new TsavoriteException($"{nameof(settings.PageSizeBits)} must be between {LogSettings.kMinPageSizeBits} and {LogSettings.kMaxPageSizeBits}"); @@ -821,19 +536,15 @@ public AllocatorBase(LogSettings settings, ITsavoriteEqualityComparer compa if (settings.LogDevice == null) throw new TsavoriteException("LogSettings.LogDevice needs to be specified (e.g., use Devices.CreateLogDevice, AzureStorageDevice, or NullDevice)"); - if (evictCallback != null) - { - ReadCache = true; - EvictCallback = 
evictCallback; - } + IsReadCache = evictCallback != null; + EvictCallback = evictCallback; + FlushCallback = flushCallback; PreallocateLog = settings.PreallocateLog; FlushEvent.Initialize(); - if (settings.LogDevice is NullDevice) - IsNullDevice = true; + IsNullDevice = settings.LogDevice is NullDevice; - this.comparer = comparer; if (epoch == null) { this.epoch = new LightEpoch(); @@ -893,51 +604,6 @@ public AllocatorBase(LogSettings settings, ITsavoriteEqualityComparer compa AlignedPageSizeBytes = (PageSize + (sectorSize - 1)) & ~(sectorSize - 1); } - /// - /// Number of extra overflow pages allocated - /// - internal abstract int OverflowPageCount { get; } - - - /// - /// Reset the hybrid log. WARNING: assumes that threads have drained out at this point. - /// - public virtual void Reset() - { - var newBeginAddress = GetTailAddress(); - - // Shift read-only addresses to tail without flushing - Utility.MonotonicUpdate(ref ReadOnlyAddress, newBeginAddress, out _); - Utility.MonotonicUpdate(ref SafeReadOnlyAddress, newBeginAddress, out _); - - // Shift head address to tail - if (Utility.MonotonicUpdate(ref HeadAddress, newBeginAddress, out _)) - { - // Close addresses - OnPagesClosed(newBeginAddress); - - // Wait for pages to get closed - while (ClosedUntilAddress < newBeginAddress) - { - Thread.Yield(); - if (epoch.ThisInstanceProtected()) - epoch.ProtectAndDrain(); - } - } - - // Update begin address to tail - Utility.MonotonicUpdate(ref BeginAddress, newBeginAddress, out _); - - FlushEvent.Initialize(); - Array.Clear(PageStatusIndicator, 0, BufferSize); - if (PendingFlush != null) - { - for (int i = 0; i < BufferSize; i++) - PendingFlush[i]?.list?.Clear(); - } - device.Reset(); - } - internal void VerifyRecoveryInfo(HybridLogCheckpointInfo recoveredHLCInfo, bool trimLog = false) { // Note: trimLog is unused right now. 
Can be used to trim the log to the minimum @@ -955,7 +621,7 @@ internal void VerifyRecoveryInfo(HybridLogCheckpointInfo recoveredHLCInfo, bool long firstValidSegment = (int)(diskBeginAddress >> LogSegmentSizeBits); // Last valid disk segment required for recovery - int lastValidSegment = (int)(diskFlushedUntilAddress >> LogSegmentSizeBits); + var lastValidSegment = (int)(diskFlushedUntilAddress >> LogSegmentSizeBits); if ((diskFlushedUntilAddress & ((1L << LogSegmentSizeBits) - 1)) == 0) lastValidSegment--; @@ -964,7 +630,7 @@ internal void VerifyRecoveryInfo(HybridLogCheckpointInfo recoveredHLCInfo, bool var firstAvailSegment = device.StartSegment; var lastAvailSegment = device.EndSegment; - if (FlushedUntilAddress > GetFirstValidLogicalAddress(0)) + if (FlushedUntilAddress > _wrapper.GetFirstValidLogicalAddress(0)) { int currTailSegment = (int)(FlushedUntilAddress >> LogSegmentSizeBits); if ((FlushedUntilAddress & ((1L << LogSegmentSizeBits) - 1)) == 0) @@ -995,10 +661,7 @@ internal void VerifyRecoveryInfo(HybridLogCheckpointInfo recoveredHLCInfo, bool } } - /// - /// Initialize allocator - /// - /// + /// Initialize allocator protected void Initialize(long firstValidAddress) { Debug.Assert(firstValidAddress <= PageSize, $"firstValidAddress {firstValidAddress} shoulld be <= PageSize {PageSize}"); @@ -1009,21 +672,21 @@ protected void Initialize(long firstValidAddress) { long tailPage = firstValidAddress >> LogPageSizeBits; int tailPageIndex = (int)(tailPage % BufferSize); - if (!IsAllocated(tailPageIndex)) - AllocatePage(tailPageIndex); + if (!_wrapper.IsAllocated(tailPageIndex)) + _wrapper.AllocatePage(tailPageIndex); // Allocate next page as well int nextPageIndex = (int)(tailPage + 1) % BufferSize; - if (!IsAllocated(nextPageIndex)) - AllocatePage(nextPageIndex); + if (!_wrapper.IsAllocated(nextPageIndex)) + _wrapper.AllocatePage(nextPageIndex); } if (PreallocateLog) { for (int i = 0; i < BufferSize; i++) { - if (!IsAllocated(i)) - AllocatePage(i); + if 
(!_wrapper.IsAllocated(i)) + _wrapper.AllocatePage(i); } } @@ -1039,46 +702,19 @@ protected void Initialize(long firstValidAddress) TailPageOffset.Offset = (int)(firstValidAddress & PageSizeMask); } - /// - /// Dispose allocator - /// - public virtual void Dispose() - { - disposed = true; - - if (ownedEpoch) - epoch.Dispose(); - bufferPool.Free(); - - FlushEvent.Dispose(); - - OnReadOnlyObserver?.OnCompleted(); - OnEvictionObserver?.OnCompleted(); - } - - /// - /// Number of pages in circular buffer that are allocated - /// + /// Number of pages in circular buffer that are allocated public int AllocatedPageCount; - /// - /// Max number of pages that have been allocated at any point in time - /// + /// Max number of pages that have been allocated at any point in time public int MaxAllocatedPageCount; - /// - /// Maximum possible number of empty pages in circular buffer - /// + /// Maximum possible number of empty pages in circular buffer public int MaxEmptyPageCount => BufferSize - 1; - /// - /// Minimum number of empty pages in circular buffer to be maintained to account for non-power-of-two size - /// + /// Minimum number of empty pages in circular buffer to be maintained to account for non-power-of-two size public int MinEmptyPageCount; - /// - /// How many pages do we leave empty in the in-memory buffer (between 0 and BufferSize-1) - /// + /// How many pages do we leave empty in the in-memory buffer (between 0 and BufferSize-1) public int EmptyPageCount { get => emptyPageCount; @@ -1123,15 +759,7 @@ public int EmptyPageCount } } - /// - /// Delete in-memory portion of the log - /// - internal abstract void DeleteFromMemory(); - - /// - /// Increments AllocatedPageCount - /// Update MaxAllocatedPageCount, if a higher number of pages have been allocated. - /// + /// Increments AllocatedPageCount. Updates MaxAllocatedPageCount if a higher number of pages have been allocated. 
[MethodImpl(MethodImplOptions.AggressiveInlining)] protected void IncrementAllocatedPageCount() { @@ -1145,19 +773,11 @@ protected void IncrementAllocatedPageCount() } } - /// - /// Segment size - /// - /// - public long GetSegmentSize() - { - return SegmentSize; - } + /// Segment size + public long GetSegmentSize() => SegmentSize; - /// - /// Get tail address - /// - /// + /// Get tail address + [MethodImpl(MethodImplOptions.AggressiveInlining)] public long GetTailAddress() { var local = TailPageOffset; @@ -1169,85 +789,38 @@ public long GetTailAddress() return ((long)local.Page << LogPageSizeBits) | (uint)local.Offset; } - /// - /// Get page - /// - /// - /// - public long GetPage(long logicalAddress) - { - return logicalAddress >> LogPageSizeBits; - } - - /// - /// Get page index for page - /// - /// - /// - public int GetPageIndexForPage(long page) - { - return (int)(page % BufferSize); - } + /// Get page index from + public long GetPage(long logicalAddress) => logicalAddress >> LogPageSizeBits; - /// - /// Get page index for address - /// - /// - /// - public int GetPageIndexForAddress(long address) - { - return (int)((address >> LogPageSizeBits) % BufferSize); - } + /// Get page index for page + public int GetPageIndexForPage(long page) => (int)(page % BufferSize); - /// - /// Get capacity (number of pages) - /// - /// - public int GetCapacityNumPages() - { - return BufferSize; - } + /// Get page index for address + public int GetPageIndexForAddress(long address) => (int)((address >> LogPageSizeBits) % BufferSize); + /// Get capacity (number of pages) + public int GetCapacityNumPages() => BufferSize; - /// - /// Get page size - /// - /// - public long GetPageSize() - { - return PageSize; - } + /// Get page size + public long GetPageSize() => PageSize; - /// - /// Get offset in page - /// - /// - /// - public long GetOffsetInPage(long address) - { - return address & PageSizeMask; - } + /// Get offset in page + public long GetOffsetInPage(long address) => 
address & PageSizeMask; - /// - /// Get sector size for main hlog device - /// - /// - public int GetDeviceSectorSize() - { - return sectorSize; - } + /// Get sector size for main hlog device + public int GetDeviceSectorSize() => sectorSize; void AllocatePagesWithException(int pageIndex, PageOffset localTailPageOffset) { try { // Allocate this page, if needed - if (!IsAllocated(pageIndex % BufferSize)) - AllocatePage(pageIndex % BufferSize); + if (!_wrapper.IsAllocated(pageIndex % BufferSize)) + _wrapper.AllocatePage(pageIndex % BufferSize); // Allocate next page in advance, if needed - if (!IsAllocated((pageIndex + 1) % BufferSize)) - AllocatePage((pageIndex + 1) % BufferSize); + if (!_wrapper.IsAllocated((pageIndex + 1) % BufferSize)) + _wrapper.AllocatePage((pageIndex + 1) % BufferSize); } catch { @@ -1257,9 +830,7 @@ void AllocatePagesWithException(int pageIndex, PageOffset localTailPageOffset) } } - /// - /// Try allocate, no thread spinning allowed - /// + /// Try allocate, no thread spinning allowed /// Number of slots to allocate /// The allocated logical address, or 0 in case of inability to allocate [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -1322,7 +893,7 @@ public long TryAllocate(int numSlots = 1) return -1; // RETRY_NOW } - if (!IsAllocated(pageIndex % BufferSize) || !IsAllocated((pageIndex + 1) % BufferSize)) + if (!_wrapper.IsAllocated(pageIndex % BufferSize) || !_wrapper.IsAllocated((pageIndex + 1) % BufferSize)) AllocatePagesWithException(pageIndex, localTailPageOffset); localTailPageOffset.Page++; @@ -1336,9 +907,7 @@ public long TryAllocate(int numSlots = 1) return (((long)page) << LogPageSizeBits) | ((long)offset); } - /// - /// Try allocate, spin for RETRY_NOW case - /// + /// Try allocate, spin for RETRY_NOW case /// Number of slots to allocate /// The allocated logical address, or 0 in case of inability to allocate [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -1350,21 +919,23 @@ public long TryAllocateRetryNow(int 
numSlots = 1) return logicalAddress; } - // If the page we are trying to allocate is past the last page with an unclosed address region, - // then we can retry immediately because this is called after NeedToWait, so we know we've - // completed the wait on flushEvent for the necessary pages to be flushed, and are waiting for - // OnPagesClosed to be completed. + /// + /// If the page we are trying to allocate is past the last page with an unclosed address region, + /// then we can retry immediately because this is called after NeedToWait, so we know we've + /// completed the wait on flushEvent for the necessary pages to be flushed, and are waiting for + /// OnPagesClosed to be completed. + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] private bool CannotAllocate(int page) => page >= BufferSize + (ClosedUntilAddress >> LogPageSizeBits); - // If the page we are trying to allocate is past the last page with an unflushed address region, - // we have to wait for the flushEvent. - private bool NeedToWait(int page) => page >= BufferSize + (FlushedUntilAddress >> LogPageSizeBits); - /// - /// Used by applications to make the current state of the database immutable quickly + /// If the page we are trying to allocate is past the last page with an unflushed address region, + /// we have to wait for the flushEvent. 
/// - /// - /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool NeedToWait(int page) => page >= BufferSize + (FlushedUntilAddress >> LogPageSizeBits); + + /// Used by applications to make the current state of the database immutable quickly public bool ShiftReadOnlyToTail(out long tailAddress, out SemaphoreSlim notifyDone) { notifyDone = null; @@ -1381,11 +952,7 @@ public bool ShiftReadOnlyToTail(out long tailAddress, out SemaphoreSlim notifyDo return false; } - /// - /// Used by applications to move read-only forward - /// - /// - /// + /// Used by applications to move read-only forward public bool ShiftReadOnlyAddress(long newReadOnlyAddress, bool noFlush = false) { if (Utility.MonotonicUpdate(ref ReadOnlyAddress, newReadOnlyAddress, out _)) @@ -1396,12 +963,7 @@ public bool ShiftReadOnlyAddress(long newReadOnlyAddress, bool noFlush = false) return false; } - /// - /// Shift begin address - /// - /// - /// - /// + /// Shift begin address public void ShiftBeginAddress(long newBeginAddress, bool truncateLog, bool noFlush = false) { // First update the begin address @@ -1414,7 +976,7 @@ public void ShiftBeginAddress(long newBeginAddress, bool truncateLog, bool noFlu // Shift read-only address var flushEvent = FlushEvent; - ShiftReadOnlyAddress(newBeginAddress, noFlush); + _ = ShiftReadOnlyAddress(newBeginAddress, noFlush); // Wait for flush to complete var spins = 0; @@ -1424,7 +986,7 @@ public void ShiftBeginAddress(long newBeginAddress, bool truncateLog, bool noFlu break; if (++spins < Constants.kFlushSpinCount) { - Thread.Yield(); + _ = Thread.Yield(); continue; } try @@ -1454,58 +1016,20 @@ public void ShiftBeginAddress(long newBeginAddress, bool truncateLog, bool noFlu } } - /// - /// Invokes eviction observer if set and then frees the page. - /// - /// - public virtual void EvictPage(long page) + /// Invokes eviction observer if set and then frees the page. 
+ internal virtual void EvictPage(long page) { var start = page << LogPageSizeBits; var end = (page + 1) << LogPageSizeBits; if (OnEvictionObserver is not null) MemoryPageScan(start, end, OnEvictionObserver); - FreePage(page); - } - - /// - /// Wraps when an allocator potentially has to interact with multiple devices - /// - /// - protected virtual void TruncateUntilAddress(long toAddress) - { - PersistedBeginAddress = toAddress; - Task.Run(() => device.TruncateUntilAddress(toAddress)); - } - - /// - /// Wraps when an allocator potentially has to interact with multiple devices - /// - /// - protected virtual void TruncateUntilAddressBlocking(long toAddress) - { - device.TruncateUntilAddress(toAddress); - } - - /// - /// Remove disk segment - /// - /// - protected virtual void RemoveSegment(int segment) - { - device.RemoveSegment(segment); - } - - internal virtual bool TryComplete() - { - return device.TryComplete(); + _wrapper.FreePage(page); } /// /// Seal: make sure there are no longer any threads writing to the page /// Flush: send page to secondary store /// - /// - /// private void OnPagesMarkedReadOnly(long newSafeReadOnlyAddress, bool noFlush = false) { if (Utility.MonotonicUpdate(ref SafeReadOnlyAddress, newSafeReadOnlyAddress, out long oldSafeReadOnlyAddress)) @@ -1522,11 +1046,7 @@ private void OnPagesMarkedReadOnly(long newSafeReadOnlyAddress, bool noFlush = f } } - /// - /// Action to be performed for when all threads have - /// agreed that a page range is closed. - /// - /// + /// Action to be performed for when all threads have agreed that a page range is closed. 
private void OnPagesClosed(long newSafeHeadAddress) { Debug.Assert(newSafeHeadAddress > 0); @@ -1567,7 +1087,7 @@ private void OnPagesClosedWorker() long closeStartAddress = ClosedUntilAddress; long closeEndAddress = OngoingCloseUntilAddress; - if (ReadCache) + if (IsReadCache) EvictCallback(closeStartAddress, closeEndAddress); for (long closePageAddress = closeStartAddress & ~PageSizeMask; closePageAddress < closeEndAddress; closePageAddress += PageSize) @@ -1582,13 +1102,13 @@ private void OnPagesClosedWorker() // If we are using a null storage device, we must also shift BeginAddress if (IsNullDevice) - Utility.MonotonicUpdate(ref BeginAddress, end, out _); + _ = Utility.MonotonicUpdate(ref BeginAddress, end, out _); // If the end of the closing range is at the end of the page, free the page if (end == closePageAddress + PageSize) - FreePage((int)(closePageAddress >> LogPageSizeBits)); + _wrapper.FreePage((int)(closePageAddress >> LogPageSizeBits)); - Utility.MonotonicUpdate(ref ClosedUntilAddress, end, out _); + _ = Utility.MonotonicUpdate(ref ClosedUntilAddress, end, out _); } // End if we have exhausted co-operative work @@ -1619,11 +1139,9 @@ private void DebugPrintAddresses() } /// - /// Called every time a new tail page is allocated. Here the read-only is - /// shifted only to page boundaries unlike ShiftReadOnlyToTail where shifting - /// can happen to any fine-grained address. + /// Called every time a new tail page is allocated. Here the read-only is shifted only to page boundaries + /// unlike ShiftReadOnlyToTail where shifting can happen to any fine-grained address. /// - /// private void PageAlignedShiftReadOnlyAddress(long currentTailAddress) { long pageAlignedTailAddress = currentTailAddress & ~PageSizeMask; @@ -1640,6 +1158,7 @@ private void PageAlignedShiftReadOnlyAddress(long currentTailAddress) /// Tries to shift head address based on the head offset lag size. 
/// /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] private void PageAlignedShiftHeadAddress(long currentTailAddress) => ShiftHeadAddress((currentTailAddress & ~PageSizeMask) - HeadOffsetLagAddress); @@ -1654,9 +1173,7 @@ public long ShiftHeadAddress(long desiredHeadAddress) long newHeadAddress = desiredHeadAddress; if (currentFlushedUntilAddress < newHeadAddress) - { newHeadAddress = currentFlushedUntilAddress; - } if (Utility.MonotonicUpdate(ref HeadAddress, newHeadAddress, out _)) { @@ -1703,9 +1220,7 @@ protected void ShiftFlushedUntilAddress() FlushEvent.Set(); if ((oldFlushedUntilAddress < notifyFlushedUntilAddress) && (currentFlushedUntilAddress >= notifyFlushedUntilAddress)) - { - notifyFlushedUntilAddressSemaphore.Release(); - } + _ = notifyFlushedUntilAddressSemaphore.Release(); } } @@ -1714,32 +1229,20 @@ protected void ShiftFlushedUntilAddress() var info = errorList.GetEarliestError(); if (info.FromAddress == FlushedUntilAddress) { - // all requests before error range has finished successfully -- this is the earliest error and we - // can invoke callback on it. + // All requests before error range has finished successfully -- this is the earliest error and we can invoke callback on it. FlushCallback?.Invoke(info); } // Otherwise, do nothing and wait for the next invocation. 
} } - /// - /// Address for notification of flushed-until - /// + /// Address for notification of flushed-until public long notifyFlushedUntilAddress; - /// - /// Semaphore for notification of flushed-until - /// + /// Semaphore for notification of flushed-until public SemaphoreSlim notifyFlushedUntilAddressSemaphore; - - /// - /// Reset for recovery - /// - /// - /// - /// - /// + /// Reset for recovery public void RecoveryReset(long tailAddress, long headAddress, long beginAddress, long readonlyAddress) { long tailPage = GetPage(tailAddress); @@ -1749,13 +1252,13 @@ public void RecoveryReset(long tailAddress, long headAddress, long beginAddress, // Allocate current page if necessary var pageIndex = TailPageOffset.Page % BufferSize; - if (!IsAllocated(pageIndex)) - AllocatePage(pageIndex); + if (!_wrapper.IsAllocated(pageIndex)) + _wrapper.AllocatePage(pageIndex); // Allocate next page as well - this is an invariant in the allocator! var nextPageIndex = (pageIndex + 1) % BufferSize; - if (!IsAllocated(nextPageIndex)) - AllocatePage(nextPageIndex); + if (!_wrapper.IsAllocated(nextPageIndex)) + _wrapper.AllocatePage(nextPageIndex); BeginAddress = beginAddress; HeadAddress = headAddress; @@ -1769,7 +1272,7 @@ public void RecoveryReset(long tailAddress, long headAddress, long beginAddress, pageIndex = GetPageIndexForAddress(tailAddress); // clear the last page starting from tail address - ClearPage(pageIndex, (int)GetOffsetInPage(tailAddress)); + _wrapper.ClearPage(pageIndex, (int)GetOffsetInPage(tailAddress)); // Printing debug info logger?.LogInformation("******* Recovered HybridLog Stats *******"); @@ -1780,21 +1283,13 @@ public void RecoveryReset(long tailAddress, long headAddress, long beginAddress, logger?.LogInformation("Tail Address: {tailAddress}", tailAddress); } - /// - /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read - /// the record efficiently into memory. 
- /// - /// - /// - /// - /// - /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read the record efficiently into memory. internal unsafe void AsyncReadRecordToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, ref AsyncIOContext context) { - ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); - ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); + var fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); + var alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); - uint alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); + var alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); var record = bufferPool.Get((int)alignedReadLength); @@ -1821,10 +1316,10 @@ internal unsafe void AsyncReadRecordToMemory(long fromLogical, int numBytes, Dev /// internal unsafe void AsyncReadRecordToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, ref SimpleReadContext context) { - ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); - ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); + var fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); + var alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); - uint alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); + var alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); context.record = 
bufferPool.Get((int)alignedReadLength); @@ -1839,18 +1334,7 @@ internal unsafe void AsyncReadRecordToMemory(long fromLogical, int numBytes, Dev context); } - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// + /// Read pages from specified device public void AsyncReadPagesFromDevice( long readPageStart, int numPages, @@ -1859,24 +1343,9 @@ public void AsyncReadPagesFromDevice( TContext context, long devicePageOffset = 0, IDevice logDevice = null, IDevice objectLogDevice = null) - { - AsyncReadPagesFromDevice(readPageStart, numPages, untilAddress, callback, context, - out _, devicePageOffset, logDevice, objectLogDevice); - } + => AsyncReadPagesFromDevice(readPageStart, numPages, untilAddress, callback, context, out _, devicePageOffset, logDevice, objectLogDevice); - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// + /// Read pages from specified device private void AsyncReadPagesFromDevice( long readPageStart, int numPages, @@ -1887,27 +1356,18 @@ private void AsyncReadPagesFromDevice( long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null) { - var usedDevice = device; + var usedDevice = device ?? 
this.device; IDevice usedObjlogDevice = objectLogDevice; - if (device == null) - { - usedDevice = this.device; - } - completed = new CountdownEvent(numPages); for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) { - int pageIndex = (int)(readPage % BufferSize); - if (!IsAllocated(pageIndex)) - { - // Allocate a new page - AllocatePage(pageIndex); - } + var pageIndex = (int)(readPage % BufferSize); + if (!_wrapper.IsAllocated(pageIndex)) + _wrapper.AllocatePage(pageIndex); else - { - ClearPage(readPage); - } + _wrapper.ClearPage(readPage); + var asyncResult = new PageAsyncReadResult() { page = readPage, @@ -1917,8 +1377,8 @@ private void AsyncReadPagesFromDevice( maxPtr = PageSize }; - ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); - uint readLength = (uint)AlignedPageSizeBytes; + var offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); + var readLength = (uint)AlignedPageSizeBytes; long adjustedUntilAddress = AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask); if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) @@ -1946,7 +1406,7 @@ public void AsyncFlushPages(long fromAddress, long untilAddress, bool noFlush = { long startPage = fromAddress >> LogPageSizeBits; long endPage = untilAddress >> LogPageSizeBits; - int numPages = (int)(endPage - startPage); + var numPages = (int)(endPage - startPage); long offsetInEndPage = GetOffsetInPage(untilAddress); @@ -1987,7 +1447,7 @@ public void AsyncFlushPages(long fromAddress, long untilAddress, bool noFlush = if (asyncResult.untilAddress <= BeginAddress) { // Short circuit as no flush needed - Utility.MonotonicUpdate(ref PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress, BeginAddress, out _); + _ = Utility.MonotonicUpdate(ref PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress, BeginAddress, out _); ShiftFlushedUntilAddress(); continue; } @@ -1995,7 +1455,7 @@ 
public void AsyncFlushPages(long fromAddress, long untilAddress, bool noFlush = if (IsNullDevice || noFlush) { // Short circuit as no flush needed - Utility.MonotonicUpdate(ref PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress, asyncResult.untilAddress, out _); + _ = Utility.MonotonicUpdate(ref PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress, asyncResult.untilAddress, out _); ShiftFlushedUntilAddress(); continue; } @@ -2008,18 +1468,14 @@ public void AsyncFlushPages(long fromAddress, long untilAddress, bool noFlush = // Try to merge request with existing adjacent (earlier) pending requests while (PendingFlush[index].RemovePreviousAdjacent(asyncResult.fromAddress, out var existingRequest)) - { asyncResult.fromAddress = existingRequest.fromAddress; - } // Enqueue work in shared queue PendingFlush[index].Add(asyncResult); // Perform work from shared queue if possible if (PendingFlush[index].RemoveNextAdjacent(FlushedUntilAddress, out PageAsyncFlushResult request)) - { WriteAsync(request.fromAddress >> LogPageSizeBits, AsyncFlushPageCallback, request); - } } else WriteAsync(flushPage, AsyncFlushPageCallback, asyncResult); @@ -2073,13 +1529,13 @@ public void AsyncFlushPagesToDevice(long startPage, long endPage, long endLogica // If throttled, convert rest of the method into a truly async task run // because issuing IO can take up synchronous time if (throttleCheckpointFlushDelayMs >= 0) - Task.Run(FlushRunner); + _ = Task.Run(FlushRunner); else FlushRunner(); void FlushRunner() { - int totalNumPages = (int)(endPage - startPage); + var totalNumPages = (int)(endPage - startPage); var flushCompletionTracker = new FlushCompletionTracker(_completedSemaphore, throttleCheckpointFlushDelayMs >= 0 ? 
new SemaphoreSlim(0) : null, totalNumPages); var localSegmentOffsets = new long[SegmentBufferSize]; @@ -2118,8 +1574,8 @@ internal void AsyncGetFromDisk(long fromLogical, int numBytes, AsyncIOContext= requiredBytes) { - Debug.Assert(!GetInfoFromBytePointer(record).Invalid, "Invalid records should not be in the hash chain for pending IO"); + Debug.Assert(!_wrapper.GetInfoFromBytePointer(record).Invalid, "Invalid records should not be in the hash chain for pending IO"); // We have all the required bytes. If we don't have the complete record, RetrievedFullRecord calls AsyncGetFromDisk. - if (!RetrievedFullRecord(record, ref ctx)) + if (!_wrapper.RetrievedFullRecord(record, ref ctx)) return; // If request_key is null we're called from ReadAtAddress, so it is an implicit match. - if (ctx.request_key is not null && !comparer.Equals(ref ctx.request_key.Get(), ref GetContextRecordKey(ref ctx))) + if (ctx.request_key is not null && !_storeFunctions.KeysEqual(ref ctx.request_key.Get(), ref _wrapper.GetContextRecordKey(ref ctx))) { // Keys don't match so request the previous record in the chain if it is in the range to resolve. 
- ctx.logicalAddress = GetInfoFromBytePointer(record).PreviousAddress; + ctx.logicalAddress = _wrapper.GetInfoFromBytePointer(record).PreviousAddress; if (ctx.logicalAddress >= BeginAddress && ctx.logicalAddress >= ctx.minAddress) { ctx.record.Return(); @@ -2169,7 +1625,7 @@ private unsafe void AsyncGetFromDiskCallback(uint errorCode, uint numBytes, obje else if (ctx.callbackQueue is not null) ctx.callbackQueue.Enqueue(ctx); else - ctx.asyncOperation.TrySetResult(ctx); + _ = ctx.asyncOperation.TrySetResult(ctx); } else { @@ -2183,7 +1639,7 @@ private unsafe void AsyncGetFromDiskCallback(uint errorCode, uint numBytes, obje if (ctx.completionEvent is not null) ctx.completionEvent.SetException(e); else if (ctx.asyncOperation is not null) - ctx.asyncOperation.TrySetException(e); + _ = ctx.asyncOperation.TrySetException(e); else throw; } @@ -2200,12 +1656,10 @@ private void AsyncFlushPageCallback(uint errorCode, uint numBytes, object contex try { if (errorCode != 0) - { logger?.LogError("AsyncFlushPageCallback error: {0}", errorCode); - } // Set the page status to flushed - PageAsyncFlushResult result = (PageAsyncFlushResult)context; + var result = (PageAsyncFlushResult)context; if (Interlocked.Decrement(ref result.count) == 0) { @@ -2218,7 +1672,7 @@ private void AsyncFlushPageCallback(uint errorCode, uint numBytes, object contex else { // Update the page's last flushed until address only if there is no failure. 
- Utility.MonotonicUpdate( + _ = Utility.MonotonicUpdate( ref PageStatusIndicator[result.page % BufferSize].LastFlushedUntilAddress, result.untilAddress, out _); } @@ -2229,9 +1683,7 @@ private void AsyncFlushPageCallback(uint errorCode, uint numBytes, object contex var _flush = FlushedUntilAddress; if (GetOffsetInPage(_flush) > 0 && PendingFlush[GetPage(_flush) % BufferSize].RemoveNextAdjacent(_flush, out PageAsyncFlushResult request)) - { WriteAsync(request.fromAddress >> LogPageSizeBits, AsyncFlushPageCallback, request); - } } catch when (disposed) { } } @@ -2242,18 +1694,13 @@ internal void UnsafeSkipError(CommitInfo info) { errorList.TruncateUntil(info.UntilAddress); var page = info.FromAddress >> PageSizeMask; - Utility.MonotonicUpdate( - ref PageStatusIndicator[page % BufferSize].LastFlushedUntilAddress, - info.UntilAddress, out _); + _ = Utility.MonotonicUpdate(ref PageStatusIndicator[page % BufferSize].LastFlushedUntilAddress, info.UntilAddress, out _); ShiftFlushedUntilAddress(); var _flush = FlushedUntilAddress; if (GetOffsetInPage(_flush) > 0 && PendingFlush[GetPage(_flush) % BufferSize].RemoveNextAdjacent(_flush, out PageAsyncFlushResult request)) - { WriteAsync(request.fromAddress >> LogPageSizeBits, AsyncFlushPageCallback, request); - } } catch when (disposed) { } - } /// @@ -2267,20 +1714,18 @@ protected void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, obj try { if (errorCode != 0) - { logger?.LogError("AsyncFlushPageToDeviceCallback error: {0}", errorCode); - } - PageAsyncFlushResult result = (PageAsyncFlushResult)context; + var result = (PageAsyncFlushResult)context; - // Unset dirty bit for flushed pages - bool epochTaken = false; + var epochTaken = false; if (!epoch.ThisInstanceProtected()) { epochTaken = true; epoch.Resume(); } + // Unset dirty bit for flushed pages try { var startAddress = result.page << LogPageSizeBits; @@ -2297,17 +1742,17 @@ protected void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, obj 
if (_readOnlyAddress > endAddress) endAddress = _readOnlyAddress; - int flushWidth = (int)(endAddress - startAddress); + var flushWidth = (int)(endAddress - startAddress); if (flushWidth > 0) { - var physicalAddress = GetPhysicalAddress(startAddress); + var physicalAddress = _wrapper.GetPhysicalAddress(startAddress); var endPhysicalAddress = physicalAddress + flushWidth; while (physicalAddress < endPhysicalAddress) { - ref var info = ref GetInfo(physicalAddress); - var (_, alignedRecordSize) = GetRecordSize(physicalAddress); + ref var info = ref _wrapper.GetInfo(physicalAddress); + var (_, alignedRecordSize) = _wrapper.GetRecordSize(physicalAddress); if (info.Dirty) info.ClearDirtyAtomic(); // there may be read locks being taken, hence atomic physicalAddress += alignedRecordSize; @@ -2321,36 +1766,11 @@ protected void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, obj } if (Interlocked.Decrement(ref result.count) == 0) - { result.Free(); - } } catch when (disposed) { } } - /// - /// Serialize to log - /// - /// - /// - public virtual void SerializeKey(ref Key src, long physicalAddress) - { - GetKey(physicalAddress) = src; - } - - /// - /// Serialize to log - /// - /// - /// - public virtual void SerializeValue(ref Value src, long physicalAddress) - { - GetValue(physicalAddress) = src; - } - - internal string PrettyPrint(long address) - { - return $"{GetPage(address)}:{GetOffsetInPage(address)}"; - } + internal string PrettyPrintLogicalAddress(long logicalAddress) => $"{logicalAddress}:{GetPage(logicalAddress)}.{GetOffsetInPage(logicalAddress)}"; } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorRecord.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorRecord.cs new file mode 100644 index 0000000000..6386e7170a --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorRecord.cs @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +using System.Runtime.InteropServices; + +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member + +namespace Tsavorite.core +{ + [StructLayout(LayoutKind.Sequential, Pack = 1)] + public struct AllocatorRecord + { + public RecordInfo info; + public Key key; + public Value value; + + public override string ToString() + { + var keyString = key?.ToString() ?? "null"; + if (keyString.Length > 20) + keyString = keyString.Substring(0, 20) + "..."; + var valueString = value?.ToString() ?? "null"; ; + if (valueString.Length > 20) + valueString = valueString.Substring(0, 20) + "..."; + return $"{keyString} | {valueString} | {info}"; + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorScan.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorScan.cs index 1dfcfcb40c..d3e32daa08 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorScan.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorScan.cs @@ -24,19 +24,21 @@ internal void Initialize(IScanIteratorFunctions scanIteratorFunction } } - internal abstract partial class AllocatorBase + public abstract partial class AllocatorBase : IDisposable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Pull-based scan interface for HLOG; user calls GetNext() which advances through the address range. /// /// Pull Scan iterator instance - public abstract ITsavoriteScanIterator Scan(TsavoriteKV store, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode = ScanBufferingMode.DoublePageBuffering, bool includeSealedRecords = false); + public abstract ITsavoriteScanIterator Scan(TsavoriteKV store, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode = ScanBufferingMode.DoublePageBuffering, bool includeSealedRecords = false); /// /// Push-based scan interface for HLOG, called from LogAccessor; scan the log given address range, calling for each record. 
/// /// True if Scan completed; false if Scan ended early due to one of the TScanIterator reader functions returning false - internal abstract bool Scan(TsavoriteKV store, long beginAddress, long endAddress, ref TScanFunctions scanFunctions, + internal abstract bool Scan(TsavoriteKV store, long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode = ScanBufferingMode.DoublePageBuffering) where TScanFunctions : IScanIteratorFunctions; @@ -44,16 +46,16 @@ internal abstract bool Scan(TsavoriteKV store, long /// Push-based iteration of key versions, calling for each record. /// /// True if Scan completed; false if Scan ended early due to one of the TScanIterator reader functions returning false - internal bool IterateKeyVersions(TsavoriteKV store, ref Key key, ref TScanFunctions scanFunctions) + internal bool IterateKeyVersions(TsavoriteKV store, ref Key key, ref TScanFunctions scanFunctions) where TScanFunctions : IScanIteratorFunctions { - OperationStackContext stackCtx = new(store.comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(_storeFunctions.GetKeyHashCode64(ref key)); if (!store.FindTag(ref stackCtx.hei)) return false; - stackCtx.SetRecordSourceToHashEntry(store.hlog); + stackCtx.SetRecordSourceToHashEntry(store.hlogBase); if (store.UseReadCache) store.SkipReadCache(ref stackCtx, out _); - if (stackCtx.recSrc.LogicalAddress < store.hlog.BeginAddress) + if (stackCtx.recSrc.LogicalAddress < store.hlogBase.BeginAddress) return false; return IterateKeyVersions(store, ref key, stackCtx.recSrc.LogicalAddress, ref scanFunctions); } @@ -62,7 +64,7 @@ internal bool IterateKeyVersions(TsavoriteKV store, /// Push-based iteration of key versions, calling for each record. 
/// /// True if Scan completed; false if Scan ended early due to one of the TScanIterator reader functions returning false - internal abstract bool IterateKeyVersions(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions) + internal abstract bool IterateKeyVersions(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions) where TScanFunctions : IScanIteratorFunctions; /// @@ -77,7 +79,7 @@ internal bool PushScanImpl(long beginAddress, lon var headAddress = HeadAddress; long numRecords = 1; - bool stop = false; + var stop = false; for (; !stop && iter.GetNext(out var recordInfo); ++numRecords) { try @@ -105,7 +107,7 @@ internal bool PushScanImpl(long beginAddress, lon /// /// Implementation for push-iterating key versions /// - internal bool IterateKeyVersionsImpl(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions, TScanIterator iter) + internal bool IterateKeyVersionsImpl(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions, TScanIterator iter) where TScanFunctions : IScanIteratorFunctions where TScanIterator : ITsavoriteScanIterator, IPushScanIterator { @@ -117,13 +119,13 @@ internal bool IterateKeyVersionsImpl(TsavoriteKV< bool stop = false, continueOnDisk = false; for (; !stop && iter.BeginGetPrevInMemory(ref key, out var recordInfo, out continueOnDisk); ++numRecords) { - OperationStackContext stackCtx = default; + OperationStackContext stackCtx = default; try { // Iter records above headAddress will be in log memory and must be locked. 
if (iter.CurrentAddress >= headAddress && !recordInfo.IsClosed) { - store.LockForScan(ref stackCtx, ref key, ref iter.GetLockableInfo()); + store.LockForScan(ref stackCtx, ref key); stop = !scanFunctions.ConcurrentReader(ref key, ref iter.GetValue(), new RecordMetadata(recordInfo, iter.CurrentAddress), numRecords, out _); } else @@ -137,7 +139,7 @@ internal bool IterateKeyVersionsImpl(TsavoriteKV< finally { if (stackCtx.recSrc.HasLock) - store.UnlockForScan(ref stackCtx, ref iter.GetKey(), ref iter.GetLockableInfo()); + store.UnlockForScan(ref stackCtx); iter.EndGetPrevInMemory(); } } @@ -170,9 +172,9 @@ internal unsafe bool GetFromDiskAndPushToReader(ref Key key, ref AsyncIOContextCompletionEvent completionEvent, out bool stop) where TScanFunctions : IScanIteratorFunctions { - completionEvent.Prepare(GetKeyContainer(ref key), logicalAddress); + completionEvent.Prepare(_wrapper.GetKeyContainer(ref key), logicalAddress); - AsyncGetFromDisk(logicalAddress, GetAverageRecordSize(), completionEvent.request); + AsyncGetFromDisk(logicalAddress, _wrapper.GetAverageRecordSize(), completionEvent.request); completionEvent.Wait(); stop = false; @@ -184,9 +186,9 @@ internal unsafe bool GetFromDiskAndPushToReader(ref Key key, ref if (completionEvent.request.logicalAddress < BeginAddress) return false; - RecordInfo recordInfo = GetInfoFromBytePointer(completionEvent.request.record.GetValidPointer()); + RecordInfo recordInfo = _wrapper.GetInfoFromBytePointer(completionEvent.request.record.GetValidPointer()); recordInfo.ClearBitsForDiskImages(); - stop = !scanFunctions.SingleReader(ref key, ref GetContextRecordValue(ref completionEvent.request), new RecordMetadata(recordInfo, completionEvent.request.logicalAddress), numRecords, out _); + stop = !scanFunctions.SingleReader(ref key, ref _wrapper.GetContextRecordValue(ref completionEvent.request), new RecordMetadata(recordInfo, completionEvent.request.logicalAddress), numRecords, out _); logicalAddress = recordInfo.PreviousAddress; 
return !stop; } @@ -200,10 +202,10 @@ internal unsafe bool GetFromDiskAndPushToReader(ref Key key, ref /// Currently we load an entire page, which while inefficient in performance, allows us to make the cursor safe (by ensuring we align to a valid record) if it is not /// the last one returned. We could optimize this to load only the subset of a page that is pointed to by the cursor and do GetRequiredRecordSize/RetrievedFullRecord as in /// AsyncGetFromDiskCallback. However, this would not validate the cursor and would therefore require maintaining a cursor history. - internal abstract bool ScanCursor(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) + internal abstract bool ScanCursor(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) where TScanFunctions : IScanIteratorFunctions; - protected bool ScanLookup(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, TScanIterator iter, bool validateCursor) + private protected bool ScanLookup(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, TScanIterator iter, bool validateCursor) where TScanFunctions : IScanIteratorFunctions where TScanIterator : ITsavoriteScanIterator, IPushScanIterator { @@ -260,13 +262,13 @@ protected bool ScanLookup(Tsavor [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ConditionalScanPush(TSessionFunctionsWrapper sessionFunctions, ScanCursorState scanCursorState, RecordInfo recordInfo, ref Key key, ref Value value, long minAddress) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(epoch.ThisInstanceProtected(), "This is called only from ScanLookup so the epoch should be protected"); - 
TsavoriteKV.PendingContext pendingContext = new(comparer.GetHashCode64(ref key)); + TsavoriteKV.PendingContext pendingContext = new(_storeFunctions.GetKeyHashCode64(ref key)); OperationStatus internalStatus; - OperationStackContext stackCtx = new(pendingContext.keyHash); + OperationStackContext stackCtx = new(pendingContext.keyHash); bool needIO; do { @@ -308,10 +310,10 @@ internal Status ConditionalScanPush(TSessionFunctionsWrapper sessionFunctions, - ref TsavoriteKV.PendingContext pendingContext, + ref TsavoriteKV.PendingContext pendingContext, ref Key key, ref Input input, ref Value value, ref Output output, Context userContext, - ref OperationStackContext stackCtx, long minAddress, ScanCursorState scanCursorState) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, long minAddress, ScanCursorState scanCursorState) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // WriteReason is not surfaced for this operation, so pick anything. 
var status = sessionFunctions.Store.PrepareIOForConditionalOperation(sessionFunctions, ref pendingContext, ref key, ref input, ref value, ref output, @@ -349,13 +351,6 @@ public void RMWCompletionCallback(ref Key key, ref Input input, ref Output outpu public int GetRMWModifiedValueLength(ref Value value, ref Input input) => 0; public int GetRMWInitialValueLength(ref Input input) => 0; - public void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - public void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo) { } - public void DisposeDeserializedFromDisk(ref Key key, ref Value value) { } - public void DisposeForRevivification(ref Key key, ref Value value, int keySize) { } - public void ConvertOutputToHeap(ref Input input, ref Output output) { } } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorSettings.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorSettings.cs new file mode 100644 index 0000000000..935859aa37 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/AllocatorSettings.cs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System; +using Microsoft.Extensions.Logging; + +namespace Tsavorite.core +{ + /// + /// This class is created by to pass parameters to the allocator factory function. 
+ /// + public struct AllocatorSettings + { + /// The Log settings, usually from + internal LogSettings LogSettings; + + /// The epoch created for the + internal LightEpoch epoch; + + /// The logger to use, either from or created by + internal ILogger logger; + + /// The action to call on page eviction; used only for readcache + internal Action evictCallback; + + /// The action to execute on flush completion; used only for + internal Action flushCallback; + + internal AllocatorSettings(LogSettings logSettings, LightEpoch epoch, ILogger logger) + { + this.LogSettings = logSettings; + this.epoch = epoch; + this.logger = logger; + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocator.cs index b17292faba..00d5a6ee1d 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocator.cs @@ -1,433 +1,178 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -using System; -using System.Diagnostics; using System.Runtime.CompilerServices; -using System.Threading; -using Microsoft.Extensions.Logging; namespace Tsavorite.core { - internal sealed unsafe class BlittableAllocator : AllocatorBase + /// + /// Struct wrapper (for inlining) around the fixed-length Blittable allocator. + /// + public struct BlittableAllocator : IAllocator + where TStoreFunctions : IStoreFunctions { - // Circular buffer definition - private readonly byte[][] values; - private readonly long[] pointers; - private readonly long* nativePointers; + /// The wrapped class containing all data and most actual functionality. This must be the ONLY field in this structure so its size is sizeof(IntPtr). 
+ private readonly BlittableAllocatorImpl _this; - internal static int KeySize => Unsafe.SizeOf(); - internal static int ValueSize => Unsafe.SizeOf(); - internal static int RecordSize => Unsafe.SizeOf>(); - - private readonly OverflowPool overflowPagePool; - - public BlittableAllocator(LogSettings settings, ITsavoriteEqualityComparer comparer, Action evictCallback = null, - LightEpoch epoch = null, Action flushCallback = null, ILogger logger = null) - : base(settings, comparer, evictCallback, epoch, flushCallback, logger) + public BlittableAllocator(AllocatorSettings settings, TStoreFunctions storeFunctions) { - overflowPagePool = new OverflowPool(4, p => { }); - - if (BufferSize > 0) - { - values = new byte[BufferSize][]; - pointers = GC.AllocateArray(BufferSize, true); - nativePointers = (long*)Unsafe.AsPointer(ref pointers[0]); - } + // Called by TsavoriteKV via allocatorCreator; must pass a wrapperCreator to AllocatorBase + _this = new(settings, storeFunctions, @this => new BlittableAllocator(@this)); } - public override void Reset() + public BlittableAllocator(object @this) { - base.Reset(); - for (int index = 0; index < BufferSize; index++) - { - if (IsAllocated(index)) - FreePage(index); - } - Initialize(); + // Called by AllocatorBase via primary ctor wrapperCreator + _this = (BlittableAllocatorImpl)@this; } - void ReturnPage(int index) - { - Debug.Assert(index < BufferSize); - if (values[index] != null) - { - overflowPagePool.TryAdd(new PageUnit - { - pointer = pointers[index], - value = values[index] - }); - values[index] = null; - pointers[index] = 0; - Interlocked.Decrement(ref AllocatedPageCount); - } - } + /// + public readonly AllocatorBase GetBase() + where TAllocator : IAllocator + => (AllocatorBase)(object)_this; - public override void Initialize() - { - Initialize(Constants.kFirstValidAddress); - } + /// + public readonly bool IsFixedLength => true; - public override ref RecordInfo GetInfo(long physicalAddress) - { - return ref 
Unsafe.AsRef((void*)physicalAddress); - } + /// + public readonly bool HasObjectLog => false; - public override ref RecordInfo GetInfoFromBytePointer(byte* ptr) - { - return ref Unsafe.AsRef(ptr); - } - - public override ref Key GetKey(long physicalAddress) - { - return ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength()); - } - - public override ref Value GetValue(long physicalAddress) - { - return ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength() + KeySize); - } - - public override (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) - { - return (RecordSize, RecordSize); - } - - public override (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) - { - return (RecordSize, RecordSize, KeySize); - } - - public override int GetAverageRecordSize() => RecordSize; - - public override int GetFixedRecordSize() => RecordSize; - - public override (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) - { - return (RecordSize, RecordSize, KeySize); - } - - public override (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) - { - return (RecordSize, RecordSize, KeySize); - } - - public override int GetValueLength(ref Value value) => ValueSize; - - /// - /// Dispose memory allocator - /// - public override void Dispose() - { - base.Dispose(); - overflowPagePool.Dispose(); - } - - public override AddressInfo* GetKeyAddressInfo(long physicalAddress) - { - return (AddressInfo*)((byte*)physicalAddress + RecordInfo.GetLength()); - } - - public override AddressInfo* GetValueAddressInfo(long physicalAddress) - { - return (AddressInfo*)((byte*)physicalAddress + RecordInfo.GetLength() + KeySize); - } - - /// - /// Allocate memory page, pinned in memory, and in sector aligned 
form, if possible - /// - /// - internal override void AllocatePage(int index) - { - IncrementAllocatedPageCount(); - - if (overflowPagePool.TryGet(out var item)) - { - pointers[index] = item.pointer; - values[index] = item.value; - return; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetStartLogicalAddress(long page) => _this.GetStartLogicalAddress(page); - var adjustedSize = PageSize + 2 * sectorSize; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetFirstValidLogicalAddress(long page) => _this.GetFirstValidLogicalAddress(page); - byte[] tmp = GC.AllocateArray(adjustedSize, true); - long p = (long)Unsafe.AsPointer(ref tmp[0]); - pointers[index] = (p + (sectorSize - 1)) & ~((long)sectorSize - 1); - values[index] = tmp; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetPhysicalAddress(long logicalAddress) => _this.GetPhysicalAddress(logicalAddress); - internal override int OverflowPageCount => overflowPagePool.Count; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref RecordInfo GetInfo(long physicalAddress) + => ref BlittableAllocatorImpl.GetInfo(physicalAddress); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - public override long GetPhysicalAddress(long logicalAddress) - { - // Offset within page - int offset = (int)(logicalAddress & ((1L << LogPageSizeBits) - 1)); + public readonly unsafe ref RecordInfo GetInfoFromBytePointer(byte* ptr) + => ref BlittableAllocatorImpl.GetInfoFromBytePointer(ptr); - // Index of page within the circular buffer - int pageIndex = (int)((logicalAddress >> LogPageSizeBits) & (BufferSize - 1)); - return *(nativePointers + pageIndex) + offset; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Key GetKey(long physicalAddress) + => ref BlittableAllocatorImpl.GetKey(physicalAddress); - internal override bool IsAllocated(int pageIndex) - { - return 
values[pageIndex] != null; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Value GetValue(long physicalAddress) + => ref BlittableAllocatorImpl.GetValue(physicalAddress); - protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) - { - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * flushPage), - (uint)AlignedPageSizeBytes, - callback, - asyncResult, device); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Value GetAndInitializeValue(long physicalAddress, long endPhysicalAddress) => ref GetValue(physicalAddress); - protected override void WriteAsyncToDevice - (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, - PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) - { - VerifyCompatibleSectorSize(device); - var alignedPageSize = (pageSize + (sectorSize - 1)) & ~(sectorSize - 1); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) + => BlittableAllocatorImpl.GetRecordSize(physicalAddress); - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), - (uint)alignedPageSize, callback, asyncResult, - device); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + where TVariableLengthInput : IVariableLengthInput + => BlittableAllocatorImpl.GetRMWCopyDestinationRecordSize(ref key, ref input, ref value, ref recordInfo, varlenInput); - /// - /// Get start logical address - /// - /// - /// - public override long 
GetStartLogicalAddress(long page) - { - return page << LogPageSizeBits; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetRequiredRecordSize(long physicalAddress, int availableBytes) => GetAverageRecordSize(); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetAverageRecordSize() + => BlittableAllocatorImpl.GetAverageRecordSize(); - /// - /// Get first valid logical address - /// - /// - /// - public override long GetFirstValidLogicalAddress(long page) - { - if (page == 0) - return (page << LogPageSizeBits) + Constants.kFirstValidAddress; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetFixedRecordSize() + => BlittableAllocatorImpl.GetFixedRecordSize(); - return page << LogPageSizeBits; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + where TSessionFunctionsWrapper : IVariableLengthInput + => BlittableAllocatorImpl.GetRMWInitialRecordSize(ref key, ref input, sessionFunctions); - internal override void ClearPage(long page, int offset) - { - if (offset == 0) - Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); - else - { - // Adjust array offset for cache alignment - offset += (int)(pointers[page % BufferSize] - (long)Unsafe.AsPointer(ref values[page % BufferSize][0])); - Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); - } - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) + => BlittableAllocatorImpl.GetRecordSize(ref key, ref value); - internal override void FreePage(long page) - { - ClearPage(page, 0); - if (EmptyPageCount > 0) - ReturnPage((int)(page % BufferSize)); - } + /// + 
[MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetValueLength(ref Value value) + => BlittableAllocatorImpl.GetValueLength(ref value); - /// - /// Delete in-memory portion of the log - /// - internal override void DeleteFromMemory() - { - for (int i = 0; i < values.Length; i++) - { - values[i] = null; - } - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) + => BlittableAllocatorImpl.RetrievedFullRecord(record, ref ctx); - protected override void ReadAsync( - ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, - DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) - { - device.ReadAsync(alignedSourceAddress, (IntPtr)pointers[destinationPageIndex], - aligned_read_length, callback, asyncResult); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void AllocatePage(int pageIndex) => _this.AllocatePage(pageIndex); - /// - /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read - /// the record efficiently into memory. 
- /// - /// - /// - /// - /// - /// - protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) - { - throw new InvalidOperationException("AsyncReadRecordObjectsToMemory invalid for BlittableAllocator"); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly bool IsAllocated(int pageIndex) => _this.IsAllocated(pageIndex); - /// - /// Retrieve objects from object log - /// - /// - /// - /// - protected override bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) - { - ctx.key = GetKey((long)record); - ctx.value = GetValue((long)record); - return true; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe void PopulatePage(byte* src, int required_bytes, long destinationPageIndex) + => BlittableAllocatorImpl.PopulatePage(src, required_bytes, destinationPageIndex); - /// - /// Whether KVS has keys to serialize/deserialize - /// - /// - public override bool KeyHasObjects() - { - return false; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPage(long logicalAddress, long version) => _this.MarkPage(logicalAddress, version); - /// - /// Whether KVS has values to serialize/deserialize - /// - /// - public override bool ValueHasObjects() - { - return false; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPageAtomic(long logicalAddress, long version) => _this.MarkPageAtomic(logicalAddress, version); - public override IHeapContainer GetKeyContainer(ref Key key) => new StandardHeapContainer(ref key); - public override IHeapContainer GetValueContainer(ref Value value) => new StandardHeapContainer(ref value); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void ClearPage(long page, int offset = 0) => _this.ClearPage(page, offset); - public override long[] 
GetSegmentOffsets() - { - return null; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void FreePage(long pageIndex) => _this.FreePage(pageIndex); - internal override void PopulatePage(byte* src, int required_bytes, long destinationPage) - { - throw new TsavoriteException("BlittableAllocator memory pages are sector aligned - use direct copy"); - } + /// + public readonly ref Key GetContextRecordKey(ref AsyncIOContext ctx) => ref ctx.key; - /// - /// Iterator interface for pull-scanning Tsavorite log - /// - public override ITsavoriteScanIterator Scan(TsavoriteKV store, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) - => new BlittableScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, logger: logger); + /// + public readonly ref Value GetContextRecordValue(ref AsyncIOContext ctx) => ref ctx.value; - /// - /// Implementation for push-scanning Tsavorite log, called from LogAccessor - /// - internal override bool Scan(TsavoriteKV store, long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) - { - using BlittableScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, false, epoch, logger: logger); - return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetKeyContainer(ref Key key) => new StandardHeapContainer(ref key); - /// - /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor - /// - internal override bool ScanCursor(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) - { - using BlittableScanIterator iter = new(store, this, cursor, endAddress, ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); - 
return ScanLookup>(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetValueContainer(ref Value value) => new StandardHeapContainer(ref value); - /// - /// Implementation for push-iterating key versions, called from LogAccessor - /// - internal override bool IterateKeyVersions(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions) - { - using BlittableScanIterator iter = new(store, this, store.comparer, beginAddress, epoch, logger: logger); - return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long[] GetSegmentOffsets() + => BlittableAllocatorImpl.GetSegmentOffsets(); - /// - internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) - { - using var iter = new BlittableScanIterator(store: null, this, beginAddress, endAddress, ScanBufferingMode.NoBuffering, false, epoch, true, logger: logger); - observer?.OnNext(iter); - } + /// + public readonly int OverflowPageCount => _this.OverflowPageCount; - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - internal void AsyncReadPagesFromDeviceToFrame( - long readPageStart, - int numPages, - long untilAddress, - DeviceIOCompletionCallback callback, - TContext context, - BlittableFrame frame, - out CountdownEvent completed, - long devicePageOffset = 0, - IDevice device = null, - IDevice objectLogDevice = null, - CancellationTokenSource cts = null) - { - var usedDevice = device; - - if (device == null) - { - usedDevice = this.device; - } - - completed = new CountdownEvent(numPages); - for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) - { - int pageIndex = (int)(readPage % frame.frameSize); - if 
(frame.frame[pageIndex] == null) - { - frame.Allocate(pageIndex); - } - else - { - frame.Clear(pageIndex); - } - var asyncResult = new PageAsyncReadResult() - { - page = readPage, - context = context, - handle = completed, - frame = frame, - cts = cts - }; - - ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); - - uint readLength = (uint)AlignedPageSizeBytes; - long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); - - if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) - { - readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); - readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); - } - - if (device != null) - offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); - - usedDevice.ReadAsync(offsetInFile, (IntPtr)frame.pointers[pageIndex], readLength, callback, asyncResult); - } - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void SerializeKey(ref Key key, long physicalAddress) + => BlittableAllocatorImpl.SerializeKey(ref key, physicalAddress); } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocatorImpl.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocatorImpl.cs new file mode 100644 index 0000000000..f61bbe8e20 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableAllocatorImpl.cs @@ -0,0 +1,348 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; + +namespace Tsavorite.core +{ + internal sealed unsafe class BlittableAllocatorImpl : AllocatorBase> + where TStoreFunctions : IStoreFunctions + { + // Circular buffer definition + private readonly byte[][] values; + private readonly long[] pointers; + private readonly long* nativePointers; + + private static int KeySize => Unsafe.SizeOf(); + private static int ValueSize => Unsafe.SizeOf(); + internal static int RecordSize => Unsafe.SizeOf>(); + + private readonly OverflowPool overflowPagePool; + + public BlittableAllocatorImpl(AllocatorSettings settings, TStoreFunctions storeFunctions, Func> wrapperCreator) + : base(settings.LogSettings, storeFunctions, wrapperCreator, settings.evictCallback, settings.epoch, settings.flushCallback, settings.logger) + { + if (!Utility.IsBlittable() || !Utility.IsBlittable()) + throw new TsavoriteException($"BlittableAllocator requires blittlable Key ({typeof(Key)}) and Value ({typeof(Value)})"); + + overflowPagePool = new OverflowPool(4, p => { }); + + if (BufferSize > 0) + { + values = new byte[BufferSize][]; + pointers = GC.AllocateArray(BufferSize, true); + nativePointers = (long*)Unsafe.AsPointer(ref pointers[0]); + } + } + + public override void Reset() + { + base.Reset(); + for (int index = 0; index < BufferSize; index++) + { + if (IsAllocated(index)) + FreePage(index); + } + Initialize(); + } + + void ReturnPage(int index) + { + Debug.Assert(index < BufferSize); + if (values[index] != null) + { + _ = overflowPagePool.TryAdd(new PageUnit + { + pointer = pointers[index], + value = values[index] + }); + values[index] = null; + pointers[index] = 0; + _ = Interlocked.Decrement(ref AllocatedPageCount); + } + } + + public override void Initialize() => Initialize(Constants.kFirstValidAddress); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref RecordInfo GetInfo(long physicalAddress) => ref 
Unsafe.AsRef((void*)physicalAddress); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref RecordInfo GetInfoFromBytePointer(byte* ptr) => ref Unsafe.AsRef(ptr); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref Key GetKey(long physicalAddress) => ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength()); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref Value GetValue(long physicalAddress) => ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength() + KeySize); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) => (RecordSize, RecordSize); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + => (RecordSize, RecordSize, KeySize); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + => (RecordSize, RecordSize, KeySize); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int GetRequiredRecordSize(long physicalAddress, int availableBytes) => GetAverageRecordSize(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int GetAverageRecordSize() => RecordSize; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static int GetFixedRecordSize() => RecordSize; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) => (RecordSize, RecordSize, KeySize); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int GetValueLength(ref Value value) => ValueSize; 
+ + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void SerializeKey(ref Key src, long physicalAddress) => GetKey(physicalAddress) = src; + + /// + /// Dispose memory allocator + /// + public override void Dispose() + { + base.Dispose(); + overflowPagePool.Dispose(); + } + + /// + /// Allocate memory page, pinned in memory, and in sector aligned form, if possible + /// + /// + internal void AllocatePage(int index) + { + IncrementAllocatedPageCount(); + + if (overflowPagePool.TryGet(out var item)) + { + pointers[index] = item.pointer; + values[index] = item.value; + return; + } + + var adjustedSize = PageSize + 2 * sectorSize; + + byte[] tmp = GC.AllocateArray(adjustedSize, true); + long p = (long)Unsafe.AsPointer(ref tmp[0]); + pointers[index] = (p + (sectorSize - 1)) & ~((long)sectorSize - 1); + values[index] = tmp; + } + + internal int OverflowPageCount => overflowPagePool.Count; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public long GetPhysicalAddress(long logicalAddress) + { + // Offset within page + var offset = (int)(logicalAddress & ((1L << LogPageSizeBits) - 1)); + + // Index of page within the circular buffer + var pageIndex = (int)((logicalAddress >> LogPageSizeBits) & (BufferSize - 1)); + return *(nativePointers + pageIndex) + offset; + } + + internal bool IsAllocated(int pageIndex) => values[pageIndex] != null; + + protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) + { + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * flushPage), + (uint)AlignedPageSizeBytes, + callback, + asyncResult, device); + } + + protected override void WriteAsyncToDevice + (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, + PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) + { + VerifyCompatibleSectorSize(device); + var 
alignedPageSize = (pageSize + (sectorSize - 1)) & ~(sectorSize - 1); + + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), + (uint)alignedPageSize, callback, asyncResult, + device); + } + + /// + /// Get start logical address + /// + public long GetStartLogicalAddress(long page) => page << LogPageSizeBits; + + /// + /// Get first valid logical address + /// + public long GetFirstValidLogicalAddress(long page) + { + if (page == 0) + return (page << LogPageSizeBits) + Constants.kFirstValidAddress; + return page << LogPageSizeBits; + } + + internal void ClearPage(long page, int offset) + { + if (offset == 0) + Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); + else + { + // Adjust array offset for cache alignment + offset += (int)(pointers[page % BufferSize] - (long)Unsafe.AsPointer(ref values[page % BufferSize][0])); + Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); + } + } + + internal void FreePage(long page) + { + ClearPage(page, 0); + if (EmptyPageCount > 0) + ReturnPage((int)(page % BufferSize)); + } + + /// + /// Delete in-memory portion of the log + /// + internal override void DeleteFromMemory() + { + for (int i = 0; i < values.Length; i++) + values[i] = null; + } + + protected override void ReadAsync(ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, + DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) + => device.ReadAsync(alignedSourceAddress, (IntPtr)pointers[destinationPageIndex], aligned_read_length, callback, asyncResult); + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. 
+ /// + /// + /// + /// + /// + /// + protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) + => throw new InvalidOperationException("AsyncReadRecordObjectsToMemory invalid for BlittableAllocator"); + + internal static bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) + { + ctx.key = GetKey((long)record); + ctx.value = GetValue((long)record); + return true; + } + + internal static long[] GetSegmentOffsets() => null; + + internal static void PopulatePage(byte* src, int required_bytes, long destinationPage) + => throw new TsavoriteException("BlittableAllocator memory pages are sector aligned - use direct copy"); + + /// + /// Iterator interface for pull-scanning Tsavorite log + /// + public override ITsavoriteScanIterator Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) + => new BlittableScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, logger: logger); + + /// + /// Implementation for push-scanning Tsavorite log, called from LogAccessor + /// + internal override bool Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) + { + using BlittableScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, false, epoch, logger: logger); + return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); + } + + /// + /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor + /// + internal override bool ScanCursor(TsavoriteKV> store, + ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) + { + using BlittableScanIterator iter = new(store, this, cursor, endAddress, 
ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); + return ScanLookup>(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); + } + + /// + /// Implementation for push-iterating key versions, called from LogAccessor + /// + internal override bool IterateKeyVersions(TsavoriteKV> store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions) + { + using BlittableScanIterator iter = new(store, this, beginAddress, epoch, logger: logger); + return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); + } + + /// + internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) + { + using var iter = new BlittableScanIterator(store: null, this, beginAddress, endAddress, ScanBufferingMode.NoBuffering, false, epoch, true, logger: logger); + observer?.OnNext(iter); + } + + /// + /// Read pages from specified device + /// + internal void AsyncReadPagesFromDeviceToFrame( + long readPageStart, + int numPages, + long untilAddress, + DeviceIOCompletionCallback callback, + TContext context, + BlittableFrame frame, + out CountdownEvent completed, + long devicePageOffset = 0, + IDevice device = null, + IDevice objectLogDevice = null, + CancellationTokenSource cts = null) + { + var usedDevice = device ?? 
this.device; + + completed = new CountdownEvent(numPages); + for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) + { + int pageIndex = (int)(readPage % frame.frameSize); + if (frame.frame[pageIndex] == null) + frame.Allocate(pageIndex); + else + frame.Clear(pageIndex); + + var asyncResult = new PageAsyncReadResult() + { + page = readPage, + context = context, + handle = completed, + frame = frame, + cts = cts + }; + + ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); + + uint readLength = (uint)AlignedPageSizeBytes; + long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); + + if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) + { + readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); + readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); + } + + if (device != null) + offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); + + usedDevice.ReadAsync(offsetInFile, (IntPtr)frame.pointers[pageIndex], readLength, callback, asyncResult); + } + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs index 746447e58b..bcd4264a6d 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/BlittableScanIterator.cs @@ -11,11 +11,11 @@ namespace Tsavorite.core /// /// Scan iterator for hybrid log /// - public sealed class BlittableScanIterator : ScanIteratorBase, ITsavoriteScanIterator, IPushScanIterator + public sealed class BlittableScanIterator : ScanIteratorBase, ITsavoriteScanIterator, IPushScanIterator + where TStoreFunctions : IStoreFunctions { - private readonly TsavoriteKV store; - private readonly BlittableAllocator hlog; - private readonly 
ITsavoriteEqualityComparer comparer; + private readonly TsavoriteKV> store; + private readonly BlittableAllocatorImpl hlog; private readonly BlittableFrame frame; private readonly bool forceInMemory; @@ -27,15 +27,16 @@ public sealed class BlittableScanIterator : ScanIteratorBase, ITsavo /// Constructor for use with head-to-tail scan /// /// - /// + /// The fully derived log implementation /// /// /// + /// /// Epoch to use for protection; may be null if is true. /// Provided address range is known by caller to be in memory, even if less than HeadAddress /// - internal BlittableScanIterator(TsavoriteKV store, BlittableAllocator hlog, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, - bool includeSealedRecords, LightEpoch epoch, bool forceInMemory = false, ILogger logger = null) + internal BlittableScanIterator(TsavoriteKV> store, BlittableAllocatorImpl hlog, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords, LightEpoch epoch, bool forceInMemory = false, ILogger logger = null) : base(beginAddress == 0 ? hlog.GetFirstValidLogicalAddress(0) : beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; @@ -48,12 +49,12 @@ internal BlittableScanIterator(TsavoriteKV store, BlittableAllocator /// /// Constructor for use with tail-to-head push iteration of the passed key's record versions /// - internal BlittableScanIterator(TsavoriteKV store, BlittableAllocator hlog, ITsavoriteEqualityComparer comparer, long beginAddress, LightEpoch epoch, ILogger logger = null) + internal BlittableScanIterator(TsavoriteKV> store, BlittableAllocatorImpl hlog, + long beginAddress, LightEpoch epoch, ILogger logger = null) : base(beginAddress == 0 ? 
hlog.GetFirstValidLogicalAddress(0) : beginAddress, hlog.GetTailAddress(), ScanBufferingMode.SinglePageBuffering, false, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; this.hlog = hlog; - this.comparer = comparer; forceInMemory = false; if (frameSize > 0) frame = new BlittableFrame(frameSize, hlog.PageSize, hlog.GetDeviceSectorSize()); @@ -62,27 +63,21 @@ internal BlittableScanIterator(TsavoriteKV store, BlittableAllocator /// /// Get a reference to the current key /// - public ref Key GetKey() => ref framePhysicalAddress != 0 ? ref hlog.GetKey(framePhysicalAddress) : ref currentKey; + public ref Key GetKey() => ref framePhysicalAddress != 0 ? ref hlog._wrapper.GetKey(framePhysicalAddress) : ref currentKey; /// /// Get a reference to the current value /// - public ref Value GetValue() => ref framePhysicalAddress != 0 ? ref hlog.GetValue(framePhysicalAddress) : ref currentValue; + public ref Value GetValue() => ref framePhysicalAddress != 0 ? ref hlog._wrapper.GetValue(framePhysicalAddress) : ref currentValue; /// public bool SnapCursorToLogicalAddress(ref long cursor) { Debug.Assert(currentAddress == -1, "SnapCursorToLogicalAddress must be called before GetNext()"); - beginAddress = nextAddress = hlog.SnapToFixedLengthLogicalAddressBoundary(ref cursor, BlittableAllocator.RecordSize); + beginAddress = nextAddress = hlog.SnapToFixedLengthLogicalAddressBoundary(ref cursor, BlittableAllocatorImpl.RecordSize); return true; } - ref RecordInfo IPushScanIterator.GetLockableInfo() - { - Debug.Assert(framePhysicalAddress == 0, "GetLockableInfo should be in memory (i.e. 
should not have a frame)"); - return ref hlog.GetInfo(hlog.GetPhysicalAddress(currentAddress)); - } - /// /// Get next record in iterator /// @@ -118,7 +113,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) BufferAndLoad(currentAddress, currentPage, currentPage % frameSize, headAddress, stopAddress); long physicalAddress = GetPhysicalAddress(currentAddress, headAddress, currentPage, offset); - var recordSize = hlog.GetRecordSize(physicalAddress).Item2; + var recordSize = hlog._wrapper.GetRecordSize(physicalAddress).Item2; // If record does not fit on page, skip to the next page. if ((currentAddress & hlog.PageSizeMask) + recordSize > hlog.PageSize) @@ -130,7 +125,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) nextAddress = currentAddress + recordSize; - recordInfo = hlog.GetInfo(physicalAddress); + recordInfo = hlog._wrapper.GetInfo(physicalAddress); bool skipOnScan = includeSealedRecords ? recordInfo.Invalid : recordInfo.SkipOnScan; if (skipOnScan || recordInfo.IsNull()) { @@ -138,19 +133,18 @@ public unsafe bool GetNext(out RecordInfo recordInfo) continue; } - OperationStackContext stackCtx = default; + OperationStackContext> stackCtx = default; try { - // Lock to ensure no value tearing while copying to temp storage. - // We cannot use GetKey() and GetLockableInfo() because they have not yet been set. + // Lock to ensure no value tearing while copying to temp storage. We cannot use GetKey() because it has not yet been set. 
if (currentAddress >= headAddress && store is not null) - store.LockForScan(ref stackCtx, ref hlog.GetKey(physicalAddress), ref ((IPushScanIterator)this).GetLockableInfo()); - CopyDataMembers(physicalAddress); + store.LockForScan(ref stackCtx, ref hlog._wrapper.GetKey(physicalAddress)); + _ = CopyDataMembers(physicalAddress); } finally { if (stackCtx.recSrc.HasLock) - store.UnlockForScan(ref stackCtx, ref hlog.GetKey(physicalAddress), ref ((IPushScanIterator)this).GetLockableInfo()); + store.UnlockForScan(ref stackCtx); } // Success @@ -186,11 +180,11 @@ bool IPushScanIterator.BeginGetPrevInMemory(ref Key key, out RecordInfo rec long physicalAddress = GetPhysicalAddress(currentAddress, headAddress, currentPage, offset); - recordInfo = hlog.GetInfo(physicalAddress); + recordInfo = hlog._wrapper.GetInfo(physicalAddress); nextAddress = recordInfo.PreviousAddress; // Do not SkipOnScan here; we Seal previous versions. - if (recordInfo.IsNull() || !comparer.Equals(ref hlog.GetKey(physicalAddress), ref key)) + if (recordInfo.IsNull() || !hlog._storeFunctions.KeysEqual(ref hlog._wrapper.GetKey(physicalAddress), ref key)) { epoch?.Suspend(); continue; @@ -228,8 +222,8 @@ private bool CopyDataMembers(long physicalAddress) if (framePhysicalAddress == 0) { // Copy the values from the log to data members so we have no ref into the log after the epoch.Suspend(). 
- currentKey = hlog.GetKey(physicalAddress); - currentValue = hlog.GetValue(physicalAddress); + currentKey = hlog._wrapper.GetKey(physicalAddress); + currentValue = hlog._wrapper.GetValue(physicalAddress); } return true; } @@ -260,7 +254,8 @@ public override void Dispose() frame?.Dispose(); } - internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) + internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, + long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) => hlog.AsyncReadPagesFromDeviceToFrame(readPageStart, numPages, untilAddress, AsyncReadPagesCallback, context, frame, out completed, devicePageOffset, device, objectLogDevice); private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object context) @@ -275,7 +270,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (result.freeBuffer1 != null) { - hlog.PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); + BlittableAllocatorImpl.PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); result.freeBuffer1.Return(); result.freeBuffer1 = null; } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocator.cs index 0b3a6850c0..b19e86717f 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocator.cs @@ -1,1254 +1,165 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
-using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.IO; using System.Runtime.CompilerServices; -using System.Runtime.InteropServices; -using System.Threading; -using Microsoft.Extensions.Logging; - -#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member namespace Tsavorite.core { - [StructLayout(LayoutKind.Sequential, Pack = 1)] - public struct Record + /// + /// Struct wrapper (for inlining) around the fixed-length Blittable allocator. + /// + public struct GenericAllocator : IAllocator + where TStoreFunctions : IStoreFunctions { - public RecordInfo info; - public Key key; - public Value value; - - public override string ToString() - { - var keyString = key?.ToString() ?? "null"; - if (keyString.Length > 20) - keyString = keyString.Substring(0, 20) + "..."; - var valueString = value?.ToString() ?? "null"; ; - if (valueString.Length > 20) - valueString = valueString.Substring(0, 20) + "..."; - return $"{keyString} | {valueString} | {info}"; - } - } - - internal sealed unsafe class GenericAllocator : AllocatorBase - { - // Circular buffer definition - internal Record[][] values; - - // Object log related variables - private readonly IDevice objectLogDevice; - // Size of object chunks being written to storage - private readonly int ObjectBlockSize = 100 * (1 << 20); - // Tail offsets per segment, in object log - public readonly long[] segmentOffsets; - private readonly SerializerSettings SerializerSettings; - - // Record sizes. 
We do not support variable-length keys in GenericAllocator - internal static int KeySize => Unsafe.SizeOf(); - internal static int ValueSize => Unsafe.SizeOf(); - internal static int RecordSize => Unsafe.SizeOf>(); - - private readonly OverflowPool[]> overflowPagePool; - - public GenericAllocator(LogSettings settings, SerializerSettings serializerSettings, ITsavoriteEqualityComparer comparer, - Action evictCallback = null, LightEpoch epoch = null, Action flushCallback = null, ILogger logger = null) - : base(settings, comparer, evictCallback, epoch, flushCallback, logger) - { - overflowPagePool = new OverflowPool[]>(4); - - if (settings.ObjectLogDevice == null) - { - throw new TsavoriteException("LogSettings.ObjectLogDevice needs to be specified (e.g., use Devices.CreateLogDevice, AzureStorageDevice, or NullDevice)"); - } - - if (typeof(Key) == typeof(SpanByte)) - throw new TsavoriteException("SpanByte Keys cannot be mixed with object Values"); - if (typeof(Value) == typeof(SpanByte)) - throw new TsavoriteException("SpanByte Values cannot be mixed with object Keys"); - - SerializerSettings = serializerSettings ?? new SerializerSettings(); - - if ((!Utility.IsBlittable()) && (settings.LogDevice as NullDevice == null) && ((SerializerSettings == null) || (SerializerSettings.keySerializer == null))) - { -#if DEBUG - if (typeof(Key) != typeof(byte[]) && typeof(Key) != typeof(string)) - Debug.WriteLine("Key is not blittable, but no serializer specified via SerializerSettings. Using (slow) DataContractSerializer as default."); -#endif - SerializerSettings.keySerializer = ObjectSerializer.Get(); - } - - if ((!Utility.IsBlittable()) && (settings.LogDevice as NullDevice == null) && ((SerializerSettings == null) || (SerializerSettings.valueSerializer == null))) - { -#if DEBUG - if (typeof(Value) != typeof(byte[]) && typeof(Value) != typeof(string)) - Debug.WriteLine("Value is not blittable, but no serializer specified via SerializerSettings. 
Using (slow) DataContractSerializer as default."); -#endif - SerializerSettings.valueSerializer = ObjectSerializer.Get(); - } - - values = new Record[BufferSize][]; - segmentOffsets = new long[SegmentBufferSize]; - - objectLogDevice = settings.ObjectLogDevice; - - if ((settings.LogDevice as NullDevice == null) && (KeyHasObjects() || ValueHasObjects())) - { - if (objectLogDevice == null) - throw new TsavoriteException("Objects in key/value, but object log not provided during creation of Tsavorite instance"); - if (objectLogDevice.SegmentSize != -1) - throw new TsavoriteException("Object log device should not have fixed segment size. Set preallocateFile to false when calling CreateLogDevice for object log"); - } - } - - internal override int OverflowPageCount => overflowPagePool.Count; - - public override void Reset() - { - base.Reset(); - objectLogDevice.Reset(); - for (int index = 0; index < BufferSize; index++) - { - if (IsAllocated(index)) - FreePage(index); - } - - Array.Clear(segmentOffsets, 0, segmentOffsets.Length); - Initialize(); - } - - void ReturnPage(int index) - { - Debug.Assert(index < BufferSize); - if (values[index] != default) - { - overflowPagePool.TryAdd(values[index]); - values[index] = default; - Interlocked.Decrement(ref AllocatedPageCount); - } - } - - public override void Initialize() - { - Initialize(RecordSize); - } - - /// - /// Get start logical address - /// - /// - /// - public override long GetStartLogicalAddress(long page) - { - return page << LogPageSizeBits; - } - - /// - /// Get first valid logical address - /// - /// - /// - public override long GetFirstValidLogicalAddress(long page) - { - if (page == 0) - return (page << LogPageSizeBits) + RecordSize; - - return page << LogPageSizeBits; - } - - public override ref RecordInfo GetInfo(long physicalAddress) - { - // Offset within page - int offset = (int)(physicalAddress & PageSizeMask); - - // Index of page within the circular buffer - int pageIndex = (int)((physicalAddress >> 
LogPageSizeBits) & BufferSizeMask); - - return ref values[pageIndex][offset / RecordSize].info; - } - - public override ref RecordInfo GetInfoFromBytePointer(byte* ptr) - { - return ref Unsafe.AsRef>(ptr).info; - } - - public override ref Key GetKey(long physicalAddress) - { - // Offset within page - int offset = (int)(physicalAddress & PageSizeMask); - - // Index of page within the circular buffer - int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); - - return ref values[pageIndex][offset / RecordSize].key; - } - - public override ref Value GetValue(long physicalAddress) - { - // Offset within page - int offset = (int)(physicalAddress & PageSizeMask); + /// The wrapped class containing all data and most actual functionality. This must be the ONLY field in this structure so its size is sizeof(IntPtr). + private readonly GenericAllocatorImpl _this; - // Index of page within the circular buffer - int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); - - return ref values[pageIndex][offset / RecordSize].value; - } - - public override (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) - { - return (RecordSize, RecordSize); - } - - public override int GetValueLength(ref Value value) => ValueSize; - - public override (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) - { - return (RecordSize, RecordSize, KeySize); - } - - public override int GetAverageRecordSize() + public GenericAllocator(AllocatorSettings settings, TStoreFunctions storeFunctions) { - return RecordSize; + // Called by TsavoriteKV via allocatorCreator; must pass a wrapperCreator to AllocatorBase + _this = new(settings, storeFunctions, @this => new GenericAllocator(@this)); } - public override int GetFixedRecordSize() => RecordSize; - - public override (int actualSize, int allocatedSize, int 
keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + public GenericAllocator(object @this) { - return (RecordSize, RecordSize, KeySize); + // Called by AllocatorBase via primary ctor wrapperCreator + _this = (GenericAllocatorImpl)@this; } - public override (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) - { - return (RecordSize, RecordSize, KeySize); - } + /// + public readonly AllocatorBase GetBase() + where TAllocator : IAllocator + => (AllocatorBase)(object)_this; - internal override bool TryComplete() - { - var b1 = objectLogDevice.TryComplete(); - var b2 = base.TryComplete(); - return b1 || b2; - } + /// + public readonly bool IsFixedLength => true; - /// - /// Dispose memory allocator - /// - public override void Dispose() - { - if (values != null) - { - for (int i = 0; i < values.Length; i++) - { - values[i] = null; - } - values = null; - } - overflowPagePool.Dispose(); - base.Dispose(); - } - - /// - /// Delete in-memory portion of the log - /// - internal override void DeleteFromMemory() - { - for (int i = 0; i < values.Length; i++) - { - values[i] = null; - } - values = null; - } - - public override AddressInfo* GetKeyAddressInfo(long physicalAddress) - { - return (AddressInfo*)Unsafe.AsPointer(ref Unsafe.AsRef>((byte*)physicalAddress).key); - } - - public override AddressInfo* GetValueAddressInfo(long physicalAddress) - { - return (AddressInfo*)Unsafe.AsPointer(ref Unsafe.AsRef>((byte*)physicalAddress).value); - } - - /// - /// Allocate memory page, pinned in memory, and in sector aligned form, if possible - /// - /// - internal override void AllocatePage(int index) - { - values[index] = AllocatePage(); - } - - internal Record[] AllocatePage() - { - IncrementAllocatedPageCount(); - - if (overflowPagePool.TryGet(out var item)) - return item; - - return new Record[(PageSize + RecordSize - 1) / RecordSize]; - } + /// + public readonly bool HasObjectLog => 
true; + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static long SnapToLogicalAddressBoundary(ref long logicalAddress) - { - return logicalAddress = ((logicalAddress - Constants.kFirstValidAddress) / RecordSize) * RecordSize + Constants.kFirstValidAddress; - } - - public override long GetPhysicalAddress(long logicalAddress) - { - return logicalAddress; - } - - internal override bool IsAllocated(int pageIndex) - { - return values[pageIndex] != null; - } - - protected override void TruncateUntilAddress(long toAddress) - { - base.TruncateUntilAddress(toAddress); - objectLogDevice.TruncateUntilSegment((int)(toAddress >> LogSegmentSizeBits)); - } - - protected override void TruncateUntilAddressBlocking(long toAddress) - { - base.TruncateUntilAddressBlocking(toAddress); - objectLogDevice.TruncateUntilSegment((int)(toAddress >> LogSegmentSizeBits)); - } - - protected override void RemoveSegment(int segment) - { - base.RemoveSegment(segment); - objectLogDevice.RemoveSegment(segment); - } - - protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) - { - WriteAsync(flushPage, - (ulong)(AlignedPageSizeBytes * flushPage), - (uint)PageSize, - callback, - asyncResult, device, objectLogDevice); - } - - protected override void WriteAsyncToDevice - (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, - PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) - { - VerifyCompatibleSectorSize(device); - VerifyCompatibleSectorSize(objectLogDevice); - - bool epochTaken = false; - if (!epoch.ThisInstanceProtected()) - { - epochTaken = true; - epoch.Resume(); - } - try - { - if (HeadAddress >= (flushPage << LogPageSizeBits) + pageSize) - { - // Requested page is unavailable in memory, ignore - callback(0, 0, asyncResult); - } - else - { - // We are writing to separate device, so use fresh segment 
offsets - WriteAsync(flushPage, - (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), - (uint)pageSize, callback, asyncResult, - device, objectLogDevice, flushPage, localSegmentOffsets, fuzzyStartLogicalAddress); - } - } - finally - { - if (epochTaken) - epoch.Suspend(); - } - } - - internal override void ClearPage(long page, int offset) - { - Array.Clear(values[page % BufferSize], offset / RecordSize, values[page % BufferSize].Length - offset / RecordSize); - } - - internal override void FreePage(long page) - { - ClearPage(page, 0); - - // Close segments - var thisCloseSegment = page >> (LogSegmentSizeBits - LogPageSizeBits); - var nextCloseSegment = (page + 1) >> (LogSegmentSizeBits - LogPageSizeBits); - - if (thisCloseSegment != nextCloseSegment) - { - // We are clearing the last page in current segment - segmentOffsets[thisCloseSegment % SegmentBufferSize] = 0; - } - - // If all pages are being used (i.e. EmptyPageCount == 0), nothing to re-utilize by adding - // to overflow pool. 
- if (EmptyPageCount > 0) - ReturnPage((int)(page % BufferSize)); - } - - private void WriteAsync(long flushPage, ulong alignedDestinationAddress, uint numBytesToWrite, - DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult, - IDevice device, IDevice objlogDevice, long intendedDestinationPage = -1, long[] localSegmentOffsets = null, long fuzzyStartLogicalAddress = long.MaxValue) - { - // Short circuit if we are using a null device - if (device as NullDevice != null) - { - device.WriteAsync(IntPtr.Zero, 0, 0, numBytesToWrite, callback, asyncResult); - return; - } - - int start = 0, aligned_start = 0, end = (int)numBytesToWrite; - if (asyncResult.partial) - { - // We're writing only a subset of the page - start = (int)(asyncResult.fromAddress - (asyncResult.page << LogPageSizeBits)); - aligned_start = (start / sectorSize) * sectorSize; - end = (int)(asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits)); - } - - // Check if user did not override with special segment offsets - if (localSegmentOffsets == null) - localSegmentOffsets = segmentOffsets; - - // This is the in-memory buffer page to be written - var src = values[flushPage % BufferSize]; - - // We create a shadow copy of the page if we are under epoch protection. - // This copy ensures that object references are kept valid even if the original page is reclaimed. - // We suspend epoch during the actual flush as that can take a long time. - bool epochProtected = false; - if (epoch.ThisInstanceProtected()) - { - epochProtected = true; - src = new Record[values[flushPage % BufferSize].Length]; - Array.Copy(values[flushPage % BufferSize], src, values[flushPage % BufferSize].Length); - epoch.Suspend(); - } - try - { - // Temporary storage to hold the image "template" we'll write to disk: It will have RecordInfos and object pointers that will be overwritten by addresses - // when writing to the main log (both object pointers and addresses are 8 bytes). 
- var buffer = bufferPool.Get((int)numBytesToWrite); - - if (aligned_start < start && (KeyHasObjects() || ValueHasObjects())) - { - // Do not read back the invalid header of page 0 - if ((flushPage > 0) || (start > GetFirstValidLogicalAddress(flushPage))) - { - // Get the overlapping HLOG from disk as we wrote it with object pointers previously. This avoids object reserialization - PageAsyncReadResult result = new() - { - handle = new CountdownEvent(1) - }; - device.ReadAsync(alignedDestinationAddress + (ulong)aligned_start, (IntPtr)buffer.aligned_pointer + aligned_start, - (uint)sectorSize, AsyncReadPageCallback, result); - result.handle.Wait(); - } - fixed (RecordInfo* pin = &src[0].info) - { - // Write all the RecordInfos on one operation. This also includes object pointers, but for valid records we will overwrite those below. - Debug.Assert(buffer.aligned_pointer + numBytesToWrite <= (byte*)Unsafe.AsPointer(ref buffer.buffer[0]) + buffer.buffer.Length); - - Buffer.MemoryCopy((void*)((long)Unsafe.AsPointer(ref src[0]) + start), buffer.aligned_pointer + start, - numBytesToWrite - start, numBytesToWrite - start); - } - } - else - { - fixed (RecordInfo* pin = &src[0].info) - { - // Write all the RecordInfos on one operation. This also includes object pointers, but for valid records we will overwrite those below. - Debug.Assert(buffer.aligned_pointer + numBytesToWrite <= (byte*)Unsafe.AsPointer(ref buffer.buffer[0]) + buffer.buffer.Length); - - Buffer.MemoryCopy((void*)((long)Unsafe.AsPointer(ref src[0]) + aligned_start), buffer.aligned_pointer + aligned_start, - numBytesToWrite - aligned_start, numBytesToWrite - aligned_start); - } - } - - // In the main log, we write addresses to pages in the object log. This array saves the addresses of the key and/or value fields in 'buffer', - // which again is the image we're building from the 'values' "page" for this write. 
The "addresses into 'buffer'" are cast below to AddressInfo - // structures and stored in the sequence we'll write them: alternating series of key then value if both are object types, else keys or values only. - List addr = new List(); - asyncResult.freeBuffer1 = buffer; - - // Object keys and values are serialized into this MemoryStream. - MemoryStream ms = new(); - IObjectSerializer keySerializer = null; - IObjectSerializer valueSerializer = null; - - if (KeyHasObjects()) - { - keySerializer = SerializerSettings.keySerializer(); - keySerializer.BeginSerialize(ms); - } - if (ValueHasObjects()) - { - valueSerializer = SerializerSettings.valueSerializer(); - valueSerializer.BeginSerialize(ms); - } - - // Track the size to be written to the object log. - long endPosition = 0; - - for (int i = start / RecordSize; i < end / RecordSize; i++) - { - byte* recordPtr = buffer.aligned_pointer + i * RecordSize; - - // Retrieve reference to record struct - ref var record = ref Unsafe.AsRef>(recordPtr); - AddressInfo* key_address = null, value_address = null; - - // Zero out object reference addresses (AddressInfo) in the planned disk image - if (KeyHasObjects()) - { - key_address = GetKeyAddressInfo((long)recordPtr); - *key_address = default; - } - if (ValueHasObjects()) - { - value_address = GetValueAddressInfo((long)recordPtr); - *value_address = default; - } - - // Now fill in AddressInfo data for the valid records - if (!record.info.Invalid) - { - // Calculate the logical address of the 'values' page currently being written. - var address = (flushPage << LogPageSizeBits) + i * RecordSize; - - // Do not write v+1 records (e.g. during a checkpoint) - if (address < fuzzyStartLogicalAddress || !record.info.IsInNewVersion) - { - if (KeyHasObjects()) - { - long pos = ms.Position; - keySerializer.Serialize(ref src[i].key); - - // Store the key address into the 'buffer' AddressInfo image as an offset into 'ms'. 
- key_address->Address = pos; - key_address->Size = (int)(ms.Position - pos); - addr.Add((long)key_address); - endPosition = pos + key_address->Size; - } - - if (ValueHasObjects() && !record.info.Tombstone) - { - long pos = ms.Position; - valueSerializer.Serialize(ref src[i].value); - - // Store the value address into the 'buffer' AddressInfo image as an offset into 'ms'. - value_address->Address = pos; - value_address->Size = (int)(ms.Position - pos); - addr.Add((long)value_address); - endPosition = pos + value_address->Size; - } - } - else - { - // Mark v+1 records as invalid to avoid deserializing them on recovery - record.info.SetInvalid(); - } - } - - // If this record's serialized size surpassed ObjectBlockSize or it's the last record to be written, write to the object log. - if (endPosition > ObjectBlockSize || i == (end / RecordSize) - 1) - { - var memoryStreamActualLength = ms.Position; - var memoryStreamTotalLength = (int)endPosition; - endPosition = 0; + public readonly long GetStartLogicalAddress(long page) => _this.GetStartLogicalAddress(page); - if (KeyHasObjects()) - keySerializer.EndSerialize(); - if (ValueHasObjects()) - valueSerializer.EndSerialize(); - ms.Close(); - - // Get the total serialized length rounded up to sectorSize - var _alignedLength = (memoryStreamTotalLength + (sectorSize - 1)) & ~(sectorSize - 1); - - // Reserve the current address in the object log segment offsets for this chunk's write operation. - var _objAddr = Interlocked.Add(ref localSegmentOffsets[(long)(alignedDestinationAddress >> LogSegmentSizeBits) % SegmentBufferSize], _alignedLength) - _alignedLength; - - // Allocate the object-log buffer to build the image we'll write to disk, then copy to it from the memory stream. 
- SectorAlignedMemory _objBuffer = null; - if (memoryStreamTotalLength > 0) - { - _objBuffer = bufferPool.Get(memoryStreamTotalLength); - - fixed (void* src_ = ms.GetBuffer()) - Buffer.MemoryCopy(src_, _objBuffer.aligned_pointer, memoryStreamTotalLength, memoryStreamActualLength); - } - - // Each address we calculated above is now an offset to objAddr; convert to the actual address. - foreach (var address in addr) - ((AddressInfo*)address)->Address += _objAddr; - - // If we have not written all records, prepare for the next chunk of records to be written. - if (i < (end / RecordSize) - 1) - { - // Create a new MemoryStream for the next chunk of records to be written. - ms = new MemoryStream(); - if (KeyHasObjects()) - keySerializer.BeginSerialize(ms); - if (ValueHasObjects()) - valueSerializer.BeginSerialize(ms); - - // Reset address list for the next chunk of records to be written. - addr = new List(); - - // Write this chunk of records to the object log device. - asyncResult.done = new AutoResetEvent(false); - Debug.Assert(memoryStreamTotalLength > 0); - objlogDevice.WriteAsync( - (IntPtr)_objBuffer.aligned_pointer, - (int)(alignedDestinationAddress >> LogSegmentSizeBits), - (ulong)_objAddr, (uint)_alignedLength, AsyncFlushPartialObjectLogCallback, asyncResult); - - // Wait for write to complete before resuming next write - asyncResult.done.WaitOne(); - _objBuffer.Return(); - } - else - { - // We have written all records in this 'values' "page". - if (memoryStreamTotalLength > 0) - { - // Increment the count because we need to write both page and object cache. 
- Interlocked.Increment(ref asyncResult.count); - - asyncResult.freeBuffer2 = _objBuffer; - objlogDevice.WriteAsync( - (IntPtr)_objBuffer.aligned_pointer, - (int)(alignedDestinationAddress >> LogSegmentSizeBits), - (ulong)_objAddr, (uint)_alignedLength, callback, asyncResult); - } - } - } - } - - if (asyncResult.partial) - { - // We're writing only a subset of the page, so update our count of bytes to write. - var aligned_end = (int)(asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits)); - aligned_end = (aligned_end + (sectorSize - 1)) & ~(sectorSize - 1); - numBytesToWrite = (uint)(aligned_end - aligned_start); - } - - // Round up the number of byte to write to sector alignment. - var alignedNumBytesToWrite = (uint)((numBytesToWrite + (sectorSize - 1)) & ~(sectorSize - 1)); - - // Finally write the hlog page - device.WriteAsync((IntPtr)buffer.aligned_pointer + aligned_start, alignedDestinationAddress + (ulong)aligned_start, - alignedNumBytesToWrite, callback, asyncResult); - } - finally - { - if (epochProtected) - epoch.Resume(); - } - } - - private void AsyncReadPageCallback(uint errorCode, uint numBytes, object context) - { - if (errorCode != 0) - { - logger?.LogError($"AsyncReadPageCallback error: {errorCode}"); - } - - // Set the page status to flushed - var result = (PageAsyncReadResult)context; - - result.handle.Signal(); - } - - protected override void ReadAsync( - ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, - DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) - { - asyncResult.freeBuffer1 = bufferPool.Get((int)aligned_read_length); - asyncResult.freeBuffer1.required_bytes = (int)aligned_read_length; - - if (!(KeyHasObjects() || ValueHasObjects())) - { - device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, - aligned_read_length, callback, asyncResult); - return; - } - - asyncResult.callback = callback; - - if 
(objlogDevice == null) - { - Debug.Assert(objectLogDevice != null); - objlogDevice = objectLogDevice; - } - asyncResult.objlogDevice = objlogDevice; - - device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, - aligned_read_length, AsyncReadPageWithObjectsCallback, asyncResult); - } - - - /// - /// IOCompletion callback for page flush - /// - /// - /// - /// - private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint numBytes, object context) - { - if (errorCode != 0) - { - logger?.LogError($"AsyncFlushPartialObjectLogCallback error: {errorCode}"); - } - - // Set the page status to flushed - PageAsyncFlushResult result = (PageAsyncFlushResult)context; - result.done.Set(); - } - - private void AsyncReadPageWithObjectsCallback(uint errorCode, uint numBytes, object context) - { - if (errorCode != 0) - { - logger?.LogError($"AsyncReadPageWithObjectsCallback error: {errorCode}"); - } - - PageAsyncReadResult result = (PageAsyncReadResult)context; - - Record[] src; - - // We are reading into a frame - if (result.frame != null) - { - var frame = (GenericFrame)result.frame; - src = frame.GetPage(result.page % frame.frameSize); - } - else - src = values[result.page % BufferSize]; - - - // Deserialize all objects until untilptr - if (result.resumePtr < result.untilPtr) - { - MemoryStream ms = new(result.freeBuffer2.buffer); - ms.Seek(result.freeBuffer2.offset, SeekOrigin.Begin); - Deserialize(result.freeBuffer1.GetValidPointer(), result.resumePtr, result.untilPtr, src, ms); - ms.Dispose(); - - result.freeBuffer2.Return(); - result.freeBuffer2 = null; - result.resumePtr = result.untilPtr; - } - - // If we have processed entire page, return - if (result.untilPtr >= result.maxPtr) - { - result.Free(); - - // Call the "real" page read callback - result.callback(errorCode, numBytes, context); - return; - } - - // We will now be able to process all records until (but not including) untilPtr - 
GetObjectInfo(result.freeBuffer1.GetValidPointer(), ref result.untilPtr, result.maxPtr, ObjectBlockSize, out long startptr, out long alignedLength); - - // Object log fragment should be aligned by construction - Debug.Assert(startptr % sectorSize == 0); - Debug.Assert(alignedLength % sectorSize == 0); - - if (alignedLength > int.MaxValue) - throw new TsavoriteException("Unable to read object page, total size greater than 2GB: " + alignedLength); - - var objBuffer = bufferPool.Get((int)alignedLength); - result.freeBuffer2 = objBuffer; - - // Request objects from objlog - result.objlogDevice.ReadAsync( - (int)((result.page - result.offset) >> (LogSegmentSizeBits - LogPageSizeBits)), - (ulong)startptr, - (IntPtr)objBuffer.aligned_pointer, (uint)alignedLength, AsyncReadPageWithObjectsCallback, result); - } - - /// - /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read - /// the record efficiently into memory. - /// - /// - /// - /// - /// - /// - protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) - { - ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); - ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); - - uint alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); - alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); - - var record = bufferPool.Get((int)alignedReadLength); - record.valid_offset = (int)(fileOffset - alignedFileOffset); - record.available_bytes = (int)(alignedReadLength - (fileOffset - alignedFileOffset)); - record.required_bytes = numBytes; - - var asyncResult = default(AsyncGetFromDiskResult>); - asyncResult.context = context; - asyncResult.context.record = result; - asyncResult.context.objBuffer = record; - 
objectLogDevice.ReadAsync( - (int)(context.logicalAddress >> LogSegmentSizeBits), - alignedFileOffset, - (IntPtr)asyncResult.context.objBuffer.aligned_pointer, - alignedReadLength, - callback, - asyncResult); - } - - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - internal void AsyncReadPagesFromDeviceToFrame( - long readPageStart, - int numPages, - long untilAddress, - DeviceIOCompletionCallback callback, - TContext context, - GenericFrame frame, - out CountdownEvent completed, - long devicePageOffset = 0, - IDevice device = null, IDevice objectLogDevice = null) - { - var usedDevice = device; - IDevice usedObjlogDevice = objectLogDevice; - - if (device == null) - { - usedDevice = this.device; - } - - completed = new CountdownEvent(numPages); - for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) - { - int pageIndex = (int)(readPage % frame.frameSize); - if (frame.GetPage(pageIndex) == null) - { - frame.Allocate(pageIndex); - } - else - { - frame.Clear(pageIndex); - } - var asyncResult = new PageAsyncReadResult() - { - page = readPage, - context = context, - handle = completed, - maxPtr = PageSize, - frame = frame, - }; - - ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); - uint readLength = (uint)AlignedPageSizeBytes; - long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); - - if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) - { - readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); - asyncResult.maxPtr = readLength; - readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); - } - - if (device != null) - offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); - - ReadAsync(offsetInFile, pageIndex, readLength, callback, asyncResult, usedDevice, usedObjlogDevice); - } - } - - - #region Page 
handlers for objects - /// - /// Deseialize part of page from stream - /// - /// - /// From pointer - /// Until pointer - /// - /// Stream - public void Deserialize(byte* raw, long ptr, long untilptr, Record[] src, Stream stream) - { - IObjectSerializer keySerializer = null; - IObjectSerializer valueSerializer = null; - - long streamStartPos = stream.Position; - long start_addr = -1; - int start_offset = -1, end_offset = -1; - if (KeyHasObjects()) - { - keySerializer = SerializerSettings.keySerializer(); - keySerializer.BeginDeserialize(stream); - } - if (ValueHasObjects()) - { - valueSerializer = SerializerSettings.valueSerializer(); - valueSerializer.BeginDeserialize(stream); - } - - while (ptr < untilptr) - { - ref Record record = ref Unsafe.AsRef>(raw + ptr); - src[ptr / RecordSize].info = record.info; - if (start_offset == -1) - start_offset = (int)(ptr / RecordSize); - - end_offset = (int)(ptr / RecordSize) + 1; - - if (!record.info.Invalid) - { - if (KeyHasObjects()) - { - var key_addr = GetKeyAddressInfo((long)raw + ptr); - if (start_addr == -1) start_addr = key_addr->Address & ~((long)sectorSize - 1); - if (stream.Position != streamStartPos + key_addr->Address - start_addr) - { - stream.Seek(streamStartPos + key_addr->Address - start_addr, SeekOrigin.Begin); - } - - keySerializer.Deserialize(out src[ptr / RecordSize].key); - } - else - { - src[ptr / RecordSize].key = record.key; - } - - if (!record.info.Tombstone) - { - if (ValueHasObjects()) - { - var value_addr = GetValueAddressInfo((long)raw + ptr); - if (start_addr == -1) start_addr = value_addr->Address & ~((long)sectorSize - 1); - if (stream.Position != streamStartPos + value_addr->Address - start_addr) - { - stream.Seek(streamStartPos + value_addr->Address - start_addr, SeekOrigin.Begin); - } - - valueSerializer.Deserialize(out src[ptr / RecordSize].value); - } - else - { - src[ptr / RecordSize].value = record.value; - } - } - } - ptr += GetRecordSize(ptr).Item2; - } - if (KeyHasObjects()) - { - 
keySerializer.EndDeserialize(); - } - if (ValueHasObjects()) - { - valueSerializer.EndDeserialize(); - } - - if (OnDeserializationObserver != null && start_offset != -1 && end_offset != -1) - { - using var iter = new MemoryPageScanIterator(src, start_offset, end_offset, -1, RecordSize); - OnDeserializationObserver.OnNext(iter); - } - } - - /// - /// Get location and range of object log addresses for specified log page - /// - /// - /// - /// - /// - /// - /// - public void GetObjectInfo(byte* raw, ref long ptr, long untilptr, int objectBlockSize, out long startptr, out long size) - { - long minObjAddress = long.MaxValue; - long maxObjAddress = long.MinValue; - bool done = false; - - while (!done && (ptr < untilptr)) - { - ref Record record = ref Unsafe.AsRef>(raw + ptr); - - if (!record.info.Invalid) - { - if (KeyHasObjects()) - { - var key_addr = GetKeyAddressInfo((long)raw + ptr); - var addr = key_addr->Address; - - if (addr < minObjAddress) minObjAddress = addr; - addr += key_addr->Size; - if (addr > maxObjAddress) maxObjAddress = addr; - - // If object pointer is greater than kObjectSize from starting object pointer - if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) - done = true; - } - - - if (ValueHasObjects() && !record.info.Tombstone) - { - var value_addr = GetValueAddressInfo((long)raw + ptr); - var addr = value_addr->Address; - - if (addr < minObjAddress) minObjAddress = addr; - addr += value_addr->Size; - if (addr > maxObjAddress) maxObjAddress = addr; - - // If object pointer is greater than kObjectSize from starting object pointer - if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) - done = true; - } - } - ptr += GetRecordSize(ptr).Item2; - } - - // Handle the case where no objects are to be written - if (minObjAddress == long.MaxValue && maxObjAddress == long.MinValue) - { - minObjAddress = 0; - maxObjAddress = 0; - } - - // Align start pointer for retrieval - minObjAddress &= 
~((long)sectorSize - 1); - - // Align max address as well - maxObjAddress = (maxObjAddress + (sectorSize - 1)) & ~((long)sectorSize - 1); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetFirstValidLogicalAddress(long page) => _this.GetFirstValidLogicalAddress(page); - startptr = minObjAddress; - size = maxObjAddress - minObjAddress; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetPhysicalAddress(long logicalAddress) => _this.GetPhysicalAddress(logicalAddress); - /// - /// Retrieve objects from object log - /// - /// - /// - /// - protected override bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) - { - if (!KeyHasObjects()) - { - ctx.key = Unsafe.AsRef>(record).key; - } - if (!ValueHasObjects()) - { - ctx.value = Unsafe.AsRef>(record).value; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref RecordInfo GetInfo(long physicalAddress) => ref _this.GetInfo(physicalAddress); - if (!(KeyHasObjects() || ValueHasObjects())) - return true; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe ref RecordInfo GetInfoFromBytePointer(byte* ptr) => ref _this.GetInfoFromBytePointer(ptr); - if (ctx.objBuffer == null) - { - // Issue IO for objects - long startAddress = -1; - long endAddress = -1; - if (KeyHasObjects()) - { - var x = GetKeyAddressInfo((long)record); - startAddress = x->Address; - endAddress = x->Address + x->Size; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Key GetKey(long physicalAddress) => ref _this.GetKey(physicalAddress); - if (ValueHasObjects() && !GetInfoFromBytePointer(record).Tombstone) - { - var x = GetValueAddressInfo((long)record); - if (startAddress == -1) - startAddress = x->Address; - endAddress = x->Address + x->Size; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Value GetValue(long physicalAddress) => ref 
_this.GetValue(physicalAddress); - // We are limited to a 2GB size per key-value - if (endAddress - startAddress > int.MaxValue) - throw new TsavoriteException("Size of key-value exceeds max of 2GB: " + (endAddress - startAddress)); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref Value GetAndInitializeValue(long physicalAddress, long endPhysicalAddress) => ref GetValue(physicalAddress); - if (startAddress < 0) - startAddress = 0; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) => _this.GetRecordSize(physicalAddress); - AsyncGetFromDisk(startAddress, (int)(endAddress - startAddress), ctx, ctx.record); - return false; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + where TVariableLengthInput : IVariableLengthInput + => _this.GetRMWCopyDestinationRecordSize(ref key, ref input, ref value, ref recordInfo, varlenInput); - // Parse the key and value objects - MemoryStream ms = new MemoryStream(ctx.objBuffer.buffer); - ms.Seek(ctx.objBuffer.offset + ctx.objBuffer.valid_offset, SeekOrigin.Begin); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetRequiredRecordSize(long physicalAddress, int availableBytes) => GetAverageRecordSize(); - if (KeyHasObjects()) - { - var keySerializer = SerializerSettings.keySerializer(); - keySerializer.BeginDeserialize(ms); - keySerializer.Deserialize(out ctx.key); - keySerializer.EndDeserialize(); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetAverageRecordSize() => _this.GetAverageRecordSize(); - if (ValueHasObjects() && !GetInfoFromBytePointer(record).Tombstone) - { - var valueSerializer = 
SerializerSettings.valueSerializer(); - valueSerializer.BeginDeserialize(ms); - valueSerializer.Deserialize(out ctx.value); - valueSerializer.EndDeserialize(); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetFixedRecordSize() => _this.GetFixedRecordSize(); - ctx.objBuffer.Return(); - return true; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + where TSessionFunctionsWrapper : IVariableLengthInput + => _this.GetRMWInitialRecordSize(ref key, ref input, sessionFunctions); - /// - /// Whether KVS has keys to serialize/deserialize - /// - /// - public override bool KeyHasObjects() - { - return SerializerSettings.keySerializer != null; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) => _this.GetRecordSize(ref key, ref value); - /// - /// Whether KVS has values to serialize/deserialize - /// - /// - public override bool ValueHasObjects() - { - return SerializerSettings.valueSerializer != null; - } - #endregion + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetValueLength(ref Value value) => _this.GetValueLength(ref value); - public override IHeapContainer GetKeyContainer(ref Key key) => new StandardHeapContainer(ref key); - public override IHeapContainer GetValueContainer(ref Value value) => new StandardHeapContainer(ref value); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) => _this.RetrievedFullRecord(record, ref ctx); - public override long[] GetSegmentOffsets() - { - return segmentOffsets; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void AllocatePage(int 
pageIndex) => _this.AllocatePage(pageIndex); - internal override void PopulatePage(byte* src, int required_bytes, long destinationPage) - { - PopulatePage(src, required_bytes, ref values[destinationPage % BufferSize]); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly bool IsAllocated(int pageIndex) => _this.IsAllocated(pageIndex); - internal void PopulatePageFrame(byte* src, int required_bytes, Record[] frame) - { - PopulatePage(src, required_bytes, ref frame); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe void PopulatePage(byte* src, int required_bytes, long destinationPageIndex) => _this.PopulatePage(src, required_bytes, destinationPageIndex); - internal void PopulatePage(byte* src, int required_bytes, ref Record[] destinationPage) - { - fixed (RecordInfo* pin = &destinationPage[0].info) - { - Debug.Assert(required_bytes <= RecordSize * destinationPage.Length); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPage(long logicalAddress, long version) => _this.MarkPage(logicalAddress, version); - Buffer.MemoryCopy(src, Unsafe.AsPointer(ref destinationPage[0]), required_bytes, required_bytes); - } - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPageAtomic(long logicalAddress, long version) => _this.MarkPageAtomic(logicalAddress, version); - /// - /// Iterator interface for scanning Tsavorite log - /// - /// - public override ITsavoriteScanIterator Scan(TsavoriteKV store, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) - => new GenericScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void ClearPage(long page, int offset = 0) => _this.ClearPage(page, offset); - /// - /// Implementation for push-scanning Tsavorite log, called from LogAccessor 
- /// - internal override bool Scan(TsavoriteKV store, long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) - { - using GenericScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, false, epoch, logger: logger); - return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void FreePage(long pageIndex) => _this.FreePage(pageIndex); - /// - /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor - /// - internal override bool ScanCursor(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) - { - using GenericScanIterator iter = new(store, this, cursor, endAddress, ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); - return ScanLookup>(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); - } + /// + public readonly ref Key GetContextRecordKey(ref AsyncIOContext ctx) => ref ctx.key; - /// - /// Implementation for push-iterating key versions, called from LogAccessor - /// - internal override bool IterateKeyVersions(TsavoriteKV store, ref Key key, long beginAddress, ref TScanFunctions scanFunctions) - { - using GenericScanIterator iter = new(store, this, store.comparer, beginAddress, epoch, logger: logger); - return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); - } + /// + public readonly ref Value GetContextRecordValue(ref AsyncIOContext ctx) => ref ctx.value; - private void ComputeScanBoundaries(long beginAddress, long endAddress, out long pageStartAddress, out int start, out int end) - { - pageStartAddress = beginAddress & ~PageSizeMask; - start = (int)(beginAddress & PageSizeMask) / RecordSize; - var count = (int)(endAddress - beginAddress) / RecordSize; - end = start + count; - } + /// + 
[MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetKeyContainer(ref Key key) => new StandardHeapContainer(ref key); - /// - public override void EvictPage(long page) - { - if (OnEvictionObserver is not null) - { - var beginAddress = page << LogPageSizeBits; - var endAddress = (page + 1) << LogPageSizeBits; - ComputeScanBoundaries(beginAddress, endAddress, out var pageStartAddress, out var start, out var end); - using var iter = new MemoryPageScanIterator(values[(int)(page % BufferSize)], start, end, pageStartAddress, RecordSize); - OnEvictionObserver?.OnNext(iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetValueContainer(ref Value value) => new StandardHeapContainer(ref value); - FreePage(page); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long[] GetSegmentOffsets() => _this.GetSegmentOffsets(); - /// - internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) - { - var page = (beginAddress >> LogPageSizeBits) % BufferSize; - ComputeScanBoundaries(beginAddress, endAddress, out var pageStartAddress, out var start, out var end); - using var iter = new MemoryPageScanIterator(values[page], start, end, pageStartAddress, RecordSize); - Debug.Assert(epoch.ThisInstanceProtected()); - try - { - epoch.Suspend(); - observer?.OnNext(iter); - } - finally - { - epoch.Resume(); - } - } + /// + public readonly int OverflowPageCount => _this.OverflowPageCount; - internal override void AsyncFlushDeltaToDevice(long startAddress, long endAddress, long prevEndAddress, long version, DeltaLog deltaLog, out SemaphoreSlim completedSemaphore, int throttleCheckpointFlushDelayMs) - { - throw new TsavoriteException("Incremental snapshots not supported with generic allocator"); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void SerializeKey(ref Key key, long physicalAddress) => 
_this.SerializeKey(ref key, physicalAddress); } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs new file mode 100644 index 0000000000..55a4ef7a6b --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericAllocatorImpl.cs @@ -0,0 +1,1075 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.IO; +using System.Runtime.CompilerServices; +using System.Threading; +using Microsoft.Extensions.Logging; + +namespace Tsavorite.core +{ + internal sealed unsafe class GenericAllocatorImpl : AllocatorBase> + where TStoreFunctions : IStoreFunctions + { + // Circular buffer definition + internal AllocatorRecord[][] values; + + // Object log related variables + private readonly IDevice objectLogDevice; + // Size of object chunks being written to storage + private readonly int ObjectBlockSize = 100 * (1 << 20); + // Tail offsets per segment, in object log + public readonly long[] segmentOffsets; + + // Record sizes. 
We do not support variable-length keys in GenericAllocator + internal static int KeySize => Unsafe.SizeOf(); + internal static int ValueSize => Unsafe.SizeOf(); + internal static int RecordSize => Unsafe.SizeOf>(); + + private readonly OverflowPool[]> overflowPagePool; + + public GenericAllocatorImpl(AllocatorSettings settings, TStoreFunctions storeFunctions, Func> wrapperCreator) + : base(settings.LogSettings, storeFunctions, wrapperCreator, settings.evictCallback, settings.epoch, settings.flushCallback, settings.logger) + { + overflowPagePool = new OverflowPool[]>(4); + + if (settings.LogSettings.ObjectLogDevice == null) + throw new TsavoriteException("LogSettings.ObjectLogDevice needs to be specified (e.g., use Devices.CreateLogDevice, AzureStorageDevice, or NullDevice)"); + + if (typeof(Key) == typeof(SpanByte)) + throw new TsavoriteException("SpanByte Keys cannot be mixed with object Values"); + if (typeof(Value) == typeof(SpanByte)) + throw new TsavoriteException("SpanByte Values cannot be mixed with object Keys"); + + values = new AllocatorRecord[BufferSize][]; + segmentOffsets = new long[SegmentBufferSize]; + + objectLogDevice = settings.LogSettings.ObjectLogDevice; + + if ((settings.LogSettings.LogDevice as NullDevice) == null && (KeyHasObjects() || ValueHasObjects())) + { + if (objectLogDevice == null) + throw new TsavoriteException("Objects in key/value, but object log not provided during creation of Tsavorite instance"); + if (objectLogDevice.SegmentSize != -1) + throw new TsavoriteException("Object log device should not have fixed segment size. 
Set preallocateFile to false when calling CreateLogDevice for object log"); + } + } + + internal int OverflowPageCount => overflowPagePool.Count; + + public override void Reset() + { + base.Reset(); + objectLogDevice.Reset(); + for (int index = 0; index < BufferSize; index++) + { + if (IsAllocated(index)) + FreePage(index); + } + Array.Clear(segmentOffsets, 0, segmentOffsets.Length); + Initialize(); + } + + void ReturnPage(int index) + { + Debug.Assert(index < BufferSize); + if (values[index] != default) + { + _ = overflowPagePool.TryAdd(values[index]); + values[index] = default; + _ = Interlocked.Decrement(ref AllocatedPageCount); + } + } + + public override void Initialize() => Initialize(RecordSize); + + /// Get start logical address + internal long GetStartLogicalAddress(long page) => page << LogPageSizeBits; + + /// Get first valid logical address + internal long GetFirstValidLogicalAddress(long page) + { + if (page == 0) + return (page << LogPageSizeBits) + RecordSize; + return page << LogPageSizeBits; + } + + internal ref RecordInfo GetInfo(long physicalAddress) + { + // Offset within page + int offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset / RecordSize].info; + } + + internal ref RecordInfo GetInfoFromBytePointer(byte* ptr) => ref Unsafe.AsRef>(ptr).info; + + internal ref Key GetKey(long physicalAddress) + { + // Offset within page + var offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + var pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset / RecordSize].key; + } + + internal ref Value GetValue(long physicalAddress) + { + // Offset within page + var offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + var pageIndex = 
(int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset / RecordSize].value; + } + + internal (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) => (RecordSize, RecordSize); + + public int GetValueLength(ref Value value) => ValueSize; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void SerializeKey(ref Key src, long physicalAddress) => GetKey(physicalAddress) = src; + + internal (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + => (RecordSize, RecordSize, KeySize); + + internal int GetAverageRecordSize() => RecordSize; + + internal int GetFixedRecordSize() => RecordSize; + + internal (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + => (RecordSize, RecordSize, KeySize); + + internal (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value) => (RecordSize, RecordSize, KeySize); + + internal override bool TryComplete() + { + var b1 = objectLogDevice.TryComplete(); + var b2 = base.TryComplete(); + return b1 || b2; + } + + /// + /// Dispose memory allocator + /// + public override void Dispose() + { + if (values != null) + { + for (int i = 0; i < values.Length; i++) + values[i] = null; + values = null; + } + overflowPagePool.Dispose(); + base.Dispose(); + } + + /// Delete in-memory portion of the log + internal override void DeleteFromMemory() + { + for (int i = 0; i < values.Length; i++) + values[i] = null; + values = null; + } + + internal AddressInfo* GetKeyAddressInfo(long physicalAddress) + => (AddressInfo*)Unsafe.AsPointer(ref Unsafe.AsRef>((byte*)physicalAddress).key); + + internal AddressInfo* GetValueAddressInfo(long physicalAddress) + => (AddressInfo*)Unsafe.AsPointer(ref 
Unsafe.AsRef>((byte*)physicalAddress).value); + + /// Allocate memory page, pinned in memory, and in sector aligned form, if possible + internal void AllocatePage(int index) => values[index] = AllocatePage(); + + internal AllocatorRecord[] AllocatePage() + { + IncrementAllocatedPageCount(); + + if (overflowPagePool.TryGet(out var item)) + return item; + + return new AllocatorRecord[(PageSize + RecordSize - 1) / RecordSize]; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal static long SnapToLogicalAddressBoundary(ref long logicalAddress) + => logicalAddress = ((logicalAddress - Constants.kFirstValidAddress) / RecordSize) * RecordSize + Constants.kFirstValidAddress; + + public long GetPhysicalAddress(long logicalAddress) => logicalAddress; + + internal bool IsAllocated(int pageIndex) => values[pageIndex] != null; + + protected override void TruncateUntilAddress(long toAddress) + { + base.TruncateUntilAddress(toAddress); + objectLogDevice.TruncateUntilSegment((int)(toAddress >> LogSegmentSizeBits)); + } + + protected override void TruncateUntilAddressBlocking(long toAddress) + { + base.TruncateUntilAddressBlocking(toAddress); + objectLogDevice.TruncateUntilSegment((int)(toAddress >> LogSegmentSizeBits)); + } + + protected override void RemoveSegment(int segment) + { + base.RemoveSegment(segment); + objectLogDevice.RemoveSegment(segment); + } + + protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) + { + WriteAsync(flushPage, + (ulong)(AlignedPageSizeBytes * flushPage), + (uint)PageSize, + callback, + asyncResult, device, objectLogDevice); + } + + protected override void WriteAsyncToDevice + (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, + PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) + { + VerifyCompatibleSectorSize(device); + 
VerifyCompatibleSectorSize(objectLogDevice); + + var epochTaken = false; + if (!epoch.ThisInstanceProtected()) + { + epochTaken = true; + epoch.Resume(); + } + try + { + if (HeadAddress >= (flushPage << LogPageSizeBits) + pageSize) + { + // Requested page is unavailable in memory, ignore + callback(0, 0, asyncResult); + } + else + { + // We are writing to separate device, so use fresh segment offsets + WriteAsync(flushPage, + (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), + (uint)pageSize, callback, asyncResult, + device, objectLogDevice, flushPage, localSegmentOffsets, fuzzyStartLogicalAddress); + } + } + finally + { + if (epochTaken) + epoch.Suspend(); + } + } + + internal void ClearPage(long page, int offset) + => Array.Clear(values[page % BufferSize], offset / RecordSize, values[page % BufferSize].Length - offset / RecordSize); + + internal void FreePage(long page) + { + ClearPage(page, 0); + + // Close segments + var thisCloseSegment = page >> (LogSegmentSizeBits - LogPageSizeBits); + var nextCloseSegment = (page + 1) >> (LogSegmentSizeBits - LogPageSizeBits); + + if (thisCloseSegment != nextCloseSegment) + { + // We are clearing the last page in current segment + segmentOffsets[thisCloseSegment % SegmentBufferSize] = 0; + } + + // If all pages are being used (i.e. EmptyPageCount == 0), nothing to re-utilize by adding + // to overflow pool. 
+ if (EmptyPageCount > 0) + ReturnPage((int)(page % BufferSize)); + } + + private void WriteAsync(long flushPage, ulong alignedDestinationAddress, uint numBytesToWrite, + DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult, + IDevice device, IDevice objlogDevice, long intendedDestinationPage = -1, long[] localSegmentOffsets = null, long fuzzyStartLogicalAddress = long.MaxValue) + { + // Short circuit if we are using a null device + if ((device as NullDevice) != null) + { + device.WriteAsync(IntPtr.Zero, 0, 0, numBytesToWrite, callback, asyncResult); + return; + } + + int start = 0, aligned_start = 0, end = (int)numBytesToWrite; + if (asyncResult.partial) + { + // We're writing only a subset of the page + start = (int)(asyncResult.fromAddress - (asyncResult.page << LogPageSizeBits)); + aligned_start = (start / sectorSize) * sectorSize; + end = (int)(asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits)); + } + + // Check if user did not override with special segment offsets + localSegmentOffsets ??= segmentOffsets; + + // This is the in-memory buffer page to be written + var src = values[flushPage % BufferSize]; + + // We create a shadow copy of the page if we are under epoch protection. + // This copy ensures that object references are kept valid even if the original page is reclaimed. + // We suspend epoch during the actual flush as that can take a long time. + var epochProtected = false; + if (epoch.ThisInstanceProtected()) + { + epochProtected = true; + src = new AllocatorRecord[values[flushPage % BufferSize].Length]; + Array.Copy(values[flushPage % BufferSize], src, values[flushPage % BufferSize].Length); + epoch.Suspend(); + } + try + { + // Temporary storage to hold the image "template" we'll write to disk: It will have RecordInfos and object pointers that will be overwritten by addresses + // when writing to the main log (both object pointers and addresses are 8 bytes). 
+ var buffer = bufferPool.Get((int)numBytesToWrite); + + if (aligned_start < start && (KeyHasObjects() || ValueHasObjects())) + { + // Do not read back the invalid header of page 0 + if ((flushPage > 0) || (start > GetFirstValidLogicalAddress(flushPage))) + { + // Get the overlapping HLOG from disk as we wrote it with object pointers previously. This avoids object reserialization + PageAsyncReadResult result = new() + { + handle = new CountdownEvent(1) + }; + device.ReadAsync(alignedDestinationAddress + (ulong)aligned_start, (IntPtr)buffer.aligned_pointer + aligned_start, + (uint)sectorSize, AsyncReadPageCallback, result); + result.handle.Wait(); + } + fixed (RecordInfo* pin = &src[0].info) + { + // Write all the RecordInfos on one operation. This also includes object pointers, but for valid records we will overwrite those below. + Debug.Assert(buffer.aligned_pointer + numBytesToWrite <= (byte*)Unsafe.AsPointer(ref buffer.buffer[0]) + buffer.buffer.Length); + + Buffer.MemoryCopy((void*)((long)Unsafe.AsPointer(ref src[0]) + start), buffer.aligned_pointer + start, + numBytesToWrite - start, numBytesToWrite - start); + } + } + else + { + fixed (RecordInfo* pin = &src[0].info) + { + // Write all the RecordInfos on one operation. This also includes object pointers, but for valid records we will overwrite those below. + Debug.Assert(buffer.aligned_pointer + numBytesToWrite <= (byte*)Unsafe.AsPointer(ref buffer.buffer[0]) + buffer.buffer.Length); + + Buffer.MemoryCopy((void*)((long)Unsafe.AsPointer(ref src[0]) + aligned_start), buffer.aligned_pointer + aligned_start, + numBytesToWrite - aligned_start, numBytesToWrite - aligned_start); + } + } + + // In the main log, we write addresses to pages in the object log. This array saves the addresses of the key and/or value fields in 'buffer', + // which again is the image we're building from the 'values' "page" for this write. 
The "addresses into 'buffer'" are cast below to AddressInfo + // structures and stored in the sequence we'll write them: alternating series of key then value if both are object types, else keys or values only. + var addr = new List(); + asyncResult.freeBuffer1 = buffer; + + // Object keys and values are serialized into this MemoryStream. + MemoryStream ms = new(); + var keySerializer = KeyHasObjects() ? _storeFunctions.BeginSerializeKey(ms) : null; + var valueSerializer = ValueHasObjects() ? _storeFunctions.BeginSerializeValue(ms) : null; + + // Track the size to be written to the object log. + long endPosition = 0; + + for (int i = start / RecordSize; i < end / RecordSize; i++) + { + byte* recordPtr = buffer.aligned_pointer + i * RecordSize; + + // Retrieve reference to record struct + ref var record = ref Unsafe.AsRef>(recordPtr); + AddressInfo* key_address = null, value_address = null; + + // Zero out object reference addresses (AddressInfo) in the planned disk image + if (KeyHasObjects()) + { + key_address = GetKeyAddressInfo((long)recordPtr); + *key_address = default; + } + if (ValueHasObjects()) + { + value_address = GetValueAddressInfo((long)recordPtr); + *value_address = default; + } + + // Now fill in AddressInfo data for the valid records + if (!record.info.Invalid) + { + // Calculate the logical address of the 'values' page currently being written. + var address = (flushPage << LogPageSizeBits) + i * RecordSize; + + // Do not write v+1 records (e.g. during a checkpoint) + if (address < fuzzyStartLogicalAddress || !record.info.IsInNewVersion) + { + if (KeyHasObjects()) + { + long pos = ms.Position; + keySerializer.Serialize(ref src[i].key); + + // Store the key address into the 'buffer' AddressInfo image as an offset into 'ms'. 
+ key_address->Address = pos; + key_address->Size = (int)(ms.Position - pos); + addr.Add((long)key_address); + endPosition = pos + key_address->Size; + } + + if (ValueHasObjects() && !record.info.Tombstone) + { + long pos = ms.Position; + valueSerializer.Serialize(ref src[i].value); + + // Store the value address into the 'buffer' AddressInfo image as an offset into 'ms'. + value_address->Address = pos; + value_address->Size = (int)(ms.Position - pos); + addr.Add((long)value_address); + endPosition = pos + value_address->Size; + } + } + else + { + // Mark v+1 records as invalid to avoid deserializing them on recovery + record.info.SetInvalid(); + } + } + + // If this record's serialized size surpassed ObjectBlockSize or it's the last record to be written, write to the object log. + if (endPosition > ObjectBlockSize || i == (end / RecordSize) - 1) + { + var memoryStreamActualLength = ms.Position; + var memoryStreamTotalLength = (int)endPosition; + endPosition = 0; + + if (KeyHasObjects()) + keySerializer.EndSerialize(); + if (ValueHasObjects()) + valueSerializer.EndSerialize(); + ms.Close(); + + // Get the total serialized length rounded up to sectorSize + var _alignedLength = (memoryStreamTotalLength + (sectorSize - 1)) & ~(sectorSize - 1); + + // Reserve the current address in the object log segment offsets for this chunk's write operation. + var _objAddr = Interlocked.Add(ref localSegmentOffsets[(long)(alignedDestinationAddress >> LogSegmentSizeBits) % SegmentBufferSize], _alignedLength) - _alignedLength; + + // Allocate the object-log buffer to build the image we'll write to disk, then copy to it from the memory stream. 
+ SectorAlignedMemory _objBuffer = null; + if (memoryStreamTotalLength > 0) + { + _objBuffer = bufferPool.Get(memoryStreamTotalLength); + + fixed (void* src_ = ms.GetBuffer()) + Buffer.MemoryCopy(src_, _objBuffer.aligned_pointer, memoryStreamTotalLength, memoryStreamActualLength); + } + + // Each address we calculated above is now an offset to objAddr; convert to the actual address. + foreach (var address in addr) + ((AddressInfo*)address)->Address += _objAddr; + + // If we have not written all records, prepare for the next chunk of records to be written. + if (i < (end / RecordSize) - 1) + { + // Create a new MemoryStream for the next chunk of records to be written. + ms = new MemoryStream(); + if (KeyHasObjects()) + keySerializer.BeginSerialize(ms); + if (ValueHasObjects()) + valueSerializer.BeginSerialize(ms); + + // Reset address list for the next chunk of records to be written. + addr = new List(); + + // Write this chunk of records to the object log device. + asyncResult.done = new AutoResetEvent(false); + Debug.Assert(memoryStreamTotalLength > 0); + objlogDevice.WriteAsync( + (IntPtr)_objBuffer.aligned_pointer, + (int)(alignedDestinationAddress >> LogSegmentSizeBits), + (ulong)_objAddr, (uint)_alignedLength, AsyncFlushPartialObjectLogCallback, asyncResult); + + // Wait for write to complete before resuming next write + _ = asyncResult.done.WaitOne(); + _objBuffer.Return(); + } + else + { + // We have written all records in this 'values' "page". + if (memoryStreamTotalLength > 0) + { + // Increment the count because we need to write both page and object cache. 
+ _ = Interlocked.Increment(ref asyncResult.count); + + asyncResult.freeBuffer2 = _objBuffer; + objlogDevice.WriteAsync( + (IntPtr)_objBuffer.aligned_pointer, + (int)(alignedDestinationAddress >> LogSegmentSizeBits), + (ulong)_objAddr, (uint)_alignedLength, callback, asyncResult); + } + } + } + } + + if (asyncResult.partial) + { + // We're writing only a subset of the page, so update our count of bytes to write. + var aligned_end = (int)(asyncResult.untilAddress - (asyncResult.page << LogPageSizeBits)); + aligned_end = (aligned_end + (sectorSize - 1)) & ~(sectorSize - 1); + numBytesToWrite = (uint)(aligned_end - aligned_start); + } + + // Round up the number of byte to write to sector alignment. + var alignedNumBytesToWrite = (uint)((numBytesToWrite + (sectorSize - 1)) & ~(sectorSize - 1)); + + // Finally write the hlog page + device.WriteAsync((IntPtr)buffer.aligned_pointer + aligned_start, alignedDestinationAddress + (ulong)aligned_start, + alignedNumBytesToWrite, callback, asyncResult); + } + finally + { + if (epochProtected) + epoch.Resume(); + } + } + + private void AsyncReadPageCallback(uint errorCode, uint numBytes, object context) + { + if (errorCode != 0) + logger?.LogError($"AsyncReadPageCallback error: {errorCode}"); + + // Set the page status to flushed + var result = (PageAsyncReadResult)context; + _ = result.handle.Signal(); + } + + protected override void ReadAsync( + ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, + DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) + { + asyncResult.freeBuffer1 = bufferPool.Get((int)aligned_read_length); + asyncResult.freeBuffer1.required_bytes = (int)aligned_read_length; + + if (!(KeyHasObjects() || ValueHasObjects())) + { + device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, + aligned_read_length, callback, asyncResult); + return; + } + + asyncResult.callback = callback; + + if 
(objlogDevice == null) + { + Debug.Assert(objectLogDevice != null); + objlogDevice = objectLogDevice; + } + asyncResult.objlogDevice = objlogDevice; + + device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, + aligned_read_length, AsyncReadPageWithObjectsCallback, asyncResult); + } + + + /// + /// IOCompletion callback for page flush + /// + /// + /// + /// + private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint numBytes, object context) + { + if (errorCode != 0) + logger?.LogError($"AsyncFlushPartialObjectLogCallback error: {errorCode}"); + + // Set the page status to flushed + var result = (PageAsyncFlushResult)context; + _ = result.done.Set(); + } + + private void AsyncReadPageWithObjectsCallback(uint errorCode, uint numBytes, object context) + { + if (errorCode != 0) + logger?.LogError($"AsyncReadPageWithObjectsCallback error: {errorCode}"); + + var result = (PageAsyncReadResult)context; + + AllocatorRecord[] src; + + // We are reading into a frame + if (result.frame != null) + { + var frame = (GenericFrame)result.frame; + src = frame.GetPage(result.page % frame.frameSize); + } + else + src = values[result.page % BufferSize]; + + + // Deserialize all objects until untilptr + if (result.resumePtr < result.untilPtr) + { + MemoryStream ms = new(result.freeBuffer2.buffer); + ms.Seek(result.freeBuffer2.offset, SeekOrigin.Begin); + Deserialize(result.freeBuffer1.GetValidPointer(), result.resumePtr, result.untilPtr, src, ms); + ms.Dispose(); + + result.freeBuffer2.Return(); + result.freeBuffer2 = null; + result.resumePtr = result.untilPtr; + } + + // If we have processed entire page, return + if (result.untilPtr >= result.maxPtr) + { + result.Free(); + + // Call the "real" page read callback + result.callback(errorCode, numBytes, context); + return; + } + + // We will now be able to process all records until (but not including) untilPtr + GetObjectInfo(result.freeBuffer1.GetValidPointer(), ref result.untilPtr, 
result.maxPtr, ObjectBlockSize, out long startptr, out long alignedLength); + + // Object log fragment should be aligned by construction + Debug.Assert(startptr % sectorSize == 0); + Debug.Assert(alignedLength % sectorSize == 0); + + if (alignedLength > int.MaxValue) + throw new TsavoriteException("Unable to read object page, total size greater than 2GB: " + alignedLength); + + var objBuffer = bufferPool.Get((int)alignedLength); + result.freeBuffer2 = objBuffer; + + // Request objects from objlog + result.objlogDevice.ReadAsync( + (int)((result.page - result.offset) >> (LogSegmentSizeBits - LogPageSizeBits)), + (ulong)startptr, + (IntPtr)objBuffer.aligned_pointer, (uint)alignedLength, AsyncReadPageWithObjectsCallback, result); + } + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. + /// + /// + /// + /// + /// + /// + protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) + { + var fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); + var alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); + + var alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); + alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); + + var record = bufferPool.Get((int)alignedReadLength); + record.valid_offset = (int)(fileOffset - alignedFileOffset); + record.available_bytes = (int)(alignedReadLength - (fileOffset - alignedFileOffset)); + record.required_bytes = numBytes; + + var asyncResult = default(AsyncGetFromDiskResult>); + asyncResult.context = context; + asyncResult.context.record = result; + asyncResult.context.objBuffer = record; + objectLogDevice.ReadAsync( + (int)(context.logicalAddress >> LogSegmentSizeBits), + 
alignedFileOffset, + (IntPtr)asyncResult.context.objBuffer.aligned_pointer, + alignedReadLength, + callback, + asyncResult); + } + + /// + /// Read pages from specified device + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + internal void AsyncReadPagesFromDeviceToFrame( + long readPageStart, + int numPages, + long untilAddress, + DeviceIOCompletionCallback callback, + TContext context, + GenericFrame frame, + out CountdownEvent completed, + long devicePageOffset = 0, + IDevice device = null, IDevice objectLogDevice = null) + { + var usedDevice = device ?? this.device; + IDevice usedObjlogDevice = objectLogDevice; + + completed = new CountdownEvent(numPages); + for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) + { + int pageIndex = (int)(readPage % frame.frameSize); + if (frame.GetPage(pageIndex) == null) + frame.Allocate(pageIndex); + else + frame.Clear(pageIndex); + + var asyncResult = new PageAsyncReadResult() + { + page = readPage, + context = context, + handle = completed, + maxPtr = PageSize, + frame = frame, + }; + + var offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); + var readLength = (uint)AlignedPageSizeBytes; + long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); + + if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) + { + readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); + asyncResult.maxPtr = readLength; + readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); + } + + if (device != null) + offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); + + ReadAsync(offsetInFile, pageIndex, readLength, callback, asyncResult, usedDevice, usedObjlogDevice); + } + } + + + #region Page handlers for objects + /// + /// Deseialize part of page from stream + /// + /// + /// From pointer + /// Until pointer + /// + /// Stream + public void 
Deserialize(byte* raw, long ptr, long untilptr, AllocatorRecord[] src, Stream stream) + { + long streamStartPos = stream.Position; + long start_addr = -1; + int start_offset = -1, end_offset = -1; + + var keySerializer = KeyHasObjects() ? _storeFunctions.BeginDeserializeKey(stream) : null; + var valueSerializer = ValueHasObjects() ? _storeFunctions.BeginDeserializeValue(stream) : null; + + while (ptr < untilptr) + { + ref var record = ref Unsafe.AsRef>(raw + ptr); + src[ptr / RecordSize].info = record.info; + if (start_offset == -1) + start_offset = (int)(ptr / RecordSize); + + end_offset = (int)(ptr / RecordSize) + 1; + + if (!record.info.Invalid) + { + if (KeyHasObjects()) + { + var key_addr = GetKeyAddressInfo((long)raw + ptr); + if (start_addr == -1) start_addr = key_addr->Address & ~((long)sectorSize - 1); + if (stream.Position != streamStartPos + key_addr->Address - start_addr) + _ = stream.Seek(streamStartPos + key_addr->Address - start_addr, SeekOrigin.Begin); + + keySerializer.Deserialize(out src[ptr / RecordSize].key); + } + else + src[ptr / RecordSize].key = record.key; + + if (!record.info.Tombstone) + { + if (ValueHasObjects()) + { + var value_addr = GetValueAddressInfo((long)raw + ptr); + if (start_addr == -1) start_addr = value_addr->Address & ~((long)sectorSize - 1); + if (stream.Position != streamStartPos + value_addr->Address - start_addr) + stream.Seek(streamStartPos + value_addr->Address - start_addr, SeekOrigin.Begin); + + valueSerializer.Deserialize(out src[ptr / RecordSize].value); + } + else + src[ptr / RecordSize].value = record.value; + } + } + ptr += GetRecordSize(ptr).Item2; + } + if (KeyHasObjects()) + keySerializer.EndDeserialize(); + if (ValueHasObjects()) + valueSerializer.EndDeserialize(); + + if (OnDeserializationObserver != null && start_offset != -1 && end_offset != -1) + { + using var iter = new MemoryPageScanIterator(src, start_offset, end_offset, -1, RecordSize); + OnDeserializationObserver.OnNext(iter); + } + } + + /// + /// 
Get location and range of object log addresses for specified log page + /// + /// + /// + /// + /// + /// + /// + public void GetObjectInfo(byte* raw, ref long ptr, long untilptr, int objectBlockSize, out long startptr, out long size) + { + var minObjAddress = long.MaxValue; + var maxObjAddress = long.MinValue; + var done = false; + + while (!done && (ptr < untilptr)) + { + ref var record = ref Unsafe.AsRef>(raw + ptr); + + if (!record.info.Invalid) + { + if (KeyHasObjects()) + { + var key_addr = GetKeyAddressInfo((long)raw + ptr); + var addr = key_addr->Address; + + if (addr < minObjAddress) minObjAddress = addr; + addr += key_addr->Size; + if (addr > maxObjAddress) maxObjAddress = addr; + + // If object pointer is greater than kObjectSize from starting object pointer + if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) + done = true; + } + + + if (ValueHasObjects() && !record.info.Tombstone) + { + var value_addr = GetValueAddressInfo((long)raw + ptr); + var addr = value_addr->Address; + + if (addr < minObjAddress) minObjAddress = addr; + addr += value_addr->Size; + if (addr > maxObjAddress) maxObjAddress = addr; + + // If object pointer is greater than kObjectSize from starting object pointer + if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) + done = true; + } + } + ptr += GetRecordSize(ptr).allocatedSize; + } + + // Handle the case where no objects are to be written + if (minObjAddress == long.MaxValue && maxObjAddress == long.MinValue) + { + minObjAddress = 0; + maxObjAddress = 0; + } + + // Align start pointer for retrieval + minObjAddress &= ~((long)sectorSize - 1); + + // Align max address as well + maxObjAddress = (maxObjAddress + (sectorSize - 1)) & ~((long)sectorSize - 1); + + startptr = minObjAddress; + size = maxObjAddress - minObjAddress; + } + + /// Retrieve objects from object log + internal bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) + { + if (!KeyHasObjects()) + ctx.key 
= Unsafe.AsRef>(record).key; + if (!ValueHasObjects()) + ctx.value = Unsafe.AsRef>(record).value; + + if (!(KeyHasObjects() || ValueHasObjects())) + return true; + + if (ctx.objBuffer == null) + { + // Issue IO for objects + long startAddress = -1; + long endAddress = -1; + if (KeyHasObjects()) + { + var x = GetKeyAddressInfo((long)record); + startAddress = x->Address; + endAddress = x->Address + x->Size; + } + + if (ValueHasObjects() && !GetInfoFromBytePointer(record).Tombstone) + { + var x = GetValueAddressInfo((long)record); + if (startAddress == -1) + startAddress = x->Address; + endAddress = x->Address + x->Size; + } + + // We are limited to a 2GB size per key-value + if (endAddress - startAddress > int.MaxValue) + throw new TsavoriteException("Size of key-value exceeds max of 2GB: " + (endAddress - startAddress)); + + if (startAddress < 0) + startAddress = 0; + + AsyncGetFromDisk(startAddress, (int)(endAddress - startAddress), ctx, ctx.record); + return false; + } + + // Parse the key and value objects + var ms = new MemoryStream(ctx.objBuffer.buffer); + _ = ms.Seek(ctx.objBuffer.offset + ctx.objBuffer.valid_offset, SeekOrigin.Begin); + + if (KeyHasObjects()) + { + var keySerializer = _storeFunctions.BeginDeserializeKey(ms); + keySerializer.Deserialize(out ctx.key); + keySerializer.EndDeserialize(); + } + + if (ValueHasObjects() && !GetInfoFromBytePointer(record).Tombstone) + { + var valueSerializer = _storeFunctions.BeginDeserializeValue(ms); + valueSerializer.Deserialize(out ctx.value); + valueSerializer.EndDeserialize(); + } + + ctx.objBuffer.Return(); + return true; + } + + /// Whether KVS has keys to serialize/deserialize + internal bool KeyHasObjects() => _storeFunctions.HasKeySerializer; + + /// Whether KVS has values to serialize/deserialize + internal bool ValueHasObjects() => _storeFunctions.HasValueSerializer; + #endregion + + public long[] GetSegmentOffsets() => segmentOffsets; + + internal void PopulatePage(byte* src, int required_bytes, long 
destinationPage) + => PopulatePage(src, required_bytes, ref values[destinationPage % BufferSize]); + + internal void PopulatePageFrame(byte* src, int required_bytes, AllocatorRecord[] frame) + => PopulatePage(src, required_bytes, ref frame); + + internal void PopulatePage(byte* src, int required_bytes, ref AllocatorRecord[] destinationPage) + { + fixed (RecordInfo* pin = &destinationPage[0].info) + { + Debug.Assert(required_bytes <= RecordSize * destinationPage.Length); + Buffer.MemoryCopy(src, Unsafe.AsPointer(ref destinationPage[0]), required_bytes, required_bytes); + } + } + + /// + /// Iterator interface for scanning Tsavorite log + /// + /// + public override ITsavoriteScanIterator Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) + => new GenericScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch); + + /// + /// Implementation for push-scanning Tsavorite log, called from LogAccessor + /// + internal override bool Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) + { + using GenericScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, false, epoch, logger: logger); + return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); + } + + /// + /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor + /// + internal override bool ScanCursor(TsavoriteKV> store, + ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) + { + using GenericScanIterator iter = new(store, this, cursor, endAddress, ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); + return ScanLookup>(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); + } + + /// + /// Implementation for push-iterating key 
versions, called from LogAccessor + /// + internal override bool IterateKeyVersions(TsavoriteKV> store, + ref Key key, long beginAddress, ref TScanFunctions scanFunctions) + { + using GenericScanIterator iter = new(store, this, beginAddress, epoch, logger: logger); + return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); + } + + private void ComputeScanBoundaries(long beginAddress, long endAddress, out long pageStartAddress, out int start, out int end) + { + pageStartAddress = beginAddress & ~PageSizeMask; + start = (int)(beginAddress & PageSizeMask) / RecordSize; + var count = (int)(endAddress - beginAddress) / RecordSize; + end = start + count; + } + + /// + internal override void EvictPage(long page) + { + if (OnEvictionObserver is not null) + { + var beginAddress = page << LogPageSizeBits; + var endAddress = (page + 1) << LogPageSizeBits; + ComputeScanBoundaries(beginAddress, endAddress, out var pageStartAddress, out var start, out var end); + using var iter = new MemoryPageScanIterator(values[(int)(page % BufferSize)], start, end, pageStartAddress, RecordSize); + OnEvictionObserver?.OnNext(iter); + } + + FreePage(page); + } + + /// + internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) + { + var page = (beginAddress >> LogPageSizeBits) % BufferSize; + ComputeScanBoundaries(beginAddress, endAddress, out var pageStartAddress, out var start, out var end); + using var iter = new MemoryPageScanIterator(values[page], start, end, pageStartAddress, RecordSize); + Debug.Assert(epoch.ThisInstanceProtected()); + try + { + epoch.Suspend(); + observer?.OnNext(iter); + } + finally + { + epoch.Resume(); + } + } + + internal override void AsyncFlushDeltaToDevice(long startAddress, long endAddress, long prevEndAddress, long version, DeltaLog deltaLog, out SemaphoreSlim completedSemaphore, int throttleCheckpointFlushDelayMs) + { + throw new TsavoriteException("Incremental snapshots not supported with 
generic allocator"); + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericFrame.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericFrame.cs index 06628c2b88..e6a9d349ae 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericFrame.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericFrame.cs @@ -11,20 +11,20 @@ namespace Tsavorite.core /// internal sealed class GenericFrame : IDisposable { - private readonly Record[][] frame; + private readonly AllocatorRecord[][] frame; public readonly int frameSize, pageSize; - private static int RecordSize => Unsafe.SizeOf>(); + private static int RecordSize => Unsafe.SizeOf>(); public GenericFrame(int frameSize, int pageSize) { this.frameSize = frameSize; this.pageSize = pageSize; - frame = new Record[frameSize][]; + frame = new AllocatorRecord[frameSize][]; } public void Allocate(int index) { - frame[index] = new Record[(pageSize + RecordSize - 1) / RecordSize]; + frame[index] = new AllocatorRecord[(pageSize + RecordSize - 1) / RecordSize]; } public void Clear(int pageIndex) @@ -47,7 +47,7 @@ public ref RecordInfo GetInfo(long frameNumber, long offset) return ref frame[frameNumber][offset].info; } - public ref Record[] GetPage(long frameNumber) + public ref AllocatorRecord[] GetPage(long frameNumber) { return ref frame[frameNumber]; } diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs index 5899d6435b..a825370da5 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/GenericScanIterator.cs @@ -10,11 +10,11 @@ namespace Tsavorite.core /// /// Scan iterator for hybrid log /// - internal sealed class GenericScanIterator : ScanIteratorBase, ITsavoriteScanIterator, IPushScanIterator + internal sealed class GenericScanIterator : ScanIteratorBase, ITsavoriteScanIterator, 
IPushScanIterator + where TStoreFunctions : IStoreFunctions { - private readonly TsavoriteKV store; - private readonly GenericAllocator hlog; - private readonly ITsavoriteEqualityComparer comparer; + private readonly TsavoriteKV> store; + private readonly GenericAllocatorImpl hlog; private readonly GenericFrame frame; private readonly int recordSize; @@ -26,12 +26,13 @@ internal sealed class GenericScanIterator : ScanIteratorBase, ITsavo /// /// Constructor /// - public GenericScanIterator(TsavoriteKV store, GenericAllocator hlog, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords, LightEpoch epoch, ILogger logger = null) + public GenericScanIterator(TsavoriteKV> store, GenericAllocatorImpl hlog, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords, LightEpoch epoch, ILogger logger = null) : base(beginAddress == 0 ? hlog.GetFirstValidLogicalAddress(0) : beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; this.hlog = hlog; - recordSize = hlog.GetRecordSize(0).Item2; + recordSize = hlog.GetRecordSize(0).allocatedSize; if (frameSize > 0) frame = new GenericFrame(frameSize, hlog.PageSize); } @@ -39,13 +40,13 @@ public GenericScanIterator(TsavoriteKV store, GenericAllocator /// Constructor for use with tail-to-head push iteration of the passed key's record versions /// - public GenericScanIterator(TsavoriteKV store, GenericAllocator hlog, ITsavoriteEqualityComparer comparer, long beginAddress, LightEpoch epoch, ILogger logger = null) + public GenericScanIterator(TsavoriteKV> store, GenericAllocatorImpl hlog, + long beginAddress, LightEpoch epoch, ILogger logger = null) : base(beginAddress == 0 ? 
hlog.GetFirstValidLogicalAddress(0) : beginAddress, hlog.GetTailAddress(), ScanBufferingMode.SinglePageBuffering, false, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; this.hlog = hlog; - this.comparer = comparer; - recordSize = hlog.GetRecordSize(0).Item2; + recordSize = hlog.GetRecordSize(0).allocatedSize; if (frameSize > 0) frame = new GenericFrame(frameSize, hlog.PageSize); } @@ -66,17 +67,10 @@ public GenericScanIterator(TsavoriteKV store, GenericAllocator.RecordSize); + beginAddress = nextAddress = hlog.SnapToFixedLengthLogicalAddressBoundary(ref cursor, GenericAllocatorImpl.RecordSize); return true; } - ref RecordInfo IPushScanIterator.GetLockableInfo() - { - Debug.Assert(currentFrame < 0, "GetLockableInfo() should be in-memory (i.e.should not have a frame)"); - Debug.Assert(epoch.ThisInstanceProtected(), "GetLockableInfo() should be called with the epoch held"); - return ref hlog.values[currentPage][currentOffset].info; - } - /// /// Get next record in iterator /// @@ -112,7 +106,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) currentOffset = (currentAddress & hlog.PageSizeMask) / recordSize; if (currentAddress < headAddress) - BufferAndLoad(currentAddress, currentPage, currentPage % frameSize, headAddress, stopAddress); + _ = BufferAndLoad(currentAddress, currentPage, currentPage % frameSize, headAddress, stopAddress); // Check if record fits on page, if not skip to next page if ((currentAddress & hlog.PageSizeMask) + recordSize > hlog.PageSize) @@ -138,15 +132,14 @@ public unsafe bool GetNext(out RecordInfo recordInfo) continue; } - // Copy the object values from cached page memory to data members; we have no ref into the log after the epoch.Suspend() - // (except for GetLockableInfo which we know is safe). These are pointer-sized shallow copies but we need to lock to ensure - // no value tearing inside the object while copying to temp storage. 
- OperationStackContext stackCtx = default; + // Copy the object values from cached page memory to data members; we have no ref into the log after the epoch.Suspend(). + // These are pointer-sized shallow copies but we need to lock to ensure no value tearing inside the object while copying to temp storage. + OperationStackContext> stackCtx = default; try { - // We cannot use GetKey() and GetLockableInfo() because they have not yet been set. + // We cannot use GetKey() because it has not yet been set. if (currentAddress >= headAddress && store is not null) - store.LockForScan(ref stackCtx, ref hlog.values[currentPage][currentOffset].key, ref hlog.values[currentPage][currentOffset].info); + store.LockForScan(ref stackCtx, ref hlog.values[currentPage][currentOffset].key); recordInfo = hlog.values[currentPage][currentOffset].info; currentKey = hlog.values[currentPage][currentOffset].key; @@ -155,7 +148,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) finally { if (stackCtx.recSrc.HasLock) - store.UnlockForScan(ref stackCtx, ref hlog.values[currentPage][currentOffset].key, ref hlog.values[currentPage][currentOffset].info); + store.UnlockForScan(ref stackCtx); } // Success @@ -175,7 +168,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) // Copy the object values from the frame to data members. currentKey = frame.GetKey(currentFrame, currentOffset); currentValue = frame.GetValue(currentFrame, currentOffset); - currentPage = currentOffset = -1; // We should no longer use these except for GetLockableInfo() + currentPage = currentOffset = -1; // Success epoch?.Suspend(); @@ -218,14 +211,14 @@ bool IPushScanIterator.BeginGetPrevInMemory(ref Key key, out RecordInfo rec nextAddress = currentAddress + recordSize; bool skipOnScan = includeSealedRecords ? 
recordInfo.Invalid : recordInfo.SkipOnScan; - if (skipOnScan || recordInfo.IsNull() || !comparer.Equals(ref hlog.values[currentPage][currentOffset].key, ref key)) + if (skipOnScan || recordInfo.IsNull() || !hlog._storeFunctions.KeysEqual(ref hlog.values[currentPage][currentOffset].key, ref key)) { epoch?.Suspend(); continue; } - // Copy the object values from cached page memory to data members; we have no ref into the log after the epoch.Suspend() - // (except for GetLockableInfo which we know is safe). These are pointer-sized shallow copies. + // Copy the object values from cached page memory to data members; we have no ref into the log after the epoch.Suspend(). + // These are pointer-sized shallow copies. recordInfo = hlog.values[currentPage][currentOffset].info; currentKey = hlog.values[currentPage][currentOffset].key; currentValue = hlog.values[currentPage][currentOffset].value; @@ -271,7 +264,8 @@ public override void Dispose() frame?.Dispose(); } - internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) + internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, + long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) => hlog.AsyncReadPagesFromDeviceToFrame(readPageStart, numPages, untilAddress, AsyncReadPagesCallback, context, frame, out completed, devicePageOffset, device, objectLogDevice); private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object context) @@ -292,7 +286,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object } if (errorCode == 0) - result.handle?.Signal(); + _ = result.handle?.Signal(); Interlocked.MemoryBarrier(); 
} diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocator.cs new file mode 100644 index 0000000000..1971c70162 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocator.cs @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +namespace Tsavorite.core +{ + /// + /// Interface for hybrid log memory allocator struct wrapper for inlining. This contains the performance-critical methods that must be inlined; + /// abstract/virtual methods may be called via . + /// + public interface IAllocator : IAllocatorCallbacks + where TStoreFunctions : IStoreFunctions + { + /// The base class instance of the allocator implementation + AllocatorBase GetBase() + where TAllocator : IAllocator; + + /// Whether this allocator uses fixed-length records + bool IsFixedLength { get; } + + /// Whether this allocator uses a separate object log + bool HasObjectLog { get; } + + /// Cast address range to . For this will also initialize the value to span the address range. 
+ ref Value GetAndInitializeValue(long physicalAddress, long endPhysicalAddress); + + /// Get copy destination size for RMW, taking Input into account + (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref Key key, ref Input input, ref Value value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + where TVariableLengthInput : IVariableLengthInput; + + /// Get initial record size for RMW, given the and + (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref Key key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + where TSessionFunctionsWrapper : IVariableLengthInput; + + /// Get record size required for the given and + (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref Key key, ref Value value); + + /// Get the size of the given + int GetValueLength(ref Value value); + + /// Mark the page that contains as dirty + void MarkPage(long logicalAddress, long version); + + /// Mark the page that contains as dirty atomically + void MarkPageAtomic(long logicalAddress, long version); + + /// Get segment offsets + long[] GetSegmentOffsets(); + + /// Serialize key to log + void SerializeKey(ref Key key, long physicalAddress); + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocatorCallbacks.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocatorCallbacks.cs new file mode 100644 index 0000000000..818468e3d3 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/IAllocatorCallbacks.cs @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +namespace Tsavorite.core +{ + /// + /// Interface for hybrid log memory allocator struct wrapper callbacks for inlining performance-path callbacks from + /// + /// to the fully derived allocator, including both record accessors and Scan calls. 
+ /// + /// This interface does not currently appear in type constraints, but the organization may prove useful. + public interface IAllocatorCallbacks + where TStoreFunctions : IStoreFunctions + { + /// Get start logical address on + long GetStartLogicalAddress(long page); + + /// Get first valid logical address on + long GetFirstValidLogicalAddress(long page); + + /// Get physical address from + long GetPhysicalAddress(long logicalAddress); + + /// Get from + ref RecordInfo GetInfo(long physicalAddress); + + /// Get from pinned memory + unsafe ref RecordInfo GetInfoFromBytePointer(byte* ptr); + + /// Get from + ref Key GetKey(long physicalAddress); + + /// Get from + ref Value GetValue(long physicalAddress); + + /// Get the actual (used) and allocated record sizes at + (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress); + + /// Get number of bytes required to read the full record that starts at for . + int GetRequiredRecordSize(long physicalAddress, int availableBytes); + + /// Get average record size + int GetAverageRecordSize(); + + /// Allocate the page in the circular buffer slot at + void AllocatePage(int pageIndex); + + /// Whether the page at is allocated + bool IsAllocated(int pageIndex); + + /// + /// Populate the page at from the pointer, which has bytes. 
+ /// + unsafe void PopulatePage(byte* src, int required_bytes, long destinationPageIndex); + + /// Free the page at , starting at + void ClearPage(long pageIndex, int offset = 0); + + /// Free the page at + void FreePage(long pageIndex); + + /// Number of extra overflow pages allocated + int OverflowPageCount { get; } + + int GetFixedRecordSize(); + + /// Retrieve key from IO context record + ref Key GetContextRecordKey(ref AsyncIOContext ctx); + + /// Retrieve value from IO context record + ref Value GetContextRecordValue(ref AsyncIOContext ctx); + + /// Determine whether we IO has returned the full record + unsafe bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx); + + /// Get heap container for pending key + IHeapContainer GetKeyContainer(ref Key key); + + /// Get heap container for pending value + IHeapContainer GetValueContainer(ref Value value); + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/IScanIteratorFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/IScanIteratorFunctions.cs index 0116daa39d..f8eb0ce129 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/IScanIteratorFunctions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/IScanIteratorFunctions.cs @@ -74,8 +74,6 @@ internal interface IPushScanIterator bool BeginGetPrevInMemory(ref Key key, out RecordInfo recordInfo, out bool continueOnDisk); bool EndGetPrevInMemory(); - ref RecordInfo GetLockableInfo(); - /// /// When beginning a cursor scan, if it is not the last cursor returned, snap it to the preceding logical address boundary. 
/// diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/MemoryPageScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/MemoryPageScanIterator.cs index e2bf015f37..2ce8854921 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/MemoryPageScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/MemoryPageScanIterator.cs @@ -13,15 +13,15 @@ namespace Tsavorite.core /// internal sealed class MemoryPageScanIterator : ITsavoriteScanIterator { - readonly Record[] page; + readonly AllocatorRecord[] page; readonly long pageStartAddress; readonly int recordSize; readonly int start, end; int offset; - public MemoryPageScanIterator(Record[] page, int start, int end, long pageStartAddress, int recordSize) + public MemoryPageScanIterator(AllocatorRecord[] page, int start, int end, long pageStartAddress, int recordSize) { - this.page = new Record[page.Length]; + this.page = new AllocatorRecord[page.Length]; Array.Copy(page, start, this.page, start, end - start); offset = start - 1; this.start = start; diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/PageUnit.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/PageUnit.cs index 903da2e1cf..5d093c3fd5 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/PageUnit.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/PageUnit.cs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
+using System.Runtime.InteropServices; + namespace Tsavorite.core { struct PageUnit @@ -8,4 +10,24 @@ struct PageUnit public byte[] value; public long pointer; } + + [StructLayout(LayoutKind.Explicit)] + internal struct FullPageStatus + { + [FieldOffset(0)] + public long LastFlushedUntilAddress; + [FieldOffset(8)] + public long Dirty; + } + + [StructLayout(LayoutKind.Explicit)] + internal struct PageOffset + { + [FieldOffset(0)] + public int Offset; + [FieldOffset(4)] + public int Page; + [FieldOffset(0)] + public long PageAndOffset; + } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/ScanIteratorBase.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/ScanIteratorBase.cs index 3ad65d6cb1..a09d0e0bd4 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/ScanIteratorBase.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/ScanIteratorBase.cs @@ -209,11 +209,13 @@ protected unsafe bool NeedBufferAndLoad(long currentAddress, long currentPage, l return false; } - internal abstract void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null); + internal abstract void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, + long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null); private bool WaitForFrameLoad(long currentAddress, long currentFrame) { - if (loaded[currentFrame].IsSet) return false; + if (loaded[currentFrame].IsSet) + return false; try { diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocator.cs index 04f978a8ae..d0cad6cd83 100644 --- 
a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocator.cs @@ -1,459 +1,173 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -using System; -using System.Diagnostics; using System.Runtime.CompilerServices; -using System.Threading; -using Microsoft.Extensions.Logging; -using static Tsavorite.core.Utility; namespace Tsavorite.core { - // Allocator for SpanByte, possibly with a Blittable Key or Value. - internal sealed unsafe class SpanByteAllocator : AllocatorBase + // Allocator for SpanByte Keys and Values. + public struct SpanByteAllocator : IAllocator + where TStoreFunctions : IStoreFunctions { - public const int kRecordAlignment = 8; // RecordInfo has a long field, so it should be aligned to 8-bytes + /// The wrapped class containing all data and most actual functionality. This must be the ONLY field in this structure so its size is sizeof(IntPtr). + private readonly SpanByteAllocatorImpl _this; - // Circular buffer definition - private readonly byte[][] values; - private readonly long[] pointers; - private readonly long* nativePointers; - - private readonly OverflowPool overflowPagePool; - - public SpanByteAllocator(LogSettings settings, ITsavoriteEqualityComparer comparer, Action evictCallback = null, LightEpoch epoch = null, Action flushCallback = null, ILogger logger = null) - : base(settings, comparer, evictCallback, epoch, flushCallback, logger) + public SpanByteAllocator(AllocatorSettings settings, TStoreFunctions storeFunctions) { - overflowPagePool = new OverflowPool(4, p => { }); - - if (BufferSize > 0) - { - values = new byte[BufferSize][]; - pointers = GC.AllocateArray(BufferSize, true); - nativePointers = (long*)Unsafe.AsPointer(ref pointers[0]); - } + // Called by TsavoriteKV via allocatorCreator; must pass a wrapperCreator to AllocatorBase + _this = new(settings, storeFunctions, @this => new SpanByteAllocator(@this)); } - internal override 
int OverflowPageCount => overflowPagePool.Count; - - public override void Reset() + public SpanByteAllocator(object @this) { - base.Reset(); - for (int index = 0; index < BufferSize; index++) - { - if (IsAllocated(index)) - FreePage(index); - } - - Initialize(); + // Called by AllocatorBase via primary ctor wrapperCreator + _this = (SpanByteAllocatorImpl)@this; } - void ReturnPage(int index) - { - Debug.Assert(index < BufferSize); - if (values[index] != null) - { - overflowPagePool.TryAdd(new PageUnit - { - pointer = pointers[index], - value = values[index] - }); - values[index] = null; - pointers[index] = 0; - Interlocked.Decrement(ref AllocatedPageCount); - } - } - - public override void Initialize() => Initialize(Constants.kFirstValidAddress); - - public override ref RecordInfo GetInfo(long physicalAddress) => ref Unsafe.AsRef((void*)physicalAddress); + /// + public readonly AllocatorBase GetBase() + where TAllocator : IAllocator + => (AllocatorBase)(object)_this; - public override ref RecordInfo GetInfoFromBytePointer(byte* ptr) => ref Unsafe.AsRef(ptr); + /// + public readonly bool IsFixedLength => false; - public override ref SpanByte GetKey(long physicalAddress) => ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength()); - - public override ref SpanByte GetValue(long physicalAddress) => ref Unsafe.AsRef((byte*)ValueOffset(physicalAddress)); - - public override ref SpanByte GetAndInitializeValue(long physicalAddress, long endAddress) - { - var src = (byte*)ValueOffset(physicalAddress); - - // Initialize the SpanByte to the length of the entire value space, less the length of the int size prefix. 
- *(int*)src = (int)((byte*)endAddress - src) - sizeof(int); - return ref Unsafe.AsRef(src); - } + /// + public readonly bool HasObjectLog => false; + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static long KeyOffset(long physicalAddress) => physicalAddress + RecordInfo.GetLength(); + public readonly long GetStartLogicalAddress(long page) => _this.GetStartLogicalAddress(page); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - private long ValueOffset(long physicalAddress) => KeyOffset(physicalAddress) + AlignedKeySize(physicalAddress); + public readonly long GetFirstValidLogicalAddress(long page) => _this.GetFirstValidLogicalAddress(page); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - private int AlignedKeySize(long physicalAddress) => RoundUp(KeySize(physicalAddress), kRecordAlignment); + public readonly long GetPhysicalAddress(long logicalAddress) => _this.GetPhysicalAddress(logicalAddress); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - private int KeySize(long physicalAddress) => (*(SpanByte*)KeyOffset(physicalAddress)).TotalSize; + public readonly ref RecordInfo GetInfo(long physicalAddress) + => ref SpanByteAllocatorImpl.GetInfo(physicalAddress); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - private int ValueSize(long physicalAddress) => (*(SpanByte*)ValueOffset(physicalAddress)).TotalSize; - - public override int GetValueLength(ref SpanByte value) => value.TotalSize; - - const int FieldInitialLength = sizeof(int); // The .Length field of a SpanByte is the initial length - - public override (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) - { - ref var recordInfo = ref GetInfo(physicalAddress); - if (recordInfo.IsNull()) - { - var l = RecordInfo.GetLength(); - return (l, l); - } - - var valueLen = ValueSize(physicalAddress); - if (recordInfo.Filler) // Get the extraValueLength - valueLen += *(int*)(ValueOffset(physicalAddress) + RoundUp(valueLen, sizeof(int))); - - var 
size = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + valueLen; - return (size, RoundUp(size, kRecordAlignment)); - } - - public override (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref SpanByte key, ref Input input, ref SpanByte value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) - { - // Used by RMW to determine the length of copy destination (taking Input into account), so does not need to get filler length. - var keySize = key.TotalSize; - var size = RecordInfo.GetLength() + RoundUp(keySize, kRecordAlignment) + varlenInput.GetRMWModifiedValueLength(ref value, ref input); - return (size, RoundUp(size, kRecordAlignment), keySize); - } - - public override int GetRequiredRecordSize(long physicalAddress, int availableBytes) - { - // We need at least [average record size]... - var reqBytes = GetAverageRecordSize(); - if (availableBytes < reqBytes) - return reqBytes; - - // We need at least [RecordInfo size] + [actual key size]... - reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + FieldInitialLength; - if (availableBytes < reqBytes) - return reqBytes; - - // We need at least [RecordInfo size] + [actual key size] + [actual value size] - var recordInfo = GetInfo(physicalAddress); - var valueLen = ValueSize(physicalAddress); - if (recordInfo.Filler) - { - // We have a filler, so the valueLen we have now is the usedValueLength; we need to offset to where the extraValueLength is and read that int - var alignedUsedValueLength = RoundUp(valueLen, sizeof(int)); - reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + alignedUsedValueLength + sizeof(int); - if (availableBytes < reqBytes) - return reqBytes; - valueLen += *(int*)(ValueOffset(physicalAddress) + alignedUsedValueLength); - } - - // Now we know the full record length. 
- reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + valueLen; - reqBytes = RoundUp(reqBytes, kRecordAlignment); - return reqBytes; - } - - public override int GetAverageRecordSize() => RecordInfo.GetLength() + (RoundUp(FieldInitialLength, kRecordAlignment) * 2); - - public override int GetFixedRecordSize() => GetAverageRecordSize(); - - public override (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref SpanByte key, ref TInput input, TSessionFunctionsWrapper sessionFunctions) - { - int keySize = key.TotalSize; - var actualSize = RecordInfo.GetLength() + RoundUp(keySize, kRecordAlignment) + sessionFunctions.GetRMWInitialValueLength(ref input); - return (actualSize, RoundUp(actualSize, kRecordAlignment), keySize); - } - - public override (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref SpanByte key, ref SpanByte value) - { - int keySize = key.TotalSize; - var actualSize = RecordInfo.GetLength() + RoundUp(keySize, kRecordAlignment) + value.TotalSize; - return (actualSize, RoundUp(actualSize, kRecordAlignment), keySize); - } - - public override void SerializeKey(ref SpanByte src, long physicalAddress) => src.CopyTo((byte*)KeyOffset(physicalAddress)); - - public override void SerializeValue(ref SpanByte src, long physicalAddress) => src.CopyTo((byte*)ValueOffset(physicalAddress)); - - /// - /// Dispose memory allocator - /// - public override void Dispose() - { - base.Dispose(); - overflowPagePool.Dispose(); - } - - public override AddressInfo* GetKeyAddressInfo(long physicalAddress) - { - // AddressInfo is only used in GenericAllocator - TODO remove from other allocators - throw new NotSupportedException(); - } - - public override AddressInfo* GetValueAddressInfo(long physicalAddress) - { - // AddressInfo is only used in GenericAllocator - TODO remove from other allocators - throw new NotSupportedException(); - } - - /// - /// Allocate memory page, pinned in memory, and in sector aligned form, if possible 
- /// - /// - internal override void AllocatePage(int index) - { - IncrementAllocatedPageCount(); - - if (overflowPagePool.TryGet(out var item)) - { - pointers[index] = item.pointer; - values[index] = item.value; - return; - } - - var adjustedSize = PageSize + 2 * sectorSize; - - byte[] tmp = GC.AllocateArray(adjustedSize, true); - long p = (long)Unsafe.AsPointer(ref tmp[0]); - pointers[index] = (p + (sectorSize - 1)) & ~((long)sectorSize - 1); - values[index] = tmp; - } + public readonly unsafe ref RecordInfo GetInfoFromBytePointer(byte* ptr) + => ref SpanByteAllocatorImpl.GetInfoFromBytePointer(ptr); + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - public override long GetPhysicalAddress(long logicalAddress) - { - // Offset within page - int offset = (int)(logicalAddress & ((1L << LogPageSizeBits) - 1)); - - // Index of page within the circular buffer - int pageIndex = (int)((logicalAddress >> LogPageSizeBits) & (BufferSize - 1)); - return *(nativePointers + pageIndex) + offset; - } - - internal override bool IsAllocated(int pageIndex) => values[pageIndex] != null; + public readonly ref SpanByte GetKey(long physicalAddress) + => ref SpanByteAllocatorImpl.GetKey(physicalAddress); - protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) - { - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * flushPage), - (uint)AlignedPageSizeBytes, - callback, - asyncResult, device); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref SpanByte GetValue(long physicalAddress) => ref _this.GetValue(physicalAddress); - protected override void WriteAsyncToDevice - (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, - PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) - { - VerifyCompatibleSectorSize(device); - var alignedPageSize 
= (pageSize + (sectorSize - 1)) & ~(sectorSize - 1); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref SpanByte GetAndInitializeValue(long physicalAddress, long endPhysicalAddress) => ref _this.GetAndInitializeValue(physicalAddress, endPhysicalAddress); - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), - (uint)alignedPageSize, callback, asyncResult, - device); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) => _this.GetRecordSize(physicalAddress); - public override long GetStartLogicalAddress(long page) => page << LogPageSizeBits; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref SpanByte key, ref Input input, ref SpanByte value, ref RecordInfo recordInfo, TVariableLengthInput varlenInput) + where TVariableLengthInput : IVariableLengthInput + => _this.GetRMWCopyDestinationRecordSize(ref key, ref input, ref value, ref recordInfo, varlenInput); - public override long GetFirstValidLogicalAddress(long page) - { - if (page == 0) - return (page << LogPageSizeBits) + Constants.kFirstValidAddress; - return page << LogPageSizeBits; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetRequiredRecordSize(long physicalAddress, int availableBytes) => _this.GetRequiredRecordSize(physicalAddress, availableBytes); - internal override void ClearPage(long page, int offset) - { - if (offset == 0) - Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); - else - { - // Adjust array offset for cache alignment - offset += (int)(pointers[page % BufferSize] - (long)Unsafe.AsPointer(ref values[page % BufferSize][0])); - Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); - } 
- } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetAverageRecordSize() => _this.GetAverageRecordSize(); - internal override void FreePage(long page) - { - ClearPage(page, 0); - if (EmptyPageCount > 0) - ReturnPage((int)(page % BufferSize)); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetFixedRecordSize() => _this.GetFixedRecordSize(); - /// - /// Delete in-memory portion of the log - /// - internal override void DeleteFromMemory() - { - for (int i = 0; i < values.Length; i++) - values[i] = null; - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref SpanByte key, ref Input input, TSessionFunctionsWrapper sessionFunctions) + where TSessionFunctionsWrapper : IVariableLengthInput + => _this.GetRMWInitialRecordSize(ref key, ref input, sessionFunctions); - protected override void ReadAsync( - ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, - DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) - { - device.ReadAsync(alignedSourceAddress, (IntPtr)pointers[destinationPageIndex], - aligned_read_length, callback, asyncResult); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref SpanByte key, ref SpanByte value) => _this.GetRecordSize(ref key, ref value); - /// - /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read - /// the record efficiently into memory. 
- /// - /// - /// - /// - /// - /// - protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) - { - throw new InvalidOperationException("AsyncReadRecordObjectsToMemory invalid for SpanByteAllocator"); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly int GetValueLength(ref SpanByte value) + => SpanByteAllocatorImpl.GetValueLength(ref value); - /// - /// Retrieve objects from object log - /// - /// - /// - /// - protected override bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) => true; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) + => SpanByteAllocatorImpl.RetrievedFullRecord(record, ref ctx); - public override ref SpanByte GetContextRecordKey(ref AsyncIOContext ctx) => ref GetKey((long)ctx.record.GetValidPointer()); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void AllocatePage(int pageIndex) => _this.AllocatePage(pageIndex); - public override ref SpanByte GetContextRecordValue(ref AsyncIOContext ctx) => ref GetValue((long)ctx.record.GetValidPointer()); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly bool IsAllocated(int pageIndex) => _this.IsAllocated(pageIndex); - public override IHeapContainer GetKeyContainer(ref SpanByte key) => new SpanByteHeapContainer(ref key, bufferPool); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly unsafe void PopulatePage(byte* src, int required_bytes, long destinationPageIndex) => _this.PopulatePage(src, required_bytes, destinationPageIndex); - public override IHeapContainer GetValueContainer(ref SpanByte value) => new SpanByteHeapContainer(ref value, bufferPool); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPage(long 
logicalAddress, long version) => _this.MarkPage(logicalAddress, version); - public override bool KeyHasObjects() => false; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void MarkPageAtomic(long logicalAddress, long version) => _this.MarkPageAtomic(logicalAddress, version); - public override bool ValueHasObjects() => false; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void ClearPage(long page, int offset = 0) => _this.ClearPage(page, offset); - public override long[] GetSegmentOffsets() => null; + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void FreePage(long pageIndex) => _this.FreePage(pageIndex); - internal override void PopulatePage(byte* src, int required_bytes, long destinationPage) - { - throw new TsavoriteException("SpanByteAllocator memory pages are sector aligned - use direct copy"); - // Buffer.MemoryCopy(src, (void*)pointers[destinationPage % BufferSize], required_bytes, required_bytes); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref SpanByte GetContextRecordKey(ref AsyncIOContext ctx) + => ref SpanByteAllocatorImpl.GetContextRecordKey(ref ctx); - /// - /// Iterator interface for pull-scanning Tsavorite log - /// - public override ITsavoriteScanIterator Scan(TsavoriteKV store, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) - => new SpanByteScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, logger: logger); + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly ref SpanByte GetContextRecordValue(ref AsyncIOContext ctx) => ref _this.GetContextRecordValue(ref ctx); - /// - /// Implementation for push-scanning Tsavorite log, called from LogAccessor - /// - internal override bool Scan(TsavoriteKV store, long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) 
- { - using SpanByteScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, false, epoch, logger: logger); - return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetKeyContainer(ref SpanByte key) => _this.GetKeyContainer(ref key); - /// - /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor - /// - internal override bool ScanCursor(TsavoriteKV store, ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) - { - using SpanByteScanIterator iter = new(store, this, cursor, endAddress, ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); - return ScanLookup(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly IHeapContainer GetValueContainer(ref SpanByte value) => _this.GetValueContainer(ref value); - /// - /// Implementation for push-iterating key versions, called from LogAccessor - /// - internal override bool IterateKeyVersions(TsavoriteKV store, ref SpanByte key, long beginAddress, ref TScanFunctions scanFunctions) - { - using SpanByteScanIterator iter = new(store, store.comparer, this, beginAddress, epoch, logger: logger); - return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long[] GetSegmentOffsets() + => SpanByteAllocatorImpl.GetSegmentOffsets(); - /// - internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) - { - using var iter = new SpanByteScanIterator(store: null, this, beginAddress, endAddress, ScanBufferingMode.NoBuffering, false, epoch, true, logger: logger); - observer?.OnNext(iter); - } + /// + public readonly int 
OverflowPageCount => _this.OverflowPageCount; - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - internal void AsyncReadPagesFromDeviceToFrame( - long readPageStart, - int numPages, - long untilAddress, - DeviceIOCompletionCallback callback, - TContext context, - BlittableFrame frame, - out CountdownEvent completed, - long devicePageOffset = 0, - IDevice device = null, IDevice objectLogDevice = null) - { - var usedDevice = device; - if (device == null) - { - usedDevice = this.device; - } - - completed = new CountdownEvent(numPages); - for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) - { - int pageIndex = (int)(readPage % frame.frameSize); - if (frame.frame[pageIndex] == null) - { - frame.Allocate(pageIndex); - } - else - { - frame.Clear(pageIndex); - } - var asyncResult = new PageAsyncReadResult() - { - page = readPage, - context = context, - handle = completed, - frame = frame - }; - - ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); - - uint readLength = (uint)AlignedPageSizeBytes; - long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); - - if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) - { - readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); - readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); - } - - if (device != null) - offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); - - usedDevice.ReadAsync(offsetInFile, (IntPtr)frame.pointers[pageIndex], readLength, callback, asyncResult); - } - } + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void SerializeKey(ref SpanByte key, long physicalAddress) + => SpanByteAllocatorImpl.SerializeKey(ref key, physicalAddress); } } \ No newline at end of file diff --git 
a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocatorImpl.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocatorImpl.cs new file mode 100644 index 0000000000..75e9b68389 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteAllocatorImpl.cs @@ -0,0 +1,446 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; +using System.Threading; +using static Tsavorite.core.Utility; + +namespace Tsavorite.core +{ + // Allocator for SpanByte, possibly with a Blittable Key or Value. + internal sealed unsafe class SpanByteAllocatorImpl : AllocatorBase> + where TStoreFunctions : IStoreFunctions + { + // Circular buffer definition + private readonly byte[][] values; + private readonly long[] pointers; + private readonly long* nativePointers; + + private readonly OverflowPool overflowPagePool; + + public SpanByteAllocatorImpl(AllocatorSettings settings, TStoreFunctions storeFunctions, Func> wrapperCreator) + : base(settings.LogSettings, storeFunctions, wrapperCreator, settings.evictCallback, settings.epoch, settings.flushCallback, settings.logger) + { + overflowPagePool = new OverflowPool(4, p => { }); + + if (BufferSize > 0) + { + values = new byte[BufferSize][]; + pointers = GC.AllocateArray(BufferSize, true); + nativePointers = (long*)Unsafe.AsPointer(ref pointers[0]); + } + } + + internal int OverflowPageCount => overflowPagePool.Count; + + public override void Reset() + { + base.Reset(); + for (int index = 0; index < BufferSize; index++) + { + if (IsAllocated(index)) + FreePage(index); + } + Initialize(); + } + + void ReturnPage(int index) + { + Debug.Assert(index < BufferSize); + if (values[index] != null) + { + overflowPagePool.TryAdd(new PageUnit + { + pointer = pointers[index], + value = values[index] + }); + values[index] = null; + pointers[index] = 0; + Interlocked.Decrement(ref AllocatedPageCount); + } + } + + public 
override void Initialize() => Initialize(Constants.kFirstValidAddress); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref RecordInfo GetInfo(long physicalAddress) => ref Unsafe.AsRef((void*)physicalAddress); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref RecordInfo GetInfoFromBytePointer(byte* ptr) => ref Unsafe.AsRef(ptr); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ref SpanByte GetKey(long physicalAddress) => ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength()); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ref SpanByte GetValue(long physicalAddress) => ref Unsafe.AsRef((byte*)ValueOffset(physicalAddress)); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public ref SpanByte GetAndInitializeValue(long physicalAddress, long endAddress) + { + var src = (byte*)ValueOffset(physicalAddress); + + // Initialize the SpanByte to the length of the entire value space, less the length of the int size prefix. 
+ *(int*)src = (int)((byte*)endAddress - src) - sizeof(int); + return ref Unsafe.AsRef(src); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static long KeyOffset(long physicalAddress) => physicalAddress + RecordInfo.GetLength(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private long ValueOffset(long physicalAddress) => KeyOffset(physicalAddress) + AlignedKeySize(physicalAddress); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int AlignedKeySize(long physicalAddress) => RoundUp(KeySize(physicalAddress), Constants.kRecordAlignment); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int KeySize(long physicalAddress) => (*(SpanByte*)KeyOffset(physicalAddress)).TotalSize; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int ValueSize(long physicalAddress) => (*(SpanByte*)ValueOffset(physicalAddress)).TotalSize; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int GetValueLength(ref SpanByte value) => value.TotalSize; + + const int FieldInitialLength = sizeof(int); // The .Length field of a SpanByte is the initial length + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public (int actualSize, int allocatedSize) GetRecordSize(long physicalAddress) + { + ref var recordInfo = ref GetInfo(physicalAddress); + if (recordInfo.IsNull()) + return (RecordInfo.GetLength(), RecordInfo.GetLength()); + + var valueLen = ValueSize(physicalAddress); + if (recordInfo.HasFiller) // Get the extraValueLength + valueLen += *(int*)(ValueOffset(physicalAddress) + RoundUp(valueLen, sizeof(int))); + + var size = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + valueLen; + return (size, RoundUp(size, Constants.kRecordAlignment)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public (int actualSize, int allocatedSize, int keySize) GetRMWCopyDestinationRecordSize(ref SpanByte key, ref Input input, ref SpanByte value, ref RecordInfo recordInfo, 
TVariableLengthInput varlenInput) + where TVariableLengthInput : IVariableLengthInput + { + // Used by RMW to determine the length of copy destination (taking Input into account), so does not need to get filler length. + var keySize = key.TotalSize; + var size = RecordInfo.GetLength() + RoundUp(keySize, Constants.kRecordAlignment) + varlenInput.GetRMWModifiedValueLength(ref value, ref input); + return (size, RoundUp(size, Constants.kRecordAlignment), keySize); + } + + public int GetRequiredRecordSize(long physicalAddress, int availableBytes) + { + // We need at least [average record size]... + var reqBytes = GetAverageRecordSize(); + if (availableBytes < reqBytes) + return reqBytes; + + // We need at least [RecordInfo size] + [actual key size]... + reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + FieldInitialLength; + if (availableBytes < reqBytes) + return reqBytes; + + // We need at least [RecordInfo size] + [actual key size] + [actual value size] + var recordInfo = GetInfo(physicalAddress); + var valueLen = ValueSize(physicalAddress); + if (recordInfo.HasFiller) + { + // We have a filler, so the valueLen we have now is the usedValueLength; we need to offset to where the extraValueLength is and read that int + var alignedUsedValueLength = RoundUp(valueLen, sizeof(int)); + reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + alignedUsedValueLength + sizeof(int); + if (availableBytes < reqBytes) + return reqBytes; + valueLen += *(int*)(ValueOffset(physicalAddress) + alignedUsedValueLength); + } + + // Now we know the full record length. 
+ reqBytes = RecordInfo.GetLength() + AlignedKeySize(physicalAddress) + valueLen; + reqBytes = RoundUp(reqBytes, Constants.kRecordAlignment); + return reqBytes; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public int GetAverageRecordSize() => RecordInfo.GetLength() + (RoundUp(FieldInitialLength, Constants.kRecordAlignment) * 2); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public int GetFixedRecordSize() => GetAverageRecordSize(); + + public (int actualSize, int allocatedSize, int keySize) GetRMWInitialRecordSize(ref SpanByte key, ref TInput input, TSessionFunctionsWrapper sessionFunctions) + where TSessionFunctionsWrapper : IVariableLengthInput + { + int keySize = key.TotalSize; + var actualSize = RecordInfo.GetLength() + RoundUp(keySize, Constants.kRecordAlignment) + sessionFunctions.GetRMWInitialValueLength(ref input); + return (actualSize, RoundUp(actualSize, Constants.kRecordAlignment), keySize); + } + + public (int actualSize, int allocatedSize, int keySize) GetRecordSize(ref SpanByte key, ref SpanByte value) + { + int keySize = key.TotalSize; + var actualSize = RecordInfo.GetLength() + RoundUp(keySize, Constants.kRecordAlignment) + value.TotalSize; + return (actualSize, RoundUp(actualSize, Constants.kRecordAlignment), keySize); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static void SerializeKey(ref SpanByte src, long physicalAddress) => src.CopyTo((byte*)KeyOffset(physicalAddress)); + + /// + /// Dispose memory allocator + /// + public override void Dispose() + { + base.Dispose(); + overflowPagePool.Dispose(); + } + + /// + /// Allocate memory page, pinned in memory, and in sector aligned form, if possible + /// + /// + internal void AllocatePage(int index) + { + IncrementAllocatedPageCount(); + + if (overflowPagePool.TryGet(out var item)) + { + pointers[index] = item.pointer; + values[index] = item.value; + return; + } + + var adjustedSize = PageSize + 2 * sectorSize; + + byte[] tmp = 
GC.AllocateArray(adjustedSize, true); + long p = (long)Unsafe.AsPointer(ref tmp[0]); + pointers[index] = (p + (sectorSize - 1)) & ~((long)sectorSize - 1); + values[index] = tmp; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public long GetPhysicalAddress(long logicalAddress) + { + // Offset within page + var offset = (int)(logicalAddress & ((1L << LogPageSizeBits) - 1)); + + // Index of page within the circular buffer + var pageIndex = (int)((logicalAddress >> LogPageSizeBits) & (BufferSize - 1)); + return *(nativePointers + pageIndex) + offset; + } + + internal bool IsAllocated(int pageIndex) => values[pageIndex] != null; + + protected override void WriteAsync(long flushPage, DeviceIOCompletionCallback callback, PageAsyncFlushResult asyncResult) + { + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * flushPage), + (uint)AlignedPageSizeBytes, + callback, + asyncResult, device); + } + + protected override void WriteAsyncToDevice + (long startPage, long flushPage, int pageSize, DeviceIOCompletionCallback callback, + PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice, long[] localSegmentOffsets, long fuzzyStartLogicalAddress) + { + VerifyCompatibleSectorSize(device); + var alignedPageSize = (pageSize + (sectorSize - 1)) & ~(sectorSize - 1); + + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), + (uint)alignedPageSize, callback, asyncResult, + device); + } + + public long GetStartLogicalAddress(long page) => page << LogPageSizeBits; + + public long GetFirstValidLogicalAddress(long page) + { + if (page == 0) + return (page << LogPageSizeBits) + Constants.kFirstValidAddress; + return page << LogPageSizeBits; + } + + internal void ClearPage(long page, int offset) + { + if (offset == 0) + Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); + else + { + // Adjust array offset for cache alignment + 
offset += (int)(pointers[page % BufferSize] - (long)Unsafe.AsPointer(ref values[page % BufferSize][0])); + Array.Clear(values[page % BufferSize], offset, values[page % BufferSize].Length - offset); + } + } + + internal void FreePage(long page) + { + ClearPage(page, 0); + if (EmptyPageCount > 0) + ReturnPage((int)(page % BufferSize)); + } + + /// + /// Delete in-memory portion of the log + /// + internal override void DeleteFromMemory() + { + for (int i = 0; i < values.Length; i++) + values[i] = null; + } + + protected override void ReadAsync( + ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, + DeviceIOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) + { + device.ReadAsync(alignedSourceAddress, (IntPtr)pointers[destinationPageIndex], + aligned_read_length, callback, asyncResult); + } + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. 
+ /// + /// + /// + /// + /// + /// + protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, DeviceIOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default) + { + throw new InvalidOperationException("AsyncReadRecordObjectsToMemory invalid for SpanByteAllocator"); + } + + internal static bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) => true; + + internal static ref SpanByte GetContextRecordKey(ref AsyncIOContext ctx) => ref GetKey((long)ctx.record.GetValidPointer()); + + internal ref SpanByte GetContextRecordValue(ref AsyncIOContext ctx) => ref GetValue((long)ctx.record.GetValidPointer()); + + internal IHeapContainer GetKeyContainer(ref SpanByte key) => new SpanByteHeapContainer(ref key, bufferPool); + + internal IHeapContainer GetValueContainer(ref SpanByte value) => new SpanByteHeapContainer(ref value, bufferPool); + + internal static long[] GetSegmentOffsets() => null; + + internal void PopulatePage(byte* src, int required_bytes, long destinationPage) + { + throw new TsavoriteException("SpanByteAllocator memory pages are sector aligned - use direct copy"); + // Buffer.MemoryCopy(src, (void*)pointers[destinationPage % BufferSize], required_bytes, required_bytes); + } + + /// + /// Iterator interface for pull-scanning Tsavorite log + /// + public override ITsavoriteScanIterator Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords) + => new SpanByteScanIterator(store, this, beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, logger: logger); + + /// + /// Implementation for push-scanning Tsavorite log, called from LogAccessor + /// + internal override bool Scan(TsavoriteKV> store, + long beginAddress, long endAddress, ref TScanFunctions scanFunctions, ScanBufferingMode scanBufferingMode) + { + using SpanByteScanIterator iter = new(store, this, beginAddress, endAddress, scanBufferingMode, 
false, epoch, logger: logger); + return PushScanImpl(beginAddress, endAddress, ref scanFunctions, iter); + } + + /// + /// Implementation for push-scanning Tsavorite log with a cursor, called from LogAccessor + /// + internal override bool ScanCursor(TsavoriteKV> store, + ScanCursorState scanCursorState, ref long cursor, long count, TScanFunctions scanFunctions, long endAddress, bool validateCursor) + { + using SpanByteScanIterator iter = new(store, this, cursor, endAddress, ScanBufferingMode.SinglePageBuffering, false, epoch, logger: logger); + return ScanLookup>(store, scanCursorState, ref cursor, count, scanFunctions, iter, validateCursor); + } + + /// + /// Implementation for push-iterating key versions, called from LogAccessor + /// + internal override bool IterateKeyVersions(TsavoriteKV> store, + ref SpanByte key, long beginAddress, ref TScanFunctions scanFunctions) + { + using SpanByteScanIterator iter = new(store, this, beginAddress, epoch, logger: logger); + return IterateKeyVersionsImpl(store, ref key, beginAddress, ref scanFunctions, iter); + } + + /// + internal override void MemoryPageScan(long beginAddress, long endAddress, IObserver> observer) + { + using var iter = new SpanByteScanIterator(store: null, this, beginAddress, endAddress, ScanBufferingMode.NoBuffering, false, epoch, true, logger: logger); + observer?.OnNext(iter); + } + + /// + /// Read pages from specified device + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + internal void AsyncReadPagesFromDeviceToFrame( + long readPageStart, + int numPages, + long untilAddress, + DeviceIOCompletionCallback callback, + TContext context, + BlittableFrame frame, + out CountdownEvent completed, + long devicePageOffset = 0, + IDevice device = null, IDevice objectLogDevice = null) + { + var usedDevice = device; + if (device == null) + { + usedDevice = this.device; + } + + completed = new CountdownEvent(numPages); + for (long readPage = readPageStart; readPage < (readPageStart + 
numPages); readPage++) + { + int pageIndex = (int)(readPage % frame.frameSize); + if (frame.frame[pageIndex] == null) + { + frame.Allocate(pageIndex); + } + else + { + frame.Clear(pageIndex); + } + var asyncResult = new PageAsyncReadResult() + { + page = readPage, + context = context, + handle = completed, + frame = frame + }; + + ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); + + uint readLength = (uint)AlignedPageSizeBytes; + long adjustedUntilAddress = (AlignedPageSizeBytes * (untilAddress >> LogPageSizeBits) + (untilAddress & PageSizeMask)); + + if (adjustedUntilAddress > 0 && ((adjustedUntilAddress - (long)offsetInFile) < PageSize)) + { + readLength = (uint)(adjustedUntilAddress - (long)offsetInFile); + readLength = (uint)((readLength + (sectorSize - 1)) & ~(sectorSize - 1)); + } + + if (device != null) + offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); + + usedDevice.ReadAsync(offsetInFile, (IntPtr)frame.pointers[pageIndex], readLength, callback, asyncResult); + } + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs index db1b5fac64..5dd6480994 100644 --- a/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Allocator/SpanByteScanIterator.cs @@ -12,11 +12,11 @@ namespace Tsavorite.core /// /// Scan iterator for hybrid log /// - public sealed class SpanByteScanIterator : ScanIteratorBase, ITsavoriteScanIterator, IPushScanIterator + public sealed class SpanByteScanIterator : ScanIteratorBase, ITsavoriteScanIterator, IPushScanIterator + where TStoreFunctions : IStoreFunctions { - private readonly TsavoriteKV store; - private readonly SpanByteAllocator hlog; - private readonly ITsavoriteEqualityComparer comparer; + private readonly TsavoriteKV> store; + private readonly SpanByteAllocatorImpl hlog; private readonly 
BlittableFrame frame; private SectorAlignedMemory memory; @@ -28,15 +28,16 @@ public sealed class SpanByteScanIterator : ScanIteratorBase, ITsavoriteScanItera /// Constructor /// /// - /// + /// The fully derived log implementation /// /// /// + /// /// Epoch to use for protection; may be null if is true. /// Provided address range is known by caller to be in memory, even if less than HeadAddress /// - internal SpanByteScanIterator(TsavoriteKV store, SpanByteAllocator hlog, long beginAddress, long endAddress, - ScanBufferingMode scanBufferingMode, bool includeSealedRecords, LightEpoch epoch, bool forceInMemory = false, ILogger logger = null) + internal SpanByteScanIterator(TsavoriteKV> store, SpanByteAllocatorImpl hlog, + long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode, bool includeSealedRecords, LightEpoch epoch, bool forceInMemory = false, ILogger logger = null) : base(beginAddress == 0 ? hlog.GetFirstValidLogicalAddress(0) : beginAddress, endAddress, scanBufferingMode, includeSealedRecords, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; @@ -49,12 +50,12 @@ internal SpanByteScanIterator(TsavoriteKV store, SpanByteAll /// /// Constructor for use with tail-to-head push iteration of the passed key's record versions /// - internal SpanByteScanIterator(TsavoriteKV store, ITsavoriteEqualityComparer comparer, SpanByteAllocator hlog, long beginAddress, LightEpoch epoch, ILogger logger = null) + internal SpanByteScanIterator(TsavoriteKV> store, SpanByteAllocatorImpl hlog, + long beginAddress, LightEpoch epoch, ILogger logger = null) : base(beginAddress == 0 ? 
hlog.GetFirstValidLogicalAddress(0) : beginAddress, hlog.GetTailAddress(), ScanBufferingMode.SinglePageBuffering, false, epoch, hlog.LogPageSizeBits, logger: logger) { this.store = store; this.hlog = hlog; - this.comparer = comparer; forceInMemory = false; if (frameSize > 0) frame = new BlittableFrame(frameSize, hlog.PageSize, hlog.GetDeviceSectorSize()); @@ -63,7 +64,7 @@ internal SpanByteScanIterator(TsavoriteKV store, ITsavoriteE /// /// Gets reference to current key /// - public ref SpanByte GetKey() => ref hlog.GetKey(currentPhysicalAddress); + public ref SpanByte GetKey() => ref hlog._wrapper.GetKey(currentPhysicalAddress); /// /// Gets reference to current value @@ -84,14 +85,6 @@ public bool SnapCursorToLogicalAddress(ref long cursor) return true; } - ref RecordInfo IPushScanIterator.GetLockableInfo() - { - // hlog.HeadAddress may have been incremented so use ClosedUntilAddress to avoid a false negative assert (not worth raising the temp headAddress out of BeginGetNext just for this). - Debug.Assert(currentPhysicalAddress >= hlog.ClosedUntilAddress, "GetLockableInfo() should be in-memory"); - Debug.Assert(epoch.ThisInstanceProtected(), "GetLockableInfo() should be called with the epoch held"); - return ref hlog.GetInfo(currentPhysicalAddress); - } - private bool InitializeGetNext(out long headAddress, out long currentPage) { currentAddress = nextAddress; @@ -117,7 +110,7 @@ private bool InitializeGetNext(out long headAddress, out long currentPage) currentPage = currentAddress >> hlog.LogPageSizeBits; if (currentAddress < headAddress && !forceInMemory) - BufferAndLoad(currentAddress, currentPage, currentPage % frameSize, headAddress, stopAddress); + _ = BufferAndLoad(currentAddress, currentPage, currentPage % frameSize, headAddress, stopAddress); // Success; keep the epoch held for GetNext (SnapCursorToLogicalAddress will Suspend()). 
return true; @@ -176,7 +169,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) nextAddress = currentAddress + recordSize; - recordInfo = hlog.GetInfo(physicalAddress); + recordInfo = hlog._wrapper.GetInfo(physicalAddress); bool skipOnScan = includeSealedRecords ? recordInfo.Invalid : recordInfo.SkipOnScan; if (skipOnScan || recordInfo.IsNull()) { @@ -193,12 +186,12 @@ public unsafe bool GetNext(out RecordInfo recordInfo) memory = null; if (currentAddress >= headAddress || forceInMemory) { - OperationStackContext stackCtx = default; + OperationStackContext> stackCtx = default; try { - // GetKey() and GetLockableInfo() should work but for safety and consistency with other allocators use physicalAddress. + // GetKey() should work but for safety and consistency with other allocators use physicalAddress. if (currentAddress >= headAddress && store is not null) - store.LockForScan(ref stackCtx, ref hlog.GetKey(physicalAddress), ref hlog.GetInfo(physicalAddress)); + store.LockForScan(ref stackCtx, ref hlog._wrapper.GetKey(physicalAddress)); memory = hlog.bufferPool.Get(recordSize); unsafe @@ -210,7 +203,7 @@ public unsafe bool GetNext(out RecordInfo recordInfo) finally { if (stackCtx.recSrc.HasLock) - store.UnlockForScan(ref stackCtx, ref GetKey(), ref ((IPushScanIterator)this).GetLockableInfo()); + store.UnlockForScan(ref stackCtx); } } @@ -247,10 +240,10 @@ bool IPushScanIterator.BeginGetPrevInMemory(ref SpanByte key, out Reco long physicalAddress = GetPhysicalAddress(currentAddress, headAddress, currentPage, offset); - recordInfo = hlog.GetInfo(physicalAddress); + recordInfo = hlog._wrapper.GetInfo(physicalAddress); nextAddress = recordInfo.PreviousAddress; bool skipOnScan = includeSealedRecords ? 
recordInfo.Invalid : recordInfo.SkipOnScan; - if (skipOnScan || recordInfo.IsNull() || !comparer.Equals(ref hlog.GetKey(physicalAddress), ref key)) + if (skipOnScan || recordInfo.IsNull() || !hlog._storeFunctions.KeysEqual(ref hlog._wrapper.GetKey(physicalAddress), ref key)) { epoch?.Suspend(); continue; @@ -297,7 +290,8 @@ public override void Dispose() frame?.Dispose(); } - internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) + internal override void AsyncReadPagesFromDeviceToFrame(long readPageStart, int numPages, long untilAddress, TContext context, out CountdownEvent completed, + long devicePageOffset = 0, IDevice device = null, IDevice objectLogDevice = null, CancellationTokenSource cts = null) => hlog.AsyncReadPagesFromDeviceToFrame(readPageStart, numPages, untilAddress, AsyncReadPagesCallback, context, frame, out completed, devicePageOffset, device, objectLogDevice); private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object context) diff --git a/libs/storage/Tsavorite/cs/src/core/Async/CompletePendingAsync.cs b/libs/storage/Tsavorite/cs/src/core/Async/CompletePendingAsync.cs index 17faf4f1d5..578dbd691f 100644 --- a/libs/storage/Tsavorite/cs/src/core/Async/CompletePendingAsync.cs +++ b/libs/storage/Tsavorite/cs/src/core/Async/CompletePendingAsync.cs @@ -9,9 +9,9 @@ namespace Tsavorite.core /// /// The Tsavorite key-value store /// - /// Key - /// Value - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Check if at least one (sync) request is ready for CompletePending to operate on @@ -29,7 +29,7 @@ internal static ValueTask ReadyToCompletePendingAsync(Ts /// internal async ValueTask 
CompletePendingAsync(TSessionFunctionsWrapper sessionFunctions, CancellationToken token, CompletedOutputIterator completedOutputs) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { while (true) { diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/BasicContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/BasicContext.cs index 22c68c11c3..6a7dc45000 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/BasicContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/BasicContext.cs @@ -10,18 +10,21 @@ namespace Tsavorite.core /// /// Basic Tsavorite Context implementation. /// - public readonly struct BasicContext : ITsavoriteContext + public readonly struct BasicContext + : ITsavoriteContext where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - readonly ClientSession clientSession; - internal readonly SessionFunctionsWrapper> sessionFunctions; + readonly ClientSession clientSession; + internal readonly SessionFunctionsWrapper, TStoreFunctions, TAllocator> sessionFunctions; /// public bool IsNull => clientSession is null; - private TsavoriteKV store => clientSession.store; + private TsavoriteKV store => clientSession.store; - internal BasicContext(ClientSession clientSession) + internal BasicContext(ClientSession clientSession) { this.clientSession = clientSession; sessionFunctions = new(clientSession); @@ -40,7 +43,7 @@ public void UnsafeSuspendThread() #region ITsavoriteContext /// - public ClientSession Session => clientSession; + public ClientSession Session => clientSession; /// public long GetKeyHash(Key key) => clientSession.store.GetKeyHash(ref key); @@ -203,7 +206,7 @@ public Status Upsert(ref Key key, ref Value desiredValue, Context userContext = { Input input = default; Output output = default; - return Upsert(ref key, store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref 
output, userContext); + return Upsert(ref key, store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// @@ -212,18 +215,18 @@ public Status Upsert(ref Key key, ref Value desiredValue, ref UpsertOptions upse { Input input = default; Output output = default; - return Upsert(ref key, upsertOptions.KeyHash ?? store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, upsertOptions.KeyHash ?? store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, Context userContext = default) - => Upsert(ref key, store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? 
store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -243,12 +246,12 @@ private Status Upsert(ref Key key, long keyHash, ref Input input, ref Value desi /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? 
store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -288,21 +291,21 @@ public Status Upsert(Key key, Input input, Value desiredValue, ref Output output /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, Context userContext = default) - => RMW(ref key, store.comparer.GetHashCode64(ref key), ref input, ref output, out _, userContext); + => RMW(ref key, store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out _, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? store.comparer.GetHashCode64(ref key), ref input, ref output, out _, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out _, userContext); /// public Status RMW(ref Key key, ref Input input, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, out RecordMetadata recordMetadata, Context userContext = default) { - var keyHash = rmwOptions.KeyHash ?? store.comparer.GetHashCode64(ref key); + var keyHash = rmwOptions.KeyHash ?? 
store.storeFunctions.GetKeyHashCode64(ref key); return RMW(ref key, keyHash, ref input, ref output, out recordMetadata, userContext); } @@ -366,12 +369,12 @@ public Status RMW(Key key, Input input, ref RMWOptions rmwOptions, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, Context userContext = default) - => Delete(ref key, store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, ref DeleteOptions deleteOptions, Context userContext = default) - => Delete(ref key, deleteOptions.KeyHash ?? store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, deleteOptions.KeyHash ?? store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -380,7 +383,7 @@ private Status Delete(ref Key key, long keyHash, Context userContext = default) UnsafeResumeThread(); try { - return store.ContextDelete>>(ref key, keyHash, userContext, sessionFunctions); + return store.ContextDelete, TStoreFunctions, TAllocator>>(ref key, keyHash, userContext, sessionFunctions); } finally { @@ -433,7 +436,7 @@ internal Status CompactionCopyToTail(ref Key key, ref Input input, ref Value val UnsafeResumeThread(); try { - return store.CompactionConditionalCopyToTail>>( + return store.CompactionConditionalCopyToTail, TStoreFunctions, TAllocator>>( sessionFunctions, ref key, ref input, ref value, ref output, untilAddress); } finally @@ -456,7 +459,7 @@ internal Status ConditionalScanPush(ScanCursorState scanCursorState, UnsafeResumeThread(); try { - return store.hlog.ConditionalScanPush>>( + return store.hlogBase.ConditionalScanPush, TStoreFunctions, TAllocator>>( sessionFunctions, scanCursorState, recordInfo, ref key, ref value, untilAddress); } finally @@ -477,7 +480,7 @@ internal Status ContainsKeyInMemory(ref Key 
key, out long logicalAddress, long f UnsafeResumeThread(); try { - return store.InternalContainsKeyInMemory>>( + return store.InternalContainsKeyInMemory, TStoreFunctions, TAllocator>>( ref key, sessionFunctions, out logicalAddress, fromAddress); } finally diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/ClientSession.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/ClientSession.cs index 2d8133fb76..129925681b 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/ClientSession.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/ClientSession.cs @@ -13,21 +13,23 @@ namespace Tsavorite.core /// /// Thread-independent session interface to Tsavorite /// - public sealed class ClientSession : IClientSession, IDisposable + public sealed class ClientSession : IClientSession, IDisposable where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - internal readonly TsavoriteKV store; + internal readonly TsavoriteKV store; - internal readonly TsavoriteKV.TsavoriteExecutionContext ctx; + internal readonly TsavoriteKV.TsavoriteExecutionContext ctx; internal readonly Functions functions; internal CompletedOutputIterator completedOutputs; - readonly UnsafeContext uContext; - readonly LockableUnsafeContext luContext; - readonly LockableContext lContext; - readonly BasicContext bContext; + readonly UnsafeContext uContext; + readonly LockableUnsafeContext luContext; + readonly LockableContext lContext; + readonly BasicContext bContext; internal const string NotAsyncSessionErr = "Session does not support async operations"; @@ -43,7 +45,7 @@ public sealed class ClientSession ScanCursorState scanCursorState; internal void AcquireLockable(TSessionFunctions sessionFunctions) - where TSessionFunctions : ISessionFunctionsWrapper + where TSessionFunctions : ISessionFunctionsWrapper { CheckIsNotAcquiredLockable(); @@ -63,7 +65,7 @@ internal void AcquireLockable(TSessionFunctions sessionFuncti if 
(!IsInPreparePhase()) break; InternalReleaseLockable(); - Thread.Yield(); + _ = Thread.Yield(); } } @@ -95,8 +97,8 @@ void CheckIsNotAcquiredLockable() } internal ClientSession( - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, Functions functions, ILoggerFactory loggerFactory = null) { @@ -137,22 +139,22 @@ public void Dispose() /// /// Return a new interface to Tsavorite operations that supports manual epoch control. /// - public UnsafeContext UnsafeContext => uContext; + public UnsafeContext UnsafeContext => uContext; /// /// Return a new interface to Tsavorite operations that supports manual locking and epoch control. /// - public LockableUnsafeContext LockableUnsafeContext => luContext; + public LockableUnsafeContext LockableUnsafeContext => luContext; /// /// Return a session wrapper that supports manual locking. /// - public LockableContext LockableContext => lContext; + public LockableContext LockableContext => lContext; /// /// Return a session wrapper struct that passes through to client session /// - public BasicContext BasicContext => bContext; + public BasicContext BasicContext => bContext; #region ITsavoriteContext @@ -164,7 +166,7 @@ public void Dispose() /// internal void Refresh(TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { UnsafeResumeThread(sessionFunctions); try @@ -179,7 +181,7 @@ internal void Refresh(TSessionFunctionsWrapper session /// internal void ResetModified(TSessionFunctionsWrapper sessionFunctions, ref Key key) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { UnsafeResumeThread(sessionFunctions); try @@ -210,12 +212,12 @@ internal void ResetModified(TSessionFunctionsWrapper s /// internal bool CompletePending(TSessionFunctionsWrapper sessionFunctions, bool wait = 
false, bool spinWaitForCommit = false) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper => CompletePending(sessionFunctions, getOutputs: false, wait, spinWaitForCommit); /// internal bool CompletePendingWithOutputs(TSessionFunctionsWrapper sessionFunctions, out CompletedOutputIterator completedOutputs, bool wait = false, bool spinWaitForCommit = false) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { InitializeCompletedOutputs(); var result = CompletePending(sessionFunctions, getOutputs: true, wait, spinWaitForCommit); @@ -228,7 +230,7 @@ internal bool CompletePendingWithOutputs(TSessionFunct /// Assumes epoch protection is managed by user. Async operations must be completed individually. /// internal bool UnsafeCompletePendingWithOutputs(TSessionFunctionsWrapper sessionFunctions, out CompletedOutputIterator completedOutputs, bool wait = false, bool spinWaitForCommit = false) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { InitializeCompletedOutputs(); var result = UnsafeCompletePending(sessionFunctions, true, wait, spinWaitForCommit); @@ -245,7 +247,7 @@ private void InitializeCompletedOutputs() } internal bool CompletePending(TSessionFunctionsWrapper sessionFunctions, bool getOutputs, bool wait, bool spinWaitForCommit) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { UnsafeResumeThread(sessionFunctions); try @@ -259,7 +261,7 @@ internal bool CompletePending(TSessionFunctionsWrapper } internal bool UnsafeCompletePending(TSessionFunctionsWrapper sessionFunctions, bool getOutputs, bool wait, bool spinWaitForCommit) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var requestedOutputs = getOutputs ? 
completedOutputs : default; var result = store.InternalCompletePending(sessionFunctions, wait, requestedOutputs); @@ -282,13 +284,13 @@ internal bool UnsafeCompletePending(TSessionFunctionsW /// internal ValueTask CompletePendingAsync(TSessionFunctionsWrapper sessionFunctions, bool waitForCommit = false, CancellationToken token = default) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper => CompletePendingAsync(sessionFunctions, getOutputs: false, waitForCommit, token); /// internal async ValueTask> CompletePendingWithOutputsAsync(TSessionFunctionsWrapper sessionFunctions, bool waitForCommit = false, CancellationToken token = default) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { InitializeCompletedOutputs(); await CompletePendingAsync(sessionFunctions, getOutputs: true, waitForCommit, token).ConfigureAwait(false); @@ -296,7 +298,7 @@ internal async ValueTask(TSessionFunctionsWrapper sessionFunctions, bool getOutputs, bool waitForCommit = false, CancellationToken token = default) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { token.ThrowIfCancellationRequested(); @@ -324,7 +326,7 @@ public async ValueTask ReadyToCompletePendingAsync(CancellationToken token = def if (store.epoch.ThisInstanceProtected()) throw new NotSupportedException("Async operations not supported over protected epoch"); - await TsavoriteKV.ReadyToCompletePendingAsync(ctx, token).ConfigureAwait(false); + await TsavoriteKV.ReadyToCompletePendingAsync(ctx, token).ConfigureAwait(false); } #endregion Pending Operations @@ -332,7 +334,7 @@ public async ValueTask ReadyToCompletePendingAsync(CancellationToken token = def #region Other Operations internal void UnsafeResetModified(TSessionFunctionsWrapper sessionFunctions, ref Key key) - where TSessionFunctionsWrapper : 
ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { OperationStatus status; do @@ -342,12 +344,12 @@ internal void UnsafeResetModified(TSessionFunctionsWra /// internal unsafe void ResetModified(TSessionFunctionsWrapper sessionFunctions, Key key) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper => ResetModified(sessionFunctions, ref key); /// internal bool IsModified(TSessionFunctionsWrapper sessionFunctions, ref Key key) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { UnsafeResumeThread(sessionFunctions); try @@ -361,7 +363,7 @@ internal bool IsModified(TSessionFunctionsWrapper sess } internal bool UnsafeIsModified(TSessionFunctionsWrapper sessionFunctions, ref Key key) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { RecordInfo modifiedInfo; OperationStatus status; @@ -373,7 +375,7 @@ internal bool UnsafeIsModified(TSessionFunctionsWrappe /// internal unsafe bool IsModified(TSessionFunctionsWrapper sessionFunctions, Key key) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper => IsModified(sessionFunctions, ref key); /// @@ -382,7 +384,7 @@ internal unsafe bool IsModified(TSessionFunctionsWrapp /// /// private async ValueTask WaitForCommitAsync(TSessionFunctionsWrapper sessionFunctions, CancellationToken token = default) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { token.ThrowIfCancellationRequested(); @@ -492,14 +494,14 @@ public bool Iterate(ref TScanFunctions scanFunctions, long until /// or one of the TScanIterator reader functions returning false public bool ScanCursor(ref long cursor, long count, TScanFunctions scanFunctions, long endAddress = long.MaxValue, bool 
validateCursor = false) where TScanFunctions : IScanIteratorFunctions - => store.hlog.ScanCursor(store, scanCursorState ??= new(), ref cursor, count, scanFunctions, endAddress, validateCursor); + => store.hlogBase.ScanCursor(store, scanCursorState ??= new(), ref cursor, count, scanFunctions, endAddress, validateCursor); /// /// Resume session on current thread. IMPORTANT: Call SuspendThread before any async op. /// [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void UnsafeResumeThread(TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // We do not track any "acquired" state here; if someone mixes calls between safe and unsafe contexts, they will // get the "trying to acquire already-acquired epoch" error. @@ -519,7 +521,7 @@ internal void UnsafeSuspendThread() void IClientSession.AtomicSwitch(long version) { - _ = TsavoriteKV.AtomicSwitch(ctx, ctx.prevCtx, version); + _ = TsavoriteKV.AtomicSwitch(ctx, ctx.prevCtx, version); } /// @@ -537,39 +539,5 @@ internal bool IsInPreparePhase() } #endregion Other Operations - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal bool InPlaceUpdater(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref Input input, ref Value value, ref Output output, ref RecordInfo recordInfo, ref RMWInfo rmwInfo, out OperationStatus status) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper - { - // Note: KeyIndexes do not need notification of in-place updates because the key does not change. 
- if (functions.InPlaceUpdater(ref key, ref input, ref value, ref output, ref rmwInfo, ref recordInfo)) - { - rmwInfo.Action = RMWAction.Default; - // MarkPage is done in InternalRMW - status = OperationStatusUtils.AdvancedOpCode(OperationStatus.SUCCESS, StatusCode.InPlaceUpdatedRecord); - return true; - } - if (rmwInfo.Action == RMWAction.CancelOperation) - { - status = OperationStatus.CANCELED; - return false; - } - if (rmwInfo.Action == RMWAction.ExpireAndResume) - { - // This inserts the tombstone if appropriate - return store.ReinitializeExpiredRecord(ref key, ref input, ref value, ref output, ref recordInfo, - ref rmwInfo, rmwInfo.Address, sessionFunctions, isIpu: true, out status); - } - if (rmwInfo.Action == RMWAction.ExpireAndStop) - { - recordInfo.Tombstone = true; - status = OperationStatusUtils.AdvancedOpCode(OperationStatus.SUCCESS, StatusCode.InPlaceUpdatedRecord | StatusCode.Expired); - return false; - } - - status = OperationStatus.SUCCESS; - return false; - } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/ILockableContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/ILockableContext.cs index 07691f2ed8..373c56ae40 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/ILockableContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/ILockableContext.cs @@ -7,7 +7,9 @@ namespace Tsavorite.core { /// - /// Lockable context functions. Useful when doing generic locking across diverse and specializations. + /// Lockable context functions. Useful when doing generic locking across diverse + /// and + /// specializations. 
/// /// public interface ILockableContext diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/ITsavoriteContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/ITsavoriteContext.cs index a1a859629a..1f23196a3e 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/ITsavoriteContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/ITsavoriteContext.cs @@ -15,22 +15,24 @@ public interface ITsavoriteContext /// Obtain a code by which groups of keys will be sorted for manual locking, to avoid deadlocks. /// The key to obtain a code for /// - /// The hashcode of the key; created and returned by + /// The hashcode of the key; created and returned by long GetKeyHash(Key key); /// /// Obtain a code by which groups of keys will be sorted for manual locking, to avoid deadlocks. /// The key to obtain a code for /// - /// The hashcode of the key; created and returned by + /// The hashcode of the key; created and returned by long GetKeyHash(ref Key key); } /// /// Interface for Tsavorite operations /// - public interface ITsavoriteContext : ITsavoriteContext + public interface ITsavoriteContext : ITsavoriteContext where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Indicates whether this context has been initialized. @@ -38,9 +40,9 @@ public interface ITsavoriteContext - /// Obtain the underlying + /// Obtain the underlying /// - ClientSession Session { get; } + ClientSession Session { get; } /// /// Synchronously complete outstanding pending synchronous operations. diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/IUnsafeContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/IUnsafeContext.cs index c26008b139..8e1ef26f32 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/IUnsafeContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/IUnsafeContext.cs @@ -4,8 +4,9 @@ namespace Tsavorite.core { /// - /// Manual epoch control functions. 
Useful when doing generic operations across diverse - /// and specializations. + /// Manual epoch control functions. Useful when doing generic operations across diverse + /// and + /// specializations. /// public interface IUnsafeContext { diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableContext.cs index 61d2b5fa48..872a55bba3 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableContext.cs @@ -12,18 +12,20 @@ namespace Tsavorite.core /// /// Tsavorite Context implementation that allows manual control of record locking and epoch management. For advanced use only. /// - public readonly struct LockableContext : ITsavoriteContext, ILockableContext + public readonly struct LockableContext : ITsavoriteContext, ILockableContext where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - readonly ClientSession clientSession; - readonly SessionFunctionsWrapper> sessionFunctions; + readonly ClientSession clientSession; + readonly SessionFunctionsWrapper, TStoreFunctions, TAllocator> sessionFunctions; /// public bool IsNull => clientSession is null; const int KeyLockMaxRetryAttempts = 1000; - internal LockableContext(ClientSession clientSession) + internal LockableContext(ClientSession clientSession) { this.clientSession = clientSession; sessionFunctions = new(clientSession); @@ -54,9 +56,9 @@ internal LockableContext(ClientSession(TLockableKey[] keys, int start, int count) where TLockableKey : ILockableKey => clientSession.SortKeyHashes(keys, start, count); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static bool DoManualLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, + internal static bool DoManualLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, TLockableKey[] keys, int 
start, int count) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper where TLockableKey : ILockableKey { // The key codes are sorted, but there may be duplicates; the sorting is such that exclusive locks come first for each key code, @@ -96,9 +98,9 @@ internal static bool DoManualLock(TSessi } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static bool DoManualTryLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, + internal static bool DoManualTryLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, TLockableKey[] keys, int start, int count, TimeSpan timeout, CancellationToken cancellationToken) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper where TLockableKey : ILockableKey { // The key codes are sorted, but there may be duplicates; the sorting is such that exclusive locks come first for each key code, @@ -151,9 +153,9 @@ internal static bool DoManualTryLock(TSe } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal static bool DoManualTryPromoteLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, + internal static bool DoManualTryPromoteLock(TSessionFunctionsWrapper sessionFunctions, ClientSession clientSession, TLockableKey key, TimeSpan timeout, CancellationToken cancellationToken) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper where TLockableKey : ILockableKey { var startTime = DateTime.UtcNow; @@ -182,7 +184,7 @@ internal static bool DoManualTryPromoteLock(ClientSession clientSession, TLockableKey key) + internal static OperationStatus DoManualLock(ClientSession clientSession, TLockableKey key) where TLockableKey : ILockableKey { if (key.LockType == LockType.Shared) @@ -201,7 +203,7 @@ internal static OperationStatus 
DoManualLock(ClientSession(ClientSession clientSession, + internal static void DoManualUnlock(ClientSession clientSession, TLockableKey[] keys, int start, int keyIdx) where TLockableKey : ILockableKey { @@ -363,7 +365,7 @@ public void Unlock(TLockableKey[] keys, int start, int count) #region ITsavoriteContext /// - public ClientSession Session => clientSession; + public ClientSession Session => clientSession; /// public long GetKeyHash(Key key) => clientSession.store.GetKeyHash(ref key); @@ -552,7 +554,7 @@ public Status Upsert(ref Key key, ref Value desiredValue, Context userContext = { Input input = default; Output output = default; - return Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// @@ -561,18 +563,18 @@ public Status Upsert(ref Key key, ref Value desiredValue, ref UpsertOptions upse { Input input = default; Output output = default; - return Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, upsertOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -593,12 +595,12 @@ private Status Upsert(ref Key key, long keyHash, ref Input input, ref Value desi /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, out RecordMetadata recordMetadata, 
Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -644,17 +646,17 @@ public Status RMW(ref Key key, ref Input input, ref Output output, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out _, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out _, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -723,13 +725,13 @@ public Status RMW(Key key, Input input, ref RMWOptions rmwOptions, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, Context userContext = default) - => Delete(ref key, clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, ref DeleteOptions deleteOptions, Context userContext = default) - => Delete(ref key, deleteOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, deleteOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); [MethodImpl(MethodImplOptions.AggressiveInlining)] private Status Delete(ref Key key, long keyHash, Context userContext = default) @@ -738,7 +740,7 @@ private Status Delete(ref Key key, long keyHash, Context userContext = default) clientSession.UnsafeResumeThread(sessionFunctions); try { - return clientSession.store.ContextDelete>>( + return clientSession.store.ContextDelete, TStoreFunctions, TAllocator>>( ref key, keyHash, userContext, sessionFunctions); } finally @@ -774,7 +776,7 @@ public void Refresh() clientSession.UnsafeResumeThread(sessionFunctions); try { - clientSession.store.InternalRefresh>>(sessionFunctions); + clientSession.store.InternalRefresh, TStoreFunctions, TAllocator>>(sessionFunctions); } finally { diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableUnsafeContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableUnsafeContext.cs index 8840616446..5c1d4ccd5c 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableUnsafeContext.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/ClientSession/LockableUnsafeContext.cs @@ -12,16 +12,19 @@ namespace Tsavorite.core /// /// Tsavorite Context implementation that allows manual control of record locking and epoch management. For advanced use only. /// - public readonly struct LockableUnsafeContext : ITsavoriteContext, ILockableContext, IUnsafeContext + public readonly struct LockableUnsafeContext + : ITsavoriteContext, ILockableContext, IUnsafeContext where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - readonly ClientSession clientSession; - readonly SessionFunctionsWrapper> sessionFunctions; + readonly ClientSession clientSession; + readonly SessionFunctionsWrapper, TStoreFunctions, TAllocator> sessionFunctions; /// public bool IsNull => clientSession is null; - internal LockableUnsafeContext(ClientSession clientSession) + internal LockableUnsafeContext(ClientSession clientSession) { this.clientSession = clientSession; sessionFunctions = new(clientSession); @@ -73,7 +76,7 @@ public void Lock(TLockableKey[] keys, int start, int count) Debug.Assert(clientSession.store.epoch.ThisInstanceProtected(), "Epoch protection required for LockableUnsafeContext.Lock()"); while (true) { - if (LockableContext.DoManualLock(sessionFunctions, clientSession, keys, start, count)) + if (LockableContext.DoManualLock(sessionFunctions, clientSession, keys, start, count)) { break; } @@ -120,7 +123,7 @@ public bool TryLock(TLockableKey[] keys, int start, int count, Tim clientSession.CheckIsAcquiredLockable(); Debug.Assert(clientSession.store.epoch.ThisInstanceProtected(), "Epoch protection required for LockableUnsafeContext.Lock()"); - return LockableContext.DoManualTryLock(sessionFunctions, clientSession, keys, start, count, timeout, cancellationToken); + return LockableContext.DoManualTryLock(sessionFunctions, clientSession, keys, start, count, timeout, cancellationToken); } /// @@ -145,7 +148,7 @@ public bool 
TryPromoteLock(TLockableKey key, TimeSpan timeout, Can clientSession.CheckIsAcquiredLockable(); Debug.Assert(clientSession.store.epoch.ThisInstanceProtected(), "Epoch protection required for LockableUnsafeContext.Lock()"); - return LockableContext.DoManualTryPromoteLock(sessionFunctions, clientSession, key, timeout, cancellationToken); + return LockableContext.DoManualTryPromoteLock(sessionFunctions, clientSession, key, timeout, cancellationToken); } /// @@ -158,7 +161,7 @@ public void Unlock(TLockableKey[] keys, int start, int count) clientSession.CheckIsAcquiredLockable(); Debug.Assert(clientSession.store.epoch.ThisInstanceProtected(), "Epoch protection required for LockableUnsafeContext.Unlock()"); - LockableContext.DoManualUnlock(clientSession, keys, start, start + count - 1); + LockableContext.DoManualUnlock(clientSession, keys, start, start + count - 1); } /// @@ -171,7 +174,7 @@ public void Unlock(TLockableKey[] keys, int start, int count) #region ITsavoriteContext /// - public ClientSession Session => clientSession; + public ClientSession Session => clientSession; /// public long GetKeyHash(Key key) => clientSession.store.GetKeyHash(ref key); @@ -315,7 +318,7 @@ public Status Upsert(ref Key key, ref Value desiredValue, Context userContext = { Input input = default; Output output = default; - return Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// @@ -324,18 +327,18 @@ public Status Upsert(ref Key key, ref Value desiredValue, ref UpsertOptions upse { Input input = default; Output output = default; - return Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, upsertOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -348,12 +351,12 @@ private Status Upsert(ref Key key, long keyHash, ref Input input, ref Value desi /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, out RecordMetadata recordMetadata, 
Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -391,17 +394,17 @@ public Status RMW(ref Key key, ref Input input, ref Output output, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out _, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out _, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -462,20 +465,20 @@ public Status RMW(Key key, Input input, ref RMWOptions rmwOptions, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, Context userContext = default) - => Delete(ref key, clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, ref DeleteOptions deleteOptions, Context userContext = default) - => Delete(ref key, deleteOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, deleteOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, long keyHash, Context userContext = default) { Debug.Assert(clientSession.store.epoch.ThisInstanceProtected()); - return clientSession.store.ContextDelete>>( + return clientSession.store.ContextDelete, TStoreFunctions, TAllocator>>( ref key, keyHash, userContext, sessionFunctions); } @@ -503,7 +506,7 @@ internal bool IsModified(Key key) public void Refresh() { Debug.Assert(clientSession.store.epoch.ThisInstanceProtected()); - clientSession.store.InternalRefresh>>(sessionFunctions); + clientSession.store.InternalRefresh, TStoreFunctions, TAllocator>>(sessionFunctions); } #endregion ITsavoriteContext diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/ManageClientSessions.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/ManageClientSessions.cs index a3aa328fff..edc54de170 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/ManageClientSessions.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/ClientSession/ManageClientSessions.cs @@ -7,7 +7,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { internal Dictionary _activeSessions = new(); @@ -18,7 +20,7 @@ public unsafe partial class TsavoriteKV : TsavoriteBase /// Name of session (optional) /// for this session; override those specified at TsavoriteKV level, and may be overridden on individual Read operations /// Session instance - public ClientSession NewSession(Functions functions, string sessionName = null, + public ClientSession NewSession(Functions functions, string sessionName = null, ReadCopyOptions readCopyOptions = default) where Functions : ISessionFunctions { @@ -39,9 +41,9 @@ public ClientSession NewSession(), null); + _ = Interlocked.CompareExchange(ref _activeSessions, new Dictionary(), null); - var session = new ClientSession(this, ctx, functions); + var session = new ClientSession(this, ctx, functions); lock (_activeSessions) _activeSessions.Add(sessionID, new SessionInfo { sessionName = sessionName, session = session, isActive = true }); return session; @@ -65,7 +67,7 @@ internal void DisposeClientSession(int sessionID, Phase sessionPhase) if (RevivificationManager.IsEnabled) session.MergeRevivificationStatsTo(ref RevivificationManager.stats, reset: true); if (sessionPhase == Phase.REST || sessionPhase == Phase.PREPARE_GROW || sessionPhase == Phase.IN_PROGRESS_GROW) - _activeSessions.Remove(sessionID); + _ = _activeSessions.Remove(sessionID); else sessionInfo.isActive = false; } diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/SessionFunctionsWrapper.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/SessionFunctionsWrapper.cs index e524adb248..dd8ea778d5 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/SessionFunctionsWrapper.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/ClientSession/SessionFunctionsWrapper.cs @@ -5,21 +5,24 @@ namespace Tsavorite.core { - internal readonly struct SessionFunctionsWrapper : ISessionFunctionsWrapper + internal readonly struct SessionFunctionsWrapper + : ISessionFunctionsWrapper where Functions : ISessionFunctions - where TSessionLocker : struct, ISessionLocker + where TSessionLocker : struct, ISessionLocker + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - private readonly ClientSession _clientSession; + private readonly ClientSession _clientSession; private readonly TSessionLocker _sessionLocker; // Has no data members - public SessionFunctionsWrapper(ClientSession clientSession) + public SessionFunctionsWrapper(ClientSession clientSession) { _clientSession = clientSession; _sessionLocker = new TSessionLocker(); } - public TsavoriteKV Store => _clientSession.store; - public OverflowBucketLockTable LockTable => _clientSession.store.LockTable; + public TsavoriteKV Store => _clientSession.store; + public OverflowBucketLockTable LockTable => _clientSession.store.LockTable; #region Reads [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -99,11 +102,37 @@ public bool PostCopyUpdater(ref Key key, ref Input input, ref Value oldValue, re public bool InPlaceUpdater(long physicalAddress, ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo, out OperationStatus status, ref RecordInfo recordInfo) { (rmwInfo.UsedValueLength, rmwInfo.FullValueLength, _) = _clientSession.store.GetRecordLengths(physicalAddress, ref value, ref recordInfo); - if (!_clientSession.InPlaceUpdater(this, ref key, ref input, ref value, ref output, ref recordInfo, ref rmwInfo, out status)) - return false; - _clientSession.store.SetExtraValueLength(ref value, ref recordInfo, rmwInfo.UsedValueLength, rmwInfo.FullValueLength); - recordInfo.SetDirtyAndModified(); - return true; + + if (_clientSession.functions.InPlaceUpdater(ref key, ref input, 
ref value, ref output, ref rmwInfo, ref recordInfo)) + { + rmwInfo.Action = RMWAction.Default; + _clientSession.store.SetExtraValueLength(ref value, ref recordInfo, rmwInfo.UsedValueLength, rmwInfo.FullValueLength); + recordInfo.SetDirtyAndModified(); + + // MarkPage is done in InternalRMW + status = OperationStatusUtils.AdvancedOpCode(OperationStatus.SUCCESS, StatusCode.InPlaceUpdatedRecord); + return true; + } + + if (rmwInfo.Action == RMWAction.ExpireAndResume) + { + // This inserts the tombstone if appropriate + return _clientSession.store.ReinitializeExpiredRecord>( + ref key, ref input, ref value, ref output, ref recordInfo, ref rmwInfo, rmwInfo.Address, this, isIpu: true, out status); + } + + if (rmwInfo.Action == RMWAction.CancelOperation) + { + status = OperationStatus.CANCELED; + } + else if (rmwInfo.Action == RMWAction.ExpireAndStop) + { + recordInfo.SetTombstone(); + status = OperationStatusUtils.AdvancedOpCode(OperationStatus.SUCCESS, StatusCode.InPlaceUpdatedRecord | StatusCode.Expired); + } + else + status = OperationStatus.SUCCESS; + return false; } #endregion InPlaceUpdater @@ -136,21 +165,6 @@ public bool ConcurrentDeleter(long physicalAddress, ref Key key, ref Value value } #endregion Deletes - #region Dispose - public void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) - => _clientSession.functions.DisposeSingleWriter(ref key, ref input, ref src, ref dst, ref output, ref upsertInfo, reason); - public void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo) - => _clientSession.functions.DisposeCopyUpdater(ref key, ref input, ref oldValue, ref newValue, ref output, ref rmwInfo); - public void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo) - => _clientSession.functions.DisposeInitialUpdater(ref key, ref input, ref 
value, ref output, ref rmwInfo); - public void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo) - => _clientSession.functions.DisposeSingleDeleter(ref key, ref value, ref deleteInfo); - public void DisposeDeserializedFromDisk(ref Key key, ref Value value, ref RecordInfo recordInfo) - => _clientSession.functions.DisposeDeserializedFromDisk(ref key, ref value); - public void DisposeForRevivification(ref Key key, ref Value value, int newKeySize, ref RecordInfo recordInfo) - => _clientSession.functions.DisposeForRevivification(ref key, ref value, newKeySize); - #endregion Dispose - #region Utilities /// public void ConvertOutputToHeap(ref Input input, ref Output output) => _clientSession.functions.ConvertOutputToHeap(ref input, ref output); @@ -160,42 +174,48 @@ public void DisposeForRevivification(ref Key key, ref Value value, int newKeySiz public bool IsManualLocking => _sessionLocker.IsManualLocking; [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool TryLockTransientExclusive(ref Key key, ref OperationStackContext stackCtx) => + public bool TryLockTransientExclusive(ref Key key, ref OperationStackContext stackCtx) => _sessionLocker.TryLockTransientExclusive(Store, ref stackCtx); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool TryLockTransientShared(ref Key key, ref OperationStackContext stackCtx) + public bool TryLockTransientShared(ref Key key, ref OperationStackContext stackCtx) => _sessionLocker.TryLockTransientShared(Store, ref stackCtx); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void UnlockTransientExclusive(ref Key key, ref OperationStackContext stackCtx) + public void UnlockTransientExclusive(ref Key key, ref OperationStackContext stackCtx) => _sessionLocker.UnlockTransientExclusive(Store, ref stackCtx); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void UnlockTransientShared(ref Key key, ref OperationStackContext stackCtx) + public void UnlockTransientShared(ref Key 
key, ref OperationStackContext stackCtx) => _sessionLocker.UnlockTransientShared(Store, ref stackCtx); #endregion Transient locking #region Internal utilities + [MethodImpl(MethodImplOptions.AggressiveInlining)] public int GetRMWInitialValueLength(ref Input input) => _clientSession.functions.GetRMWInitialValueLength(ref input); + [MethodImpl(MethodImplOptions.AggressiveInlining)] public int GetRMWModifiedValueLength(ref Value t, ref Input input) => _clientSession.functions.GetRMWModifiedValueLength(ref t, ref input); + [MethodImpl(MethodImplOptions.AggressiveInlining)] public IHeapContainer GetHeapContainer(ref Input input) { if (typeof(Input) == typeof(SpanByte)) - return new SpanByteHeapContainer(ref Unsafe.As(ref input), _clientSession.store.hlog.bufferPool) as IHeapContainer; + return new SpanByteHeapContainer(ref Unsafe.As(ref input), _clientSession.store.hlogBase.bufferPool) as IHeapContainer; return new StandardHeapContainer(ref input); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void UnsafeResumeThread() => _clientSession.UnsafeResumeThread(this); + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void UnsafeSuspendThread() => _clientSession.UnsafeSuspendThread(); + [MethodImpl(MethodImplOptions.AggressiveInlining)] public bool CompletePendingWithOutputs(out CompletedOutputIterator completedOutputs, bool wait = false, bool spinWaitForCommit = false) => _clientSession.CompletePendingWithOutputs(this, out completedOutputs, wait, spinWaitForCommit); - public TsavoriteKV.TsavoriteExecutionContext Ctx => _clientSession.ctx; + public TsavoriteKV.TsavoriteExecutionContext Ctx => _clientSession.ctx; #endregion Internal utilities } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/ClientSession/UnsafeContext.cs b/libs/storage/Tsavorite/cs/src/core/ClientSession/UnsafeContext.cs index 3072f1bb3d..6172440b23 100644 --- a/libs/storage/Tsavorite/cs/src/core/ClientSession/UnsafeContext.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/ClientSession/UnsafeContext.cs @@ -11,16 +11,19 @@ namespace Tsavorite.core /// /// Tsavorite Operations implementation that allows manual control of record epoch management. For advanced use only. /// - public readonly struct UnsafeContext : ITsavoriteContext, IUnsafeContext + public readonly struct UnsafeContext + : ITsavoriteContext, IUnsafeContext where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - readonly ClientSession clientSession; - internal readonly SessionFunctionsWrapper> sessionFunctions; + readonly ClientSession clientSession; + internal readonly SessionFunctionsWrapper, TStoreFunctions, TAllocator> sessionFunctions; /// public bool IsNull => clientSession is null; - internal UnsafeContext(ClientSession clientSession) + internal UnsafeContext(ClientSession clientSession) { this.clientSession = clientSession; sessionFunctions = new(clientSession); @@ -41,7 +44,7 @@ internal UnsafeContext(ClientSession - public ClientSession Session => clientSession; + public ClientSession Session => clientSession; /// public long GetKeyHash(Key key) => clientSession.store.GetKeyHash(ref key); @@ -185,7 +188,7 @@ public Status Upsert(ref Key key, ref Value desiredValue, Context userContext = { Input input = default; Output output = default; - return Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out _, userContext); + return Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out _, userContext); } /// @@ -194,18 +197,18 @@ public Status Upsert(ref Key key, ref Value desiredValue, ref UpsertOptions upse { Input input = default; Output output = default; - return Upsert(ref key, upsertOptions.KeyHash ?? 
clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + return Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); } /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -218,12 +221,12 @@ private Status Upsert(ref Key key, long keyHash, ref Input input, ref Value desi /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Upsert(ref Key key, ref Input input, ref Value desiredValue, ref Output output, ref UpsertOptions upsertOptions, out RecordMetadata recordMetadata, Context userContext = default) - => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); + => Upsert(ref key, upsertOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref desiredValue, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -261,17 +264,17 @@ public Status RMW(ref Key key, ref Input input, ref Output output, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out _, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out _, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status RMW(ref Key key, ref Input input, ref Output output, ref RMWOptions rmwOptions, out RecordMetadata recordMetadata, Context userContext = default) - => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); + => RMW(ref key, rmwOptions.KeyHash ?? clientSession.store.storeFunctions.GetKeyHashCode64(ref key), ref input, ref output, out recordMetadata, userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -329,20 +332,20 @@ public Status RMW(Key key, Input input, ref RMWOptions rmwOptions, Context userC /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, Context userContext = default) - => Delete(ref key, clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, ref DeleteOptions deleteOptions, Context userContext = default) - => Delete(ref key, deleteOptions.KeyHash ?? clientSession.store.comparer.GetHashCode64(ref key), userContext); + => Delete(ref key, deleteOptions.KeyHash ?? 
clientSession.store.storeFunctions.GetKeyHashCode64(ref key), userContext); /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public Status Delete(ref Key key, long keyHash, Context userContext = default) { Debug.Assert(clientSession.store.epoch.ThisInstanceProtected()); - return clientSession.store.ContextDelete>>( + return clientSession.store.ContextDelete, TStoreFunctions, TAllocator>>( ref key, keyHash, userContext, sessionFunctions); } @@ -370,7 +373,7 @@ internal bool IsModified(Key key) public void Refresh() { Debug.Assert(clientSession.store.epoch.ThisInstanceProtected()); - clientSession.store.InternalRefresh>>(sessionFunctions); + clientSession.store.InternalRefresh, TStoreFunctions, TAllocator>>(sessionFunctions); } #endregion ITsavoriteContext } diff --git a/libs/storage/Tsavorite/cs/src/core/Compaction/ICompactionFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Compaction/ICompactionFunctions.cs index 45acc69f5e..55cf7f96b0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Compaction/ICompactionFunctions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Compaction/ICompactionFunctions.cs @@ -12,7 +12,7 @@ public interface ICompactionFunctions { /// /// Checks if record in the Tsavorite log is logically deleted. - /// If the record was deleted via + /// If the record was deleted via /// then this function is not called for such a record. 
/// /// diff --git a/libs/storage/Tsavorite/cs/src/core/Compaction/LogCompactionFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Compaction/LogCompactionFunctions.cs index 4c7a88ce71..da23f37457 100644 --- a/libs/storage/Tsavorite/cs/src/core/Compaction/LogCompactionFunctions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Compaction/LogCompactionFunctions.cs @@ -66,13 +66,6 @@ public bool SingleWriter(ref Key key, ref Input input, ref Value src, ref Value public void PostSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - public void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - public void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo) { } - public void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo) { } - public void DisposeDeserializedFromDisk(ref Key key, ref Value value) { } - public void DisposeForRevivification(ref Key key, ref Value value, int newKeySize) { } - public void ConvertOutputToHeap(ref Input input, ref Output output) { } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Compaction/TsavoriteCompaction.cs b/libs/storage/Tsavorite/cs/src/core/Compaction/TsavoriteCompaction.cs index 378aabb33a..463a7e8c2a 100644 --- a/libs/storage/Tsavorite/cs/src/core/Compaction/TsavoriteCompaction.cs +++ b/libs/storage/Tsavorite/cs/src/core/Compaction/TsavoriteCompaction.cs @@ -6,7 +6,9 @@ namespace Tsavorite.core /// /// Compaction methods /// - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : 
IAllocator { /// /// Compact the log until specified address, moving active records to the tail of the log. BeginAddress is shifted, but the physical log @@ -35,7 +37,7 @@ private long CompactLookup where CompactionFunctions : ICompactionFunctions { - if (untilAddress > hlog.SafeReadOnlyAddress) + if (untilAddress > hlogBase.SafeReadOnlyAddress) throw new TsavoriteException("Can compact only until Log.SafeReadOnlyAddress"); var lf = new LogCompactionFunctions(functions); @@ -74,7 +76,7 @@ private long CompactScan where Functions : ISessionFunctions where CompactionFunctions : ICompactionFunctions { - if (untilAddress > hlog.SafeReadOnlyAddress) + if (untilAddress > hlogBase.SafeReadOnlyAddress) throw new TsavoriteException("Can compact only until Log.SafeReadOnlyAddress"); var originalUntilAddress = untilAddress; @@ -83,11 +85,18 @@ private long CompactScan using var storeSession = NewSession>(lf); var storebContext = storeSession.BasicContext; - using (var tempKv = new TsavoriteKV(IndexSize, new LogSettings { LogDevice = new NullDevice(), ObjectLogDevice = new NullDevice() }, comparer: Comparer, loggerFactory: loggerFactory)) + var tempKVSettings = new KVSettings(baseDir: null, loggerFactory: loggerFactory) + { + IndexSize = KVSettings.SetIndexSizeFromCacheLines(IndexSize), + LogDevice = new NullDevice(), + ObjectLogDevice = new NullDevice() + }; + + using (var tempKv = new TsavoriteKV(tempKVSettings, storeFunctions, allocatorFactory)) using (var tempKvSession = tempKv.NewSession(functions)) { var tempbContext = tempKvSession.BasicContext; - using (var iter1 = Log.Scan(hlog.BeginAddress, untilAddress)) + using (var iter1 = Log.Scan(hlogBase.BeginAddress, untilAddress)) { while (iter1.GetNext(out var recordInfo)) { @@ -104,7 +113,7 @@ private long CompactScan } // Scan until SafeReadOnlyAddress - var scanUntil = hlog.SafeReadOnlyAddress; + var scanUntil = hlogBase.SafeReadOnlyAddress; if (untilAddress < scanUntil) ScanImmutableTailToRemoveFromTempKv(ref 
untilAddress, scanUntil, tempbContext); @@ -116,7 +125,7 @@ private long CompactScan continue; // Try to ensure we have checked all immutable records - scanUntil = hlog.SafeReadOnlyAddress; + scanUntil = hlogBase.SafeReadOnlyAddress; if (untilAddress < scanUntil) ScanImmutableTailToRemoveFromTempKv(ref untilAddress, scanUntil, tempbContext); @@ -140,7 +149,8 @@ private long CompactScan return originalUntilAddress; } - private void ScanImmutableTailToRemoveFromTempKv(ref long untilAddress, long scanUntil, BasicContext tempbContext) + private void ScanImmutableTailToRemoveFromTempKv(ref long untilAddress, long scanUntil, + BasicContext tempbContext) where Functions : ISessionFunctions { using var iter = Log.Scan(untilAddress, scanUntil); diff --git a/libs/storage/Tsavorite/cs/src/core/Epochs/LightEpoch.cs b/libs/storage/Tsavorite/cs/src/core/Epochs/LightEpoch.cs index 78e2602848..aaff50ab7d 100644 --- a/libs/storage/Tsavorite/cs/src/core/Epochs/LightEpoch.cs +++ b/libs/storage/Tsavorite/cs/src/core/Epochs/LightEpoch.cs @@ -397,7 +397,8 @@ void Drain(long nextEpoch) // Execute the action trigger_action(); - if (drainCount == 0) break; + if (drainCount == 0) + break; } } } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/CheckpointSettings.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/CheckpointSettings.cs index 9a026c9d71..c50261a3d2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/CheckpointSettings.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/CheckpointSettings.cs @@ -23,7 +23,7 @@ public enum CheckpointType /// /// Checkpoint-related settings /// - public class CheckpointSettings + internal class CheckpointSettings { /// /// Checkpoint manager diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/CompletedOutput.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/CompletedOutput.cs index f301a5e84a..9bdbb4e956 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/CompletedOutput.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/Index/Common/CompletedOutput.cs @@ -8,11 +8,6 @@ namespace Tsavorite.core /// /// A list of for completed outputs from a pending operation. /// - /// The Key type of the - /// The Value type of the - /// The session input type - /// The session output type - /// The session context type /// The session holds this list and returns an enumeration to the caller of an appropriate CompletePending overload. The session will handle /// disposing and clearing this list, but it is best if the caller calls Dispose() after processing the results, so the key, input, and heap containers /// are released as soon as possible. @@ -24,7 +19,9 @@ public sealed class CompletedOutputIterator.PendingContext pendingContext, Status status) + internal void TransferFrom(ref TsavoriteKV.PendingContext pendingContext, Status status) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Note: vector is never null if (maxIndex >= vector.Length - 1) @@ -69,11 +66,6 @@ public void Dispose() /// /// Structure to hold a key and its output for a pending operation. /// - /// The Key type of the - /// The Value type of the - /// The session input type - /// The session output type - /// The session context type /// The session holds a list of these that it returns to the caller of an appropriate CompletePending overload. The session will handle disposing /// and clearing, and will manage Dispose(), but it is best if the caller calls Dispose() after processing the results, so the key, input, and heap containers /// are released as soon as possible. 
@@ -112,7 +104,9 @@ public struct CompletedOutput /// public Status Status; - internal void TransferFrom(ref TsavoriteKV.PendingContext pendingContext, Status status) + internal void TransferFrom(ref TsavoriteKV.PendingContext pendingContext, Status status) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Transfers the containers from the pendingContext, then null them; this is called before pendingContext.Dispose(). keyContainer = pendingContext.key; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/ExecutionContext.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/ExecutionContext.cs index 1dce49e4e8..3107bb67f6 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/ExecutionContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/ExecutionContext.cs @@ -8,7 +8,9 @@ namespace Tsavorite.core { - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { internal sealed class TsavoriteExecutionContext { @@ -27,20 +29,28 @@ internal sealed class TsavoriteExecutionContext public AsyncCountDown pendingReads; public AsyncQueue> readyResponses; public int asyncPendingCount; - public ISynchronizationStateMachine threadStateMachine; + public ISynchronizationStateMachine threadStateMachine; internal RevivificationStats RevivificationStats = new(); public int SyncIoPendingCount => ioPendingRequests.Count - asyncPendingCount; - public bool IsInV1 => phase switch + public bool IsInV1 { - Phase.IN_PROGRESS => true, - Phase.WAIT_INDEX_CHECKPOINT => true, - Phase.WAIT_FLUSH => true, - _ => false, - }; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get + { + return phase switch + { + Phase.IN_PROGRESS => true, + Phase.WAIT_INDEX_CHECKPOINT => true, + Phase.WAIT_FLUSH => true, + _ => false, + }; + } + } + [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void 
MergeReadCopyOptions(ReadCopyOptions storeCopyOptions, ReadCopyOptions copyOptions) => ReadCopyOptions = ReadCopyOptions.Merge(storeCopyOptions, copyOptions); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/TsavoriteKVSettings.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/KVSettings.cs similarity index 89% rename from libs/storage/Tsavorite/cs/src/core/Index/Common/TsavoriteKVSettings.cs rename to libs/storage/Tsavorite/cs/src/core/Index/Common/KVSettings.cs index 6a037372c3..2a4cb71fae 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/TsavoriteKVSettings.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/KVSettings.cs @@ -10,7 +10,7 @@ namespace Tsavorite.core /// /// Configuration settings for hybrid log. Use Utility.ParseSize to specify sizes in familiar string notation (e.g., "4k" and "4 MB"). /// - public sealed class TsavoriteKVSettings : IDisposable + public sealed class KVSettings : IDisposable { readonly bool disposeDevices = false; readonly bool deleteDirOnDispose = false; @@ -46,6 +46,11 @@ public sealed class TsavoriteKVSettings : IDisposable /// public long MemorySize = 1L << 34; + /// + /// Controls how many pages should be empty to account for non-power-of-two-sized log + /// + public int MinEmptyPageCount = 0; + /// /// Fraction of log marked as mutable (in-place updates). Rounds down to power of 2. 
/// @@ -61,21 +66,6 @@ public sealed class TsavoriteKVSettings : IDisposable /// public bool PreallocateLog = false; - /// - /// Key serializer - /// - public Func> KeySerializer; - - /// - /// Value serializer - /// - public Func> ValueSerializer; - - /// - /// Equality comparer for key - /// - public ITsavoriteEqualityComparer EqualityComparer; - /// /// Whether read cache is enabled /// @@ -112,7 +102,7 @@ public sealed class TsavoriteKVSettings : IDisposable /// /// Whether Tsavorite should remove outdated checkpoints automatically /// - public bool RemoveOutdatedCheckpoints = true; + public bool RemoveOutdatedCheckpoints = false; /// /// Try to recover from latest checkpoint, if available @@ -140,8 +130,9 @@ public sealed class TsavoriteKVSettings : IDisposable /// Use Utility.ParseSize to specify sizes in familiar string notation (e.g., "4k" and "4 MB"). /// Default index size is 64MB. /// - public TsavoriteKVSettings() { } + public KVSettings() { } + internal readonly ILoggerFactory loggerFactory; internal readonly ILogger logger; /// @@ -152,8 +143,9 @@ public TsavoriteKVSettings() { } /// Base directory (without trailing path separator) /// Whether to delete base directory on dispose. This option prevents later recovery. 
/// - public TsavoriteKVSettings(string baseDir, bool deleteDirOnDispose = false, ILogger logger = null) + public KVSettings(string baseDir, bool deleteDirOnDispose = false, ILoggerFactory loggerFactory = null, ILogger logger = null) { + this.loggerFactory = loggerFactory; this.logger = logger; disposeDevices = true; this.deleteDirOnDispose = deleteDirOnDispose; @@ -197,13 +189,16 @@ public override string ToString() internal long GetIndexSizeCacheLines() { long adjustedSize = Utility.PreviousPowerOf2(IndexSize); - if (adjustedSize < 512) - throw new TsavoriteException($"{nameof(IndexSize)} should be at least of size 8 cache line (512 bytes)"); + if (adjustedSize < 64) + throw new TsavoriteException($"{nameof(IndexSize)} should be at least of size 1 cache line (64 bytes)"); if (IndexSize != adjustedSize) // Don't use string interpolation when logging messages because it makes it impossible to group by the message template. logger?.LogInformation("Warning: using lower value {0} instead of specified {1} for {2}", adjustedSize, IndexSize, nameof(IndexSize)); return adjustedSize / 64; } + internal static long SetIndexSizeFromCacheLines(long cacheLines) + => cacheLines * 64; + internal LogSettings GetLogSettings() { return new LogSettings @@ -215,6 +210,7 @@ internal LogSettings GetLogSettings() PageSizeBits = Utility.NumBitsPreviousPowerOf2(PageSize), SegmentSizeBits = Utility.NumBitsPreviousPowerOf2(SegmentSize), MutableFraction = MutableFraction, + MinEmptyPageCount = MinEmptyPageCount, PreallocateLog = PreallocateLog, ReadCacheSettings = GetReadCacheSettings() }; @@ -232,18 +228,6 @@ private ReadCacheSettings GetReadCacheSettings() : null; } - internal SerializerSettings GetSerializerSettings() - { - if (KeySerializer == null && ValueSerializer == null) - return null; - - return new SerializerSettings - { - keySerializer = KeySerializer, - valueSerializer = ValueSerializer - }; - } - internal CheckpointSettings GetCheckpointSettings() { return new CheckpointSettings 
diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSettings.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSettings.cs index 761ea214ca..2e5114e420 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSettings.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSettings.cs @@ -1,32 +1,14 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -using System; +#pragma warning disable IDE1006 // Naming Styles namespace Tsavorite.core { - /// - /// Configuration settings for serializing objects - /// - /// - /// - public class SerializerSettings - { - /// - /// Key serializer - /// - public Func> keySerializer; - - /// - /// Value serializer - /// - public Func> valueSerializer; - } - /// /// Configuration settings for hybrid log /// - public class LogSettings + internal class LogSettings { /// Minimum number of bits for a page size public const int kMinPageSizeBits = 6; @@ -94,27 +76,4 @@ public class LogSettings /// public bool PreallocateLog = false; } - - /// - /// Configuration settings for hybrid log - /// - public class ReadCacheSettings - { - /// - /// Size of a segment (group of pages), in bits - /// - public int PageSizeBits = 25; - - /// - /// Total size of in-memory part of log, in bits - /// - public int MemorySizeBits = 34; - - /// - /// Fraction of log head (in memory) used for second chance - /// copy to tail. 
This is (1 - MutableFraction) for the - /// underlying log - /// - public double SecondChanceFraction = 0.1; - } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs index fc1e452258..175d3ecea1 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/LogSizeTracker.cs @@ -27,13 +27,15 @@ public enum LogOperationType Deserialize } - public class LogOperationObserver : IObserver> + public class LogOperationObserver : IObserver> + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator where TLogSizeCalculator : ILogSizeCalculator { - private readonly LogSizeTracker logSizeTracker; + private readonly LogSizeTracker logSizeTracker; private readonly LogOperationType logOperationType; - public LogOperationObserver(LogSizeTracker logSizeTracker, LogOperationType logOperationType) + public LogOperationObserver(LogSizeTracker logSizeTracker, LogOperationType logOperationType) { this.logSizeTracker = logSizeTracker; this.logOperationType = logOperationType; @@ -67,9 +69,13 @@ public void OnNext(ITsavoriteScanIterator records) /// Tracks and controls size of log /// Type of key /// Type of value + /// + /// /// Type of the log size calculator - public class LogSizeTracker : IObserver> + public class LogSizeTracker : IObserver> where TLogSizeCalculator : ILogSizeCalculator + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private ConcurrentCounter logSize; private long lowTargetSize; @@ -78,7 +84,7 @@ public class LogSizeTracker : IObserver logAccessor; + internal LogAccessor logAccessor; /// Indicates whether resizer task has been stopped public volatile bool Stopped; @@ -99,7 +105,7 @@ public class LogSizeTracker : IObserverTarget size for the hybrid log memory utilization /// Delta from target size to maintain memory utilization /// - 
public LogSizeTracker(LogAccessor logAccessor, TLogSizeCalculator logSizeCalculator, long targetSize, long delta, ILogger logger) + public LogSizeTracker(LogAccessor logAccessor, TLogSizeCalculator logSizeCalculator, long targetSize, long delta, ILogger logger) { Debug.Assert(logAccessor != null); Debug.Assert(logSizeCalculator != null); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationOptions.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationOptions.cs index f7829a5d5d..7482f377d2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationOptions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationOptions.cs @@ -8,7 +8,7 @@ namespace Tsavorite.core /// /// Identifies which log regions records will be copied from to . This specification is /// evaluated in hierarchical order, from that on the TsavoriteKV ctor, which may be overridden by those in - /// .NewSession(), which may be overridden + /// .NewSession(), which may be overridden /// by those at the individual Read() level. /// public enum ReadCopyFrom : byte diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationStatus.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationStatus.cs index 60dca8793b..5a15ea78d6 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationStatus.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/OperationStatus.cs @@ -58,8 +58,8 @@ internal enum OperationStatus RETRY_LATER, /// - /// I/O has been enqueued and the caller must go through or - /// , + /// I/O has been enqueued and the caller must go through or + /// , /// or one of the Async forms. /// RECORD_ON_DISK, @@ -72,7 +72,7 @@ internal enum OperationStatus /// /// Allocation failed, due to a need to flush pages. Clients do not see this status directly; they see . /// - /// For Sync operations we retry this as part of . + /// For Sync operations we retry this as part of . 
/// For Async operations we retry this as part of the ".Complete(...)" or ".CompleteAsync(...)" operation on the appropriate "*AsyncResult{}" object. /// /// diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/PendingContext.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/PendingContext.cs index 20a6f800d2..488ab90849 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/PendingContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/PendingContext.cs @@ -5,7 +5,9 @@ namespace Tsavorite.core { - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { internal struct PendingContext { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/ReadCacheSettings.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/ReadCacheSettings.cs new file mode 100644 index 0000000000..9d16c2b248 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/ReadCacheSettings.cs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +#pragma warning disable IDE1006 // Naming Styles + +namespace Tsavorite.core +{ + /// + /// Configuration settings for hybrid log + /// + internal class ReadCacheSettings + { + /// + /// Size of a segment (group of pages), in bits + /// + public int PageSizeBits = 25; + + /// + /// Total size of in-memory part of log, in bits + /// + public int MemorySizeBits = 34; + + /// + /// Fraction of log head (in memory) used for second chance + /// copy to tail. 
This is (1 - MutableFraction) for the + /// underlying log + /// + public double SecondChanceFraction = 0.1; + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Common/RecordInfo.cs b/libs/storage/Tsavorite/cs/src/core/Index/Common/RecordInfo.cs index 6e6d2c82d4..b04e793e8a 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Common/RecordInfo.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Common/RecordInfo.cs @@ -3,6 +3,7 @@ #pragma warning disable CS1591 // Missing XML comment for publicly visible type or member +using System.Diagnostics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Threading; @@ -54,7 +55,8 @@ public struct RecordInfo // an in-memory address (or even know if the key will be found in-memory). internal static RecordInfo InitialValid = new() { Valid = true, PreviousAddress = Constants.kTempInvalidAddress }; - public void WriteInfo(bool inNewVersion, bool tombstone, long previousAddress) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void WriteInfo(bool inNewVersion, long previousAddress) { // For Recovery reasons, we need to have the record both Sealed and Invalid: // - Recovery removes the Sealed bit, so we need Invalid to survive from this point on to successful CAS. @@ -62,9 +64,9 @@ public void WriteInfo(bool inNewVersion, bool tombstone, long previousAddress) // - Revivification sets Sealed; we need to preserve it here. // We'll clear both on successful CAS. 
InitializeToSealedAndInvalid(); - Tombstone = tombstone; PreviousAddress = previousAddress; - IsInNewVersion = inNewVersion; + if (inNewVersion) + SetIsInNewVersion(); } // We ignore temp bits from disk images @@ -89,8 +91,17 @@ internal bool IsClosedOrTombstoned(ref OperationStatus internalStatus) return false; } - public readonly bool IsClosed => IsClosedWord(word); - public readonly bool IsSealed => (word & kSealedBitMask) != 0; + public readonly bool IsClosed + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get { return IsClosedWord(word); } + } + + public readonly bool IsSealed + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get { return (word & kSealedBitMask) != 0; } + } /// /// Seal this record (currently only called to prepare it for inline revivification). @@ -144,21 +155,25 @@ public bool TryUpdateAddress(long expectedPrevAddress, long newPrevAddress) public readonly bool IsNull() => word == 0; - public bool Tombstone + public readonly bool Tombstone { - readonly get => (word & kTombstoneBitMask) > 0; - set - { - if (value) word |= kTombstoneBitMask; - else word &= ~kTombstoneBitMask; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => (word & kTombstoneBitMask) > 0; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void SetTombstone() => word |= kTombstoneBitMask; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void ClearTombstone() => word &= ~kTombstoneBitMask; + public bool Valid { readonly get => (word & kValidBitMask) > 0; + set { + // This is only called for initialization of static .InitialValid if (value) word |= kValidBitMask; else word &= ~kValidBitMask; } @@ -174,14 +189,9 @@ public void ClearDirtyAtomic() } } - public bool Dirty + public readonly bool Dirty { - readonly get => (word & kDirtyBitMask) > 0; - set - { - if (value) word |= kDirtyBitMask; - else word &= ~kDirtyBitMask; - } + get => (word & kDirtyBitMask) > 0; } public bool Modified @@ -194,33 +204,32 @@ public bool 
Modified } } - public bool Filler + public readonly bool HasFiller { - readonly get => (word & kFillerBitMask) > 0; - set - { - if (value) word |= kFillerBitMask; - else word &= ~kFillerBitMask; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => (word & kFillerBitMask) > 0; } - public bool IsInNewVersion + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void SetHasFiller() => word |= kFillerBitMask; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void ClearHasFiller() => word &= ~kFillerBitMask; + + public readonly bool IsInNewVersion { - readonly get => (word & kInNewVersionBitMask) > 0; - set - { - if (value) word |= kInNewVersionBitMask; - else word &= ~kInNewVersionBitMask; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => (word & kInNewVersionBitMask) > 0; } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void SetIsInNewVersion() => word &= ~kInNewVersionBitMask; + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void SetDirtyAndModified() => word |= kDirtyBitMask | kModifiedBitMask; [MethodImpl(MethodImplOptions.AggressiveInlining)] public void SetDirty() => word |= kDirtyBitMask; [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void SetTombstone() => word |= kTombstoneBitMask; - [MethodImpl(MethodImplOptions.AggressiveInlining)] public void SetInvalid() => word &= ~kValidBitMask; [MethodImpl(MethodImplOptions.AggressiveInlining)] public void InitializeToSealedAndInvalid() => word = kSealedBitMask; // Does not include kValidBitMask @@ -242,18 +251,21 @@ public void SetInvalidAtomic() } } - public readonly bool Invalid => (word & kValidBitMask) == 0; + public readonly bool Invalid + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get { return (word & kValidBitMask) == 0; } + } public readonly bool SkipOnScan => IsClosedWord(word); public long PreviousAddress { - readonly get => word & kPreviousAddressMaskInWord; - set - { - word &= ~kPreviousAddressMaskInWord; 
- word |= value & kPreviousAddressMaskInWord; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + readonly get { return word & kPreviousAddressMaskInWord; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + set { word = (word & ~kPreviousAddressMaskInWord) | (value & kPreviousAddressMaskInWord); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -276,7 +288,7 @@ public override readonly string ToString() var paRC = IsReadCache(PreviousAddress) ? "(rc)" : string.Empty; static string bstr(bool value) => value ? "T" : "F"; return $"prev {AbsoluteAddress(PreviousAddress)}{paRC}, valid {bstr(Valid)}, tomb {bstr(Tombstone)}, seal {bstr(IsSealed)}," - + $" mod {bstr(Modified)}, dirty {bstr(Dirty)}, fill {bstr(Filler)}, Un1 {bstr(Unused1)}, Un2 {bstr(Unused2)}"; + + $" mod {bstr(Modified)}, dirty {bstr(Dirty)}, fill {bstr(HasFiller)}, Un1 {bstr(Unused1)}, Un2 {bstr(Unused2)}"; } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/CallbackInfos.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/CallbackInfos.cs index d2dc651bab..bb5c7d7d07 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/CallbackInfos.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/CallbackInfos.cs @@ -100,14 +100,14 @@ public readonly unsafe void ClearExtraValueLength(ref RecordInfo recordI [MethodImpl(MethodImplOptions.AggressiveInlining)] internal static unsafe void StaticClearExtraValueLength(ref RecordInfo recordInfo, ref TValue recordValue, int usedValueLength) { - if (!recordInfo.Filler) + if (!recordInfo.HasFiller) return; var valueAddress = (long)Unsafe.AsPointer(ref recordValue); int* extraLengthPtr = (int*)(valueAddress + RoundUp(usedValueLength, sizeof(int))); *extraLengthPtr = 0; - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); } /// @@ -129,7 +129,7 @@ internal static unsafe void StaticSetUsedValueLength(ref RecordInfo reco { // Note: This is only called for variable-length types, 
and for those we have ensured the location of recordValue is pinned. long valueAddress = (long)Unsafe.AsPointer(ref recordValue); - Debug.Assert(!recordInfo.Filler, "Filler should have been cleared by ClearExtraValueLength()"); + Debug.Assert(!recordInfo.HasFiller, "Filler should have been cleared by ClearExtraValueLength()"); usedValueLength = RoundUp(usedValueLength, sizeof(int)); int extraValueLength = fullValueLength - usedValueLength; @@ -138,7 +138,7 @@ internal static unsafe void StaticSetUsedValueLength(ref RecordInfo reco int* extraValueLengthPtr = (int*)(valueAddress + usedValueLength); Debug.Assert(*extraValueLengthPtr == 0 || *extraValueLengthPtr == extraValueLength, "existing ExtraValueLength should be 0 or the same value"); *extraValueLengthPtr = extraValueLength; - recordInfo.Filler = true; + recordInfo.SetHasFiller(); } } } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ITsavoriteEqualityComparer.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IKeyComparer.cs similarity index 94% rename from libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ITsavoriteEqualityComparer.cs rename to libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IKeyComparer.cs index eb9f788c24..1d5299b0b2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ITsavoriteEqualityComparer.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IKeyComparer.cs @@ -10,7 +10,7 @@ namespace Tsavorite.core /// /// The type of keys to compare. 
/// This comparer differs from the built-in in that it implements a 64-bit hash code - public interface ITsavoriteEqualityComparer + public interface IKeyComparer { /// /// Get 64-bit hash code diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IObjectSerializer.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IObjectSerializer.cs index b9e6bdad88..4277038664 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IObjectSerializer.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/IObjectSerializer.cs @@ -53,60 +53,25 @@ public interface IObjectSerializer /// public abstract class BinaryObjectSerializer : IObjectSerializer { - /// - /// Binary reader - /// protected BinaryReader reader; - - /// - /// Binary writer - /// protected BinaryWriter writer; - /// - /// Begin deserialization - /// - /// - public void BeginDeserialize(Stream stream) - { - reader = new BinaryReader(stream, new UTF8Encoding(), true); - } + /// Begin deserialization + public void BeginDeserialize(Stream stream) => reader = new BinaryReader(stream, new UTF8Encoding(), true); - /// - /// Deserialize - /// - /// + /// Deserialize public abstract void Deserialize(out T obj); - /// - /// End deserialize - /// - public void EndDeserialize() - { - reader.Dispose(); - } + /// End deserialize + public void EndDeserialize() => reader.Dispose(); - /// - /// Begin serialize - /// - /// - public void BeginSerialize(Stream stream) - { - writer = new BinaryWriter(stream, new UTF8Encoding(), true); - } + /// Begin serialize + public void BeginSerialize(Stream stream) => writer = new BinaryWriter(stream, new UTF8Encoding(), true); - /// - /// Serialize - /// - /// + /// Serialize public abstract void Serialize(ref T obj); - /// - /// End serialize - /// - public void EndSerialize() - { - writer.Dispose(); - } + /// End serialize + public void EndSerialize() => writer.Dispose(); } } \ No newline at end of file diff --git 
a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionEpochControl.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionEpochControl.cs index b23a6d9096..73ec887edf 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionEpochControl.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionEpochControl.cs @@ -7,7 +7,7 @@ namespace Tsavorite.core /// Provides thread management and callback to checkpoint completion (called state machine). /// /// This is broken out into a non-generic base interface to allow the use of - /// in . + /// in . internal interface ISessionEpochControl { void UnsafeResumeThread(); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctions.cs index a9c0f0f072..0015fd1d5a 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctions.cs @@ -236,65 +236,6 @@ public interface ISessionFunctions bool ConcurrentDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo); #endregion Deletes - #region Dispose - /// - /// Called after SingleWriter, if the CAS insertion of record into the store fails. Can be used to perform object disposal related actions. - /// - /// The key for this record - /// The user input that was used to compute - /// The previous value to be copied/updated - /// The destination to be updated; because this is an copy to a new location, there is no previous value there. 
- /// The location where the result of the update may be placed - /// Information about this update operation and its context - /// The operation for which this write is being done - void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason); - - /// - /// Called after copy-update for RMW (RCU (Read-Copy-Update) to the tail of the log), if the CAS insertion of record into the store fails. Can be used to perform object disposal related actions. - /// - /// The key for this record - /// The user input to be used for computing from - /// The previous value to be copied/updated - /// The destination to be updated; because this is an copy to a new location, there is no previous value there. - /// The location where is to be copied - /// Information about this update operation and its context - void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo); - - /// - /// Called after initial update for RMW (insert at the tail of the log), if the CAS insertion of record into the store fails. Can be used to perform object disposal related actions. - /// - /// The key for this record - /// The user input to be used for computing the updated - /// The destination to be updated; because this is an insert, there is no previous value there. - /// The location where the result of the operation on is to be copied - /// Information about this update operation and its context - void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo); - - /// - /// Called after a Delete that does not find the record in the mutable range and so inserts a new record, if the CAS insertion of record into the store fails. Can be used to perform object disposal related actions. 
- /// - /// The key for the record to be deleted - /// The value for the record being deleted; because this method is called only for in-place updates, there is a previous value there. Usually this is ignored or assigned 'default'. - /// Information about this update operation and its context - /// For Object Value types, Dispose() can be called here. If recordInfo.Invalid is true, this is called after the record was allocated and populated, but could not be appended at the end of the log. - void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo); - - /// - /// Called after a record has been deserialized from the disk on a pending Read or RMW. Can be used to perform object disposal related actions. - /// - /// The key for the record - /// The value for the record - void DisposeDeserializedFromDisk(ref Key key, ref Value value); - - /// - /// Called when a record is being revivified from the freelist, which will likely be for a different key. The previous Key must therefore be disposed; the Value probably already has been, at Delete() time. 
- /// - /// The key for the record - /// The value for the record - /// If > 0, this is a record from the freelist and we are disposing the key as well as value (it is -1 when revivifying a record in the hash chain or when doing a RETRY; for these the key does not change) - void DisposeForRevivification(ref Key key, ref Value value, int newKeySize); - #endregion Dispose - #region Utilities /// /// Called by Tsavorite when the operation goes pending, so the app can signal to itself that any pinned diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctionsWrapper.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctionsWrapper.cs index 719e4a97ea..e23bf9fc5c 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctionsWrapper.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionFunctionsWrapper.cs @@ -7,10 +7,12 @@ namespace Tsavorite.core /// Provides thread management and all callbacks. A wrapper for IFunctions and additional methods called by TsavoriteImpl; the wrapped /// IFunctions methods provide additional parameters to support the wrapper functionality, then call through to the user implementations. 
/// - internal interface ISessionFunctionsWrapper : ISessionEpochControl, IVariableLengthInput + internal interface ISessionFunctionsWrapper : ISessionEpochControl, IVariableLengthInput + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { bool IsManualLocking { get; } - TsavoriteKV Store { get; } + TsavoriteKV Store { get; } #region Reads bool SingleReader(ref Key key, ref Input input, ref Value value, ref Output dst, ref ReadInfo readInfo); @@ -50,30 +52,21 @@ internal interface ISessionFunctionsWrapper bool ConcurrentDeleter(long physicalAddress, ref Key key, ref Value value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo, out int fullRecordLength); #endregion Deletes - #region Disposal - void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason); - void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo); - void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo); - void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo); - void DisposeDeserializedFromDisk(ref Key key, ref Value value, ref RecordInfo recordInfo); - void DisposeForRevivification(ref Key key, ref Value value, int newKeySize, ref RecordInfo recordInfo); - #endregion Disposal - #region Utilities /// void ConvertOutputToHeap(ref Input input, ref Output output); #endregion Utilities #region Transient locking - bool TryLockTransientExclusive(ref Key key, ref OperationStackContext stackCtx); - bool TryLockTransientShared(ref Key key, ref OperationStackContext stackCtx); - void UnlockTransientExclusive(ref Key key, ref OperationStackContext stackCtx); - void UnlockTransientShared(ref Key key, ref OperationStackContext stackCtx); + bool TryLockTransientExclusive(ref Key key, ref OperationStackContext stackCtx); + bool 
TryLockTransientShared(ref Key key, ref OperationStackContext stackCtx); + void UnlockTransientExclusive(ref Key key, ref OperationStackContext stackCtx); + void UnlockTransientShared(ref Key key, ref OperationStackContext stackCtx); #endregion bool CompletePendingWithOutputs(out CompletedOutputIterator completedOutputs, bool wait = false, bool spinWaitForCommit = false); - TsavoriteKV.TsavoriteExecutionContext Ctx { get; } + TsavoriteKV.TsavoriteExecutionContext Ctx { get; } IHeapContainer GetHeapContainer(ref Input input); } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionLocker.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionLocker.cs index 5164042814..8c83129da6 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionLocker.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/ISessionLocker.cs @@ -2,6 +2,7 @@ // Licensed under the MIT license. using System.Diagnostics; +using System.Runtime.CompilerServices; namespace Tsavorite.core { @@ -9,14 +10,16 @@ namespace Tsavorite.core /// Provides thread management and all callbacks. A wrapper for ISessionFunctions and additional methods called by TsavoriteImpl; the wrapped /// ISessionFunctions methods provide additional parameters to support the wrapper functionality, then call through to the user implementations. 
/// - public interface ISessionLocker + public interface ISessionLocker + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { bool IsManualLocking { get; } - bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx); - bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx); - void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx); - void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx); + bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx); + bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx); + void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx); + void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx); } /// @@ -25,11 +28,14 @@ public interface ISessionLocker /// /// This struct contains no data fields; SessionFunctionsWrapper redirects with its ClientSession. 
/// - internal struct BasicSessionLocker : ISessionLocker + internal struct BasicSessionLocker : ISessionLocker + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { public bool IsManualLocking => false; - public bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) { if (!store.LockTable.TryLockExclusive(ref stackCtx.hei)) return false; @@ -37,7 +43,8 @@ public bool TryLockTransientExclusive(TsavoriteKV store, ref Opera return true; } - public bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) { if (!store.LockTable.TryLockShared(ref stackCtx.hei)) return false; @@ -45,13 +52,15 @@ public bool TryLockTransientShared(TsavoriteKV store, ref Operatio return true; } - public void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) { store.LockTable.UnlockExclusive(ref stackCtx.hei); stackCtx.recSrc.ClearHasTransientXLock(); } - public void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) { store.LockTable.UnlockShared(ref stackCtx.hei); stackCtx.recSrc.ClearHasTransientSLock(); @@ -61,11 +70,14 @@ public void UnlockTransientShared(TsavoriteKV store, ref Operation /// /// Lockable sessions are manual locking and thus must have already locked the record prior to an operation on it, so assert that. 
/// - internal struct LockableSessionLocker : ISessionLocker + internal struct LockableSessionLocker : ISessionLocker + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { public bool IsManualLocking => true; - public bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryLockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) { Debug.Assert(store.LockTable.IsLockedExclusive(ref stackCtx.hei), $"Attempting to use a non-XLocked key in a Lockable context (requesting XLock):" @@ -74,9 +86,8 @@ public bool TryLockTransientExclusive(TsavoriteKV store, ref Opera return true; } - public bool TryLockTransientExclusive(TsavoriteKV store, ref TKey key, ref OperationStackContext stackCtx) => throw new System.NotImplementedException(); - - public bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryLockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) { Debug.Assert(store.LockTable.IsLocked(ref stackCtx.hei), $"Attempting to use a non-Locked (S or X) key in a Lockable context (requesting SLock):" @@ -85,9 +96,8 @@ public bool TryLockTransientShared(TsavoriteKV store, ref Operatio return true; } - public bool TryLockTransientShared(TsavoriteKV store, ref TKey key, ref OperationStackContext stackCtx) => throw new System.NotImplementedException(); - - public void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void UnlockTransientExclusive(TsavoriteKV store, ref OperationStackContext stackCtx) { Debug.Assert(store.LockTable.IsLockedExclusive(ref stackCtx.hei), $"Attempting to unlock a non-XLocked key in a Lockable context (requesting XLock):" @@ -95,16 +105,13 @@ public void UnlockTransientExclusive(TsavoriteKV store, ref 
Operat + $" Slocked {store.LockTable.IsLockedShared(ref stackCtx.hei)}"); } - public void UnlockTransientExclusive(TsavoriteKV store, ref TKey key, ref OperationStackContext stackCtx) => throw new System.NotImplementedException(); - - public void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void UnlockTransientShared(TsavoriteKV store, ref OperationStackContext stackCtx) { Debug.Assert(store.LockTable.IsLockedShared(ref stackCtx.hei), $"Attempting to use a non-XLocked key in a Lockable context (requesting XLock):" + $" XLocked {store.LockTable.IsLockedExclusive(ref stackCtx.hei)}," + $" Slocked {store.LockTable.IsLockedShared(ref stackCtx.hei)}"); } - - public void UnlockTransientShared(TsavoriteKV store, ref TKey key, ref OperationStackContext stackCtx) => throw new System.NotImplementedException(); } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/TsavoriteEqualityComparer.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/KeyComparers.cs similarity index 69% rename from libs/storage/Tsavorite/cs/src/core/Index/Interfaces/TsavoriteEqualityComparer.cs rename to libs/storage/Tsavorite/cs/src/core/Index/Interfaces/KeyComparers.cs index 070107e095..6a89e5a912 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/TsavoriteEqualityComparer.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/KeyComparers.cs @@ -7,27 +7,27 @@ namespace Tsavorite.core { - internal static class TsavoriteEqualityComparer + internal static class KeyComparers { - public static ITsavoriteEqualityComparer Get() + public static IKeyComparer Get() { if (typeof(T) == typeof(string)) - return (ITsavoriteEqualityComparer)(object)StringTsavoriteEqualityComparer.Instance; + return (IKeyComparer)(object)StringKeyComparer.Instance; else if (typeof(T) == typeof(byte[])) - return 
(ITsavoriteEqualityComparer)(object)ByteArrayTsavoriteEqualityComparer.Instance; + return (IKeyComparer)(object)ByteArrayKeyComparer.Instance; else if (typeof(T) == typeof(long)) - return (ITsavoriteEqualityComparer)(object)LongTsavoriteEqualityComparer.Instance; + return (IKeyComparer)(object)LongKeyComparer.Instance; else if (typeof(T) == typeof(int)) - return (ITsavoriteEqualityComparer)(object)IntTsavoriteEqualityComparer.Instance; + return (IKeyComparer)(object)IntKeyComparer.Instance; else if (typeof(T) == typeof(Guid)) - return (ITsavoriteEqualityComparer)(object)GuidTsavoriteEqualityComparer.Instance; + return (IKeyComparer)(object)GuidKeyComparer.Instance; else if (typeof(T) == typeof(SpanByte)) - return (ITsavoriteEqualityComparer)(object)SpanByteComparer.Instance; + return (IKeyComparer)(object)SpanByteComparer.Instance; else { Debug.WriteLine("***WARNING*** Creating default Tsavorite key equality comparer based on potentially slow EqualityComparer.Default." + "To avoid this, provide a comparer (ITsavoriteEqualityComparer) as an argument to Tsavorite's constructor, or make Key implement the interface ITsavoriteEqualityComparer"); - return DefaultTsavoriteEqualityComparer.Instance; + return DefaultKeyComparer.Instance; } } } @@ -35,13 +35,13 @@ public static ITsavoriteEqualityComparer Get() /// /// Deterministic equality comparer for strings /// - public sealed class StringTsavoriteEqualityComparer : ITsavoriteEqualityComparer + public sealed class StringKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. 
- public static readonly StringTsavoriteEqualityComparer Instance = new(); + public static readonly StringKeyComparer Instance = new(); /// public bool Equals(ref string key1, ref string key2) @@ -69,13 +69,13 @@ public unsafe long GetHashCode64(ref string key) /// /// Deterministic equality comparer for longs /// - public sealed class LongTsavoriteEqualityComparer : ITsavoriteEqualityComparer + public sealed class LongKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. - public static readonly LongTsavoriteEqualityComparer Instance = new(); + public static readonly LongKeyComparer Instance = new(); /// public bool Equals(ref long k1, ref long k2) => k1 == k2; @@ -87,13 +87,13 @@ public sealed class LongTsavoriteEqualityComparer : ITsavoriteEqualityComparer /// Deterministic equality comparer for longs /// - public sealed class IntTsavoriteEqualityComparer : ITsavoriteEqualityComparer + public sealed class IntKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. - public static readonly IntTsavoriteEqualityComparer Instance = new(); + public static readonly IntKeyComparer Instance = new(); /// public bool Equals(ref int k1, ref int k2) => k1 == k2; @@ -105,13 +105,13 @@ public sealed class IntTsavoriteEqualityComparer : ITsavoriteEqualityComparer /// Deterministic equality comparer for longs /// - public sealed class GuidTsavoriteEqualityComparer : ITsavoriteEqualityComparer + public sealed class GuidKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. 
- public static readonly GuidTsavoriteEqualityComparer Instance = new(); + public static readonly GuidKeyComparer Instance = new(); /// public bool Equals(ref Guid k1, ref Guid k2) => k1 == k2; @@ -128,13 +128,13 @@ public unsafe long GetHashCode64(ref Guid k) /// /// Deterministic equality comparer for byte[] /// - public sealed class ByteArrayTsavoriteEqualityComparer : ITsavoriteEqualityComparer + public sealed class ByteArrayKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. - public static readonly ByteArrayTsavoriteEqualityComparer Instance = new(); + public static readonly ByteArrayKeyComparer Instance = new(); /// public bool Equals(ref byte[] key1, ref byte[] key2) => key1.AsSpan().SequenceEqual(key2); @@ -154,17 +154,35 @@ public unsafe long GetHashCode64(ref byte[] key) } } + /// + /// No-op equality comparer for Empty (used by TsavoriteLog) + /// + public sealed class EmptyKeyComparer : IKeyComparer + { + /// + /// The default instance. + /// + /// Used to avoid allocating new comparers. + public static readonly EmptyKeyComparer Instance = new(); + + /// + public bool Equals(ref Empty key1, ref Empty key2) => throw new NotImplementedException(); + + /// + public long GetHashCode64(ref Empty key) => throw new NotImplementedException(); + } + /// /// Low-performance Tsavorite equality comparer wrapper around EqualityComparer.Default /// /// - internal sealed class DefaultTsavoriteEqualityComparer : ITsavoriteEqualityComparer + internal sealed class DefaultKeyComparer : IKeyComparer { /// /// The default instance. /// /// Used to avoid allocating new comparers. 
- public static readonly DefaultTsavoriteEqualityComparer Instance = new(); + public static readonly DefaultKeyComparer Instance = new(); private static readonly EqualityComparer DefaultEC = EqualityComparer.Default; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/SessionFunctionsBase.cs b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/SessionFunctionsBase.cs index a5a7286cbf..3750a7056e 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/SessionFunctionsBase.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Interfaces/SessionFunctionsBase.cs @@ -49,19 +49,6 @@ public virtual void PostInitialUpdater(ref Key key, ref Input input, ref Value v public virtual void PostSingleDeleter(ref Key key, ref DeleteInfo deleteInfo) { } public virtual bool ConcurrentDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo) => true; - /// - public virtual void DisposeSingleWriter(ref Key key, ref Input input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason) { } - /// - public virtual void DisposeCopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo) { } - /// - public virtual void DisposeInitialUpdater(ref Key key, ref Input input, ref Value value, ref Output output, ref RMWInfo rmwInfo) { } - /// - public virtual void DisposeSingleDeleter(ref Key key, ref Value value, ref DeleteInfo deleteInfo) { } - /// - public virtual void DisposeDeserializedFromDisk(ref Key key, ref Value value) { } - /// - public virtual void DisposeForRevivification(ref Key key, ref Value value, int newKeySize) { } - /// public virtual void ReadCompletionCallback(ref Key key, ref Input input, ref Output output, Context ctx, Status status, RecordMetadata recordMetadata) { } /// public virtual void RMWCompletionCallback(ref Key key, ref Input input, ref Output output, Context ctx, Status status, RecordMetadata recordMetadata) { } 
diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Checkpoint.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Checkpoint.cs index c5ab2101cb..55163ff064 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Checkpoint.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Checkpoint.cs @@ -29,7 +29,9 @@ internal static class EpochPhaseIdx public const int CheckpointCompletionCallback = 4; } - public partial class TsavoriteKV + public partial class TsavoriteKV + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { internal TaskCompletionSource checkpointTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); @@ -75,7 +77,7 @@ internal void WriteIndexMetaInfo() internal bool ObtainCurrentTailAddress(ref long location) { - var tailAddress = hlog.GetTailAddress(); + var tailAddress = hlogBase.GetTailAddress(); return Interlocked.CompareExchange(ref location, tailAddress, 0) == 0; } @@ -87,7 +89,7 @@ internal void InitializeIndexCheckpoint(Guid indexToken) internal void InitializeHybridLogCheckpoint(Guid hybridLogToken, long version) { _hybridLogCheckpoint.Initialize(hybridLogToken, version, checkpointManager); - _hybridLogCheckpoint.info.manualLockingActive = hlog.NumActiveLockingSessions > 0; + _hybridLogCheckpoint.info.manualLockingActive = hlogBase.NumActiveLockingSessions > 0; } internal long Compact(ISessionFunctions functions, CompactionFunctions compactionFunctions, long untilAddress, CompactionType compactionType) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs index 0dd17c5c59..c73088651c 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/IndexCheckpoint.cs @@ -11,7 +11,9 @@ namespace Tsavorite.core { internal unsafe delegate void SkipReadCache(HashBucket* bucket); - public partial class TsavoriteKV : 
TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Derived class facing persistence API internal IndexCheckpointInfo _indexCheckpoint; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs index 9730fbd687..e67fb55941 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Recovery/Recovery.cs @@ -157,7 +157,9 @@ public struct LogFileInfo public long deltaLogTailAddress; } - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private const long NoPageFreed = -1; @@ -186,7 +188,7 @@ public long GetLatestCheckpointVersion() using var current = new HybridLogCheckpointInfo(); // Make sure we consider delta log in order to compute latest checkpoint version - current.Recover(hlogToken, checkpointManager, hlog.LogPageSizeBits, + current.Recover(hlogToken, checkpointManager, hlogBase.LogPageSizeBits, out var _, true); return current.info.nextVersion; } @@ -201,13 +203,13 @@ public LogFileInfo GetLogFileSize(Guid token, long version = -1) { using var current = new HybridLogCheckpointInfo(); // We find the latest checkpoint metadata for the given token, including scanning the delta log for the latest metadata - current.Recover(token, checkpointManager, hlog.LogPageSizeBits, + current.Recover(token, checkpointManager, hlogBase.LogPageSizeBits, out var _, true, version); - long snapshotDeviceOffset = hlog.GetPage(current.info.snapshotStartFlushedLogicalAddress) << hlog.LogPageSizeBits; + long snapshotDeviceOffset = hlogBase.GetPage(current.info.snapshotStartFlushedLogicalAddress) << hlogBase.LogPageSizeBits; return new LogFileInfo { snapshotFileEndAddress = current.info.snapshotFinalLogicalAddress - 
snapshotDeviceOffset, - hybridLogFileStartAddress = hlog.GetPage(current.info.beginAddress) << hlog.LogPageSizeBits, + hybridLogFileStartAddress = hlogBase.GetPage(current.info.beginAddress) << hlogBase.LogPageSizeBits, hybridLogFileEndAddress = current.info.flushedLogicalAddress, deltaLogTailAddress = current.info.deltaTailAddress, }; @@ -220,7 +222,7 @@ public LogFileInfo GetLogFileSize(Guid token, long version = -1) /// public long GetIndexFileSize(Guid token) { - IndexCheckpointInfo recoveredICInfo = new IndexCheckpointInfo(); + var recoveredICInfo = new IndexCheckpointInfo(); recoveredICInfo.Recover(token, checkpointManager); return (long)(recoveredICInfo.info.num_ht_bytes + recoveredICInfo.info.num_ofb_bytes); } @@ -232,7 +234,7 @@ private void GetClosestHybridLogCheckpointInfo( out byte[] cookie) { HybridLogCheckpointInfo current; - long closestVersion = long.MaxValue; + var closestVersion = long.MaxValue; closest = default; closestToken = default; cookie = default; @@ -245,7 +247,7 @@ private void GetClosestHybridLogCheckpointInfo( try { current = new HybridLogCheckpointInfo(); - current.Recover(hybridLogToken, checkpointManager, hlog.LogPageSizeBits, + current.Recover(hybridLogToken, checkpointManager, hlogBase.LogPageSizeBits, out var currCookie, false); var distanceToTarget = (requestedVersion == -1 ? long.MaxValue : requestedVersion) - current.info.version; // This is larger than intended version, cannot recover to this. 
@@ -328,7 +330,7 @@ private void FindRecoveryInfo(long requestedVersion, out HybridLogCheckpointInfo { recoveredHlcInfo.Dispose(); // need to actually scan delta log now - recoveredHlcInfo.Recover(closestToken, checkpointManager, hlog.LogPageSizeBits, out _, true); + recoveredHlcInfo.Recover(closestToken, checkpointManager, hlogBase.LogPageSizeBits, out _, true); } recoveredHlcInfo.info.DebugPrint(logger); @@ -372,7 +374,7 @@ private void GetRecoveryInfo(Guid indexToken, Guid hybridLogToken, out HybridLog // Recovery appropriate context information recoveredHLCInfo = new HybridLogCheckpointInfo(); - recoveredHLCInfo.Recover(hybridLogToken, checkpointManager, hlog.LogPageSizeBits, out recoveredCommitCookie, true); + recoveredHLCInfo.Recover(hybridLogToken, checkpointManager, hlogBase.LogPageSizeBits, out recoveredCommitCookie, true); recoveredHLCInfo.info.DebugPrint(logger); try { @@ -411,15 +413,15 @@ public void Reset() overflowBucketsAllocator = new MallocFixedPageSize(logger); // Reset the hybrid log - hlog.Reset(); + hlogBase.Reset(); } private long InternalRecover(IndexCheckpointInfo recoveredICInfo, HybridLogCheckpointInfo recoveredHLCInfo, int numPagesToPreload, bool undoNextVersion, long recoverTo) { - hlog.VerifyRecoveryInfo(recoveredHLCInfo, false); + hlogBase.VerifyRecoveryInfo(recoveredHLCInfo, false); - if (hlog.GetTailAddress() > hlog.GetFirstValidLogicalAddress(0)) + if (hlogBase.GetTailAddress() > hlog.GetFirstValidLogicalAddress(0)) { logger?.LogInformation("Recovery called on non-empty log - resetting to empty state first. 
Make sure store is quiesced before calling Recover on a running store."); Reset(); @@ -463,9 +465,9 @@ private long InternalRecover(IndexCheckpointInfo recoveredICInfo, HybridLogCheck private async ValueTask InternalRecoverAsync(IndexCheckpointInfo recoveredICInfo, HybridLogCheckpointInfo recoveredHLCInfo, int numPagesToPreload, bool undoNextVersion, long recoverTo, CancellationToken cancellationToken) { - hlog.VerifyRecoveryInfo(recoveredHLCInfo, false); + hlogBase.VerifyRecoveryInfo(recoveredHLCInfo, false); - if (hlog.GetTailAddress() > hlog.GetFirstValidLogicalAddress(0)) + if (hlogBase.GetTailAddress() > hlog.GetFirstValidLogicalAddress(0)) { logger?.LogInformation("Recovery called on non-empty log - resetting to empty state first. Make sure store is quiesced before calling Recover on a running store."); Reset(); @@ -512,12 +514,12 @@ private async ValueTask InternalRecoverAsync(IndexCheckpointInfo recovered private void DoPostRecovery(IndexCheckpointInfo recoveredICInfo, HybridLogCheckpointInfo recoveredHLCInfo, long tailAddress, ref long headAddress, ref long readOnlyAddress, long lastFreedPage) { // Adjust head and read-only address post-recovery - var _head = (1 + (tailAddress >> hlog.LogPageSizeBits) - (hlog.GetCapacityNumPages() - hlog.MinEmptyPageCount)) << hlog.LogPageSizeBits; + var _head = (1 + (tailAddress >> hlogBase.LogPageSizeBits) - (hlogBase.GetCapacityNumPages() - hlogBase.MinEmptyPageCount)) << hlogBase.LogPageSizeBits; // If additional pages have been freed to accommodate heap memory constraints, adjust head address accordingly if (lastFreedPage != NoPageFreed) { - var nextAddress = (lastFreedPage + 1) << hlog.LogPageSizeBits; + var nextAddress = (lastFreedPage + 1) << hlogBase.LogPageSizeBits; if (_head < nextAddress) _head = nextAddress; } @@ -527,7 +529,7 @@ private void DoPostRecovery(IndexCheckpointInfo recoveredICInfo, HybridLogCheckp if (readOnlyAddress < headAddress) readOnlyAddress = headAddress; - hlog.RecoveryReset(tailAddress, 
headAddress, recoveredHLCInfo.info.beginAddress, readOnlyAddress); + hlogBase.RecoveryReset(tailAddress, headAddress, recoveredHLCInfo.info.beginAddress, readOnlyAddress); checkpointManager.OnRecovery(recoveredICInfo.info.token, recoveredHLCInfo.info.guid); recoveredHLCInfo.Dispose(); } @@ -559,8 +561,8 @@ private bool RecoverToInitialPage(IndexCheckpointInfo recoveredICInfo, HybridLog recoverFromAddress = recoveredHLCInfo.info.beginAddress; // Unless we recovered previously until some hlog address - if (hlog.FlushedUntilAddress > recoverFromAddress) - recoverFromAddress = hlog.FlushedUntilAddress; + if (hlogBase.FlushedUntilAddress > recoverFromAddress) + recoverFromAddress = hlogBase.FlushedUntilAddress; // Start recovery at least from beginning of fuzzy log region // Needed if we are recovering to the same checkpoint a second time, with undo @@ -585,7 +587,7 @@ private bool RecoverToInitialPage(IndexCheckpointInfo recoveredICInfo, HybridLog private bool SetRecoveryPageRanges(HybridLogCheckpointInfo recoveredHLCInfo, int numPagesToPreload, long fromAddress, out long tailAddress, out long headAddress, out long scanFromAddress) { - if ((recoveredHLCInfo.info.useSnapshotFile == 0) && (recoveredHLCInfo.info.finalLogicalAddress <= hlog.GetTailAddress())) + if ((recoveredHLCInfo.info.useSnapshotFile == 0) && (recoveredHLCInfo.info.finalLogicalAddress <= hlogBase.GetTailAddress())) { tailAddress = headAddress = scanFromAddress = default; return false; @@ -601,7 +603,7 @@ private bool SetRecoveryPageRanges(HybridLogCheckpointInfo recoveredHLCInfo, int headAddress = recoveredHLCInfo.info.headAddress; if (numPagesToPreload != -1) { - var head = (hlog.GetPage(tailAddress) - numPagesToPreload) << hlog.LogPageSizeBits; + var head = (hlogBase.GetPage(tailAddress) - numPagesToPreload) << hlogBase.LogPageSizeBits; if (head > headAddress) headAddress = head; } @@ -618,8 +620,8 @@ private bool SetRecoveryPageRanges(HybridLogCheckpointInfo recoveredHLCInfo, int headAddress = 
recoveredHLCInfo.info.headAddress; } - if (hlog.FlushedUntilAddress > scanFromAddress) - scanFromAddress = hlog.FlushedUntilAddress; + if (hlogBase.FlushedUntilAddress > scanFromAddress) + scanFromAddress = hlogBase.FlushedUntilAddress; return true; } @@ -640,11 +642,11 @@ private void FreePagesBeyondUsableCapacity(long startPage, int capacity, int usa for (var page = beg; page < end; page++) { - var pageIndex = hlog.GetPageIndexForPage(page); + var pageIndex = hlogBase.GetPageIndexForPage(page); if (hlog.IsAllocated(pageIndex)) { recoveryStatus.WaitFlush(pageIndex); - hlog.EvictPage(page); + hlogBase.EvictPage(page); } } } @@ -653,12 +655,12 @@ private void ReadPagesWithMemoryConstraint(long endAddress, int capacity, Recove { // Before reading in additional pages, make sure that any previously allocated pages that would violate the memory size // constraint are freed. - FreePagesBeyondUsableCapacity(startPage: page, capacity: capacity, usableCapacity: capacity - hlog.MinEmptyPageCount, pagesToRead: numPagesToRead, recoveryStatus); + FreePagesBeyondUsableCapacity(startPage: page, capacity: capacity, usableCapacity: capacity - hlogBase.MinEmptyPageCount, pagesToRead: numPagesToRead, recoveryStatus); // Issue request to read pages as much as possible - for (var p = page; p < endPage; p++) recoveryStatus.readStatus[hlog.GetPageIndexForPage(p)] = ReadStatus.Pending; - hlog.AsyncReadPagesFromDevice(page, numPagesToRead, endAddress, - hlog.AsyncReadPagesCallbackForRecovery, + for (var p = page; p < endPage; p++) recoveryStatus.readStatus[hlogBase.GetPageIndexForPage(p)] = ReadStatus.Pending; + hlogBase.AsyncReadPagesFromDevice(page, numPagesToRead, endAddress, + hlogBase.AsyncReadPagesCallbackForRecovery, recoveryStatus, recoveryStatus.recoveryDevicePageOffset, recoveryStatus.recoveryDevice, recoveryStatus.objectLogRecoveryDevice); } @@ -666,17 +668,17 @@ private void ReadPagesWithMemoryConstraint(long endAddress, int capacity, Recove private long 
FreePagesToLimitHeapMemory(RecoveryStatus recoveryStatus, long page) { long lastFreedPage = NoPageFreed; - if (hlog.IsSizeBeyondLimit == null) + if (hlogBase.IsSizeBeyondLimit == null) return lastFreedPage; // free up additional pages, one at a time, to bring memory usage under control starting with the earliest possible page - for (var p = Math.Max(0, page - recoveryStatus.usableCapacity + 1); p < page && hlog.IsSizeBeyondLimit(); p++) + for (var p = Math.Max(0, page - recoveryStatus.usableCapacity + 1); p < page && hlogBase.IsSizeBeyondLimit(); p++) { - var pageIndex = hlog.GetPageIndexForPage(p); + var pageIndex = hlogBase.GetPageIndexForPage(p); if (hlog.IsAllocated(pageIndex)) { recoveryStatus.WaitFlush(pageIndex); - hlog.EvictPage(p); + hlogBase.EvictPage(p); lastFreedPage = p; } } @@ -721,17 +723,17 @@ private async ValueTask ReadPagesForRecoveryAsync(long untilAddress, Recov private async Task FreePagesToLimitHeapMemoryAsync(RecoveryStatus recoveryStatus, long page, CancellationToken cancellationToken) { long lastFreedPage = NoPageFreed; - if (hlog.IsSizeBeyondLimit == null) + if (hlogBase.IsSizeBeyondLimit == null) return lastFreedPage; // free up additional pages, one at a time, to bring memory usage under control starting with the earliest possible page - for (var p = Math.Max(0, page - recoveryStatus.usableCapacity + 1); p < page && hlog.IsSizeBeyondLimit(); p++) + for (var p = Math.Max(0, page - recoveryStatus.usableCapacity + 1); p < page && hlogBase.IsSizeBeyondLimit(); p++) { - var pageIndex = hlog.GetPageIndexForPage(p); + var pageIndex = hlogBase.GetPageIndexForPage(p); if (hlog.IsAllocated(pageIndex)) { await recoveryStatus.WaitFlushAsync(pageIndex, cancellationToken); - hlog.EvictPage(p); + hlogBase.EvictPage(p); lastFreedPage = p; } } @@ -753,7 +755,7 @@ private long RecoverHybridLog(long scanFromAddress, long recoverFromAddress, lon for (var p = page; p < end; p++) { // Ensure page has been read into memory - int pageIndex = 
hlog.GetPageIndexForPage(p); + int pageIndex = hlogBase.GetPageIndexForPage(p); recoveryStatus.WaitRead(pageIndex); var freedPage = FreePagesToLimitHeapMemory(recoveryStatus, p); @@ -786,7 +788,7 @@ private async ValueTask RecoverHybridLogAsync(long scanFromAddress, long r for (var p = page; p < end; p++) { // Ensure page has been read into memory - int pageIndex = hlog.GetPageIndexForPage(p); + int pageIndex = hlogBase.GetPageIndexForPage(p); await recoveryStatus.WaitReadAsync(pageIndex, cancellationToken).ConfigureAwait(false); var freedPage = await FreePagesToLimitHeapMemoryAsync(recoveryStatus, p, cancellationToken).ConfigureAwait(false); @@ -806,20 +808,20 @@ private async ValueTask RecoverHybridLogAsync(long scanFromAddress, long r private RecoveryStatus GetPageRangesToRead(long scanFromAddress, long untilAddress, CheckpointType checkpointType, out long startPage, out long endPage, out int capacity, out int numPagesToReadPerIteration) { - startPage = hlog.GetPage(scanFromAddress); - endPage = hlog.GetPage(untilAddress); + startPage = hlogBase.GetPage(scanFromAddress); + endPage = hlogBase.GetPage(untilAddress); if (untilAddress > hlog.GetStartLogicalAddress(endPage) && untilAddress > scanFromAddress) { endPage++; } - capacity = hlog.GetCapacityNumPages(); + capacity = hlogBase.GetCapacityNumPages(); int totalPagesToRead = (int)(endPage - startPage); // Leave out at least MinEmptyPageCount pages to maintain memory size during recovery // If heap memory is to be tracked, then read one page at a time to control memory usage - numPagesToReadPerIteration = hlog.IsSizeBeyondLimit == null ? Math.Min(capacity - hlog.MinEmptyPageCount, totalPagesToRead) : 1; - return new RecoveryStatus(capacity, hlog.MinEmptyPageCount, endPage, untilAddress, checkpointType); + numPagesToReadPerIteration = hlogBase.IsSizeBeyondLimit == null ? 
Math.Min(capacity - hlogBase.MinEmptyPageCount, totalPagesToRead) : 1; + return new RecoveryStatus(capacity, hlogBase.MinEmptyPageCount, endPage, untilAddress, checkpointType); } private void ProcessReadPageAndFlush(long recoverFromAddress, long untilAddress, long nextVersion, RecoveryOptions options, RecoveryStatus recoveryStatus, long page, int pageIndex) @@ -827,7 +829,7 @@ private void ProcessReadPageAndFlush(long recoverFromAddress, long untilAddress, if (ProcessReadPage(recoverFromAddress, untilAddress, nextVersion, options, recoveryStatus, page, pageIndex)) { // Page was modified due to undoFutureVersion. Flush it to disk; the callback issues the after-capacity read request if necessary. - hlog.AsyncFlushPages(page, 1, AsyncFlushPageCallbackForRecovery, recoveryStatus); + hlogBase.AsyncFlushPages(page, 1, AsyncFlushPageCallbackForRecovery, recoveryStatus); return; } @@ -845,13 +847,13 @@ private bool ProcessReadPage(long recoverFromAddress, long untilAddress, long ne return false; var pageFromAddress = 0L; - var pageUntilAddress = hlog.GetPageSize(); + var pageUntilAddress = hlogBase.GetPageSize(); if (recoverFromAddress > startLogicalAddress) - pageFromAddress = hlog.GetOffsetInPage(recoverFromAddress); + pageFromAddress = hlogBase.GetOffsetInPage(recoverFromAddress); if (untilAddress < endLogicalAddress) - pageUntilAddress = hlog.GetOffsetInPage(untilAddress); + pageUntilAddress = hlogBase.GetOffsetInPage(untilAddress); if (RecoverFromPage(recoverFromAddress, pageFromAddress, pageUntilAddress, startLogicalAddress, physicalAddress, nextVersion, options)) { @@ -867,13 +869,13 @@ private bool ProcessReadPage(long recoverFromAddress, long untilAddress, long ne private void WaitUntilAllPagesHaveBeenFlushed(long startPage, long endPage, RecoveryStatus recoveryStatus) { for (long page = startPage; page < endPage; page++) - recoveryStatus.WaitFlush(hlog.GetPageIndexForPage(page)); + recoveryStatus.WaitFlush(hlogBase.GetPageIndexForPage(page)); } private async 
ValueTask WaitUntilAllPagesHaveBeenFlushedAsync(long startPage, long endPage, RecoveryStatus recoveryStatus, CancellationToken cancellationToken) { for (long page = startPage; page < endPage; page++) - await recoveryStatus.WaitFlushAsync(hlog.GetPageIndexForPage(page), cancellationToken).ConfigureAwait(false); + await recoveryStatus.WaitFlushAsync(hlogBase.GetPageIndexForPage(page), cancellationToken).ConfigureAwait(false); } private long RecoverHybridLogFromSnapshotFile(long scanFromAddress, long recoverFromAddress, long untilAddress, long snapshotStartAddress, long snapshotEndAddress, long nextVersion, Guid guid, RecoveryOptions options, DeltaLog deltaLog, long recoverTo) @@ -887,7 +889,7 @@ private long RecoverHybridLogFromSnapshotFile(long scanFromAddress, long recover var end = Math.Min(page + numPagesToReadPerIteration, endPage); for (long p = page; p < end; p++) { - int pageIndex = hlog.GetPageIndexForPage(p); + int pageIndex = hlogBase.GetPageIndexForPage(p); if (p < snapshotEndPage) { // Ensure the page is read from file @@ -929,7 +931,7 @@ private async ValueTask RecoverHybridLogFromSnapshotFileAsync(long scanFro var end = Math.Min(page + numPagesToReadPerIteration, endPage); for (long p = page; p < end; p++) { - int pageIndex = hlog.GetPageIndexForPage(p); + int pageIndex = hlogBase.GetPageIndexForPage(p); if (p < snapshotEndPage) { // Ensure the page is read from file @@ -961,11 +963,11 @@ private async ValueTask RecoverHybridLogFromSnapshotFileAsync(long scanFro private void ApplyDelta(long scanFromAddress, long recoverFromAddress, long untilAddress, long nextVersion, RecoveryOptions options, DeltaLog deltaLog, long recoverTo, long endPage, long snapshotEndPage, int capacity, int numPagesToRead, RecoveryStatus recoveryStatus, long page, long end) { - hlog.ApplyDelta(deltaLog, page, end, recoverTo); + hlogBase.ApplyDelta(deltaLog, page, end, recoverTo); for (long p = page; p < end; p++) { - int pageIndex = hlog.GetPageIndexForPage(p); + int pageIndex = 
hlogBase.GetPageIndexForPage(p); var endLogicalAddress = hlog.GetStartLogicalAddress(p + 1); if (recoverFromAddress < endLogicalAddress && recoverFromAddress < untilAddress) @@ -976,7 +978,7 @@ private void ApplyDelta(long scanFromAddress, long recoverFromAddress, long unti { // Flush snapshot page to main log recoveryStatus.flushStatus[pageIndex] = FlushStatus.Pending; - hlog.AsyncFlushPages(p, 1, AsyncFlushPageCallbackForRecovery, recoveryStatus); + hlogBase.AsyncFlushPages(p, 1, AsyncFlushPageCallbackForRecovery, recoveryStatus); } } } @@ -985,23 +987,23 @@ private void GetSnapshotPageRangesToRead(long fromAddress, long untilAddress, lo out RecoveryStatus recoveryStatus, out int numPagesToReadPerIteration) { // Compute startPage and endPage - startPage = hlog.GetPage(fromAddress); - endPage = hlog.GetPage(untilAddress); + startPage = hlogBase.GetPage(fromAddress); + endPage = hlogBase.GetPage(untilAddress); if (untilAddress > hlog.GetStartLogicalAddress(endPage) && untilAddress > fromAddress) endPage++; - long snapshotStartPage = hlog.GetPage(snapshotStartAddress); - snapshotEndPage = hlog.GetPage(snapshotEndAddress); + long snapshotStartPage = hlogBase.GetPage(snapshotStartAddress); + snapshotEndPage = hlogBase.GetPage(snapshotEndAddress); if (snapshotEndAddress > hlog.GetStartLogicalAddress(snapshotEndPage) && snapshotEndAddress > snapshotStartAddress) snapshotEndPage++; // By default first page has one extra record - capacity = hlog.GetCapacityNumPages(); + capacity = hlogBase.GetCapacityNumPages(); var recoveryDevice = checkpointManager.GetSnapshotLogDevice(guid); var objectLogRecoveryDevice = checkpointManager.GetSnapshotObjectLogDevice(guid); - recoveryDevice.Initialize(hlog.GetSegmentSize()); + recoveryDevice.Initialize(hlogBase.GetSegmentSize()); objectLogRecoveryDevice.Initialize(-1); - recoveryStatus = new RecoveryStatus(capacity, hlog.MinEmptyPageCount, endPage, untilAddress, CheckpointType.Snapshot) + recoveryStatus = new RecoveryStatus(capacity, 
hlogBase.MinEmptyPageCount, endPage, untilAddress, CheckpointType.Snapshot) { recoveryDevice = recoveryDevice, objectLogRecoveryDevice = objectLogRecoveryDevice, @@ -1012,7 +1014,7 @@ private void GetSnapshotPageRangesToRead(long fromAddress, long untilAddress, lo // Initially issue read request for all pages that can be held in memory // If heap memory is to be tracked, then read one page at a time to control memory usage int totalPagesToRead = (int)(snapshotEndPage - startPage); - numPagesToReadPerIteration = hlog.IsSizeBeyondLimit == null ? Math.Min(capacity - hlog.MinEmptyPageCount, totalPagesToRead) : 1; + numPagesToReadPerIteration = hlogBase.IsSizeBeyondLimit == null ? Math.Min(capacity - hlogBase.MinEmptyPageCount, totalPagesToRead) : 1; } private void ProcessReadSnapshotPage(long fromAddress, long untilAddress, long nextVersion, RecoveryOptions options, RecoveryStatus recoveryStatus, long page, int pageIndex) @@ -1033,16 +1035,16 @@ private void ProcessReadSnapshotPage(long fromAddress, long untilAddress, long n */ var pageFromAddress = 0L; - var pageUntilAddress = hlog.GetPageSize(); + var pageUntilAddress = hlogBase.GetPageSize(); var physicalAddress = hlog.GetPhysicalAddress(startLogicalAddress); if (fromAddress > startLogicalAddress && fromAddress < endLogicalAddress) - pageFromAddress = hlog.GetOffsetInPage(fromAddress); + pageFromAddress = hlogBase.GetOffsetInPage(fromAddress); if (endLogicalAddress > untilAddress) - pageUntilAddress = hlog.GetOffsetInPage(untilAddress); + pageUntilAddress = hlogBase.GetOffsetInPage(untilAddress); - RecoverFromPage(fromAddress, pageFromAddress, pageUntilAddress, + _ = RecoverFromPage(fromAddress, pageFromAddress, pageUntilAddress, startLogicalAddress, physicalAddress, nextVersion, options); } @@ -1058,7 +1060,7 @@ private unsafe void ClearLocksOnPage(long page, RecoveryOptions options) // no need to clear locks for records that will not end up in main memory if (options.headAddress >= endLogicalAddress) return; - 
long untilLogicalAddressInPage = hlog.GetPageSize(); + long untilLogicalAddressInPage = hlogBase.GetPageSize(); long pointer = 0; while (pointer < untilLogicalAddressInPage) @@ -1072,7 +1074,7 @@ private unsafe void ClearLocksOnPage(long page, RecoveryOptions options) else { int size = hlog.GetRecordSize(recordStart).Item2; - Debug.Assert(size <= hlog.GetPageSize()); + Debug.Assert(size <= hlogBase.GetPageSize()); pointer += size; } } @@ -1088,7 +1090,7 @@ private unsafe bool RecoverFromPage(long startRecoveryAddress, long pagePhysicalAddress, long nextVersion, RecoveryOptions options) { - bool touched = false; + var touched = false; var pointer = default(long); var recordStart = default(long); @@ -1107,8 +1109,8 @@ private unsafe bool RecoverFromPage(long startRecoveryAddress, if (!info.Invalid) { - HashEntryInfo hei = new(comparer.GetHashCode64(ref hlog.GetKey(recordStart))); - FindOrCreateTag(ref hei, hlog.BeginAddress); + HashEntryInfo hei = new(storeFunctions.GetKeyHashCode64(ref hlog.GetKey(recordStart))); + FindOrCreateTag(ref hei, hlogBase.BeginAddress); bool ignoreRecord = ((pageLogicalAddress + pointer) >= options.fuzzyRegionStartAddress) && info.IsInNewVersion; if (!options.undoNextVersion) ignoreRecord = false; @@ -1151,7 +1153,7 @@ private void AsyncFlushPageCallbackForRecovery(uint errorCode, uint numBytes, ob if (Interlocked.Decrement(ref result.count) == 0) { - int pageIndex = hlog.GetPageIndexForPage(result.page); + int pageIndex = hlogBase.GetPageIndexForPage(result.page); if (errorCode != 0) result.context.SignalFlushedError(pageIndex); @@ -1176,7 +1178,9 @@ internal static bool AtomicSwitch(TsavoriteExecutionCont } } - internal abstract partial class AllocatorBase : IDisposable + public abstract partial class AllocatorBase : IDisposable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Restore log @@ -1235,8 +1239,8 @@ private bool RestoreHybridLogInitializePages(long beginAddress, long headAddress ((headAddress 
== untilAddress) && (GetOffsetInPage(headAddress) == 0)) // Empty in-memory page ) { - if (!IsAllocated(GetPageIndexForAddress(headAddress))) - AllocatePage(GetPageIndexForAddress(headAddress)); + if (!_wrapper.IsAllocated(GetPageIndexForAddress(headAddress))) + _wrapper.AllocatePage(GetPageIndexForAddress(headAddress)); } else { @@ -1281,7 +1285,7 @@ internal unsafe void AsyncReadPagesCallbackForRecovery(uint errorCode, uint numB if (result.freeBuffer1 != null) { - PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); + _wrapper.PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); result.freeBuffer1.Return(); } int pageIndex = GetPageIndexForPage(result.page); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/DisposeReason.cs b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/DisposeReason.cs new file mode 100644 index 0000000000..79edca2794 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/DisposeReason.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +namespace Tsavorite.core +{ + /// + /// The reason for a call to + /// + public enum DisposeReason + { + /// + /// No Dispose() call was made + /// + None, + + /// + /// Failure of SingleWriter insertion of a record at the tail of the cache. + /// + SingleWriterCASFailed, + + /// + /// Failure of CopyUpdater insertion of a record at the tail of the cache. + /// + CopyUpdaterCASFailed, + + /// + /// Failure of InitialUpdater insertion of a record at the tail of the cache. + /// + InitialUpdaterCASFailed, + + /// + /// Failure of SingleDeleter insertion of a record at the tail of the cache. + /// + SingleDeleterCASFailed, + + /// + /// A record was deserialized from the disk for a pending Read or RMW operation. 
+ /// + DeserializedFromDisk, + + /// + /// A record was retrieved from the revivification freelist, and thus the key space may have to be adjusted as well. + /// + RevivificationFreeList, + + /// + /// A page was evicted from the in-memory portion of the main log, or from the readcache. + /// + PageEviction + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IRecordDisposer.cs b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IRecordDisposer.cs new file mode 100644 index 0000000000..45edde7f78 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IRecordDisposer.cs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System.Diagnostics; +using System.Runtime.CompilerServices; + +namespace Tsavorite.core +{ + /// + /// Interface to implement the Disposer component of + /// + public interface IRecordDisposer + { + /// + /// If true, with + /// is called on page evictions from both readcache and main log. Otherwise, the user can register an Observer and do any needed disposal there. + /// + public bool DisposeOnPageEviction { get; } + + /// + /// Dispose the Key and Value of a record, if necessary. See comments in for details. + /// + void DisposeRecord(ref Key key, ref Value value, DisposeReason reason, int newKeySize); + } + + /// + /// Default no-op implementation if + /// + /// It is appropriate to call methods on this instance as a no-op. + public struct DefaultRecordDisposer : IRecordDisposer + { + /// + /// Default instance + /// + public static readonly DefaultRecordDisposer Instance = new(); + + /// + /// Assumes the key and value have no need of Dispose(), and does nothing. + /// + public readonly bool DisposeOnPageEviction => false; + + /// + /// Assumes the key and value have no need of Dispose(), and does nothing. 
+ /// + public readonly void DisposeRecord(ref Key key, ref Value value, DisposeReason reason, int newKeySize) + { + Debug.Assert(typeof(Key) != typeof(SpanByte) && typeof(Value) != typeof(SpanByte), "Must use SpanByteRecordDisposer"); + } + } + + /// + /// Default no-op implementation if for SpanByte + /// + public struct SpanByteRecordDisposer : IRecordDisposer + { + /// + /// Default instance + /// + public static readonly SpanByteRecordDisposer Instance = new(); + + /// + /// Assumes the key and value have no need of Dispose(), and does nothing. + /// + public readonly bool DisposeOnPageEviction => false; + + /// + /// If is and is >= 0, + /// this adjusts the key (and if necessary value) space as needed to preserve log zero-init correctness. + /// Otherwise the key and value have no need of disposal, and this does nothing. + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public unsafe void DisposeRecord(ref SpanByte key, ref SpanByte value, DisposeReason reason, int newKeySize) + { + // We don't have to do anything with the Value unless the new key size requires adjusting the key length. + // newKeySize == -1 means we are preserving the existing key (e.g. for in-chain revivification). + if (reason != DisposeReason.RevivificationFreeList || newKeySize < 0) + return; + + var oldKeySize = Utility.RoundUp(key.TotalSize, Constants.kRecordAlignment); + + // We are changing the key size (e.g. revivification from the freelist with a new key). + // Our math here uses record alignment of keys as in the allocator, and assumes this will always be at least int alignment. + newKeySize = Utility.RoundUp(newKeySize, Constants.kRecordAlignment); + int keySizeChange = newKeySize - oldKeySize; + if (keySizeChange == 0) + return; + + // We are growing or shrinking. We don't care (here or in SingleWriter, InitialUpdater, CopyUpdater) what is inside the Key and Value, + // as long as we don't leave nonzero bytes after the used value space. 
So we just need to make sure the Value space starts immediately + after the new key size. SingleWriter et al. will do the ShrinkSerializedLength on Value as needed. + if (keySizeChange < 0) + { + // We are shrinking the key; the Value of the new record will start after key + newKeySize, so set the new value length there. + *(int*)((byte*)Unsafe.AsPointer(ref key) + newKeySize) = value.Length - keySizeChange; // minus negative => plus positive + } + else + { + // We are growing the key; the Value of the new record will start somewhere in the middle of where the old Value was, so set the new value length there. + *(int*)((byte*)Unsafe.AsPointer(ref value) + keySizeChange) = value.Length - keySizeChange; + } + + // NewKeySize is (newKey).TotalSize. + key.Length = newKeySize - sizeof(int); + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IStoreFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IStoreFunctions.cs new file mode 100644 index 0000000000..5d2245620d --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/IStoreFunctions.cs @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System; +using System.IO; + +namespace Tsavorite.core +{ + /// + /// The interface to define functions on the TsavoriteKV store itself (rather than a session). + /// + public interface IStoreFunctions + { + #region Key Comparer + /// Get a 64-bit hash code for a key + long GetKeyHashCode64(ref TKey key); + + /// Compare two keys for equality + bool KeysEqual(ref TKey k1, ref TKey k2); + #endregion Key Comparer + + #region Key Serializer + /// Indicates whether the Key Serializer is to be used + bool HasKeySerializer { get; } + + /// Instantiate a KeySerializer and begin Key serialization to the given stream. + /// This must instantiate a new serializer as multiple threads may be serializing or deserializing. 
+ IObjectSerializer BeginSerializeKey(Stream stream); + + /// Instantiate a KeySerializer and begin Key deserialization from the given stream. + /// This must instantiate a new serializer as multiple threads may be serializing or deserializing. + IObjectSerializer BeginDeserializeKey(Stream stream); + #endregion Key Serializer + + #region Value Serializer + /// Indicates whether the Value Serializer is to be used + bool HasValueSerializer { get; } + + /// Instantiate a ValueSerializer and begin Value serialization to the given stream. + /// This must instantiate a new serializer as multiple threads may be serializing or deserializing. + IObjectSerializer BeginSerializeValue(Stream stream); + + /// Instantiate a ValueSerializer and begin Value deserialization from the given stream. + /// This must instantiate a new serializer as multiple threads may be serializing or deserializing. + IObjectSerializer BeginDeserializeValue(Stream stream); + #endregion Value Serializer + + #region Record Disposer + /// + /// If true, with + /// is called on page evictions from both readcache and main log. Otherwise, the user can register an Observer and + /// do any needed disposal there. + /// + bool DisposeOnPageEviction { get; } + + /// Dispose the Key and Value of a record, if necessary. + /// The key for the record + /// The value for the record + /// For only, this is a record from the freelist and we may be disposing the key as well as value + /// (it is -1 when revivifying a record in the hash chain or when doing a RETRY; for these the key does not change) + void DisposeRecord(ref TKey key, ref TValue value, DisposeReason reason, int newKeySize = -1); + #endregion Record Disposer + + #region Checkpoint Completion + /// Set the parameterless checkpoint completion callback. + void SetCheckpointCompletedCallback(Action callback); + + /// Called when a checkpoint has completed. 
+ void OnCheckpointCompleted(); + #endregion Checkpoint Completion + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/StoreFunctions.cs b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/StoreFunctions.cs new file mode 100644 index 0000000000..a920c2f646 --- /dev/null +++ b/libs/storage/Tsavorite/cs/src/core/Index/StoreFunctions/StoreFunctions.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System; +using System.IO; +using System.Runtime.CompilerServices; + +namespace Tsavorite.core +{ + /// + /// Store functions for and . + /// + /// + /// The implementation takes instances of the supported interfaces (e.g. ) to allow custom + /// implementation of any/all. We also provide standard implementations for standard types. The design exposes the instances + /// because there is no need to wrap calls to them with additional functionality. This can be changed to redirect if such wrapper + /// functionality is needed. + /// + public struct StoreFunctions + (TKeyComparer keyComparer, Func> keySerializerCreator, Func> valueSerializerCreator, TRecordDisposer recordDisposer) + : IStoreFunctions + where TKeyComparer : IKeyComparer + where TRecordDisposer : IRecordDisposer + { + #region Fields + /// Compare two keys for equality, and get a key's hash code. + readonly TKeyComparer keyComparer = keyComparer; + + /// Serialize a Key to persistent storage + readonly Func> keySerializerCreator = keySerializerCreator; + + /// Serialize a Value to persistent storage + readonly Func> valueSerializerCreator = valueSerializerCreator; + + /// Dispose a record + readonly TRecordDisposer recordDisposer = recordDisposer; + + /// Optional checkpoint completion callback, set separately from ctor. 
+ Action checkpointCompletionCallback = () => { }; + #endregion Fields + + #region Key Comparer + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly long GetKeyHashCode64(ref TKey key) => keyComparer.GetHashCode64(ref key); + + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly bool KeysEqual(ref TKey k1, ref TKey k2) => keyComparer.Equals(ref k1, ref k2); + #endregion Key Comparer + + #region Key Serializer + /// + public readonly bool HasKeySerializer => keySerializerCreator is not null; + + /// + public readonly IObjectSerializer BeginSerializeKey(Stream stream) + { + var keySerializer = keySerializerCreator(); + keySerializer.BeginSerialize(stream); + return keySerializer; + } + + /// + public readonly IObjectSerializer BeginDeserializeKey(Stream stream) + { + var keySerializer = keySerializerCreator(); + keySerializer.BeginDeserialize(stream); + return keySerializer; + } + #endregion Key Serializer + + #region Value Serializer + /// + public readonly bool HasValueSerializer => valueSerializerCreator is not null; + + /// + public readonly IObjectSerializer BeginSerializeValue(Stream stream) + { + var valueSerializer = valueSerializerCreator(); + valueSerializer.BeginSerialize(stream); + return valueSerializer; + } + + /// + public readonly IObjectSerializer BeginDeserializeValue(Stream stream) + { + var valueSerializer = valueSerializerCreator(); + valueSerializer.BeginDeserialize(stream); + return valueSerializer; + } + #endregion Value Serializer + + #region Record Disposer + /// + public readonly bool DisposeOnPageEviction => recordDisposer.DisposeOnPageEviction; + + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public readonly void DisposeRecord(ref TKey key, ref TValue value, DisposeReason reason, int newKeySize) => recordDisposer.DisposeRecord(ref key, ref value, reason, newKeySize); + #endregion Record Disposer + + #region Checkpoint Completion + /// + public void 
SetCheckpointCompletedCallback(Action callback) => checkpointCompletionCallback = callback; + + /// + public readonly void OnCheckpointCompleted() => checkpointCompletionCallback(); + #endregion Checkpoint Completion + } + + /// + /// A non-parameterized version of StoreFunctions that provides type-reduced Create() methods. + /// + public struct StoreFunctions + { + /// + /// Construct a StoreFunctions instance with all types specified and contained instances passed, e.g. for custom objects. + /// + public static StoreFunctions Create + (TKeyComparer keyComparer, Func> keySerializerCreator, Func> valueSerializerCreator, TRecordDisposer recordDisposer) + where TKeyComparer : IKeyComparer + where TRecordDisposer : IRecordDisposer + => new(keyComparer, keySerializerCreator, valueSerializerCreator, recordDisposer); + + /// + /// Construct a StoreFunctions instance with all types specified and contained instances passed, e.g. for custom objects. + /// + public static StoreFunctions> Create + (TKeyComparer keyComparer, Func> keySerializerCreator, Func> valueSerializerCreator) + where TKeyComparer : IKeyComparer + => new(keyComparer, keySerializerCreator, valueSerializerCreator, new DefaultRecordDisposer()); + + /// + /// Construct a StoreFunctions instance with all types specified and contained instances passed, e.g. for custom objects. 
+ /// + public static StoreFunctions Create + (TKeyComparer keyComparer, TRecordDisposer recordDisposer) + where TKeyComparer : IKeyComparer + where TRecordDisposer : IRecordDisposer + => new(keyComparer, keySerializerCreator: null, valueSerializerCreator: null, recordDisposer); + + /// + /// Store functions for and that take only the + /// + public static StoreFunctions> Create + (TKeyComparer keyComparer) + where TKeyComparer : IKeyComparer + => new(keyComparer, keySerializerCreator: null, valueSerializerCreator: null, DefaultRecordDisposer.Instance); + + /// + /// Store functions for Key and Value + /// + public static StoreFunctions Create() + => new(SpanByteComparer.Instance, keySerializerCreator: null, valueSerializerCreator: null, SpanByteRecordDisposer.Instance); + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/FullCheckpointStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/FullCheckpointStateMachine.cs index cc933a66b4..4291f8d018 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/FullCheckpointStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/FullCheckpointStateMachine.cs @@ -12,12 +12,14 @@ namespace Tsavorite.core /// /// This task contains logic to orchestrate the index and hybrid log checkpoint in parallel /// - internal sealed class FullCheckpointOrchestrationTask : ISynchronizationTask + internal sealed class FullCheckpointOrchestrationTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public void GlobalBeforeEnteringState( + public void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { switch (next.Phase) { @@ -32,7 +34,7 @@ public void GlobalBeforeEnteringState( break; case Phase.WAIT_FLUSH: store._indexCheckpoint.info.num_buckets = store.overflowBucketsAllocator.GetMaxValidAddress(); - 
store._indexCheckpoint.info.finalLogicalAddress = store.hlog.GetTailAddress(); + store._indexCheckpoint.info.finalLogicalAddress = store.hlogBase.GetTailAddress(); break; case Phase.PERSISTENCE_CALLBACK: store.WriteIndexMetaInfo(); @@ -42,18 +44,18 @@ public void GlobalBeforeEnteringState( } /// - public void GlobalAfterEnteringState( + public void GlobalAfterEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { } /// - public void OnThreadState( + public void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -65,7 +67,9 @@ public void OnThreadState /// The state machine orchestrates a full checkpoint /// - internal sealed class FullCheckpointStateMachine : HybridLogCheckpointStateMachine + internal sealed class FullCheckpointStateMachine : HybridLogCheckpointStateMachine + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Construct a new FullCheckpointStateMachine to use the given checkpoint backend (either fold-over or snapshot), @@ -73,9 +77,9 @@ internal sealed class FullCheckpointStateMachine : HybridLogCheckpointStateMachi /// /// A task that encapsulates the logic to persist the checkpoint /// upper limit (inclusive) of the version included - public FullCheckpointStateMachine(ISynchronizationTask checkpointBackend, long targetVersion = -1) : base( - targetVersion, new VersionChangeTask(), new FullCheckpointOrchestrationTask(), - new IndexSnapshotTask(), checkpointBackend) + public FullCheckpointStateMachine(ISynchronizationTask checkpointBackend, long targetVersion = -1) : base( + targetVersion, new VersionChangeTask(), new FullCheckpointOrchestrationTask(), + new IndexSnapshotTask(), checkpointBackend) { } /// diff --git 
a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/HybridLogCheckpointTask.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/HybridLogCheckpointTask.cs index 6924073fe5..f141e316d0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/HybridLogCheckpointTask.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/HybridLogCheckpointTask.cs @@ -13,12 +13,14 @@ namespace Tsavorite.core /// This task is the base class for a checkpoint "backend", which decides how a captured version is /// persisted on disk. /// - internal abstract class HybridLogCheckpointOrchestrationTask : ISynchronizationTask + internal abstract class HybridLogCheckpointOrchestrationTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private long lastVersion; /// - public virtual void GlobalBeforeEnteringState(SystemState next, - TsavoriteKV store) + public virtual void GlobalBeforeEnteringState(SystemState next, + TsavoriteKV store) { switch (next.Phase) { @@ -30,15 +32,15 @@ public virtual void GlobalBeforeEnteringState(SystemState next, store.InitializeHybridLogCheckpoint(store._hybridLogCheckpointToken, next.Version); } store._hybridLogCheckpoint.info.version = next.Version; - store._hybridLogCheckpoint.info.startLogicalAddress = store.hlog.GetTailAddress(); + store._hybridLogCheckpoint.info.startLogicalAddress = store.hlogBase.GetTailAddress(); // Capture begin address before checkpoint starts - store._hybridLogCheckpoint.info.beginAddress = store.hlog.BeginAddress; + store._hybridLogCheckpoint.info.beginAddress = store.hlogBase.BeginAddress; break; case Phase.IN_PROGRESS: store.CheckpointVersionShift(lastVersion, next.Version); break; case Phase.WAIT_FLUSH: - store._hybridLogCheckpoint.info.headAddress = store.hlog.HeadAddress; + store._hybridLogCheckpoint.info.headAddress = store.hlogBase.HeadAddress; store._hybridLogCheckpoint.info.nextVersion = next.Version; break; case 
Phase.PERSISTENCE_CALLBACK: @@ -55,7 +57,7 @@ public virtual void GlobalBeforeEnteringState(SystemState next, } } - protected static void CollectMetadata(SystemState next, TsavoriteKV store) + protected static void CollectMetadata(SystemState next, TsavoriteKV store) { // Collect object log offsets only after flushes // are completed @@ -87,22 +89,22 @@ protected static void CollectMetadata(SystemState next, TsavoriteKV< if (toDelete != null) { foreach (var key in toDelete) - store._activeSessions.Remove(key); + _ = store._activeSessions.Remove(key); } } } /// - public virtual void GlobalAfterEnteringState(SystemState next, - TsavoriteKV store) + public virtual void GlobalAfterEnteringState(SystemState next, + TsavoriteKV store) { } /// - public virtual void OnThreadState( + public virtual void OnThreadState( SystemState current, - SystemState prev, TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + SystemState prev, TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -114,7 +116,7 @@ public virtual void OnThreadState - internal sealed class FoldOverCheckpointTask : HybridLogCheckpointOrchestrationTask + internal sealed class FoldOverCheckpointTask : HybridLogCheckpointOrchestrationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public override void GlobalBeforeEnteringState(SystemState next, - TsavoriteKV store) + public override void GlobalBeforeEnteringState(SystemState next, + TsavoriteKV store) { base.GlobalBeforeEnteringState(next, store); @@ -143,17 +147,16 @@ public override void GlobalBeforeEnteringState(SystemState next, if (next.Phase != Phase.WAIT_FLUSH) return; - store.hlog.ShiftReadOnlyToTail(out var tailAddress, - out store._hybridLogCheckpoint.flushedSemaphore); + _ = store.hlogBase.ShiftReadOnlyToTail(out var tailAddress, out store._hybridLogCheckpoint.flushedSemaphore); 
store._hybridLogCheckpoint.info.finalLogicalAddress = tailAddress; } /// - public override void OnThreadState( + public override void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -166,7 +169,7 @@ public override void OnThreadState= store._hybridLogCheckpoint.info.finalLogicalAddress; + var notify = store.hlogBase.FlushedUntilAddress >= store._hybridLogCheckpoint.info.finalLogicalAddress; notify = notify || !store.SameCycle(ctx, current) || s == null; if (valueTasks != null && !notify) @@ -191,10 +194,12 @@ public override void OnThreadState - internal sealed class SnapshotCheckpointTask : HybridLogCheckpointOrchestrationTask + internal sealed class SnapshotCheckpointTask : HybridLogCheckpointOrchestrationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public override void GlobalBeforeEnteringState(SystemState next, TsavoriteKV store) + public override void GlobalBeforeEnteringState(SystemState next, TsavoriteKV store) { switch (next.Phase) { @@ -205,21 +210,21 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa break; case Phase.WAIT_FLUSH: base.GlobalBeforeEnteringState(next, store); - store._hybridLogCheckpoint.info.finalLogicalAddress = store.hlog.GetTailAddress(); + store._hybridLogCheckpoint.info.finalLogicalAddress = store.hlogBase.GetTailAddress(); store._hybridLogCheckpoint.info.snapshotFinalLogicalAddress = store._hybridLogCheckpoint.info.finalLogicalAddress; store._hybridLogCheckpoint.snapshotFileDevice = store.checkpointManager.GetSnapshotLogDevice(store._hybridLogCheckpointToken); store._hybridLogCheckpoint.snapshotFileObjectLogDevice = store.checkpointManager.GetSnapshotObjectLogDevice(store._hybridLogCheckpointToken); - 
store._hybridLogCheckpoint.snapshotFileDevice.Initialize(store.hlog.GetSegmentSize()); + store._hybridLogCheckpoint.snapshotFileDevice.Initialize(store.hlogBase.GetSegmentSize()); store._hybridLogCheckpoint.snapshotFileObjectLogDevice.Initialize(-1); // If we are using a NullDevice then storage tier is not enabled and FlushedUntilAddress may be ReadOnlyAddress; get all records in memory. - store._hybridLogCheckpoint.info.snapshotStartFlushedLogicalAddress = store.hlog.IsNullDevice ? store.hlog.HeadAddress : store.hlog.FlushedUntilAddress; + store._hybridLogCheckpoint.info.snapshotStartFlushedLogicalAddress = store.hlogBase.IsNullDevice ? store.hlogBase.HeadAddress : store.hlogBase.FlushedUntilAddress; - long startPage = store.hlog.GetPage(store._hybridLogCheckpoint.info.snapshotStartFlushedLogicalAddress); - long endPage = store.hlog.GetPage(store._hybridLogCheckpoint.info.finalLogicalAddress); + long startPage = store.hlogBase.GetPage(store._hybridLogCheckpoint.info.snapshotStartFlushedLogicalAddress); + long endPage = store.hlogBase.GetPage(store._hybridLogCheckpoint.info.finalLogicalAddress); if (store._hybridLogCheckpoint.info.finalLogicalAddress > store.hlog.GetStartLogicalAddress(endPage)) { @@ -230,7 +235,7 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa // handle corrupted or unexpected concurrent page changes during the flush, e.g., by // resuming epoch protection if necessary. Correctness is not affected as we will // only read safe pages during recovery. 
- store.hlog.AsyncFlushPagesToDevice( + store.hlogBase.AsyncFlushPagesToDevice( startPage, endPage, store._hybridLogCheckpoint.info.finalLogicalAddress, @@ -243,7 +248,7 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa case Phase.PERSISTENCE_CALLBACK: // Set actual FlushedUntil to the latest possible data in main log that is on disk // If we are using a NullDevice then storage tier is not enabled and FlushedUntilAddress may be ReadOnlyAddress; get all records in memory. - store._hybridLogCheckpoint.info.flushedLogicalAddress = store.hlog.IsNullDevice ? store.hlog.HeadAddress : store.hlog.FlushedUntilAddress; + store._hybridLogCheckpoint.info.flushedLogicalAddress = store.hlogBase.IsNullDevice ? store.hlogBase.HeadAddress : store.hlogBase.FlushedUntilAddress; base.GlobalBeforeEnteringState(next, store); store._lastSnapshotCheckpoint = store._hybridLogCheckpoint.Transfer(); break; @@ -254,10 +259,10 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa } /// - public override void OnThreadState( + public override void OnThreadState( SystemState current, - SystemState prev, TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + SystemState prev, TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -296,10 +301,12 @@ public override void OnThreadState - internal sealed class IncrementalSnapshotCheckpointTask : HybridLogCheckpointOrchestrationTask + internal sealed class IncrementalSnapshotCheckpointTask : HybridLogCheckpointOrchestrationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public override void GlobalBeforeEnteringState(SystemState next, TsavoriteKV store) + public override void GlobalBeforeEnteringState(SystemState next, TsavoriteKV store) { switch (next.Phase) { @@ -313,22 +320,22 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa break; 
case Phase.WAIT_FLUSH: base.GlobalBeforeEnteringState(next, store); - store._hybridLogCheckpoint.info.finalLogicalAddress = store.hlog.GetTailAddress(); + store._hybridLogCheckpoint.info.finalLogicalAddress = store.hlogBase.GetTailAddress(); if (store._hybridLogCheckpoint.deltaLog == null) { store._hybridLogCheckpoint.deltaFileDevice = store.checkpointManager.GetDeltaLogDevice(store._hybridLogCheckpointToken); store._hybridLogCheckpoint.deltaFileDevice.Initialize(-1); - store._hybridLogCheckpoint.deltaLog = new DeltaLog(store._hybridLogCheckpoint.deltaFileDevice, store.hlog.LogPageSizeBits, -1); - store._hybridLogCheckpoint.deltaLog.InitializeForWrites(store.hlog.bufferPool); + store._hybridLogCheckpoint.deltaLog = new DeltaLog(store._hybridLogCheckpoint.deltaFileDevice, store.hlogBase.LogPageSizeBits, -1); + store._hybridLogCheckpoint.deltaLog.InitializeForWrites(store.hlogBase.bufferPool); } // We are writing delta records outside epoch protection, so callee should be able to // handle corrupted or unexpected concurrent page changes during the flush, e.g., by // resuming epoch protection if necessary. Correctness is not affected as we will // only read safe pages during recovery. 
- store.hlog.AsyncFlushDeltaToDevice( - store.hlog.FlushedUntilAddress, + store.hlogBase.AsyncFlushDeltaToDevice( + store.hlogBase.FlushedUntilAddress, store._hybridLogCheckpoint.info.finalLogicalAddress, store._lastSnapshotCheckpoint.info.finalLogicalAddress, store._hybridLogCheckpoint.prevVersion, @@ -348,10 +355,10 @@ public override void GlobalBeforeEnteringState(SystemState next, Tsa } /// - public override void OnThreadState( + public override void OnThreadState( SystemState current, - SystemState prev, TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + SystemState prev, TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -388,7 +395,9 @@ public override void OnThreadState /// /// - internal class HybridLogCheckpointStateMachine : VersionChangeStateMachine + internal class HybridLogCheckpointStateMachine : VersionChangeStateMachine + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Construct a new HybridLogCheckpointStateMachine to use the given checkpoint backend (either fold-over or @@ -396,15 +405,15 @@ internal class HybridLogCheckpointStateMachine : VersionChangeStateMachine /// /// A task that encapsulates the logic to persist the checkpoint /// upper limit (inclusive) of the version included - public HybridLogCheckpointStateMachine(ISynchronizationTask checkpointBackend, long targetVersion = -1) - : base(targetVersion, new VersionChangeTask(), checkpointBackend) { } + public HybridLogCheckpointStateMachine(ISynchronizationTask checkpointBackend, long targetVersion = -1) + : base(targetVersion, new VersionChangeTask(), checkpointBackend) { } /// /// Construct a new HybridLogCheckpointStateMachine with the given tasks. Does not load any tasks by default. 
/// /// upper limit (inclusive) of the version included /// The tasks to load onto the state machine - protected HybridLogCheckpointStateMachine(long targetVersion, params ISynchronizationTask[] tasks) + protected HybridLogCheckpointStateMachine(long targetVersion, params ISynchronizationTask[] tasks) : base(targetVersion, tasks) { } /// diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IStateMachineCallback.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IStateMachineCallback.cs index 238234b702..f89b1a2335 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IStateMachineCallback.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IStateMachineCallback.cs @@ -6,15 +6,13 @@ namespace Tsavorite.core /// /// Encapsulates custom logic to be executed as part of Tsavorite's state machine logic /// - public interface IStateMachineCallback + public interface IStateMachineCallback + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Invoked immediately before every state transition. /// - /// next system state - /// reference to Tsavorite KV - /// Key Type - /// Value Type - void BeforeEnteringState(SystemState next, TsavoriteKV tsavorite); + void BeforeEnteringState(SystemState next, TsavoriteKV tsavorite); } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/ISynchronizationStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/ISynchronizationStateMachine.cs index 8c1346bbee..595fd1c9fc 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/ISynchronizationStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/ISynchronizationStateMachine.cs @@ -12,7 +12,9 @@ namespace Tsavorite.core /// synchronize and agree on certain time points. 
A full run of the state machine is defined as a cycle /// starting from REST and ending in REST, and only one state machine can be active at a given time. /// - internal interface ISynchronizationStateMachine + internal interface ISynchronizationStateMachine + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Returns the version that we expect this state machine to end up at when back to REST, or -1 if not yet known. @@ -30,46 +32,24 @@ internal interface ISynchronizationStateMachine /// /// This function is invoked immediately before the global state machine enters the given state. /// - /// - /// - /// - /// - void GlobalBeforeEnteringState(SystemState next, - TsavoriteKV tsavorite); + void GlobalBeforeEnteringState(SystemState next, + TsavoriteKV tsavorite); /// /// This function is invoked immediately after the global state machine enters the given state. /// - /// - /// - /// - /// - void GlobalAfterEnteringState(SystemState next, - TsavoriteKV tsavorite); + void GlobalAfterEnteringState(SystemState next, + TsavoriteKV tsavorite); /// /// This function is invoked for every thread when they refresh and observe a given state. /// /// Note that the function is not allowed to await when async is set to false. 
/// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - void OnThreadEnteringState(SystemState current, + void OnThreadEnteringState(SystemState current, SystemState prev, - TsavoriteKV tsavorite, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV tsavorite, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -82,54 +62,34 @@ void OnThreadEnteringState - internal interface ISynchronizationTask + internal interface ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// This function is invoked immediately before the global state machine enters the given state. /// - /// - /// - /// - /// - void GlobalBeforeEnteringState( + void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV tsavorite); + TsavoriteKV tsavorite); /// /// This function is invoked immediately after the global state machine enters the given state. /// - /// - /// - /// - /// - void GlobalAfterEnteringState( + void GlobalAfterEnteringState( SystemState next, - TsavoriteKV tsavorite); + TsavoriteKV tsavorite); /// /// This function is invoked for every thread when they refresh and observe a given state. /// /// Note that the function is not allowed to await when async is set to false. 
/// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - void OnThreadState( + void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV tsavorite, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV tsavorite, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -140,9 +100,11 @@ void OnThreadState /// Abstract base class for ISynchronizationStateMachine that implements that state machine logic /// with ISynchronizationTasks /// - internal abstract class SynchronizationStateMachineBase : ISynchronizationStateMachine + internal abstract class SynchronizationStateMachineBase : ISynchronizationStateMachine + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - private readonly ISynchronizationTask[] tasks; + private readonly ISynchronizationTask[] tasks; private long toVersion = -1; @@ -151,7 +113,7 @@ internal abstract class SynchronizationStateMachineBase : ISynchronizationStateM /// order they are executed on each state machine. 
/// /// The ISynchronizationTasks to run on the state machine - protected SynchronizationStateMachineBase(params ISynchronizationTask[] tasks) + protected SynchronizationStateMachineBase(params ISynchronizationTask[] tasks) { this.tasks = tasks; } @@ -169,27 +131,27 @@ protected SynchronizationStateMachineBase(params ISynchronizationTask[] tasks) public abstract SystemState NextState(SystemState start); /// - public void GlobalBeforeEnteringState(SystemState next, - TsavoriteKV tsavorite) + public void GlobalBeforeEnteringState(SystemState next, + TsavoriteKV tsavorite) { foreach (var task in tasks) task.GlobalBeforeEnteringState(next, tsavorite); } /// - public void GlobalAfterEnteringState(SystemState next, - TsavoriteKV tsavorite) + public void GlobalAfterEnteringState(SystemState next, + TsavoriteKV tsavorite) { foreach (var task in tasks) task.GlobalAfterEnteringState(next, tsavorite); } /// - public void OnThreadEnteringState( + public void OnThreadEnteringState( SystemState current, SystemState prev, - TsavoriteKV tsavorite, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV tsavorite, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexResizeStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexResizeStateMachine.cs index 9e703c7966..aafbbc550a 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexResizeStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexResizeStateMachine.cs @@ -10,14 +10,16 @@ namespace Tsavorite.core /// /// Resizes an index /// - internal sealed class IndexResizeTask : ISynchronizationTask + internal sealed class IndexResizeTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { bool allThreadsInPrepareGrow; /// - public void 
GlobalBeforeEnteringState( + public void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { switch (next.Phase) { @@ -50,9 +52,9 @@ public void GlobalBeforeEnteringState( } /// - public void GlobalAfterEnteringState( + public void GlobalAfterEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { switch (next.Phase) { @@ -80,11 +82,11 @@ public void GlobalAfterEnteringState( } /// - public void OnThreadState( + public void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -95,7 +97,7 @@ public void OnThreadState /// Resizes the index /// - internal sealed class IndexResizeStateMachine : SynchronizationStateMachineBase + internal sealed class IndexResizeStateMachine : SynchronizationStateMachineBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Constructs a new IndexResizeStateMachine /// - public IndexResizeStateMachine() : base(new IndexResizeTask()) { } + public IndexResizeStateMachine() : base(new IndexResizeTask()) { } /// public override SystemState NextState(SystemState start) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexSnapshotStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexSnapshotStateMachine.cs index 92ee78c247..a66842ab9b 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexSnapshotStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/IndexSnapshotStateMachine.cs @@ -11,12 +11,14 @@ namespace Tsavorite.core /// /// This task performs an index checkpoint. 
/// - internal sealed class IndexSnapshotTask : ISynchronizationTask + internal sealed class IndexSnapshotTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public void GlobalBeforeEnteringState( + public void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { switch (next.Phase) { @@ -27,7 +29,7 @@ public void GlobalBeforeEnteringState( store.InitializeIndexCheckpoint(store._indexCheckpointToken); } - store._indexCheckpoint.info.startLogicalAddress = store.hlog.GetTailAddress(); + store._indexCheckpoint.info.startLogicalAddress = store.hlogBase.GetTailAddress(); store.TakeIndexFuzzyCheckpoint(); break; @@ -52,18 +54,18 @@ public void GlobalBeforeEnteringState( } /// - public void GlobalAfterEnteringState( + public void GlobalAfterEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { } /// - public void OnThreadState( + public void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -98,12 +100,14 @@ public void OnThreadState /// This state machine performs an index checkpoint /// - internal sealed class IndexSnapshotStateMachine : SynchronizationStateMachineBase + internal sealed class IndexSnapshotStateMachine : SynchronizationStateMachineBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Create a new IndexSnapshotStateMachine /// - public IndexSnapshotStateMachine() : base(new IndexSnapshotTask()) + public IndexSnapshotStateMachine() : base(new IndexSnapshotTask()) { } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/StateTransitions.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/StateTransitions.cs index 9d7e7379ab..0e75362419 100644 --- 
a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/StateTransitions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/StateTransitions.cs @@ -91,6 +91,7 @@ public struct SystemState /// public Phase Phase { + [MethodImpl(MethodImplOptions.AggressiveInlining)] get { return (Phase)((Word >> kPhaseShiftInWord) & kPhaseMaskInInteger); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/TsavoriteStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/TsavoriteStateMachine.cs index f47da41ab9..8ef955f09f 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/TsavoriteStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/TsavoriteStateMachine.cs @@ -10,7 +10,9 @@ namespace Tsavorite.core { - public partial class TsavoriteKV + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // The current system state, defined as the combination of a phase and a version number. This value // is observed by all sessions and a state machine communicates its progress to sessions through @@ -20,8 +22,8 @@ public partial class TsavoriteKV private volatile int stateMachineActive = 0; // The current state machine in the system. The value could be stale and point to the previous state machine // if no state machine is active at this time. - private ISynchronizationStateMachine currentSyncStateMachine; - private List callbacks = new List(); + private ISynchronizationStateMachine currentSyncStateMachine; + private List> callbacks = new(); internal long lastVersion; /// @@ -67,14 +69,14 @@ public partial class TsavoriteKV /// may slow or halt state machine execution. For advanced users only. 
/// /// callback to register - public void UnsafeRegisterCallback(IStateMachineCallback callback) => callbacks.Add(callback); + public void UnsafeRegisterCallback(IStateMachineCallback callback) => callbacks.Add(callback); /// /// Attempt to start the given state machine in the system if no other state machine is active. /// /// The state machine to start /// true if the state machine has started, false otherwise - private bool StartStateMachine(ISynchronizationStateMachine stateMachine) + private bool StartStateMachine(ISynchronizationStateMachine stateMachine) { // return immediately if there is a state machine under way. if (Interlocked.CompareExchange(ref stateMachineActive, 1, 0) != 0) return false; @@ -248,7 +250,7 @@ private void ThreadStateMachineStep) )); currentTask.OnThreadEnteringState(threadState, previousState, this, ctx, sessionFunctions, valueTasks, token); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/VersionChangeStateMachine.cs b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/VersionChangeStateMachine.cs index 29e4a4a404..cadd4182d0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/VersionChangeStateMachine.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Synchronization/VersionChangeStateMachine.cs @@ -12,27 +12,29 @@ namespace Tsavorite.core /// version. It is used as the basis of many other tasks, which decides what they do with the captured /// version. 
/// - internal sealed class VersionChangeTask : ISynchronizationTask + internal sealed class VersionChangeTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public void GlobalBeforeEnteringState( + public void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { } /// - public void GlobalAfterEnteringState( + public void GlobalAfterEnteringState( SystemState start, - TsavoriteKV store) + TsavoriteKV store) { } /// - public void OnThreadState( + public void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -48,7 +50,7 @@ public void OnThreadState.AtomicSwitch(ctx, ctx.prevCtx, _ctx.version); - TsavoriteKV.InitContext(ctx, ctx.prevCtx.sessionID, ctx.prevCtx.sessionName); + _ = TsavoriteKV.AtomicSwitch(ctx, ctx.prevCtx, _ctx.version); + TsavoriteKV.InitContext(ctx, ctx.prevCtx.sessionID, ctx.prevCtx.sessionName); // Has to be prevCtx, not ctx ctx.prevCtx.markers[EpochPhaseIdx.InProgress] = true; @@ -81,30 +83,32 @@ public void OnThreadState - internal sealed class FoldOverTask : ISynchronizationTask + internal sealed class FoldOverTask : ISynchronizationTask + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// - public void GlobalBeforeEnteringState( + public void GlobalBeforeEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { if (next.Phase == Phase.REST) // Before leaving the checkpoint, make sure all previous versions are read-only. 
- store.hlog.ShiftReadOnlyToTail(out _, out _); + store.hlogBase.ShiftReadOnlyToTail(out _, out _); } /// - public void GlobalAfterEnteringState( + public void GlobalAfterEnteringState( SystemState next, - TsavoriteKV store) + TsavoriteKV store) { } /// - public void OnThreadState( + public void OnThreadState( SystemState current, SystemState prev, - TsavoriteKV store, - TsavoriteKV.TsavoriteExecutionContext ctx, + TsavoriteKV store, + TsavoriteKV.TsavoriteExecutionContext ctx, TSessionFunctionsWrapper sessionFunctions, List valueTasks, CancellationToken token = default) @@ -116,7 +120,9 @@ public void OnThreadState /// A VersionChangeStateMachine orchestrates to capture a version, but does not flush to disk. /// - internal class VersionChangeStateMachine : SynchronizationStateMachineBase + internal class VersionChangeStateMachine : SynchronizationStateMachineBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private readonly long targetVersion; @@ -125,7 +131,7 @@ internal class VersionChangeStateMachine : SynchronizationStateMachineBase /// /// upper limit (inclusive) of the version included /// The tasks to load onto the state machine - protected VersionChangeStateMachine(long targetVersion = -1, params ISynchronizationTask[] tasks) : base(tasks) + protected VersionChangeStateMachine(long targetVersion = -1, params ISynchronizationTask[] tasks) : base(tasks) { this.targetVersion = targetVersion; } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Constants.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Constants.cs index 2cf6821ecf..61c5a6538d 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Constants.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Constants.cs @@ -8,6 +8,9 @@ internal static class Constants /// Size of cache line in bytes public const int kCacheLineBytes = 64; + // RecordInfo has a long field, so it should be aligned to 8-bytes + public const int kRecordAlignment = 
8; + public const bool kFineGrainedHandoverRecord = false; public const bool kFineGrainedHandoverBucket = true; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Extensions.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Extensions.cs index 9ac12cfe34..22f8b2b403 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Extensions.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Extensions.cs @@ -19,12 +19,12 @@ public static class Extensions /// /// /// - public static IObservable> ToRecordObservable(this IObservable> source) + public static IObservable> ToRecordObservable(this IObservable> source) { return new RecordObservable(source); } - internal sealed class RecordObservable : IObservable> + internal sealed class RecordObservable : IObservable> { readonly IObservable> o; @@ -33,7 +33,7 @@ public RecordObservable(IObservable> o) this.o = o; } - public IDisposable Subscribe(IObserver> observer) + public IDisposable Subscribe(IObserver> observer) { return o.Subscribe(new RecordObserver(observer)); } @@ -41,9 +41,9 @@ public IDisposable Subscribe(IObserver> observer) internal sealed class RecordObserver : IObserver> { - private readonly IObserver> observer; + private readonly IObserver> observer; - public RecordObserver(IObserver> observer) + public RecordObserver(IObserver> observer) { this.observer = observer; } @@ -62,7 +62,7 @@ public void OnNext(ITsavoriteScanIterator v) { while (v.GetNext(out RecordInfo info, out Key key, out Value value)) { - observer.OnNext(new Record { info = info, key = key, value = value }); + observer.OnNext(new AllocatorRecord { info = info, key = key, value = value }); } } } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/HashBucketEntry.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/HashBucketEntry.cs index 03e7b4ba8f..a73cf7b887 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/HashBucketEntry.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/HashBucketEntry.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. +using System.Runtime.CompilerServices; using System.Runtime.InteropServices; namespace Tsavorite.core @@ -14,7 +15,9 @@ internal struct HashBucketEntry public long word; public long Address { + [MethodImpl(MethodImplOptions.AggressiveInlining)] readonly get => word & Constants.kAddressMask; + set { word &= ~Constants.kAddressMask; @@ -26,7 +29,9 @@ public long Address public ushort Tag { + [MethodImpl(MethodImplOptions.AggressiveInlining)] readonly get => (ushort)((word & Constants.kTagPositionMask) >> Constants.kTagShift); + set { word &= ~Constants.kTagPositionMask; @@ -36,7 +41,9 @@ public ushort Tag public bool Tentative { + [MethodImpl(MethodImplOptions.AggressiveInlining)] readonly get => (word & Constants.kTentativeBitMask) != 0; + set { if (value) @@ -46,16 +53,10 @@ public bool Tentative } } - public bool ReadCache + public readonly bool ReadCache { - readonly get => (word & Constants.kReadCacheBitMask) != 0; - set - { - if (value) - word |= Constants.kReadCacheBitMask; - else - word &= ~Constants.kReadCacheBitMask; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => (word & Constants.kReadCacheBitMask) != 0; } public override readonly string ToString() diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/BlockAllocate.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/BlockAllocate.cs index 4ee9c1c12b..7d443a3b33 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/BlockAllocate.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/BlockAllocate.cs @@ -7,11 +7,13 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { 
[MethodImpl(MethodImplOptions.AggressiveInlining)] private static bool TryBlockAllocate( - AllocatorBase allocator, + AllocatorBase allocator, int recordSize, out long logicalAddress, ref PendingContext pendingContext, @@ -48,9 +50,9 @@ internal struct AllocateOptions [MethodImpl(MethodImplOptions.AggressiveInlining)] bool TryAllocateRecord(TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext, - ref OperationStackContext stackCtx, int actualSize, ref int allocatedSize, int newKeySize, AllocateOptions options, + ref OperationStackContext stackCtx, int actualSize, ref int allocatedSize, int newKeySize, AllocateOptions options, out long newLogicalAddress, out long newPhysicalAddress, out OperationStatus status) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { status = OperationStatus.SUCCESS; @@ -77,7 +79,7 @@ bool TryAllocateRecord(TSessio // Spin to make sure newLogicalAddress is > recSrc.LatestLogicalAddress (the .PreviousAddress and CAS comparison value). 
for (; ; Thread.Yield()) { - if (!TryBlockAllocate(hlog, allocatedSize, out newLogicalAddress, ref pendingContext, out status)) + if (!TryBlockAllocate(hlogBase, allocatedSize, out newLogicalAddress, ref pendingContext, out status)) break; newPhysicalAddress = hlog.GetPhysicalAddress(newLogicalAddress); @@ -97,8 +99,8 @@ bool TryAllocateRecord(TSessio if (options.Recycle) { ref var newValue = ref hlog.GetValue(newPhysicalAddress); - hlog.GetAndInitializeValue(newPhysicalAddress, newPhysicalAddress + actualSize); - int valueOffset = (int)((long)Unsafe.AsPointer(ref newValue) - newPhysicalAddress); + _ = hlog.GetAndInitializeValue(newPhysicalAddress, newPhysicalAddress + actualSize); + var valueOffset = (int)((long)Unsafe.AsPointer(ref newValue) - newPhysicalAddress); SetExtraValueLength(ref hlog.GetValue(newPhysicalAddress), ref newRecordInfo, actualSize - valueOffset, allocatedSize - valueOffset); SaveAllocationForRetry(ref pendingContext, newLogicalAddress, newPhysicalAddress, allocatedSize); } @@ -113,13 +115,13 @@ bool TryAllocateRecord(TSessio } [MethodImpl(MethodImplOptions.AggressiveInlining)] - bool TryAllocateRecordReadCache(ref PendingContext pendingContext, ref OperationStackContext stackCtx, + bool TryAllocateRecordReadCache(ref PendingContext pendingContext, ref OperationStackContext stackCtx, int allocatedSize, out long newLogicalAddress, out long newPhysicalAddress, out OperationStatus status) { // Spin to make sure the start of the tag chain is not readcache, or that newLogicalAddress is > the first address in the tag chain. 
for (; ; Thread.Yield()) { - if (!TryBlockAllocate(readcache, allocatedSize, out newLogicalAddress, ref pendingContext, out status)) + if (!TryBlockAllocate(readCacheBase, allocatedSize, out newLogicalAddress, ref pendingContext, out status)) break; newPhysicalAddress = readcache.GetPhysicalAddress(newLogicalAddress); @@ -155,21 +157,21 @@ void SaveAllocationForRetry(ref PendingContext(TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext, long minAddress, ref int allocatedSize, int newKeySize, out long newLogicalAddress, out long newPhysicalAddress) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Use an earlier allocation from a failed operation, if possible. newLogicalAddress = pendingContext.retryNewLogicalAddress; pendingContext.retryNewLogicalAddress = 0; - if (newLogicalAddress < hlog.HeadAddress) + if (newLogicalAddress < hlogBase.HeadAddress) { - // The record dropped below headAddress. If it needs DisposeForRevivification, it will be done on eviction. + // The record dropped below headAddress. If it needs DisposeRecord, it will be done on eviction. newPhysicalAddress = 0; return false; } @@ -182,7 +184,7 @@ bool GetAllocationForRetry(TSe // Dispose the record for either reuse or abandonment. 
ClearExtraValueSpace(ref recordInfo, ref recordValue, usedValueLength, fullValueLength); - sessionFunctions.DisposeForRevivification(ref hlog.GetKey(newPhysicalAddress), ref recordValue, newKeySize, ref recordInfo); + storeFunctions.DisposeRecord(ref hlog.GetKey(newPhysicalAddress), ref recordValue, DisposeReason.RevivificationFreeList, newKeySize); if (newLogicalAddress <= minAddress || fullRecordLength < allocatedSize) { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ConditionalCopyToTail.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ConditionalCopyToTail.cs index e08517b43f..d1d5d3dde5 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ConditionalCopyToTail.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ConditionalCopyToTail.cs @@ -6,7 +6,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Copy a record to the tail of the log after caller has verified it does not exist within a specified range. 
@@ -27,8 +29,8 @@ public unsafe partial class TsavoriteKV : TsavoriteBase private OperationStatus ConditionalCopyToTail(TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext, ref Key key, ref Input input, ref Value value, ref Output output, Context userContext, - ref OperationStackContext stackCtx, WriteReason writeReason, bool wantIO = true) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, WriteReason writeReason, bool wantIO = true) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { bool callerHasTransientLock = stackCtx.recSrc.HasTransientSLock; @@ -64,7 +66,7 @@ private OperationStatus ConditionalCopyToTail stackCtx2 = new(stackCtx.hei.hash); + OperationStackContext stackCtx2 = new(stackCtx.hei.hash); bool needIO; do { @@ -77,7 +79,7 @@ private OperationStatus ConditionalCopyToTail(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref Input input, ref Value value, ref Output output, long minAddress) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(epoch.ThisInstanceProtected(), "This is called only from Compaction so the epoch should be protected"); PendingContext pendingContext = new(); - OperationStackContext stackCtx = new(comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(storeFunctions.GetKeyHashCode64(ref key)); OperationStatus status; bool needIO; do @@ -116,8 +118,9 @@ internal Status CompactionConditionalCopyToTail(TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext, ref Key key, ref Input input, ref Value value, ref Output output, Context userContext, - ref OperationStackContext stackCtx, long minAddress, WriteReason writeReason, OperationType opType = OperationType.CONDITIONAL_INSERT) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, long minAddress, WriteReason writeReason, + 
OperationType opType = OperationType.CONDITIONAL_INSERT) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { pendingContext.type = opType; pendingContext.minAddress = minAddress; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContainsKeyInMemory.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContainsKeyInMemory.cs index 66a7bcbc1a..ee2e5095ed 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContainsKeyInMemory.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContainsKeyInMemory.cs @@ -5,27 +5,29 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status InternalContainsKeyInMemory( ref Key key, TSessionFunctionsWrapper sessionFunctions, out long logicalAddress, long fromAddress = -1) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { - OperationStackContext stackCtx = new(comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(storeFunctions.GetKeyHashCode64(ref key)); if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) SplitBuckets(stackCtx.hei.hash); if (FindTag(ref stackCtx.hei)) { - stackCtx.SetRecordSourceToHashEntry(hlog); + stackCtx.SetRecordSourceToHashEntry(hlogBase); if (UseReadCache) SkipReadCache(ref stackCtx, out _); - if (fromAddress < hlog.HeadAddress) - fromAddress = hlog.HeadAddress; + if (fromAddress < hlogBase.HeadAddress) + fromAddress = hlogBase.HeadAddress; if (TryFindRecordInMainLog(ref key, ref stackCtx, fromAddress) && !stackCtx.recSrc.GetInfo().Tombstone) { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContinuePending.cs 
b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContinuePending.cs index c2cd0207ad..451b1d9120 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContinuePending.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ContinuePending.cs @@ -5,7 +5,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Continue a pending read operation. Computes 'output' from 'input' and value corresponding to 'key' @@ -28,18 +30,18 @@ public unsafe partial class TsavoriteKV : TsavoriteBase /// internal OperationStatus ContinuePendingRead(AsyncIOContext request, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { ref RecordInfo srcRecordInfo = ref hlog.GetInfoFromBytePointer(request.record.GetValidPointer()); srcRecordInfo.ClearBitsForDiskImages(); - if (request.logicalAddress >= hlog.BeginAddress && request.logicalAddress >= pendingContext.minAddress) + if (request.logicalAddress >= hlogBase.BeginAddress && request.logicalAddress >= pendingContext.minAddress) { SpinWaitUntilClosed(request.logicalAddress); // If NoKey, we do not have the key in the initial call and must use the key from the satisfied request. ref Key key = ref pendingContext.NoKey ? 
ref hlog.GetContextRecordKey(ref request) : ref pendingContext.key.Get(); - OperationStackContext stackCtx = new(comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(storeFunctions.GetKeyHashCode64(ref key)); while (true) { @@ -51,7 +53,7 @@ internal OperationStatus ContinuePendingRead= hlog.ReadOnlyAddress) + if (stackCtx.recSrc.HasMainLogSrc && stackCtx.recSrc.LogicalAddress >= hlogBase.ReadOnlyAddress) { // If this succeeds, we don't need to copy to tail or readcache, so return success. if (sessionFunctions.ConcurrentReader(ref key, ref pendingContext.input.Get(), ref value, ref pendingContext.output, ref readInfo, ref srcRecordInfo)) @@ -184,7 +186,7 @@ internal OperationStatus ContinuePendingRead internal OperationStatus ContinuePendingRMW(AsyncIOContext request, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { ref Key key = ref pendingContext.key.Get(); @@ -199,7 +201,7 @@ internal OperationStatus ContinuePendingRMW stackCtx = new(pendingContext.keyHash); + OperationStackContext stackCtx = new(pendingContext.keyHash); if (!FindOrCreateTagAndTryTransientXLock(sessionFunctions, ref key, ref stackCtx, out status)) goto CheckRetry; @@ -207,7 +209,7 @@ internal OperationStatus ContinuePendingRMW pendingContext.InitialLatestLogicalAddress) { - Debug.Assert(pendingContext.InitialLatestLogicalAddress < hlog.HeadAddress, "Failed to search all in-memory records"); + Debug.Assert(pendingContext.InitialLatestLogicalAddress < hlogBase.HeadAddress, "Failed to search all in-memory records"); break; } @@ -229,7 +231,7 @@ internal OperationStatus ContinuePendingRMW= hlog.BeginAddress && !srcRecordInfo.Tombstone); + doingCU: request.logicalAddress >= hlogBase.BeginAddress && !srcRecordInfo.Tombstone); } finally { @@ -276,7 +278,7 @@ internal OperationStatus ContinuePendingRMW internal OperationStatus 
ContinuePendingConditionalCopyToTail(AsyncIOContext request, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // If the key was found at or above minAddress, do nothing. if (request.logicalAddress >= pendingContext.minAddress) @@ -284,7 +286,7 @@ internal OperationStatus ContinuePendingConditionalCopyToTail stackCtx = new(comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(storeFunctions.GetKeyHashCode64(ref key)); // See if the record was added above the highest address we checked before issuing the IO. var minAddress = pendingContext.InitialLatestLogicalAddress + 1; @@ -332,7 +334,7 @@ internal OperationStatus ContinuePendingConditionalCopyToTail internal OperationStatus ContinuePendingConditionalScanPush(AsyncIOContext request, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // If the key was found at or above minAddress, do nothing; we'll push it when we get to it. If we flagged the iteration to stop, do nothing. if (request.logicalAddress >= pendingContext.minAddress || pendingContext.scanCursorState.stop) @@ -341,7 +343,7 @@ internal OperationStatus ContinuePendingConditionalScanPush(sessionFunctions, pendingContext.scanCursorState, pendingContext.recordInfo, ref pendingContext.key.Get(), ref pendingContext.value.Get(), + hlogBase.ConditionalScanPush(sessionFunctions, pendingContext.scanCursorState, pendingContext.recordInfo, ref pendingContext.key.Get(), ref pendingContext.value.Get(), minAddress: pendingContext.InitialLatestLogicalAddress + 1); // ConditionalScanPush has already called HandleOperationStatus, so return SUCCESS here. 
diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/EpochOperations.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/EpochOperations.cs index e3ce66917c..3eba34c4a9 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/EpochOperations.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/EpochOperations.cs @@ -7,14 +7,16 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void SynchronizeEpoch( TsavoriteExecutionContext sessionCtx, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var version = sessionCtx.version; Debug.Assert(sessionCtx.version == version, $"sessionCtx.version ({sessionCtx.version}) should == version ({version})"); @@ -33,16 +35,16 @@ internal void SynchronizeEpoch void SpinWaitUntilClosed(long address) { // Unlike HeadAddress, ClosedUntilAddress is a high-water mark; a record that is == to ClosedUntilAddress has *not* been closed yet. 
- while (address >= hlog.ClosedUntilAddress) + while (address >= hlogBase.ClosedUntilAddress) { - Debug.Assert(address < hlog.HeadAddress, "expected address < hlog.HeadAddress"); + Debug.Assert(address < hlogBase.HeadAddress, "expected address < hlog.HeadAddress"); epoch.ProtectAndDrain(); Thread.Yield(); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] - void SpinWaitUntilRecordIsClosed(long logicalAddress, AllocatorBase log) + void SpinWaitUntilRecordIsClosed(long logicalAddress, AllocatorBase log) { Debug.Assert(logicalAddress < log.HeadAddress, "SpinWaitUntilRecordIsClosed should not be called for addresses above HeadAddress"); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/FindRecord.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/FindRecord.cs index 88274f11d5..c32a57e2f2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/FindRecord.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/FindRecord.cs @@ -7,20 +7,22 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryFindRecordInMemory(ref Key key, ref OperationStackContext stackCtx, long minAddress, bool stopAtHeadAddress = true) + private bool TryFindRecordInMemory(ref Key key, ref OperationStackContext stackCtx, long minAddress, bool stopAtHeadAddress = true) { if (UseReadCache && FindInReadCache(ref key, ref stackCtx, minAddress: Constants.kInvalidAddress)) return true; - if (minAddress < hlog.HeadAddress && stopAtHeadAddress) - minAddress = hlog.HeadAddress; + if (minAddress < hlogBase.HeadAddress && stopAtHeadAddress) + minAddress = hlogBase.HeadAddress; return TryFindRecordInMainLog(ref key, ref stackCtx, minAddress: minAddress); } 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryFindRecordInMemory(ref Key key, ref OperationStackContext stackCtx, + private bool TryFindRecordInMemory(ref Key key, ref OperationStackContext stackCtx, ref PendingContext pendingContext) { // Add 1 to the pendingContext minAddresses because we don't want an inclusive search; we're looking to see if it was added *after*. @@ -30,12 +32,12 @@ private bool TryFindRecordInMemory(ref Key key, ref Oper if (FindInReadCache(ref key, ref stackCtx, minAddress: minRC)) return true; } - var minLog = pendingContext.InitialLatestLogicalAddress < hlog.HeadAddress ? hlog.HeadAddress : pendingContext.InitialLatestLogicalAddress + 1; + var minLog = pendingContext.InitialLatestLogicalAddress < hlogBase.HeadAddress ? hlogBase.HeadAddress : pendingContext.InitialLatestLogicalAddress + 1; return TryFindRecordInMainLog(ref key, ref stackCtx, minAddress: minLog); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal bool TryFindRecordInMainLog(ref Key key, ref OperationStackContext stackCtx, long minAddress) + internal bool TryFindRecordInMainLog(ref Key key, ref OperationStackContext stackCtx, long minAddress) { Debug.Assert(!stackCtx.recSrc.HasInMemorySrc, "Should not have found record before this call"); if (stackCtx.recSrc.LogicalAddress >= minAddress) @@ -48,8 +50,8 @@ internal bool TryFindRecordInMainLog(ref Key key, ref OperationStackContext(TSessionFunctionsWrapper sessionFunctions, - ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus, out bool needIO) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus, out bool needIO) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { internalStatus = OperationStatus.SUCCESS; if (RevivificationManager.UseFreeRecordPool) @@ -65,7 +67,7 @@ internal bool TryFindRecordInMainLogForConditionalOperation= 
hlog.BeginAddress; + needIO = stackCtx.hei.Address >= hlogBase.BeginAddress; return false; } } @@ -87,10 +89,10 @@ internal bool TryFindRecordInMainLogForConditionalOperation= minAddress && stackCtx.recSrc.LogicalAddress < hlog.HeadAddress && stackCtx.recSrc.LogicalAddress >= hlog.BeginAddress; + needIO = stackCtx.recSrc.LogicalAddress >= minAddress && stackCtx.recSrc.LogicalAddress < hlogBase.HeadAddress && stackCtx.recSrc.LogicalAddress >= hlogBase.BeginAddress; return false; } finally @@ -104,11 +106,11 @@ internal bool TryFindRecordInMainLogForConditionalOperation !recordInfo.Invalid || recordInfo.IsSealed; [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TraceBackForKeyMatch(ref Key key, ref RecordSource recSrc, long minAddress) + private bool TraceBackForKeyMatch(ref Key key, ref RecordSource recSrc, long minAddress) { // PhysicalAddress must already be populated by callers. ref var recordInfo = ref recSrc.GetInfo(); - if (IsValidTracebackRecord(recordInfo) && comparer.Equals(ref key, ref recSrc.GetKey())) + if (IsValidTracebackRecord(recordInfo) && storeFunctions.KeysEqual(ref key, ref recSrc.GetKey())) { recSrc.SetHasMainLogSrc(); return true; @@ -133,7 +135,7 @@ private bool TraceBackForKeyMatch(ref Key key, long fromLogicalAddress, long min foundPhysicalAddress = hlog.GetPhysicalAddress(foundLogicalAddress); ref var recordInfo = ref hlog.GetInfo(foundPhysicalAddress); - if (IsValidTracebackRecord(recordInfo) && comparer.Equals(ref key, ref hlog.GetKey(foundPhysicalAddress))) + if (IsValidTracebackRecord(recordInfo) && storeFunctions.KeysEqual(ref key, ref hlog.GetKey(foundPhysicalAddress))) return true; foundLogicalAddress = recordInfo.PreviousAddress; @@ -143,7 +145,7 @@ private bool TraceBackForKeyMatch(ref Key key, long fromLogicalAddress, long min } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryFindRecordForUpdate(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus 
internalStatus) + private bool TryFindRecordForUpdate(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus) { // This routine returns true if we should proceed with the InternalXxx operation (whether the record was found or not), // else false (including false if we need a RETRY). If it returns true with recSrc.HasInMemorySrc, caller must set srcRecordInfo. @@ -162,7 +164,7 @@ private bool TryFindRecordForUpdate(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus) + private bool TryFindRecordForRead(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus) { // This routine returns true if we should proceed with the InternalXxx operation (whether the record was found or not), // else false (including false if we need a RETRY). If it returns true with recSrc.HasInMemorySrc, caller must set srcRecordInfo. @@ -181,7 +183,7 @@ private bool TryFindRecordForRead(ref Key key, ref OperationStackContext(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus, + private bool TryFindRecordForPendingOperation(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus, ref PendingContext pendingContext) { // This routine returns true if we find the key, else false. @@ -197,7 +199,7 @@ private bool TryFindRecordForPendingOperation(ref Key ke } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryFindRecordInMainLogForPendingOperation(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus) + private bool TryFindRecordInMainLogForPendingOperation(ref Key key, ref OperationStackContext stackCtx, long minAddress, out OperationStatus internalStatus) { // This overload is called when we do not have a PendingContext to get minAddress from, and we've skipped the readcache if present. 
diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HandleOperationStatus.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HandleOperationStatus.cs index 7955a84e54..2d77524bc2 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HandleOperationStatus.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HandleOperationStatus.cs @@ -8,14 +8,16 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] private bool HandleImmediateRetryStatus( OperationStatus internalStatus, TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper => (internalStatus & OperationStatus.BASIC_MASK) > OperationStatus.MAX_MAP_TO_COMPLETED_STATUSCODE && HandleRetryStatus(internalStatus, sessionFunctions, ref pendingContext); @@ -24,7 +26,7 @@ private bool HandleImmediateRetryStatus [MethodImpl(MethodImplOptions.AggressiveInlining)] internal bool HandleImmediateNonPendingRetryStatus(OperationStatus internalStatus, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(epoch.ThisInstanceProtected()); switch (internalStatus) @@ -45,7 +47,7 @@ private bool HandleRetryStatus OperationStatus internalStatus, TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(epoch.ThisInstanceProtected()); switch (internalStatus) @@ -148,7 +150,7 @@ internal Status HandleOperationStatus( 
else request.callbackQueue = sessionCtx.readyResponses; - hlog.AsyncGetFromDisk(pendingContext.logicalAddress, hlog.GetAverageRecordSize(), request); + hlogBase.AsyncGetFromDisk(pendingContext.logicalAddress, hlog.GetAverageRecordSize(), request); return new(StatusCode.Pending); } else diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HashEntryInfo.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HashEntryInfo.cs index de7385ea1b..c8d07ce23b 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HashEntryInfo.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/HashEntryInfo.cs @@ -50,7 +50,12 @@ internal HashEntryInfo(long hash) /// /// The current address of this hash entry (which may have been updated (via CAS) in the bucket after FindTag, etc.) /// - internal readonly long CurrentAddress => new HashBucketEntry() { word = bucket->bucket_entries[slot] }.Address; + internal readonly long CurrentAddress + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get { return new HashBucketEntry() { word = bucket->bucket_entries[slot] }.Address; } + } + internal readonly long AbsoluteCurrentAddress => Utility.AbsoluteAddress(CurrentAddress); /// @@ -72,7 +77,7 @@ internal HashEntryInfo(long hash) /// Set members to the current entry (which may have been updated (via CAS) in the bucket after FindTag, etc.) 
/// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void SetToCurrent() => entry = new HashBucketEntry() { word = bucket->bucket_entries[slot] }; + internal void SetToCurrent() => entry = new() { word = bucket->bucket_entries[slot] }; [MethodImpl(MethodImplOptions.AggressiveInlining)] internal bool TryCAS(long newLogicalAddress) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Helpers.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Helpers.cs index c941288539..33bbeca3ea 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Helpers.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Helpers.cs @@ -7,7 +7,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private enum LatchDestination { @@ -17,11 +19,11 @@ private enum LatchDestination } [MethodImpl(MethodImplOptions.AggressiveInlining)] - static ref RecordInfo WriteNewRecordInfo(ref Key key, AllocatorBase log, long newPhysicalAddress, bool inNewVersion, bool tombstone, long previousAddress) + static ref RecordInfo WriteNewRecordInfo(ref Key key, AllocatorBase log, long newPhysicalAddress, bool inNewVersion, long previousAddress) { - ref RecordInfo recordInfo = ref log.GetInfo(newPhysicalAddress); - recordInfo.WriteInfo(inNewVersion, tombstone, previousAddress); - log.SerializeKey(ref key, newPhysicalAddress); + ref RecordInfo recordInfo = ref log._wrapper.GetInfo(newPhysicalAddress); + recordInfo.WriteInfo(inNewVersion, previousAddress); + log._wrapper.SerializeKey(ref key, newPhysicalAddress); return ref recordInfo; } @@ -66,7 +68,7 @@ private bool IsEntryVersionNew(ref HashBucketEntry entry) return false; // If the record is in memory, check if it has the new version bit set - if (entry.Address < hlog.HeadAddress) + if 
(entry.Address < hlogBase.HeadAddress) return false; return hlog.GetInfo(hlog.GetPhysicalAddress(entry.Address)).IsInNewVersion; } @@ -75,19 +77,21 @@ private bool IsEntryVersionNew(ref HashBucketEntry entry) // PreviousAddress does not point to a valid record. Otherwise an earlier record for this key could be reachable again. // Also, it cannot be elided if it is frozen due to checkpointing. [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool CanElide(TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private bool CanElide(TSessionFunctionsWrapper sessionFunctions, + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(!stackCtx.recSrc.HasReadCacheSrc, "Should not call CanElide() for readcache records"); - return stackCtx.hei.Address == stackCtx.recSrc.LogicalAddress && srcRecordInfo.PreviousAddress < hlog.BeginAddress + return stackCtx.hei.Address == stackCtx.recSrc.LogicalAddress && srcRecordInfo.PreviousAddress < hlogBase.BeginAddress && !IsFrozen(sessionFunctions, ref stackCtx, ref srcRecordInfo); } // If the record is in a checkpoint range, it must not be modified. If it is in the fuzzy region, it can only be modified // if it is a new record. 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool IsFrozen(TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private bool IsFrozen(TSessionFunctionsWrapper sessionFunctions, + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(!stackCtx.recSrc.HasReadCacheSrc, "Should not call IsFrozen() for readcache records"); return sessionFunctions.Ctx.IsInV1 @@ -97,8 +101,8 @@ private bool IsFrozen(TSession [MethodImpl(MethodImplOptions.AggressiveInlining)] private (bool elided, bool added) TryElideAndTransferToFreeList(TSessionFunctionsWrapper sessionFunctions, - ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, (int usedValueLength, int fullValueLength, int fullRecordLength) recordLengths) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, (int usedValueLength, int fullValueLength, int fullRecordLength) recordLengths) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Try to CAS out of the hashtable and if successful, add it to the free list. 
Debug.Assert(srcRecordInfo.IsSealed, "Expected a Sealed record in TryElideAndTransferToFreeList"); @@ -110,9 +114,10 @@ private bool IsFrozen(TSession } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryTransferToFreeList(TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, + private bool TryTransferToFreeList(TSessionFunctionsWrapper sessionFunctions, + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, (int usedValueLength, int fullValueLength, int fullRecordLength) recordLengths) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // The record has been CAS'd out of the hashtable or elided from the chain, so add it to the free list. Debug.Assert(srcRecordInfo.IsSealed, "Expected a Sealed record in TryTransferToFreeList"); @@ -122,7 +127,7 @@ private bool TryTransferToFreeList stackCtx, long newLogicalAddress, ref RecordInfo newRecordInfo) + private bool CASRecordIntoChain(ref Key key, ref OperationStackContext stackCtx, long newLogicalAddress, ref RecordInfo newRecordInfo) { var result = stackCtx.recSrc.LowestReadCachePhysicalAddress == Constants.kInvalidAddress ? 
stackCtx.hei.TryCAS(newLogicalAddress) @@ -159,11 +164,11 @@ private bool CASRecordIntoChain(ref Key key, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) + private void PostCopyToTail(ref Key key, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) => PostCopyToTail(ref key, ref stackCtx, ref srcRecordInfo, stackCtx.hei.Address); [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void PostCopyToTail(ref Key key, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, long highestReadCacheAddressChecked) + private void PostCopyToTail(ref Key key, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, long highestReadCacheAddressChecked) { // Nothing required here if not using ReadCache if (!UseReadCache) @@ -186,14 +191,14 @@ private void PostCopyToTail(ref Key key, ref OperationStackContext s // Called after BlockAllocate or anything else that could shift HeadAddress, to adjust addresses or return false for RETRY as needed. // This refreshes the HashEntryInfo, so the caller needs to recheck to confirm the BlockAllocated address is still > hei.Address. [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool VerifyInMemoryAddresses(ref OperationStackContext stackCtx) + private bool VerifyInMemoryAddresses(ref OperationStackContext stackCtx) { // If we have an in-memory source that fell below HeadAddress, return false and the caller will RETRY_LATER. - if (stackCtx.recSrc.HasInMemorySrc && stackCtx.recSrc.LogicalAddress < stackCtx.recSrc.Log.HeadAddress) + if (stackCtx.recSrc.HasInMemorySrc && stackCtx.recSrc.LogicalAddress < stackCtx.recSrc.AllocatorBase.HeadAddress) return false; // If we're not using readcache or we don't have a splice point or it is still above readcache.HeadAddress, we're good. 
- if (!UseReadCache || stackCtx.recSrc.LowestReadCacheLogicalAddress == Constants.kInvalidAddress || stackCtx.recSrc.LowestReadCacheLogicalAddress >= readcache.HeadAddress) + if (!UseReadCache || stackCtx.recSrc.LowestReadCacheLogicalAddress == Constants.kInvalidAddress || stackCtx.recSrc.LowestReadCacheLogicalAddress >= readCacheBase.HeadAddress) return true; // If the splice point went below readcache.HeadAddress, we would have to wait for the chain to be fixed up by eviction, @@ -202,25 +207,25 @@ private bool VerifyInMemoryAddresses(ref OperationStackContext stack } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool FindOrCreateTagAndTryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx, - out OperationStatus internalStatus) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private bool FindOrCreateTagAndTryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx, out OperationStatus internalStatus) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Transient must lock the bucket before traceback, to prevent revivification from yanking the record out from underneath us. Manual locking already automatically locks the bucket. - FindOrCreateTag(ref stackCtx.hei, hlog.BeginAddress); + FindOrCreateTag(ref stackCtx.hei, hlogBase.BeginAddress); if (!TryTransientXLock(sessionFunctions, ref key, ref stackCtx, out internalStatus)) return false; // Between the time we found the tag and the time we locked the bucket the record in hei.entry may have been elided, so make sure we don't have a stale address in hei.entry. 
stackCtx.hei.SetToCurrent(); - stackCtx.SetRecordSourceToHashEntry(hlog); + stackCtx.SetRecordSourceToHashEntry(hlogBase); return true; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool FindTagAndTryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx, - out OperationStatus internalStatus) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private bool FindTagAndTryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx, out OperationStatus internalStatus) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Transient must lock the bucket before traceback, to prevent revivification from yanking the record out from underneath us. Manual locking already automatically locks the bucket. internalStatus = OperationStatus.NOTFOUND; @@ -229,14 +234,14 @@ private bool FindTagAndTryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx, - out OperationStatus internalStatus) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private bool FindTagAndTryTransientSLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx, out OperationStatus internalStatus) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Transient must lock the bucket before traceback, to prevent revivification from yanking the record out from underneath us. Manual locking already automatically locks the bucket. internalStatus = OperationStatus.NOTFOUND; @@ -245,7 +250,7 @@ private bool FindTagAndTryTransientSLock : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Delete operation. Replaces the value corresponding to 'key' with tombstone. 
@@ -40,11 +42,11 @@ public unsafe partial class TsavoriteKV : TsavoriteBase [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStatus InternalDelete(ref Key key, long keyHash, ref Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var latchOperation = LatchOperation.None; - OperationStackContext stackCtx = new(keyHash); + OperationStackContext stackCtx = new(keyHash); pendingContext.keyHash = keyHash; if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) @@ -60,7 +62,7 @@ internal OperationStatus InternalDelete= hlog.ReadOnlyAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.ReadOnlyAddress) { srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -154,7 +156,7 @@ internal OperationStatus InternalDelete= hlog.HeadAddress) + else if (stackCtx.recSrc.LogicalAddress >= hlogBase.HeadAddress) { // If we already have a deleted record, there's nothing to do. 
srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -222,8 +224,8 @@ internal OperationStatus InternalDelete(ref Key key, Context userContext, - ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { pendingContext.type = OperationType.DELETE; if (pendingContext.key == default) pendingContext.key = hlog.GetKeyContainer(ref key); @@ -232,7 +234,7 @@ private void CreatePendingDeleteContext stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) + private LatchDestination CheckCPRConsistencyDelete(Phase phase, ref OperationStackContext stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) { // This is the same logic as Upsert; neither goes pending. return CheckCPRConsistencyUpsert(phase, ref stackCtx, ref status, ref latchOperation); @@ -244,13 +246,13 @@ private LatchDestination CheckCPRConsistencyDelete(Phase phase, ref OperationSta /// The record Key /// Information about the operation context /// The current session - /// Contains the and structures for this operation, + /// Contains the and structures for this operation, /// and allows passing back the newLogicalAddress for invalidation in the case of exceptions. 
- /// If ., - /// this is the for + /// If ., + /// this is the for private OperationStatus CreateNewRecordDelete(ref Key key, ref PendingContext pendingContext, - TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var value = default(Value); var (actualSize, allocatedSize, keySize) = hlog.GetRecordSize(ref key, ref value); @@ -260,7 +262,8 @@ private OperationStatus CreateNewRecordDelete : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] internal bool InternalTryLockShared(long keyHash) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalRMW.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalRMW.cs index 6fb0327407..45e433f512 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalRMW.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalRMW.cs @@ -6,7 +6,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Read-Modify-Write Operation. Updates value of 'key' using 'input' and current value. 
@@ -47,11 +49,11 @@ public unsafe partial class TsavoriteKV : TsavoriteBase [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStatus InternalRMW(ref Key key, long keyHash, ref Input input, ref Output output, ref Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var latchOperation = LatchOperation.None; - OperationStackContext stackCtx = new(keyHash); + OperationStackContext stackCtx = new(keyHash); pendingContext.keyHash = keyHash; if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) @@ -67,7 +69,7 @@ internal OperationStatus InternalRMW= hlog.ReadOnlyAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.ReadOnlyAddress) { srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -152,21 +154,21 @@ internal OperationStatus InternalRMW= hlog.SafeReadOnlyAddress && !stackCtx.recSrc.GetInfo().Tombstone) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.SafeReadOnlyAddress && !stackCtx.recSrc.GetInfo().Tombstone) { // Fuzzy Region: Must retry after epoch refresh, due to lost-update anomaly status = OperationStatus.RETRY_LATER; goto LatchRelease; } - if (stackCtx.recSrc.LogicalAddress >= hlog.HeadAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.HeadAddress) { // Safe Read-Only Region: CopyUpdate to create a record in the mutable region. srcRecordInfo = ref stackCtx.recSrc.GetInfo(); goto CreateNewRecord; } - if (stackCtx.recSrc.LogicalAddress >= hlog.BeginAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.BeginAddress) { - if (hlog.IsNullDevice) + if (hlogBase.IsNullDevice) goto CreateNewRecord; // Disk Region: Need to issue async io requests. Locking will be checked on pending completion. 
@@ -221,8 +223,8 @@ internal OperationStatus InternalRMW(ref Key key, ref Input input, Output output, Context userContext, - ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { pendingContext.type = OperationType.RMW; if (pendingContext.key == default) @@ -238,22 +240,23 @@ private void CreatePendingRMWContext(ref Key key, ref Input input, ref Output output, ref PendingContext pendingContext, - TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, ref RMWInfo rmwInfo, out OperationStatus status, ref Value recordValue) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, ref RMWInfo rmwInfo, + out OperationStatus status, ref Value recordValue) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (IsFrozen(sessionFunctions, ref stackCtx, ref srcRecordInfo)) goto NeedNewRecord; // This record is safe to revivify even if its PreviousAddress points to a valid record, because it is revivified for the same key. 
- bool ok = true; + var ok = true; try { if (srcRecordInfo.Tombstone) { - srcRecordInfo.Tombstone = false; + srcRecordInfo.ClearTombstone(); if (RevivificationManager.IsFixedLength) - rmwInfo.UsedValueLength = rmwInfo.FullValueLength = RevivificationManager.FixedValueLength; + rmwInfo.UsedValueLength = rmwInfo.FullValueLength = RevivificationManager.FixedValueLength; else { var recordLengths = GetRecordLengths(stackCtx.recSrc.PhysicalAddress, ref recordValue, ref srcRecordInfo); @@ -276,7 +279,7 @@ private bool TryRevivifyInChain stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) + private LatchDestination CheckCPRConsistencyRMW(Phase phase, ref OperationStackContext stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) { // The idea of CPR is that if a thread in version V tries to perform an operation and notices a record in V+1, it needs to back off and run CPR_SHIFT_DETECTED. // Similarly, a V+1 thread cannot update a V record; it needs to do a read-copy-update (or upsert at tail) instead of an in-place update. 
@@ -319,7 +322,7 @@ private LatchDestination CheckCPRConsistencyRMW(Phase phase, ref OperationStackC if (IsRecordVersionNew(stackCtx.recSrc.LogicalAddress)) break; // Normal Processing; V+1 thread encountered a record in V+1 - if (stackCtx.recSrc.LogicalAddress >= hlog.HeadAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.HeadAddress) return LatchDestination.CreateNewRecord; // Record is in memory so force creation of a (V+1) record break; // Normal Processing; the record is below HeadAddress so the operation will go pending @@ -342,18 +345,18 @@ private LatchDestination CheckCPRConsistencyRMW(Phase phase, ref OperationStackC /// The result of ISessionFunctions.SingleWriter /// Information about the operation context /// The current session - /// Contains the and structures for this operation, + /// Contains the and structures for this operation, /// and allows passing back the newLogicalAddress for invalidation in the case of exceptions. If called from pending IO, /// this is populated from the data read from disk. - /// If ., - /// this is the for . Otherwise, if called from pending IO, + /// If ., + /// this is the for . Otherwise, if called from pending IO, /// this is the read from disk. If neither of these, it is a default . 
/// Whether we are doing a CopyUpdate, either from in-memory or pending IO /// private OperationStatus CreateNewRecordRMW(ref Key key, ref Input input, ref Value value, ref Output output, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, - ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, bool doingCU) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, bool doingCU) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { bool forExpiration = false; @@ -397,7 +400,7 @@ private OperationStatus CreateNewRecordRMW(sessionFunctions, ref stackCtx, ref srcRecordInfo, oldRecordLengths); + _ = TryTransferToFreeList(sessionFunctions, ref stackCtx, ref srcRecordInfo, oldRecordLengths); } } else @@ -553,10 +556,7 @@ private OperationStatus CreateNewRecordRMW(ref Key key, ref Input input, ref Value value, ref Output output, ref RecordInfo recordInfo, ref RMWInfo rmwInfo, long logicalAddress, TSessionFunctionsWrapper sessionFunctions, bool isIpu, out OperationStatus status) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // This is called for InPlaceUpdater or CopyUpdater only; CopyUpdater however does not copy an expired record, so we return CreatedRecord. var advancedStatusCode = isIpu ? StatusCode.InPlaceUpdatedRecord : StatusCode.CreatedRecord; @@ -579,7 +579,7 @@ internal bool ReinitializeExpiredRecord : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Read operation. Computes the 'output' from 'input' and current value corresponding to 'key'. 
@@ -50,9 +52,9 @@ public unsafe partial class TsavoriteKV : TsavoriteBase [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStatus InternalRead(ref Key key, long keyHash, ref Input input, ref Output output, Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { - OperationStackContext stackCtx = new(keyHash); + OperationStackContext stackCtx = new(keyHash); pendingContext.keyHash = keyHash; if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) @@ -60,7 +62,7 @@ internal OperationStatus InternalRead(sessionFunctions, ref key, ref stackCtx, out OperationStatus status)) return status; - stackCtx.SetRecordSourceToHashEntry(hlog); + stackCtx.SetRecordSourceToHashEntry(hlogBase); // We have to assign a reference on declaration, so assign it here before we know whether LogicalAddress is above or below HeadAddress. // It must be at this scope so it can be unlocked in 'finally'. @@ -70,6 +72,7 @@ internal OperationStatus InternalRead= hlog.SafeReadOnlyAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.SafeReadOnlyAddress) { // Mutable region (even fuzzy region is included here) srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -126,7 +130,7 @@ internal OperationStatus InternalRead= hlog.HeadAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.HeadAddress) { // Immutable region srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -145,13 +149,13 @@ internal OperationStatus InternalRead= hlog.BeginAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.BeginAddress) { // On-Disk Region Debug.Assert(!sessionFunctions.IsManualLocking || LockTable.IsLocked(ref stackCtx.hei), "A Lockable-session Read() of an on-disk key requires a LockTable lock"); // Note: we do not lock here; we wait until reading from disk, then lock in the ContinuePendingRead chain. 
- if (hlog.IsNullDevice) + if (hlogBase.IsNullDevice) return OperationStatus.NOTFOUND; CreatePendingReadContext(ref key, ref input, output, userContext, ref pendingContext, sessionFunctions, stackCtx.recSrc.LogicalAddress); return OperationStatus.RECORD_ON_DISK; @@ -170,8 +174,9 @@ internal OperationStatus InternalRead(ref Key key, ref Input input, ref Output output, Context userContext, - ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref OperationStatus status, Value recordValue) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, + ref OperationStackContext stackCtx, ref OperationStatus status, Value recordValue) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (pendingContext.readCopyOptions.CopyTo == ReadCopyTo.MainLog) { @@ -243,15 +248,15 @@ private static OperationStatus CheckFalseActionStatus(ReadInfo readInfo) [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStatus InternalReadAtAddress(long readAtAddress, ref Key key, ref Input input, ref Output output, ref ReadOptions readOptions, Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { - if (readAtAddress < hlog.BeginAddress) + if (readAtAddress < hlogBase.BeginAddress) return OperationStatus.NOTFOUND; pendingContext.IsReadAtAddress = true; // We do things in a different order here than in InternalRead, in part to handle NoKey (especially with Revivification). - if (readAtAddress < hlog.HeadAddress) + if (readAtAddress < hlogBase.HeadAddress) { // Do not trace back in the pending callback if it is a key mismatch. 
pendingContext.NoKey = true; @@ -267,12 +272,12 @@ internal OperationStatus InternalReadAtAddress stackCtx = new(pendingContext.keyHash); + OperationStackContext stackCtx = new(pendingContext.keyHash); if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) SplitBuckets(stackCtx.hei.hash); if (!FindTagAndTryTransientSLock(sessionFunctions, ref key, ref stackCtx, out OperationStatus status)) return status; - stackCtx.SetRecordSourceToHashEntry(hlog); + stackCtx.SetRecordSourceToHashEntry(hlogBase); stackCtx.recSrc.LogicalAddress = readAtAddress; - stackCtx.recSrc.SetPhysicalAddress(); + _ = stackCtx.recSrc.SetPhysicalAddress(); // Note: We read directly from the address either in memory or pending, so do not do any ReadCache operations. @@ -319,7 +324,7 @@ internal OperationStatus InternalReadAtAddress= hlog.SafeReadOnlyAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.SafeReadOnlyAddress) { // Mutable region (even fuzzy region is included here). sessionFunctions.ConcurrentReader(ref stackCtx.recSrc.GetKey(), ref input, ref stackCtx.recSrc.GetValue(), ref output, ref readInfo, ref srcRecordInfo); @@ -341,7 +346,7 @@ internal OperationStatus InternalReadAtAddress(ref Key key, ref Input input, Output output, Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, long logicalAddress) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { pendingContext.type = OperationType.READ; if (!pendingContext.NoKey && pendingContext.key == default) // If this is true, we don't have a valid key diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalUpsert.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalUpsert.cs index 575993a1f8..483e0e7539 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalUpsert.cs +++ 
b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/InternalUpsert.cs @@ -6,7 +6,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Upsert operation. Replaces the value corresponding to 'key' with provided 'value', if one exists @@ -43,11 +45,11 @@ public unsafe partial class TsavoriteKV : TsavoriteBase [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStatus InternalUpsert(ref Key key, long keyHash, ref Input input, ref Value value, ref Output output, ref Context userContext, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var latchOperation = LatchOperation.None; - OperationStackContext stackCtx = new(keyHash); + OperationStackContext stackCtx = new(keyHash); pendingContext.keyHash = keyHash; if (sessionFunctions.Ctx.phase == Phase.IN_PROGRESS_GROW) @@ -63,7 +65,7 @@ internal OperationStatus InternalUpsert= ReadOnlyAddress). 
- if (!TryFindRecordForUpdate(ref key, ref stackCtx, hlog.ReadOnlyAddress, out status)) + if (!TryFindRecordForUpdate(ref key, ref stackCtx, hlogBase.ReadOnlyAddress, out status)) return status; // Note: Upsert does not track pendingContext.InitialAddress because we don't have an InternalContinuePendingUpsert @@ -91,7 +93,7 @@ internal OperationStatus InternalUpsert= hlog.ReadOnlyAddress) + if (stackCtx.recSrc.LogicalAddress >= hlogBase.ReadOnlyAddress) { srcRecordInfo = ref stackCtx.recSrc.GetInfo(); @@ -178,8 +180,8 @@ internal OperationStatus InternalUpsert(ref Key key, ref Input input, ref Value value, Output output, Context userContext, - ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { pendingContext.type = OperationType.UPSERT; if (pendingContext.key == default) @@ -197,22 +199,23 @@ private void CreatePendingUpsertContext(ref Key key, ref Input input, ref Value value, ref Output output, ref PendingContext pendingContext, - TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, ref UpsertInfo upsertInfo, out OperationStatus status, ref Value recordValue) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + TSessionFunctionsWrapper sessionFunctions, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, ref UpsertInfo upsertInfo, + out OperationStatus status, ref Value recordValue) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (IsFrozen(sessionFunctions, ref stackCtx, ref srcRecordInfo)) goto NeedNewRecord; // This record is safe to revivify even if its PreviousAddress points to a valid record, because it is revivified for the same key. 
- bool ok = true; + var ok = true; try { if (srcRecordInfo.Tombstone) { - srcRecordInfo.Tombstone = false; + srcRecordInfo.ClearTombstone(); if (RevivificationManager.IsFixedLength) - upsertInfo.UsedValueLength = upsertInfo.FullValueLength = RevivificationManager.FixedValueLength; + upsertInfo.UsedValueLength = upsertInfo.FullValueLength = RevivificationManager.FixedValueLength; else { var recordLengths = GetRecordLengths(stackCtx.recSrc.PhysicalAddress, ref recordValue, ref srcRecordInfo); @@ -235,7 +238,7 @@ private bool TryRevivifyInChain stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) + private LatchDestination CheckCPRConsistencyUpsert(Phase phase, ref OperationStackContext stackCtx, ref OperationStatus status, ref LatchOperation latchOperation) { // See explanatory comments in CheckCPRConsistencyRMW. @@ -287,14 +290,14 @@ private LatchDestination CheckCPRConsistencyUpsert(Phase phase, ref OperationSta /// The result of ISessionFunctions.SingleWriter /// Information about the operation context /// The current session - /// Contains the and structures for this operation, + /// Contains the and structures for this operation, /// and allows passing back the newLogicalAddress for invalidation in the case of exceptions. 
- /// If ., - /// this is the for + /// If ., + /// this is the for private OperationStatus CreateNewRecordUpsert(ref Key key, ref Input input, ref Value value, ref Output output, ref PendingContext pendingContext, TSessionFunctionsWrapper sessionFunctions, - ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var (actualSize, allocatedSize, keySize) = hlog.GetRecordSize(ref key, ref value); // Input is not included in record-length calculations for Upsert AllocateOptions allocOptions = new() @@ -309,7 +312,7 @@ private OperationStatus CreateNewRecordUpsert : ILockTable + internal struct OverflowBucketLockTable : ILockTable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - private readonly TsavoriteKV store; + private readonly TsavoriteKV store; internal readonly long NumBuckets => store.state[store.resizeInfo.version].size_mask + 1; public readonly bool IsEnabled => store is not null; - internal OverflowBucketLockTable(TsavoriteKV tkv) => store = tkv; + internal OverflowBucketLockTable(TsavoriteKV store) => this.store = store; internal readonly long GetSize() => store.state[store.resizeInfo.version].size_mask; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Locking/TransientLocking.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Locking/TransientLocking.cs index 20224f0d57..39818bb4d0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Locking/TransientLocking.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Locking/TransientLocking.cs @@ -6,12 +6,15 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : 
IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool TryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx, + private bool TryTransientXLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx, out OperationStatus status) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (sessionFunctions.TryLockTransientExclusive(ref key, ref stackCtx)) { @@ -23,17 +26,19 @@ private bool TryTransientXLock } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static void TransientXUnlock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + private static void TransientXUnlock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (stackCtx.recSrc.HasTransientXLock) sessionFunctions.UnlockTransientExclusive(ref key, ref stackCtx); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal bool TryTransientSLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx, + internal bool TryTransientSLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + ref OperationStackContext stackCtx, out OperationStatus status) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (sessionFunctions.TryLockTransientShared(ref key, ref stackCtx)) { @@ -45,22 +50,23 @@ internal bool TryTransientSLock(TSessionFunctionsWrapper sessionFunctions, ref Key key, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + internal static void TransientSUnlock(TSessionFunctionsWrapper sessionFunctions, ref Key key, + 
ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (stackCtx.recSrc.HasTransientSLock) sessionFunctions.UnlockTransientShared(ref key, ref stackCtx); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void LockForScan(ref OperationStackContext stackCtx, ref Key key, ref RecordInfo recordInfo) + internal void LockForScan(ref OperationStackContext stackCtx, ref Key key) { Debug.Assert(!stackCtx.recSrc.HasLock, $"Should not call LockForScan if recSrc already has a lock ({stackCtx.recSrc.LockStateString()})"); // This will always be a transient lock as it is not session-based - stackCtx = new(comparer.GetHashCode64(ref key)); - FindTag(ref stackCtx.hei); - stackCtx.SetRecordSourceToHashEntry(hlog); + stackCtx = new(storeFunctions.GetKeyHashCode64(ref key)); + _ = FindTag(ref stackCtx.hei); + stackCtx.SetRecordSourceToHashEntry(hlogBase); while (!LockTable.TryLockShared(ref stackCtx.hei)) epoch.ProtectAndDrain(); @@ -68,7 +74,7 @@ internal void LockForScan(ref OperationStackContext stackCtx, ref Ke } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void UnlockForScan(ref OperationStackContext stackCtx, ref Key key, ref RecordInfo recordInfo) + internal void UnlockForScan(ref OperationStackContext stackCtx) { if (stackCtx.recSrc.HasTransientSLock) { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ModifiedBitOperation.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ModifiedBitOperation.cs index 3688d5f6c6..6bd34824e4 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ModifiedBitOperation.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ModifiedBitOperation.cs @@ -6,7 +6,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { 
/// /// if reset is true it simply resets the modified bit for the key @@ -20,26 +22,26 @@ internal OperationStatus InternalModifiedBitOperation(ref Key key, out RecordInf { Debug.Assert(epoch.ThisInstanceProtected()); - HashEntryInfo hei = new(comparer.GetHashCode64(ref key)); ; + HashEntryInfo hei = new(storeFunctions.GetKeyHashCode64(ref key)); ; #region Trace back for record in in-memory HybridLog - FindTag(ref hei); + _ = FindTag(ref hei); var logicalAddress = hei.Address; var physicalAddress = hlog.GetPhysicalAddress(logicalAddress); - if (logicalAddress >= hlog.HeadAddress) + if (logicalAddress >= hlogBase.HeadAddress) { ref RecordInfo recordInfo = ref hlog.GetInfo(physicalAddress); - if (recordInfo.Invalid || !comparer.Equals(ref key, ref hlog.GetKey(physicalAddress))) + if (recordInfo.Invalid || !storeFunctions.KeysEqual(ref key, ref hlog.GetKey(physicalAddress))) { logicalAddress = recordInfo.PreviousAddress; - TraceBackForKeyMatch(ref key, logicalAddress, hlog.HeadAddress, out logicalAddress, out physicalAddress); + TraceBackForKeyMatch(ref key, logicalAddress, hlogBase.HeadAddress, out logicalAddress, out physicalAddress); } } #endregion modifiedInfo = default; - if (logicalAddress >= hlog.HeadAddress) + if (logicalAddress >= hlogBase.HeadAddress) { ref RecordInfo recordInfo = ref hlog.GetInfo(physicalAddress); if (reset) @@ -53,7 +55,7 @@ internal OperationStatus InternalModifiedBitOperation(ref Key key, out RecordInf } // If the record does not exist we return unmodified; if it is on the disk we return modified - modifiedInfo.Modified = logicalAddress >= hlog.BeginAddress; + modifiedInfo.Modified = logicalAddress >= hlogBase.BeginAddress; // It is not in memory so we return success return OperationStatus.SUCCESS; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/OperationStackContext.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/OperationStackContext.cs index 4238c41360..a62521252d 100644 --- 
a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/OperationStackContext.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/OperationStackContext.cs @@ -6,12 +6,15 @@ namespace Tsavorite.core { - public struct OperationStackContext + public struct OperationStackContext + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Note: Cannot use ref fields because they are not supported before net7.0. internal HashEntryInfo hei; - internal RecordSource recSrc; + internal RecordSource recSrc; + [MethodImpl(MethodImplOptions.AggressiveInlining)] internal OperationStackContext(long keyHash) => hei = new(keyHash); /// @@ -20,14 +23,14 @@ public struct OperationStackContext /// /// The TsavoriteKV's hlog [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void SetRecordSourceToHashEntry(AllocatorBase srcLog) => recSrc.Set(hei.Address, srcLog); + internal void SetRecordSourceToHashEntry(AllocatorBase srcLog) => recSrc.Set(hei.Address, srcLog); /// /// Sets to the current ., which is the current address /// in the hash table. This is the same effect as calling . /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void UpdateRecordSourceToCurrentHashEntry(AllocatorBase hlog) + internal void UpdateRecordSourceToCurrentHashEntry(AllocatorBase hlog) { hei.SetToCurrent(); SetRecordSourceToHashEntry(hlog); @@ -66,7 +69,7 @@ internal void SetNewRecordInvalid(ref RecordInfo newRecordInfo) /// Called during InternalXxx 'finally' handler, to set the new record invalid if an exception or other error occurred. 
/// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void HandleNewRecordOnException(TsavoriteKV store) + internal void HandleNewRecordOnException(TsavoriteKV store) { if (newLogicalAddress != Constants.kInvalidAddress) { diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ReadCache.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ReadCache.cs index 4ea424ca1e..b72ec36056 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ReadCache.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/ReadCache.cs @@ -8,15 +8,17 @@ namespace Tsavorite.core { // Partial file for readcache functions - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal bool FindInReadCache(ref Key key, ref OperationStackContext stackCtx, long minAddress = Constants.kInvalidAddress, bool alwaysFindLatestLA = true) + internal bool FindInReadCache(ref Key key, ref OperationStackContext stackCtx, long minAddress = Constants.kInvalidAddress, bool alwaysFindLatestLA = true) { Debug.Assert(UseReadCache, "Should not call FindInReadCache if !UseReadCache"); // minAddress, if present, comes from the pre-pendingIO entry.Address; there may have been no readcache entries then. - minAddress = IsReadCache(minAddress) ? AbsoluteAddress(minAddress) : readcache.HeadAddress; + minAddress = IsReadCache(minAddress) ? AbsoluteAddress(minAddress) : readCacheBase.HeadAddress; RestartChain: @@ -46,13 +48,13 @@ internal bool FindInReadCache(ref Key key, ref OperationStackContext // When traversing the readcache, we skip Invalid (Closed) records. We don't have Sealed records in the readcache because they cause // the operation to be retried, so we'd never get past them. 
Return true if we find a Valid read cache entry matching the key. if (!recordInfo.Invalid && stackCtx.recSrc.LatestLogicalAddress >= minAddress && !stackCtx.recSrc.HasReadCacheSrc - && comparer.Equals(ref key, ref readcache.GetKey(stackCtx.recSrc.LowestReadCachePhysicalAddress))) + && storeFunctions.KeysEqual(ref key, ref readcache.GetKey(stackCtx.recSrc.LowestReadCachePhysicalAddress))) { // Keep these at the current readcache location; they'll be the caller's source record. stackCtx.recSrc.LogicalAddress = stackCtx.recSrc.LowestReadCacheLogicalAddress; stackCtx.recSrc.PhysicalAddress = stackCtx.recSrc.LowestReadCachePhysicalAddress; stackCtx.recSrc.SetHasReadCacheSrc(); - stackCtx.recSrc.Log = readcache; + stackCtx.recSrc.SetAllocator(readCacheBase); // Read() does not need to continue past the found record; updaters need to continue to find latestLogicalAddress and lowestReadCache*Address. if (!alwaysFindLatestLA) @@ -67,13 +69,10 @@ internal bool FindInReadCache(ref Key key, ref OperationStackContext InMainLog: if (stackCtx.recSrc.HasReadCacheSrc) - { - Debug.Assert(ReferenceEquals(stackCtx.recSrc.Log, readcache), "Expected Log == readcache"); return true; - } // We did not find the record in the readcache, so set these to the start of the main log entries, and the caller will call TracebackForKeyMatch - Debug.Assert(ReferenceEquals(stackCtx.recSrc.Log, hlog), "Expected Log == hlog"); + Debug.Assert(ReferenceEquals(stackCtx.recSrc.AllocatorBase, hlogBase), "Expected recSrc.AllocatorBase == hlogBase"); Debug.Assert(stackCtx.recSrc.LatestLogicalAddress > Constants.kTempInvalidAddress, "Must have a main-log address after readcache"); stackCtx.recSrc.LogicalAddress = stackCtx.recSrc.LatestLogicalAddress; stackCtx.recSrc.PhysicalAddress = 0; // do *not* call hlog.GetPhysicalAddress(); LogicalAddress may be below hlog.HeadAddress. Let the caller decide when to do this. 
@@ -81,25 +80,25 @@ internal bool FindInReadCache(ref Key key, ref OperationStackContext } [MethodImpl(MethodImplOptions.AggressiveInlining)] - bool ReadCacheNeedToWaitForEviction(ref OperationStackContext stackCtx) + bool ReadCacheNeedToWaitForEviction(ref OperationStackContext stackCtx) { - if (stackCtx.recSrc.LatestLogicalAddress < readcache.HeadAddress) + if (stackCtx.recSrc.LatestLogicalAddress < readCacheBase.HeadAddress) { - SpinWaitUntilRecordIsClosed(stackCtx.recSrc.LatestLogicalAddress, readcache); + SpinWaitUntilRecordIsClosed(stackCtx.recSrc.LatestLogicalAddress, readCacheBase); // Restore to hlog; we may have set readcache into Log and continued the loop, had to restart, and the matching readcache record was evicted. - stackCtx.UpdateRecordSourceToCurrentHashEntry(hlog); + stackCtx.UpdateRecordSourceToCurrentHashEntry(hlogBase); return true; } return false; } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool SpliceIntoHashChainAtReadCacheBoundary(ref Key key, ref OperationStackContext stackCtx, long newLogicalAddress) + private bool SpliceIntoHashChainAtReadCacheBoundary(ref Key key, ref OperationStackContext stackCtx, long newLogicalAddress) { // Splice into the gap of the last readcache/first main log entries. 
- Debug.Assert(stackCtx.recSrc.LowestReadCacheLogicalAddress >= readcache.ClosedUntilAddress, - $"{nameof(VerifyInMemoryAddresses)} should have ensured LowestReadCacheLogicalAddress ({stackCtx.recSrc.LowestReadCacheLogicalAddress}) >= readcache.ClosedUntilAddress ({readcache.ClosedUntilAddress})"); + Debug.Assert(stackCtx.recSrc.LowestReadCacheLogicalAddress >= readCacheBase.ClosedUntilAddress, + $"{nameof(VerifyInMemoryAddresses)} should have ensured LowestReadCacheLogicalAddress ({stackCtx.recSrc.LowestReadCacheLogicalAddress}) >= readcache.ClosedUntilAddress ({readCacheBase.ClosedUntilAddress})"); // If the LockTable is enabled, then we either have an exclusive lock and thus cannot have a competing insert to the readcache, or we are doing a // Read() so we allow a momentary overlap of records because they're the same value (no update is being done). @@ -109,7 +108,7 @@ private bool SpliceIntoHashChainAtReadCacheBoundary(ref Key key, ref OperationSt // Skip over all readcache records in this key's chain, advancing stackCtx.recSrc to the first non-readcache record we encounter. [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void SkipReadCache(ref OperationStackContext stackCtx, out bool didRefresh) + internal void SkipReadCache(ref OperationStackContext stackCtx, out bool didRefresh) { Debug.Assert(UseReadCache, "Should not call SkipReadCache if !UseReadCache"); didRefresh = false; @@ -176,7 +175,7 @@ private void SkipReadCacheBucket(HashBucket* bucket) // Called after a readcache insert, to make sure there was no race with another session that added a main-log record at the same time. 
[MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool EnsureNoNewMainLogRecordWasSpliced(ref Key key, RecordSource recSrc, long highestSearchedAddress, ref OperationStatus failStatus) + private bool EnsureNoNewMainLogRecordWasSpliced(ref Key key, RecordSource recSrc, long highestSearchedAddress, ref OperationStatus failStatus) { bool success = true; ref RecordInfo lowest_rcri = ref readcache.GetInfo(recSrc.LowestReadCachePhysicalAddress); @@ -184,10 +183,10 @@ private bool EnsureNoNewMainLogRecordWasSpliced(ref Key key, RecordSource highestSearchedAddress) { // Someone added a new record in the splice region. It won't be readcache; that would've been added at tail. See if it's our key. - var minAddress = highestSearchedAddress > hlog.HeadAddress ? highestSearchedAddress : hlog.HeadAddress; + var minAddress = highestSearchedAddress > hlogBase.HeadAddress ? highestSearchedAddress : hlogBase.HeadAddress; if (TraceBackForKeyMatch(ref key, lowest_rcri.PreviousAddress, minAddress + 1, out long prevAddress, out _)) success = false; - else if (prevAddress > highestSearchedAddress && prevAddress < hlog.HeadAddress) + else if (prevAddress > highestSearchedAddress && prevAddress < hlogBase.HeadAddress) { // One or more records were inserted and escaped to disk during the time of this Read/PENDING operation, untilLogicalAddress // is below hlog.HeadAddress, and there are one or more inserted records between them: @@ -220,7 +219,7 @@ private void ReadCacheCheckTailAfterSplice(ref Key key, ref HashEntryInfo hei, l { var physicalAddress = readcache.GetPhysicalAddress(entry.AbsoluteAddress); ref RecordInfo recordInfo = ref readcache.GetInfo(physicalAddress); - if (!recordInfo.Invalid && comparer.Equals(ref key, ref readcache.GetKey(physicalAddress))) + if (!recordInfo.Invalid && storeFunctions.KeysEqual(ref key, ref readcache.GetKey(physicalAddress))) { recordInfo.SetInvalidAtomic(); return; @@ -269,16 +268,16 @@ internal void ReadCacheEvict(long rcLogicalAddress, long 
rcToLogicalAddress) // Find the hash index entry for the key in the store's hash table. ref Key key = ref readcache.GetKey(rcPhysicalAddress); - HashEntryInfo hei = new(comparer.GetHashCode64(ref key)); + HashEntryInfo hei = new(storeFunctions.GetKeyHashCode64(ref key)); if (!FindTag(ref hei)) goto NextRecord; ReadCacheEvictChain(rcToLogicalAddress, ref hei); NextRecord: - if ((rcLogicalAddress & readcache.PageSizeMask) + rcAllocatedSize > readcache.PageSize) + if ((rcLogicalAddress & readCacheBase.PageSizeMask) + rcAllocatedSize > readCacheBase.PageSize) { - rcLogicalAddress = (1 + (rcLogicalAddress >> readcache.LogPageSizeBits)) << readcache.LogPageSizeBits; + rcLogicalAddress = (1 + (rcLogicalAddress >> readCacheBase.LogPageSizeBits)) << readCacheBase.LogPageSizeBits; continue; } rcLogicalAddress += rcAllocatedSize; @@ -302,7 +301,7 @@ private void ReadCacheEvictChain(long rcToLogicalAddress, ref HashEntryInfo hei) // Due to collisions, we can compare the hash code *mask* (i.e. the hash bucket index), not the key var mask = state[resizeInfo.version].size_mask; var rc_mask = hei.hash & mask; - var pa_mask = comparer.GetHashCode64(ref readcache.GetKey(pa)) & mask; + var pa_mask = storeFunctions.GetKeyHashCode64(ref readcache.GetKey(pa)) & mask; Debug.Assert(rc_mask == pa_mask, "The keyHash mask of the hash-chain ReadCache entry does not match the one obtained from the initial readcache address"); #endif diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/RecordSource.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/RecordSource.cs index 66dd8731fb..f26e1c6783 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/RecordSource.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/RecordSource.cs @@ -11,7 +11,9 @@ namespace Tsavorite.core /// operations, where "source" is a copy source for RMW and/or a locked record. 
This is passed to functions that create records, such as /// TsavoriteKV.CreateNewRecord*() or TsavoriteKV.InternalTryCopyToTail(), and to unlocking utilities. /// - internal struct RecordSource + internal struct RecordSource + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// If valid, this is the logical address of a record. As "source", it may be copied from for RMW or pending Reads, @@ -47,7 +49,12 @@ internal struct RecordSource /// /// If , this is the allocator (hlog or readcache) that is in. /// - internal AllocatorBase Log; + internal TAllocator Allocator { get; private set; } + + /// + /// If , this is the allocator base (hlog or readcache) that is in. + /// + internal AllocatorBase AllocatorBase { get; private set; } struct InternalStates { @@ -129,13 +136,13 @@ void append(int value, string name) internal void ClearHasReadCacheSrc() => internalState &= ~InternalStates.ReadCacheSrc; [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal long SetPhysicalAddress() => PhysicalAddress = Log.GetPhysicalAddress(LogicalAddress); + internal long SetPhysicalAddress() => PhysicalAddress = Allocator.GetPhysicalAddress(LogicalAddress); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal readonly ref RecordInfo GetInfo() => ref Log.GetInfo(PhysicalAddress); + internal readonly ref RecordInfo GetInfo() => ref Allocator.GetInfo(PhysicalAddress); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal readonly ref Key GetKey() => ref Log.GetKey(PhysicalAddress); + internal readonly ref Key GetKey() => ref Allocator.GetKey(PhysicalAddress); [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal readonly ref Value GetValue() => ref Log.GetValue(PhysicalAddress); + internal readonly ref Value GetValue() => ref Allocator.GetValue(PhysicalAddress); internal readonly bool HasInMemorySrc => (internalState & (InternalStates.MainLogSrc | InternalStates.ReadCacheSrc)) != 0; @@ -143,7 +150,7 @@ void append(int value, 
string name) /// Initialize to the latest logical address from the caller. /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void Set(long latestLogicalAddress, AllocatorBase srcLog) + internal void Set(long latestLogicalAddress, AllocatorBase srcAllocatorBase) { PhysicalAddress = default; LowestReadCacheLogicalAddress = default; @@ -154,7 +161,14 @@ internal void Set(long latestLogicalAddress, AllocatorBase srcLog) // HasTransientLock = ...; Do not clear this; it is in the LockTable and must be preserved until unlocked LatestLogicalAddress = LogicalAddress = AbsoluteAddress(latestLogicalAddress); - Log = srcLog; + SetAllocator(srcAllocatorBase); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void SetAllocator(AllocatorBase srcAllocatorBase) + { + this.AllocatorBase = srcAllocatorBase; + this.Allocator = AllocatorBase._wrapper; } [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/CheckEmptyWorker.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/CheckEmptyWorker.cs index 51dfc13a1b..94e5646fe6 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/CheckEmptyWorker.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/CheckEmptyWorker.cs @@ -8,7 +8,9 @@ namespace Tsavorite.core { - internal sealed class CheckEmptyWorker + internal sealed class CheckEmptyWorker + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // State control variables. 
internal struct State @@ -31,9 +33,9 @@ internal static string ToString(long state) CancellationTokenSource cts = new(); - readonly FreeRecordPool recordPool; + readonly FreeRecordPool recordPool; - internal CheckEmptyWorker(FreeRecordPool recordPool) => this.recordPool = recordPool; + internal CheckEmptyWorker(FreeRecordPool recordPool) => this.recordPool = recordPool; [MethodImpl(MethodImplOptions.AggressiveInlining)] internal unsafe void Start() diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/FreeRecordPool.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/FreeRecordPool.cs index e74211debc..4946aee5bc 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/FreeRecordPool.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/FreeRecordPool.cs @@ -62,7 +62,9 @@ void SetEmptyAtomic(long oldWord) } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal bool TryPeek(long recordSize, TsavoriteKV store, bool oversize, long minAddress, out int thisRecordSize) + internal bool TryPeek(long recordSize, TsavoriteKV store, bool oversize, long minAddress, out int thisRecordSize) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { FreeRecord oldRecord = this; thisRecordSize = 0; @@ -137,7 +139,9 @@ internal bool TryTake(int recordSize, long minAddress, out long address, ref Tak } [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static int GetRecordSize(TsavoriteKV store, long logicalAddress) + private static int GetRecordSize(TsavoriteKV store, long logicalAddress) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Because this is oversize, we need hlog to get the length out of the record's value (it won't fit in FreeRecord.kSizeBits) long physicalAddress = store.hlog.GetPhysicalAddress(logicalAddress); @@ -145,7 +149,9 @@ private static int 
GetRecordSize(TsavoriteKV store, long } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal unsafe bool TryTakeOversize(long recordSize, long minAddress, TsavoriteKV store, out long address, ref TakeResult takeResult) + internal unsafe bool TryTakeOversize(long recordSize, long minAddress, TsavoriteKV store, out long address, ref TakeResult takeResult) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { address = 0; @@ -278,7 +284,9 @@ internal int GetSegmentStart(int recordSize) private FreeRecord* GetRecord(int recordIndex) => records + (recordIndex >= recordCount ? recordIndex - recordCount : recordIndex); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool TryAdd(long address, int recordSize, TsavoriteKV store, long minAddress, ref RevivificationStats revivStats) + public bool TryAdd(long address, int recordSize, TsavoriteKV store, long minAddress, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var segmentStart = GetSegmentStart(recordSize); @@ -297,11 +305,15 @@ public bool TryAdd(long address, int recordSize, TsavoriteKV(int recordSize, long minAddress, TsavoriteKV store, out long address, ref RevivificationStats revivStats) + public bool TryTake(int recordSize, long minAddress, TsavoriteKV store, out long address, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => TryTake(recordSize, minAddress, store, oversize: false, out address, ref revivStats); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool TryTake(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + public bool TryTake(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { if (isEmpty) { @@ -314,7 +326,9 @@ 
public bool TryTake(int recordSize, long minAddress, TsavoriteKV(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + public bool TryTakeFirstFit(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var segmentStart = GetSegmentStart(recordSize); @@ -341,7 +355,9 @@ public bool TryTakeFirstFit(int recordSize, long minAddress, Tsavori } [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool TryTakeBestFit(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + public bool TryTakeBestFit(int recordSize, long minAddress, TsavoriteKV store, bool oversize, out long address, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Retry as long as we find a candidate, but reduce the best fit scan limit each retry. int localBestFitScanLimit = bestFitScanLimit; @@ -398,7 +414,9 @@ record = GetRecord(segmentStart + bestFitIndex); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void ScanForEmpty(FreeRecordPool recordPool, CancellationToken cancellationToken) + internal void ScanForEmpty(FreeRecordPool recordPool, CancellationToken cancellationToken) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // Add() always sets isEmpty to false and we do not clear isEmpty on Take() because that could lead to more lost "isEmpty = false". // So this routine is called only if the bin is marked not-empty. 
@@ -419,9 +437,11 @@ internal void ScanForEmpty(FreeRecordPool recordPool } } - internal unsafe class FreeRecordPool : IDisposable + internal unsafe class FreeRecordPool : IDisposable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - internal readonly TsavoriteKV store; + internal readonly TsavoriteKV store; internal readonly FreeRecordBin[] bins; internal int numberOfBinsToSearch; @@ -431,13 +451,13 @@ internal unsafe class FreeRecordPool : IDisposable private readonly int* sizeIndex; private readonly int numBins; - internal readonly CheckEmptyWorker checkEmptyWorker; + internal readonly CheckEmptyWorker checkEmptyWorker; /// public override string ToString() => $"isFixedLen {IsFixedLength}, numBins {numBins}, searchNextBin {numberOfBinsToSearch}, checkEmptyWorker: {checkEmptyWorker}"; - internal FreeRecordPool(TsavoriteKV store, RevivificationSettings settings, int fixedRecordLength) + internal FreeRecordPool(TsavoriteKV store, RevivificationSettings settings, int fixedRecordLength) { this.store = store; IsFixedLength = fixedRecordLength > 0; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RecordLengths.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RecordLengths.cs index 4690f9c9f3..8dcf74d0bd 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RecordLengths.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RecordLengths.cs @@ -8,11 +8,13 @@ namespace Tsavorite.core { using static Utility; - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] internal long GetMinRevivifiableAddress() - => RevivificationManager.GetMinRevivifiableAddress(hlog.GetTailAddress(), hlog.ReadOnlyAddress); + => 
RevivificationManager.GetMinRevivifiableAddress(hlogBase.GetTailAddress(), hlogBase.ReadOnlyAddress); [MethodImpl(MethodImplOptions.AggressiveInlining)] private static int GetValueOffset(long physicalAddress, ref Value recordValue) => (int)((long)Unsafe.AsPointer(ref recordValue) - physicalAddress); @@ -28,7 +30,7 @@ internal long GetMinRevivifiableAddress() internal unsafe void SetExtraValueLength(ref Value recordValue, ref RecordInfo recordInfo, int usedValueLength, int fullValueLength) { if (RevivificationManager.IsFixedLength) - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); else SetVarLenExtraValueLength(ref recordValue, ref recordInfo, usedValueLength, fullValueLength); } @@ -47,10 +49,10 @@ internal static unsafe void SetVarLenExtraValueLength(ref Value recordValue, ref // We always store the "extra" as the difference between the aligned usedValueLength and the fullValueLength. // However, the UpdateInfo structures use the unaligned usedValueLength; aligned usedValueLength is not visible to the user. *extraValueLengthPtr = extraValueLength; - recordInfo.Filler = true; + recordInfo.SetHasFiller(); return; } - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); } [MethodImpl(MethodImplOptions.AggressiveInlining)] @@ -58,10 +60,10 @@ internal static unsafe void SetVarLenExtraValueLength(ref Value recordValue, ref { // FixedLen may be GenericAllocator which does not point physicalAddress to the actual record location, so calculate fullRecordLength via GetAverageRecordSize(). 
if (RevivificationManager.IsFixedLength) - return (RevivificationManager.FixedValueLength, RevivificationManager.FixedValueLength, hlog.GetAverageRecordSize()); + return (RevivificationManager.FixedValueLength, RevivificationManager.FixedValueLength, hlog.GetAverageRecordSize()); int usedValueLength, fullValueLength, allocatedSize, valueOffset = GetValueOffset(physicalAddress, ref recordValue); - if (recordInfo.Filler) + if (recordInfo.HasFiller) { usedValueLength = hlog.GetValueLength(ref recordValue); var alignedUsedValueLength = RoundUp(usedValueLength, sizeof(int)); @@ -88,7 +90,7 @@ internal static unsafe void SetVarLenExtraValueLength(ref Value recordValue, ref { // Called after a new record is allocated if (RevivificationManager.IsFixedLength) - return (RevivificationManager.FixedValueLength, RevivificationManager.FixedValueLength); + return (RevivificationManager.FixedValueLength, RevivificationManager.FixedValueLength); int valueOffset = GetValueOffset(newPhysicalAddress, ref recordValue); int usedValueLength = actualSize - valueOffset; @@ -109,7 +111,7 @@ internal void SetFreeRecordSize(long physicalAddress, ref RecordInfo recordInfo, // Skip the valuelength calls if we are not VarLen. 
if (RevivificationManager.IsFixedLength) { - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); return; } @@ -139,13 +141,13 @@ static void ClearExtraValueSpace(ref RecordInfo recordInfo, ref Value recordValu // Even though this says "SpanByte" it is just a utility function to zero space; no actual SpanByte instance is assumed SpanByte.Clear((byte*)Unsafe.AsPointer(ref recordValue) + usedValueLength, extraValueLength); } - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); } // Do not try to inline this; it causes TryAllocateRecord to bloat and slow bool TryTakeFreeRecord(TSessionFunctionsWrapper sessionFunctions, int requiredSize, ref int allocatedSize, int newKeySize, long minRevivAddress, out long logicalAddress, out long physicalAddress) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Caller checks for UseFreeRecordPool if (RevivificationManager.TryTake(allocatedSize, minRevivAddress, out logicalAddress, ref sessionFunctions.Ctx.RevivificationStats)) @@ -169,7 +171,7 @@ bool TryTakeFreeRecord(TSessio // Clear any no-longer-needed space, then call DisposeForRevivification again with newKeySize so SpanByte can be efficient about zeroinit. 
ClearExtraValueSpace(ref recordInfo, ref recordValue, minValueLength, fullValueLength); - sessionFunctions.DisposeForRevivification(ref hlog.GetKey(physicalAddress), ref recordValue, newKeySize, ref recordInfo); + storeFunctions.DisposeRecord(ref hlog.GetKey(physicalAddress), ref recordValue, DisposeReason.RevivificationFreeList, newKeySize); Debug.Assert(fullRecordLength >= allocatedSize, $"TryTakeFreeRecord: fullRecordLength {fullRecordLength} should be >= allocatedSize {allocatedSize}"); allocatedSize = fullRecordLength; @@ -193,10 +195,10 @@ bool TryTakeFreeRecord(TSessio [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void SetTombstoneAndExtraValueLength(ref Value recordValue, ref RecordInfo recordInfo, int usedValueLength, int fullValueLength) { - recordInfo.Tombstone = true; + recordInfo.SetTombstone(); if (RevivificationManager.IsFixedLength) { - recordInfo.Filler = false; + recordInfo.ClearHasFiller(); return; } @@ -207,7 +209,7 @@ internal void SetTombstoneAndExtraValueLength(ref Value recordValue, ref RecordI [MethodImpl(MethodImplOptions.AggressiveInlining)] internal (bool ok, int usedValueLength) TryReinitializeTombstonedValue(TSessionFunctionsWrapper sessionFunctions, ref RecordInfo srcRecordInfo, ref Key key, ref Value recordValue, int requiredSize, (int usedValueLength, int fullValueLength, int allocatedSize) recordLengths) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { if (RevivificationManager.IsFixedLength || recordLengths.allocatedSize < requiredSize) return (false, recordLengths.usedValueLength); @@ -218,9 +220,9 @@ internal void SetTombstoneAndExtraValueLength(ref Value recordValue, ref RecordI var minValueLength = requiredValueLength < recordLengths.usedValueLength ? 
requiredValueLength : recordLengths.usedValueLength; ClearExtraValueSpace(ref srcRecordInfo, ref recordValue, minValueLength, recordLengths.fullValueLength); - sessionFunctions.DisposeForRevivification(ref key, ref recordValue, newKeySize: -1, ref srcRecordInfo); + storeFunctions.DisposeRecord(ref key, ref recordValue, DisposeReason.RevivificationFreeList); - srcRecordInfo.Tombstone = false; + srcRecordInfo.ClearTombstone(); SetExtraValueLength(ref recordValue, ref srcRecordInfo, recordLengths.usedValueLength, recordLengths.fullValueLength); return (true, hlog.GetValueLength(ref recordValue)); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RevivificationManager.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RevivificationManager.cs index ecfac8cedc..34cd438ccc 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RevivificationManager.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/Revivification/RevivificationManager.cs @@ -5,9 +5,11 @@ namespace Tsavorite.core { - internal struct RevivificationManager + internal struct RevivificationManager + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - internal FreeRecordPool FreeRecordPool; + internal FreeRecordPool FreeRecordPool; internal readonly bool UseFreeRecordPool => FreeRecordPool is not null; internal RevivificationStats stats = new(); @@ -21,7 +23,7 @@ internal struct RevivificationManager internal double revivifiableFraction; - public RevivificationManager(TsavoriteKV store, bool isFixedLen, RevivificationSettings revivSettings, LogSettings logSettings) + public RevivificationManager(TsavoriteKV store, bool isFixedLen, RevivificationSettings revivSettings, LogSettings logSettings) { IsFixedLength = isFixedLen; revivifiableFraction = revivSettings is null || revivSettings.RevivifiableFraction == 
RevivificationSettings.DefaultRevivifiableFraction @@ -37,7 +39,7 @@ public RevivificationManager(TsavoriteKV store, bool isFixedLen, Rev IsEnabled = true; if (revivSettings.FreeRecordBins?.Length > 0) { - FreeRecordPool = new FreeRecordPool(store, revivSettings, IsFixedLength ? store.hlog.GetAverageRecordSize() : -1); + FreeRecordPool = new FreeRecordPool(store, revivSettings, IsFixedLength ? store.hlog.GetAverageRecordSize() : -1); restoreDeletedRecordsIfBinIsFull = revivSettings.RestoreDeletedRecordsIfBinIsFull; useFreeRecordPoolForCTT = revivSettings.UseFreeRecordPoolForCopyToTail; } diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/SplitIndex.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/SplitIndex.cs index 9f748ab6ac..c38b474aaf 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/SplitIndex.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/SplitIndex.cs @@ -5,7 +5,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { private void SplitBuckets(long hash) { @@ -99,16 +101,16 @@ private void SplitChunk( var logicalAddress = entry.Address; long physicalAddress = 0; - if (entry.ReadCache && entry.AbsoluteAddress >= readcache.HeadAddress) + if (entry.ReadCache && entry.AbsoluteAddress >= readCacheBase.HeadAddress) physicalAddress = readcache.GetPhysicalAddress(entry.AbsoluteAddress); - else if (logicalAddress >= hlog.HeadAddress) + else if (logicalAddress >= hlogBase.HeadAddress) physicalAddress = hlog.GetPhysicalAddress(logicalAddress); // It is safe to always use hlog instead of readcache for some calls such // as GetKey and GetInfo if (physicalAddress != 0) { - var hash = comparer.GetHashCode64(ref hlog.GetKey(physicalAddress)); + var hash = storeFunctions.GetKeyHashCode64(ref 
hlog.GetKey(physicalAddress)); if ((hash & state[resizeInfo.version].size_mask) >> (state[resizeInfo.version].size_bits - 1) == 0) { // Insert in left @@ -220,10 +222,10 @@ private long TraceBackForOtherChainStart(long logicalAddress, int bit) entry.Address = logicalAddress; if (entry.ReadCache) { - if (logicalAddress < readcache.HeadAddress) + if (logicalAddress < readCacheBase.HeadAddress) break; var physicalAddress = readcache.GetPhysicalAddress(logicalAddress); - var hash = comparer.GetHashCode64(ref readcache.GetKey(physicalAddress)); + var hash = storeFunctions.GetKeyHashCode64(ref readcache.GetKey(physicalAddress)); if ((hash & state[resizeInfo.version].size_mask) >> (state[resizeInfo.version].size_bits - 1) == bit) { return logicalAddress; @@ -232,10 +234,10 @@ private long TraceBackForOtherChainStart(long logicalAddress, int bit) } else { - if (logicalAddress < hlog.HeadAddress) + if (logicalAddress < hlogBase.HeadAddress) break; var physicalAddress = hlog.GetPhysicalAddress(logicalAddress); - var hash = comparer.GetHashCode64(ref hlog.GetKey(physicalAddress)); + var hash = storeFunctions.GetKeyHashCode64(ref hlog.GetKey(physicalAddress)); if ((hash & state[resizeInfo.version].size_mask) >> (state[resizeInfo.version].size_bits - 1) == bit) { return logicalAddress; diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/TryCopyToReadCache.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/TryCopyToReadCache.cs index b7af453138..46e01f62c9 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/TryCopyToReadCache.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Implementation/TryCopyToReadCache.cs @@ -3,7 +3,9 @@ namespace Tsavorite.core { - public unsafe partial class TsavoriteKV : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Copy a record from the disk to the read cache. 
@@ -12,20 +14,20 @@ public unsafe partial class TsavoriteKV : TsavoriteBase /// /// /// - /// Contains the and structures for this operation, + /// Contains the and structures for this operation, /// and allows passing back the newLogicalAddress for invalidation in the case of exceptions. /// /// True if copied to readcache, else false; readcache is "best effort", and we don't fail the read process, or slow it down by retrying. /// internal bool TryCopyToReadCache(TSessionFunctionsWrapper sessionFunctions, ref PendingContext pendingContext, - ref Key key, ref Input input, ref Value recordValue, ref OperationStackContext stackCtx) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + ref Key key, ref Input input, ref Value recordValue, ref OperationStackContext stackCtx) + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var (actualSize, allocatedSize, _) = hlog.GetRecordSize(ref key, ref recordValue); if (!TryAllocateRecordReadCache(ref pendingContext, ref stackCtx, allocatedSize, out long newLogicalAddress, out long newPhysicalAddress, out _)) return false; - ref var newRecordInfo = ref WriteNewRecordInfo(ref key, readcache, newPhysicalAddress, inNewVersion: false, tombstone: false, stackCtx.hei.Address); + ref var newRecordInfo = ref WriteNewRecordInfo(ref key, readCacheBase, newPhysicalAddress, inNewVersion: false, stackCtx.hei.Address); stackCtx.SetNewRecord(newLogicalAddress | Constants.kReadCacheBitMask); UpsertInfo upsertInfo = new() @@ -84,8 +86,7 @@ internal bool TryCopyToReadCache : TsavoriteBase + public unsafe partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Copy a record from the immutable region of the log, from the disk, or from ConditionalCopyToTail to the tail of the log (or splice into the log/readcache boundary). 
@@ -13,9 +15,9 @@ public unsafe partial class TsavoriteKV : TsavoriteBase /// /// /// - /// Contains the and structures for this operation, + /// Contains the and structures for this operation, /// and allows passing back the newLogicalAddress for invalidation in the case of exceptions. - /// if ., the recordInfo to close, if transferring. + /// if ., the recordInfo to close, if transferring. /// /// The reason for this operation. /// @@ -25,15 +27,15 @@ public unsafe partial class TsavoriteKV : TsavoriteBase /// /// internal OperationStatus TryCopyToTail(ref PendingContext pendingContext, - ref Key key, ref Input input, ref Value value, ref Output output, ref OperationStackContext stackCtx, + ref Key key, ref Input input, ref Value value, ref Output output, ref OperationStackContext stackCtx, ref RecordInfo srcRecordInfo, TSessionFunctionsWrapper sessionFunctions, WriteReason reason) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var (actualSize, allocatedSize, keySize) = hlog.GetRecordSize(ref key, ref value); if (!TryAllocateRecord(sessionFunctions, ref pendingContext, ref stackCtx, actualSize, ref allocatedSize, keySize, new AllocateOptions() { Recycle = true }, out long newLogicalAddress, out long newPhysicalAddress, out OperationStatus status)) return status; - ref var newRecordInfo = ref WriteNewRecordInfo(ref key, hlog, newPhysicalAddress, inNewVersion: sessionFunctions.Ctx.InNewVersion, tombstone: false, stackCtx.recSrc.LatestLogicalAddress); + ref var newRecordInfo = ref WriteNewRecordInfo(ref key, hlogBase, newPhysicalAddress, inNewVersion: sessionFunctions.Ctx.InNewVersion, stackCtx.recSrc.LatestLogicalAddress); stackCtx.SetNewRecord(newLogicalAddress); UpsertInfo upsertInfo = new() @@ -75,7 +77,7 @@ internal OperationStatus TryCopyToTail /// Wrapper to process log-related commands /// - /// - /// - public sealed class LogAccessor : IObservable> + public sealed class LogAccessor 
: IObservable> + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - private readonly TsavoriteKV store; - private readonly AllocatorBase allocator; + private readonly TsavoriteKV store; + private readonly TAllocator allocator; + private readonly AllocatorBase allocatorBase; /// /// Constructor /// /// /// - internal LogAccessor(TsavoriteKV store, AllocatorBase allocator) + internal LogAccessor(TsavoriteKV store, TAllocator allocator) { this.store = store; this.allocator = allocator; + allocatorBase = allocator.GetBase(); } /// /// Tail address of log /// - public long TailAddress => allocator.GetTailAddress(); + public long TailAddress => allocatorBase.GetTailAddress(); /// /// Read-only address of log, i.e. boundary between read-only region and mutable region /// - public long ReadOnlyAddress => allocator.ReadOnlyAddress; + public long ReadOnlyAddress => allocatorBase.ReadOnlyAddress; /// /// Safe read-only address of log, i.e. boundary between read-only region and mutable region /// - public long SafeReadOnlyAddress => allocator.SafeReadOnlyAddress; + public long SafeReadOnlyAddress => allocatorBase.SafeReadOnlyAddress; /// /// Head address of log, i.e. beginning of in-memory regions /// - public long HeadAddress => allocator.HeadAddress; + public long HeadAddress => allocatorBase.HeadAddress; /// /// Beginning address of log /// - public long BeginAddress => allocator.BeginAddress; + public long BeginAddress => allocatorBase.BeginAddress; /// /// Get the bytes used on the primary log by every record. 
Does not include @@ -66,19 +68,19 @@ internal LogAccessor(TsavoriteKV store, AllocatorBase al /// public int EmptyPageCount { - get => allocator.EmptyPageCount; - set { allocator.EmptyPageCount = value; } + get => allocatorBase.EmptyPageCount; + set { allocatorBase.EmptyPageCount = value; } } /// /// Maximum possible number of empty pages in Allocator /// - public int MaxEmptyPageCount => allocator.MaxEmptyPageCount; + public int MaxEmptyPageCount => allocatorBase.MaxEmptyPageCount; /// /// Minimum possible number of empty pages in Allocator /// - public int MinEmptyPageCount => allocator.MinEmptyPageCount; + public int MinEmptyPageCount => allocatorBase.MinEmptyPageCount; /// /// Set empty page count in allocator @@ -87,10 +89,10 @@ public int EmptyPageCount /// Whether to wait for shift addresses to complete public void SetEmptyPageCount(int pageCount, bool wait = false) { - allocator.EmptyPageCount = pageCount; + allocatorBase.EmptyPageCount = pageCount; if (wait) { - long newHeadAddress = (allocator.GetTailAddress() & ~allocator.PageSizeMask) - allocator.HeadOffsetLagAddress; + long newHeadAddress = (allocatorBase.GetTailAddress() & ~allocatorBase.PageSizeMask) - allocatorBase.HeadOffsetLagAddress; ShiftHeadAddress(newHeadAddress, wait); } } @@ -98,17 +100,17 @@ public void SetEmptyPageCount(int pageCount, bool wait = false) /// /// Total in-memory circular buffer capacity (in number of pages) /// - public int BufferSize => allocator.BufferSize; + public int BufferSize => allocatorBase.BufferSize; /// /// Actual memory used by log (not including heap objects) and overflow pages /// - public long MemorySizeBytes => ((long)(allocator.AllocatedPageCount + allocator.OverflowPageCount)) << allocator.LogPageSizeBits; + public long MemorySizeBytes => ((long)(allocatorBase.AllocatedPageCount + allocator.OverflowPageCount)) << allocatorBase.LogPageSizeBits; /// /// Number of pages allocated /// - public int AllocatedPageCount => allocator.AllocatedPageCount; + public 
int AllocatedPageCount => allocatorBase.AllocatedPageCount; /// /// Shift begin address to the provided untilAddress. Make sure address corresponds to record boundary if snapToPageStart is set to @@ -121,14 +123,14 @@ public void SetEmptyPageCount(int pageCount, bool wait = false) public void ShiftBeginAddress(long untilAddress, bool snapToPageStart = false, bool truncateLog = false) { if (snapToPageStart) - untilAddress &= ~allocator.PageSizeMask; + untilAddress &= ~allocatorBase.PageSizeMask; - bool epochProtected = store.epoch.ThisInstanceProtected(); + var epochProtected = store.epoch.ThisInstanceProtected(); try { if (!epochProtected) store.epoch.Resume(); - allocator.ShiftBeginAddress(untilAddress, truncateLog); + allocatorBase.ShiftBeginAddress(untilAddress, truncateLog); } finally { @@ -161,27 +163,28 @@ public void ShiftHeadAddress(long newHeadAddress, bool wait) try { store.epoch.Resume(); - allocator.ShiftHeadAddress(newHeadAddress); + allocatorBase.ShiftHeadAddress(newHeadAddress); } finally { store.epoch.Suspend(); } - while (wait && allocator.SafeHeadAddress < newHeadAddress) Thread.Yield(); + while (wait && allocatorBase.SafeHeadAddress < newHeadAddress) + _ = Thread.Yield(); } else { - allocator.ShiftHeadAddress(newHeadAddress); - while (wait && allocator.SafeHeadAddress < newHeadAddress) + allocatorBase.ShiftHeadAddress(newHeadAddress); + while (wait && allocatorBase.SafeHeadAddress < newHeadAddress) store.epoch.ProtectAndDrain(); } } public Func IsSizeBeyondLimit { - get => allocator.IsSizeBeyondLimit; - set => allocator.IsSizeBeyondLimit = value; + get => allocatorBase.IsSizeBeyondLimit; + set => allocatorBase.IsSizeBeyondLimit = value; } /// @@ -193,8 +196,8 @@ public Func IsSizeBeyondLimit /// Observer to which scan iterator is pushed public IDisposable Subscribe(IObserver> readOnlyObserver) { - allocator.OnReadOnlyObserver = readOnlyObserver; - return new LogSubscribeDisposable(allocator, true); + allocatorBase.OnReadOnlyObserver = 
readOnlyObserver; + return new LogSubscribeDisposable(allocatorBase, isReadOnly: true); } /// @@ -206,14 +209,14 @@ public IDisposable Subscribe(IObserver> readO /// Observer to which scan iterator is pushed public IDisposable SubscribeEvictions(IObserver> evictionObserver) { - allocator.OnEvictionObserver = evictionObserver; - return new LogSubscribeDisposable(allocator, false); + allocatorBase.OnEvictionObserver = evictionObserver; + return new LogSubscribeDisposable(allocatorBase, isReadOnly: false); } public IDisposable SubscribeDeserializations(IObserver> deserializationObserver) { - allocator.OnDeserializationObserver = deserializationObserver; - return new LogSubscribeDisposable(allocator, false); + allocatorBase.OnDeserializationObserver = deserializationObserver; + return new LogSubscribeDisposable(allocatorBase, isReadOnly: false); } /// @@ -221,13 +224,13 @@ public IDisposable SubscribeDeserializations(IObserver class LogSubscribeDisposable : IDisposable { - private readonly AllocatorBase allocator; + private readonly AllocatorBase allocator; private readonly bool readOnly; - public LogSubscribeDisposable(AllocatorBase allocator, bool readOnly) + public LogSubscribeDisposable(AllocatorBase allocator, bool isReadOnly) { this.allocator = allocator; - this.readOnly = readOnly; + readOnly = isReadOnly; } public void Dispose() @@ -251,7 +254,7 @@ public void ShiftReadOnlyAddress(long newReadOnlyAddress, bool wait) try { store.epoch.Resume(); - allocator.ShiftReadOnlyAddress(newReadOnlyAddress); + _ = allocatorBase.ShiftReadOnlyAddress(newReadOnlyAddress); } finally { @@ -259,14 +262,15 @@ public void ShiftReadOnlyAddress(long newReadOnlyAddress, bool wait) } // Wait for flush to complete - while (wait && allocator.FlushedUntilAddress < newReadOnlyAddress) Thread.Yield(); + while (wait && allocatorBase.FlushedUntilAddress < newReadOnlyAddress) + _ = Thread.Yield(); } else { - allocator.ShiftReadOnlyAddress(newReadOnlyAddress); + _ = 
allocatorBase.ShiftReadOnlyAddress(newReadOnlyAddress); // Wait for flush to complete - while (wait && allocator.FlushedUntilAddress < newReadOnlyAddress) + while (wait && allocatorBase.FlushedUntilAddress < newReadOnlyAddress) store.epoch.ProtectAndDrain(); } } @@ -277,7 +281,7 @@ public void ShiftReadOnlyAddress(long newReadOnlyAddress, bool wait) /// Scan iterator instance [MethodImpl(MethodImplOptions.AggressiveInlining)] public ITsavoriteScanIterator Scan(long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode = ScanBufferingMode.DoublePageBuffering, bool includeSealedRecords = false) - => allocator.Scan(store: null, beginAddress, endAddress, scanBufferingMode, includeSealedRecords); + => allocatorBase.Scan(store: null, beginAddress, endAddress, scanBufferingMode, includeSealedRecords); /// /// Push-scan the log given address range; returns all records with address less than endAddress @@ -285,7 +289,7 @@ public ITsavoriteScanIterator Scan(long beginAddress, long endAddres /// True if Scan completed; false if Scan ended early due to one of the TScanIterator reader functions returning false public bool Scan(ref TScanFunctions scanFunctions, long beginAddress, long endAddress, ScanBufferingMode scanBufferingMode = ScanBufferingMode.DoublePageBuffering) where TScanFunctions : IScanIteratorFunctions - => allocator.Scan(store, beginAddress, endAddress, ref scanFunctions, scanBufferingMode); + => allocatorBase.Scan(store, beginAddress, endAddress, ref scanFunctions, scanBufferingMode); /// /// Iterate versions of the specified key, starting with most recent @@ -293,7 +297,7 @@ public bool Scan(ref TScanFunctions scanFunctions, long beginAdd /// True if Scan completed; false if Scan ended early due to one of the TScanIterator reader functions returning false public bool IterateKeyVersions(ref TScanFunctions scanFunctions, ref Key key) where TScanFunctions : IScanIteratorFunctions - => allocator.IterateKeyVersions(store, ref key, ref scanFunctions); + 
=> allocatorBase.IterateKeyVersions(store, ref key, ref scanFunctions); /// /// Flush log until current tail (records are still retained in memory) @@ -301,7 +305,7 @@ public bool IterateKeyVersions(ref TScanFunctions scanFunctions, /// Synchronous wait for operation to complete public void Flush(bool wait) { - ShiftReadOnlyAddress(allocator.GetTailAddress(), wait); + ShiftReadOnlyAddress(allocatorBase.GetTailAddress(), wait); } /// @@ -310,7 +314,7 @@ public void Flush(bool wait) /// Wait for operation to complete public void FlushAndEvict(bool wait) { - ShiftHeadAddress(allocator.GetTailAddress(), wait); + ShiftHeadAddress(allocatorBase.GetTailAddress(), wait); } /// @@ -323,7 +327,7 @@ public void DisposeFromMemory() FlushAndEvict(true); // Delete from memory - allocator.DeleteFromMemory(); + allocatorBase.DeleteFromMemory(); } /// diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Tsavorite.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Tsavorite.cs index bb322539d5..99239ee927 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Tsavorite.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/Tsavorite.cs @@ -12,15 +12,19 @@ namespace Tsavorite.core { - public partial class TsavoriteKV : TsavoriteBase, IDisposable + /// + /// The Tsavorite Key/Value store class + /// + public partial class TsavoriteKV : TsavoriteBase, IDisposable + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - internal readonly AllocatorBase hlog; - internal readonly AllocatorBase readcache; + internal readonly TAllocator hlog; + internal readonly AllocatorBase hlogBase; + internal readonly TAllocator readcache; + internal readonly AllocatorBase readCacheBase; - /// - /// Compares two keys - /// - internal readonly ITsavoriteEqualityComparer comparer; + internal readonly TStoreFunctions storeFunctions; internal readonly bool UseReadCache; private readonly ReadCopyOptions ReadCopyOptions; @@ -34,7 +38,7 @@ public partial class 
TsavoriteKV : TsavoriteBase, IDisposable /// /// Maximum number of memory pages ever allocated /// - public long MaxAllocatedPageCount => hlog.MaxAllocatedPageCount; + public long MaxAllocatedPageCount => hlogBase.MaxAllocatedPageCount; /// /// Size of index in #cache lines (64 bytes each) @@ -49,91 +53,50 @@ public partial class TsavoriteKV : TsavoriteBase, IDisposable /// Number of allocations performed public long OverflowBucketAllocations => overflowBucketsAllocator.NumAllocations; - /// - /// Comparer used by Tsavorite - /// - public ITsavoriteEqualityComparer Comparer => comparer; - /// /// Hybrid log used by this Tsavorite instance /// - public LogAccessor Log { get; } + public LogAccessor Log { get; } /// /// Read cache used by this Tsavorite instance /// - public LogAccessor ReadCache { get; } + public LogAccessor ReadCache { get; } int maxSessionID; internal readonly bool CheckpointVersionSwitchBarrier; // version switch barrier - internal readonly OverflowBucketLockTable LockTable; + internal readonly OverflowBucketLockTable LockTable; internal void IncrementNumLockingSessions() { _hybridLogCheckpoint.info.manualLockingActive = true; - Interlocked.Increment(ref hlog.NumActiveLockingSessions); + Interlocked.Increment(ref hlogBase.NumActiveLockingSessions); } - internal void DecrementNumLockingSessions() => Interlocked.Decrement(ref hlog.NumActiveLockingSessions); + internal void DecrementNumLockingSessions() => Interlocked.Decrement(ref hlogBase.NumActiveLockingSessions); internal readonly int ThrottleCheckpointFlushDelayMs = -1; - internal RevivificationManager RevivificationManager; + internal RevivificationManager RevivificationManager; - /// - /// Create TsavoriteKV instance - /// - /// Config settings - public TsavoriteKV(TsavoriteKVSettings tsavoriteKVSettings) : - this( - tsavoriteKVSettings.GetIndexSizeCacheLines(), tsavoriteKVSettings.GetLogSettings(), - tsavoriteKVSettings.GetCheckpointSettings(), tsavoriteKVSettings.GetSerializerSettings(), - 
tsavoriteKVSettings.EqualityComparer, tsavoriteKVSettings.TryRecoverLatest, - null, revivificationSettings: tsavoriteKVSettings.RevivificationSettings) - { } + internal Func allocatorFactory; /// /// Create TsavoriteKV instance /// - /// Size of core index (#cache lines) - /// Log settings - /// Checkpoint settings - /// Serializer settings - /// Tsavorite equality comparer for key - /// Try to recover from latest checkpoint, if any - /// Logger factory to create an ILogger, if one is not passed in (e.g. from ). - /// Logger to use. - /// Settings for recycling deleted records on the log. - public TsavoriteKV(long size, LogSettings logSettings, - CheckpointSettings checkpointSettings = null, SerializerSettings serializerSettings = null, - ITsavoriteEqualityComparer comparer = null, bool tryRecoverLatest = false, - ILoggerFactory loggerFactory = null, ILogger logger = null, RevivificationSettings revivificationSettings = null) + /// Config settings + /// Store-level user function implementations + /// Func to call to create the allocator(s, if doing readcache) + public TsavoriteKV(KVSettings kvSettings, TStoreFunctions storeFunctions, Func allocatorFactory) + : base(kvSettings.logger ?? kvSettings.loggerFactory?.CreateLogger("TsavoriteKV Index Overflow buckets")) { - this.loggerFactory = loggerFactory; - this.logger = logger ?? this.loggerFactory?.CreateLogger("TsavoriteKV Constructor"); + this.allocatorFactory = allocatorFactory; + loggerFactory = kvSettings.loggerFactory; + logger = kvSettings.logger ?? 
kvSettings.loggerFactory?.CreateLogger("TsavoriteKV"); - if (comparer != null) - this.comparer = comparer; - else - { - if (typeof(ITsavoriteEqualityComparer).IsAssignableFrom(typeof(Key))) - { - if (default(Key) is not null) - { - this.comparer = default(Key) as ITsavoriteEqualityComparer; - } - else if (typeof(Key).GetConstructor(Type.EmptyTypes) != null) - { - this.comparer = Activator.CreateInstance(typeof(Key)) as ITsavoriteEqualityComparer; - } - } - else - { - this.comparer = TsavoriteEqualityComparer.Get(); - } - } + this.storeFunctions = storeFunctions; - checkpointSettings ??= new CheckpointSettings(); + var checkpointSettings = kvSettings.GetCheckpointSettings() ?? new CheckpointSettings(); CheckpointVersionSwitchBarrier = checkpointSettings.CheckpointVersionSwitchBarrier; ThrottleCheckpointFlushDelayMs = checkpointSettings.ThrottleCheckpointFlushDelayMs; @@ -150,7 +113,9 @@ public TsavoriteKV(long size, LogSettings logSettings, if (checkpointSettings.CheckpointManager is null) disposeCheckpointManager = true; - UseReadCache = logSettings.ReadCacheSettings is not null; + var logSettings = kvSettings.GetLogSettings(); + + UseReadCache = kvSettings.ReadCacheEnabled; ReadCopyOptions = logSettings.ReadCopyOptions; if (ReadCopyOptions.CopyTo == ReadCopyTo.Inherit) @@ -161,80 +126,43 @@ public TsavoriteKV(long size, LogSettings logSettings, if (ReadCopyOptions.CopyFrom == ReadCopyFrom.Inherit) ReadCopyOptions.CopyFrom = ReadCopyFrom.Device; - bool isFixedLenReviv = true; + bool isFixedLenReviv = hlog.IsFixedLength; - if (!Utility.IsBlittable() || !Utility.IsBlittable()) - { - hlog = new GenericAllocator(logSettings, serializerSettings, this.comparer, null, epoch, logger: logger ?? 
loggerFactory?.CreateLogger("GenericAllocator HybridLog")); - Log = new LogAccessor(this, hlog); - if (UseReadCache) - { - readcache = new GenericAllocator( - new LogSettings - { - LogDevice = new NullDevice(), - ObjectLogDevice = new NullDevice(), - PageSizeBits = logSettings.ReadCacheSettings.PageSizeBits, - MemorySizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - SegmentSizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - MutableFraction = 1 - logSettings.ReadCacheSettings.SecondChanceFraction - }, serializerSettings, this.comparer, ReadCacheEvict, epoch, logger: logger ?? loggerFactory?.CreateLogger("GenericAllocator ReadCache")); - readcache.Initialize(); - ReadCache = new LogAccessor(this, readcache); - } - } - else if (typeof(Key) == typeof(SpanByte) && typeof(Value) == typeof(SpanByte)) - { - isFixedLenReviv = false; - var spanByteComparer = this.comparer as ITsavoriteEqualityComparer; - hlog = new SpanByteAllocator(logSettings, spanByteComparer, null, epoch, logger: logger ?? loggerFactory?.CreateLogger("SpanByteAllocator HybridLog")) as AllocatorBase; - Log = new LogAccessor(this, hlog); - if (UseReadCache) - { - readcache = new SpanByteAllocator( - new LogSettings - { - LogDevice = new NullDevice(), - PageSizeBits = logSettings.ReadCacheSettings.PageSizeBits, - MemorySizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - SegmentSizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - MutableFraction = 1 - logSettings.ReadCacheSettings.SecondChanceFraction - }, spanByteComparer, ReadCacheEvict, epoch, logger: logger ?? loggerFactory?.CreateLogger("SpanByteAllocator ReadCache")) as AllocatorBase; - readcache.Initialize(); - ReadCache = new LogAccessor(this, readcache); - } - } - else + // Create the allocator + var allocatorSettings = new AllocatorSettings(logSettings, epoch, kvSettings.logger ?? 
kvSettings.loggerFactory?.CreateLogger(typeof(TAllocator).Name)); + hlog = allocatorFactory(allocatorSettings, storeFunctions); + hlogBase = hlog.GetBase(); + hlogBase.Initialize(); + Log = new(this, hlog); + + if (UseReadCache) { - hlog = new BlittableAllocator(logSettings, this.comparer, null, epoch, logger: logger ?? loggerFactory?.CreateLogger("BlittableAllocator HybridLog")); - Log = new LogAccessor(this, hlog); - if (UseReadCache) + allocatorSettings.LogSettings = new() { - readcache = new BlittableAllocator( - new LogSettings - { - LogDevice = new NullDevice(), - PageSizeBits = logSettings.ReadCacheSettings.PageSizeBits, - MemorySizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - SegmentSizeBits = logSettings.ReadCacheSettings.MemorySizeBits, - MutableFraction = 1 - logSettings.ReadCacheSettings.SecondChanceFraction - }, this.comparer, ReadCacheEvict, epoch, logger: logger ?? loggerFactory?.CreateLogger("BlittableAllocator ReadCache")); - readcache.Initialize(); - ReadCache = new LogAccessor(this, readcache); - } + LogDevice = new NullDevice(), + ObjectLogDevice = hlog.HasObjectLog ? new NullDevice() : null, + PageSizeBits = logSettings.ReadCacheSettings.PageSizeBits, + MemorySizeBits = logSettings.ReadCacheSettings.MemorySizeBits, + SegmentSizeBits = logSettings.ReadCacheSettings.MemorySizeBits, + MutableFraction = 1 - logSettings.ReadCacheSettings.SecondChanceFraction + }; + allocatorSettings.logger = kvSettings.logger ?? 
kvSettings.loggerFactory?.CreateLogger($"{typeof(TAllocator).Name} ReadCache"); + allocatorSettings.evictCallback = ReadCacheEvict; + readcache = allocatorFactory(allocatorSettings, storeFunctions); + readCacheBase = readcache.GetBase(); + readCacheBase.Initialize(); + ReadCache = new(this, readcache); } - hlog.Initialize(); - sectorSize = (int)logSettings.LogDevice.SectorSize; - Initialize(size, sectorSize); + Initialize(kvSettings.GetIndexSizeCacheLines(), sectorSize); - LockTable = new OverflowBucketLockTable(this); - RevivificationManager = new(this, isFixedLenReviv, revivificationSettings, logSettings); + LockTable = new OverflowBucketLockTable(this); + RevivificationManager = new(this, isFixedLenReviv, kvSettings.RevivificationSettings, logSettings); systemState = SystemState.Make(Phase.REST, 1); - if (tryRecoverLatest) + if (kvSettings.TryRecoverLatest) { try { @@ -245,10 +173,10 @@ public TsavoriteKV(long size, LogSettings logSettings, } /// Get the hashcode for a key. - public long GetKeyHash(Key key) => comparer.GetHashCode64(ref key); + public long GetKeyHash(Key key) => storeFunctions.GetKeyHashCode64(ref key); /// Get the hashcode for a key. 
- public long GetKeyHash(ref Key key) => comparer.GetHashCode64(ref key); + public long GetKeyHash(ref Key key) => storeFunctions.GetKeyHashCode64(ref key); /// /// Initiate full checkpoint @@ -267,15 +195,15 @@ public TsavoriteKV(long size, LogSettings logSettings, /// public bool TryInitiateFullCheckpoint(out Guid token, CheckpointType checkpointType, long targetVersion = -1) { - ISynchronizationTask backend; + ISynchronizationTask backend; if (checkpointType == CheckpointType.FoldOver) - backend = new FoldOverCheckpointTask(); + backend = new FoldOverCheckpointTask(); else if (checkpointType == CheckpointType.Snapshot) - backend = new SnapshotCheckpointTask(); + backend = new SnapshotCheckpointTask(); else throw new TsavoriteException("Unsupported full checkpoint type"); - var result = StartStateMachine(new FullCheckpointStateMachine(backend, targetVersion)); + var result = StartStateMachine(new FullCheckpointStateMachine(backend, targetVersion)); if (result) token = _hybridLogCheckpointToken; else @@ -319,7 +247,7 @@ public bool TryInitiateFullCheckpoint(out Guid token, CheckpointType checkpointT /// Whether we could initiate the checkpoint. Use CompleteCheckpointAsync to wait completion. 
public bool TryInitiateIndexCheckpoint(out Guid token) { - var result = StartStateMachine(new IndexSnapshotStateMachine()); + var result = StartStateMachine(new IndexSnapshotStateMachine()); token = _indexCheckpointToken; return result; } @@ -361,20 +289,20 @@ public bool TryInitiateIndexCheckpoint(out Guid token) public bool TryInitiateHybridLogCheckpoint(out Guid token, CheckpointType checkpointType, bool tryIncremental = false, long targetVersion = -1) { - ISynchronizationTask backend; + ISynchronizationTask backend; if (checkpointType == CheckpointType.FoldOver) - backend = new FoldOverCheckpointTask(); + backend = new FoldOverCheckpointTask(); else if (checkpointType == CheckpointType.Snapshot) { - if (tryIncremental && _lastSnapshotCheckpoint.info.guid != default && _lastSnapshotCheckpoint.info.finalLogicalAddress > hlog.FlushedUntilAddress && (hlog is not GenericAllocator)) - backend = new IncrementalSnapshotCheckpointTask(); + if (tryIncremental && _lastSnapshotCheckpoint.info.guid != default && _lastSnapshotCheckpoint.info.finalLogicalAddress > hlogBase.FlushedUntilAddress && !hlog.HasObjectLog) + backend = new IncrementalSnapshotCheckpointTask(); else - backend = new SnapshotCheckpointTask(); + backend = new SnapshotCheckpointTask(); } else throw new TsavoriteException("Unsupported checkpoint type"); - var result = StartStateMachine(new HybridLogCheckpointStateMachine(backend, targetVersion)); + var result = StartStateMachine(new HybridLogCheckpointStateMachine(backend, targetVersion)); token = _hybridLogCheckpointToken; return result; } @@ -535,11 +463,11 @@ public async ValueTask CompleteCheckpointAsync(CancellationToken token = default [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextRead(ref Key key, ref Input input, ref Output output, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext 
= new PendingContext(sessionFunctions.Ctx.ReadCopyOptions); OperationStatus internalStatus; - var keyHash = comparer.GetHashCode64(ref key); + var keyHash = storeFunctions.GetKeyHashCode64(ref key); do internalStatus = InternalRead(ref key, keyHash, ref input, ref output, context, ref pcontext, sessionFunctions); @@ -553,11 +481,11 @@ internal Status ContextRead(re [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextRead(ref Key key, ref Input input, ref Output output, ref ReadOptions readOptions, out RecordMetadata recordMetadata, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = new PendingContext(sessionFunctions.Ctx.ReadCopyOptions, ref readOptions); OperationStatus internalStatus; - var keyHash = readOptions.KeyHash ?? comparer.GetHashCode64(ref key); + var keyHash = readOptions.KeyHash ?? storeFunctions.GetKeyHashCode64(ref key); do internalStatus = InternalRead(ref key, keyHash, ref input, ref output, context, ref pcontext, sessionFunctions); @@ -570,7 +498,7 @@ internal Status ContextRead(re [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextReadAtAddress(long address, ref Input input, ref Output output, ref ReadOptions readOptions, out RecordMetadata recordMetadata, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = new PendingContext(sessionFunctions.Ctx.ReadCopyOptions, ref readOptions, noKey: true); Key key = default; @@ -579,7 +507,7 @@ internal Status ContextReadAtAddress(long address, ref Key key, ref Input input, ref Output output, ref ReadOptions readOptions, out RecordMetadata recordMetadata, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + 
where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = new PendingContext(sessionFunctions.Ctx.ReadCopyOptions, ref readOptions, noKey: false); return ContextReadAtAddress(address, ref key, ref input, ref output, ref readOptions, out recordMetadata, context, ref pcontext, sessionFunctions); @@ -588,7 +516,7 @@ internal Status ContextReadAtAddress(long address, ref Key key, ref Input input, ref Output output, ref ReadOptions readOptions, out RecordMetadata recordMetadata, Context context, ref PendingContext pcontext, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { OperationStatus internalStatus; do @@ -602,7 +530,7 @@ private Status ContextReadAtAddress(ref Key key, long keyHash, ref Input input, ref Value value, ref Output output, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = default(PendingContext); OperationStatus internalStatus; @@ -618,7 +546,7 @@ internal Status ContextUpsert( [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextUpsert(ref Key key, long keyHash, ref Input input, ref Value value, ref Output output, out RecordMetadata recordMetadata, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = default(PendingContext); OperationStatus internalStatus; @@ -635,7 +563,7 @@ internal Status ContextUpsert( [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextRMW(ref Key key, long keyHash, ref Input input, ref Output output, out RecordMetadata recordMetadata, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where 
TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = default(PendingContext); OperationStatus internalStatus; @@ -651,7 +579,7 @@ internal Status ContextRMW(ref [MethodImpl(MethodImplOptions.AggressiveInlining)] internal Status ContextDelete(ref Key key, long keyHash, Context context, TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { var pcontext = default(PendingContext); OperationStatus internalStatus; @@ -673,7 +601,7 @@ public bool GrowIndex() if (epoch.ThisInstanceProtected()) throw new TsavoriteException("Cannot use GrowIndex when using non-async sessions"); - if (!StartStateMachine(new IndexResizeStateMachine())) + if (!StartStateMachine(new IndexResizeStateMachine())) return false; epoch.Resume(); @@ -682,7 +610,7 @@ public bool GrowIndex() { while (true) { - SystemState _systemState = SystemState.Copy(ref systemState); + var _systemState = SystemState.Copy(ref systemState); if (_systemState.Phase == Phase.PREPARE_GROW) ThreadStateMachineStep(null, NullSession.Instance, default); else if (_systemState.Phase == Phase.IN_PROGRESS_GROW) @@ -690,7 +618,7 @@ public bool GrowIndex() else if (_systemState.Phase == Phase.REST) break; epoch.ProtectAndDrain(); - Thread.Yield(); + _ = Thread.Yield(); } } finally @@ -706,8 +634,8 @@ public bool GrowIndex() public void Dispose() { Free(); - hlog.Dispose(); - readcache?.Dispose(); + hlogBase.Dispose(); + readCacheBase?.Dispose(); LockTable.Dispose(); _lastSnapshotCheckpoint.Dispose(); if (disposeCheckpointManager) @@ -725,7 +653,7 @@ private unsafe long GetEntryCount() var table_size_ = state[version].size; var ptable_ = state[version].tableAligned; long total_entry_count = 0; - long beginAddress = hlog.BeginAddress; + long beginAddress = hlogBase.BeginAddress; for (long bucket = 0; bucket < table_size_; ++bucket) { @@ -747,7 +675,7 @@ private unsafe string DumpDistributionInternal(int 
version) var table_size_ = state[version].size; var ptable_ = state[version].tableAligned; long total_record_count = 0; - long beginAddress = hlog.BeginAddress; + long beginAddress = hlogBase.BeginAddress; Dictionary histogram = new(); for (long bucket = 0; bucket < table_size_; ++bucket) @@ -761,7 +689,7 @@ private unsafe string DumpDistributionInternal(int version) { var x = default(HashBucketEntry); x.word = b.bucket_entries[bucket_entry]; - if (((!x.ReadCache) && (x.Address >= beginAddress)) || (x.ReadCache && (x.AbsoluteAddress >= readcache.HeadAddress))) + if (((!x.ReadCache) && (x.Address >= beginAddress)) || (x.ReadCache && (x.AbsoluteAddress >= readCacheBase.HeadAddress))) { if (tags.Contains(x.Tag) && !x.Tentative) throw new TsavoriteException("Duplicate tag found in index"); diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteIterator.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteIterator.cs index f9e594f721..7abc93ccc0 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteIterator.cs @@ -7,7 +7,9 @@ namespace Tsavorite.core { - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { /// /// Pull iterator for all (distinct) live key-values stored in Tsavorite @@ -20,7 +22,7 @@ public ITsavoriteScanIterator Iterate(this, functions, untilAddress, loggerFactory: loggerFactory); + return new TsavoriteKVIterator(this, functions, untilAddress, loggerFactory: loggerFactory); } /// @@ -36,7 +38,7 @@ public bool Iterate(Functions { if (untilAddress == -1) untilAddress = Log.TailAddress; - using TsavoriteKVIterator iter = new(this, functions, untilAddress, loggerFactory: loggerFactory); + using TsavoriteKVIterator iter = new(this, functions, untilAddress, loggerFactory: loggerFactory); if 
(!scanFunctions.OnStart(iter.BeginAddress, iter.EndAddress)) return false; @@ -71,13 +73,15 @@ public ITsavoriteScanIterator Iterate(Compactio => throw new TsavoriteException("Invoke Iterate() on a client session (ClientSession), or use store.Iterate overload with Functions provided as parameter"); } - internal sealed class TsavoriteKVIterator : ITsavoriteScanIterator + internal sealed class TsavoriteKVIterator : ITsavoriteScanIterator where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - private readonly TsavoriteKV store; - private readonly TsavoriteKV tempKv; - private readonly ClientSession tempKvSession; - private readonly BasicContext tempbContext; + private readonly TsavoriteKV store; + private readonly TsavoriteKV tempKv; + private readonly ClientSession tempKvSession; + private readonly BasicContext tempbContext; private readonly ITsavoriteScanIterator mainKvIter; private readonly IPushScanIterator pushScanIterator; private ITsavoriteScanIterator tempKvIter; @@ -90,13 +94,20 @@ enum IterationPhase }; private IterationPhase iterationPhase; - public TsavoriteKVIterator(TsavoriteKV store, Functions functions, long untilAddress, ILoggerFactory loggerFactory = null) + public TsavoriteKVIterator(TsavoriteKV store, Functions functions, long untilAddress, ILoggerFactory loggerFactory = null) { this.store = store; iterationPhase = IterationPhase.MainKv; - tempKv = new TsavoriteKV(store.IndexSize, new LogSettings { LogDevice = new NullDevice(), ObjectLogDevice = new NullDevice(), MutableFraction = 1 }, comparer: store.Comparer, - loggerFactory: loggerFactory); + var tempKVSettings = new KVSettings(baseDir: null, loggerFactory: loggerFactory) + { + IndexSize = KVSettings.SetIndexSizeFromCacheLines(store.IndexSize), + LogDevice = new NullDevice(), + ObjectLogDevice = new NullDevice(), + MutableFraction = 1 + }; + + tempKv = new TsavoriteKV(tempKVSettings, store.storeFunctions, store.allocatorFactory); 
tempKvSession = tempKv.NewSession(functions); tempbContext = tempKvSession.BasicContext; mainKvIter = store.Log.Scan(store.Log.BeginAddress, untilAddress); @@ -132,7 +143,7 @@ public bool GetNext(out RecordInfo recordInfo) if (mainKvIter.GetNext(out recordInfo)) { ref var key = ref mainKvIter.GetKey(); - OperationStackContext stackCtx = default; + OperationStackContext stackCtx = default; if (IsTailmostMainKvRecord(ref key, recordInfo, ref stackCtx)) return true; @@ -173,7 +184,7 @@ internal bool PushNext(ref TScanFunctions scanFunctions, long nu { if (iterationPhase == IterationPhase.MainKv) { - OperationStackContext stackCtx = default; + OperationStackContext stackCtx = default; if (mainKvIter.GetNext(out var recordInfo)) { try @@ -183,7 +194,7 @@ internal bool PushNext(ref TScanFunctions scanFunctions, long nu { // Push Iter records are in temp storage so do not need locks, but we'll call ConcurrentReader because, for example, GenericAllocator // may need to know the object is in that region. - stop = mainKvIter.CurrentAddress >= store.hlog.ReadOnlyAddress + stop = mainKvIter.CurrentAddress >= store.hlogBase.ReadOnlyAddress ? !scanFunctions.ConcurrentReader(ref key, ref mainKvIter.GetValue(), new RecordMetadata(recordInfo, mainKvIter.CurrentAddress), numRecords, out _) : !scanFunctions.SingleReader(ref key, ref mainKvIter.GetValue(), new RecordMetadata(recordInfo, mainKvIter.CurrentAddress), numRecords, out _); return !stop; @@ -200,7 +211,7 @@ internal bool PushNext(ref TScanFunctions scanFunctions, long nu finally { if (stackCtx.recSrc.HasLock) - store.UnlockForScan(ref stackCtx, ref mainKvIter.GetKey(), ref pushScanIterator.GetLockableInfo()); + store.UnlockForScan(ref stackCtx); } } @@ -240,19 +251,19 @@ private void ProcessNonTailmostMainKvRecord(RecordInfo recordInfo, Key key) { // Check if it's in-memory first so we don't spuriously create a tombstone record. 
if (tempbContext.ContainsKeyInMemory(ref key, out _).Found) - tempbContext.Delete(ref key); + _ = tempbContext.Delete(ref key); } else - tempbContext.Upsert(ref key, ref mainKvIter.GetValue()); + _ = tempbContext.Upsert(ref key, ref mainKvIter.GetValue()); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - bool IsTailmostMainKvRecord(ref Key key, RecordInfo mainKvRecordInfo, ref OperationStackContext stackCtx) + bool IsTailmostMainKvRecord(ref Key key, RecordInfo mainKvRecordInfo, ref OperationStackContext stackCtx) { - stackCtx = new(store.comparer.GetHashCode64(ref key)); + stackCtx = new(store.storeFunctions.GetKeyHashCode64(ref key)); if (store.FindTag(ref stackCtx.hei)) { - stackCtx.SetRecordSourceToHashEntry(store.hlog); + stackCtx.SetRecordSourceToHashEntry(store.hlogBase); if (store.UseReadCache) store.SkipReadCache(ref stackCtx, out _); if (stackCtx.recSrc.LogicalAddress == mainKvIter.CurrentAddress) diff --git a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteThread.cs b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteThread.cs index 810b18b67c..0215e93c69 100644 --- a/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteThread.cs +++ b/libs/storage/Tsavorite/cs/src/core/Index/Tsavorite/TsavoriteThread.cs @@ -8,11 +8,13 @@ namespace Tsavorite.core { - public partial class TsavoriteKV : TsavoriteBase + public partial class TsavoriteKV : TsavoriteBase + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void InternalRefresh(TSessionFunctionsWrapper sessionFunctions) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { epoch.ProtectAndDrain(); @@ -37,18 +39,18 @@ internal void InternalRefresh( // have reached PREPARE and all multi-key ops have drained (see VersionChangeTask.OnThreadState). 
if (CheckpointVersionSwitchBarrier && sessionFunctions.Ctx.phase == Phase.PREPARE && - hlog.NumActiveLockingSessions == 0) + hlogBase.NumActiveLockingSessions == 0) { epoch.ProtectAndDrain(); - Thread.Yield(); + _ = Thread.Yield(); continue; } if (sessionFunctions.Ctx.phase == Phase.PREPARE_GROW && - hlog.NumActiveLockingSessions == 0) + hlogBase.NumActiveLockingSessions == 0) { epoch.ProtectAndDrain(); - Thread.Yield(); + _ = Thread.Yield(); continue; } break; @@ -85,7 +87,7 @@ internal static void CopyContext(TsavoriteExecutionConte internal bool InternalCompletePending(TSessionFunctionsWrapper sessionFunctions, bool wait = false, CompletedOutputIterator completedOutputs = null) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { while (true) { @@ -106,21 +108,19 @@ internal bool InternalCompletePending(TSessionFunctionsWrapper sessionFunctions, CompletedOutputIterator completedOutputs) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { - hlog.TryComplete(); + _ = hlogBase.TryComplete(); if (sessionFunctions.Ctx.readyResponses.Count == 0) return; while (sessionFunctions.Ctx.readyResponses.TryDequeue(out AsyncIOContext request)) - { InternalCompletePendingRequest(sessionFunctions, request, completedOutputs); - } } internal void InternalCompletePendingRequest(TSessionFunctionsWrapper sessionFunctions, AsyncIOContext request, CompletedOutputIterator completedOutputs) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { // Get and Remove this request.id pending dictionary if it is there. 
if (sessionFunctions.Ctx.ioPendingRequests.Remove(request.id, out var pendingContext)) @@ -141,7 +141,7 @@ internal void InternalCompletePendingRequest internal Status InternalCompletePendingRequestFromContext(TSessionFunctionsWrapper sessionFunctions, AsyncIOContext request, ref PendingContext pendingContext, out AsyncIOContext newRequest) - where TSessionFunctionsWrapper : ISessionFunctionsWrapper + where TSessionFunctionsWrapper : ISessionFunctionsWrapper { Debug.Assert(epoch.ThisInstanceProtected(), "InternalCompletePendingRequestFromContext requires epoch acquision"); newRequest = default; @@ -189,7 +189,7 @@ ref pendingContext.input.Get(), unsafe { ref RecordInfo recordInfo = ref hlog.GetInfoFromBytePointer(request.record.GetValidPointer()); - sessionFunctions.DisposeDeserializedFromDisk(ref hlog.GetContextRecordKey(ref request), ref hlog.GetContextRecordValue(ref request), ref recordInfo); + storeFunctions.DisposeRecord(ref hlog.GetContextRecordKey(ref request), ref hlog.GetContextRecordValue(ref request), DisposeReason.DeserializedFromDisk); } request.Dispose(); return status; diff --git a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs index 23bc433cae..e8feca24f0 100644 --- a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs +++ b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLog.cs @@ -15,6 +15,8 @@ namespace Tsavorite.core { + using EmptyStoreFunctions = StoreFunctions>; + /// /// Tsavorite log /// @@ -22,7 +24,7 @@ public sealed class TsavoriteLog : IDisposable { private Exception cannedException = null; - readonly BlittableAllocator allocator; + readonly BlittableAllocatorImpl allocator; readonly LightEpoch epoch; readonly ILogCommitManager logCommitManager; readonly bool disposeLogCommitManager; @@ -189,9 +191,10 @@ private TsavoriteLog(TsavoriteLogSettings logSettings, bool syncRecover, ILogger CommittedBeginAddress = 
Constants.kFirstValidAddress; SafeTailAddress = Constants.kFirstValidAddress; commitQueue = new WorkQueueLIFO(SerialCommitCallbackWorker); - allocator = new BlittableAllocator( - logSettings.GetLogSettings(), null, - null, epoch, CommitCallback, logger); + allocator = new( + new AllocatorSettings(logSettings.GetLogSettings(), epoch, logger) { flushCallback = CommitCallback }, + StoreFunctions.Create(EmptyKeyComparer.Instance), + @this => new BlittableAllocator(@this)); allocator.Initialize(); beginAddress = allocator.BeginAddress; diff --git a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs index 9238424677..e4610ce365 100644 --- a/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs +++ b/libs/storage/Tsavorite/cs/src/core/TsavoriteLog/TsavoriteLogIterator.cs @@ -12,6 +12,8 @@ namespace Tsavorite.core { + using EmptyStoreFunctions = StoreFunctions>; + /// /// Scan iterator for hybrid log /// @@ -19,7 +21,7 @@ public sealed class TsavoriteLogScanIterator : ScanIteratorBase, IDisposable { private readonly string name; private readonly TsavoriteLog tsavoriteLog; - private readonly BlittableAllocator allocator; + private readonly BlittableAllocatorImpl allocator; private readonly BlittableFrame frame; private readonly GetMemory getMemory; private readonly int headerSize; @@ -52,7 +54,8 @@ public sealed class TsavoriteLogScanIterator : ScanIteratorBase, IDisposable /// /// /// - internal unsafe TsavoriteLogScanIterator(TsavoriteLog tsavoriteLog, BlittableAllocator hlog, long beginAddress, long endAddress, GetMemory getMemory, ScanBufferingMode scanBufferingMode, LightEpoch epoch, int headerSize, string name, bool scanUncommitted = false, ILogger logger = null) + internal unsafe TsavoriteLogScanIterator(TsavoriteLog tsavoriteLog, BlittableAllocatorImpl hlog, long beginAddress, long endAddress, + GetMemory getMemory, ScanBufferingMode scanBufferingMode, 
LightEpoch epoch, int headerSize, string name, bool scanUncommitted = false, ILogger logger = null) : base(beginAddress == 0 ? hlog.GetFirstValidLogicalAddress(0) : beginAddress, endAddress, scanBufferingMode, false, epoch, hlog.LogPageSizeBits, logger: logger) { this.tsavoriteLog = tsavoriteLog; @@ -681,7 +684,7 @@ private unsafe void AsyncReadPagesCallback(uint errorCode, uint numBytes, object if (result.freeBuffer1 != null) { if (errorCode == 0) - allocator.PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); + allocator._wrapper.PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); result.freeBuffer1.Return(); result.freeBuffer1 = null; } diff --git a/libs/storage/Tsavorite/cs/src/core/VarLen/IVariableLengthInput.cs b/libs/storage/Tsavorite/cs/src/core/VarLen/IVariableLengthInput.cs index b2eb242606..9eaa027f39 100644 --- a/libs/storage/Tsavorite/cs/src/core/VarLen/IVariableLengthInput.cs +++ b/libs/storage/Tsavorite/cs/src/core/VarLen/IVariableLengthInput.cs @@ -6,7 +6,7 @@ namespace Tsavorite.core /// /// Interface for variable length Inputs to RMW; only implemented for of . 
/// - internal interface IVariableLengthInput + public interface IVariableLengthInput { /// /// Length of resulting value object when performing RMW modification of value using given input diff --git a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByte.cs b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByte.cs index e3bb61cb4d..3788e820b9 100644 --- a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByte.cs +++ b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByte.cs @@ -70,7 +70,9 @@ public unsafe struct SpanByte /// public int Length { + [MethodImpl(MethodImplOptions.AggressiveInlining)] readonly get => length & ~HeaderMask; + [MethodImpl(MethodImplOptions.AggressiveInlining)] set { length = (length & HeaderMask) | value; } } @@ -110,6 +112,7 @@ public SpanByte(int length, IntPtr payload) /// public long ExtraMetadata { + [MethodImpl(MethodImplOptions.AggressiveInlining)] get { if (Serialized) @@ -117,6 +120,8 @@ public long ExtraMetadata else return MetadataSize > 0 ? *(long*)payload : 0; } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] set { if (value > 0) @@ -145,17 +150,17 @@ public void MarkExtraMetadata() /// Unmark as having 8-byte metadata in header of payload /// [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void UnmarkExtraMetadata() - { - length &= ~ExtraMetadataBitMask; - } + public void UnmarkExtraMetadata() => length &= ~ExtraMetadataBitMask; /// /// Check or set struct as invalid /// public bool Invalid { + [MethodImpl(MethodImplOptions.AggressiveInlining)] readonly get => ((length & UnserializedBitMask) != 0) && payload == IntPtr.Zero; + + [MethodImpl(MethodImplOptions.AggressiveInlining)] set { Debug.Assert(value, "Cannot restore an Invalid SpanByte to Valid; must reassign the SpanByte as a full value"); @@ -370,7 +375,6 @@ public bool TrySafeCopyTo(ref SpanByte dst, int fullDestSize) { // dst length is equal or longer than src. We can adjust the length header on the serialized log, if we wish (here, we do). 
// This method will also zero out the extra space to retain log scan correctness. - dst.UnmarkExtraMetadata(); dst.ShrinkSerializedLength(Length); CopyTo(ref dst); dst.Length = Length; @@ -385,17 +389,14 @@ public bool TrySafeCopyTo(ref SpanByte dst, int fullDestSize) /// /// New length of payload (including metadata) [MethodImpl(MethodImplOptions.AggressiveInlining)] - public bool ShrinkSerializedLength(int newLength) + public void ShrinkSerializedLength(int newLength) { - if (newLength > Length) return false; - // Zero-fill extra space - needed so log scan does not see spurious data - *before* setting length to 0. if (newLength < Length) { - AsSpanWithMetadata().Slice(newLength).Clear(); + Unsafe.InitBlockUnaligned(ToPointerWithMetadata() + newLength, 0, (uint)(Length - newLength)); Length = newLength; } - return true; } /// @@ -459,6 +460,7 @@ public void CopyWithHeaderTo(ref SpanByteAndMemory dst, MemoryPool memoryP /// /// Copy serialized version to specified memory location /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] public void CopyTo(byte* destination) { if (Serialized) diff --git a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteComparer.cs b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteComparer.cs index f15e694bd7..6ab7bbf1d1 100644 --- a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteComparer.cs +++ b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteComparer.cs @@ -9,7 +9,7 @@ namespace Tsavorite.core /// /// Equality comparer for /// - public struct SpanByteComparer : ITsavoriteEqualityComparer + public struct SpanByteComparer : IKeyComparer { /// /// The default instance. 
@@ -18,7 +18,7 @@ public struct SpanByteComparer : ITsavoriteEqualityComparer public static readonly SpanByteComparer Instance = new(); /// - public unsafe long GetHashCode64(ref SpanByte spanByte) => StaticGetHashCode64(ref spanByte); + public readonly unsafe long GetHashCode64(ref SpanByte spanByte) => StaticGetHashCode64(ref spanByte); /// /// Get 64-bit hash code @@ -39,7 +39,7 @@ public static unsafe long StaticGetHashCode64(ref SpanByte spanByte) } /// - public unsafe bool Equals(ref SpanByte k1, ref SpanByte k2) => StaticEquals(ref k1, ref k2); + public readonly unsafe bool Equals(ref SpanByte k1, ref SpanByte k2) => StaticEquals(ref k1, ref k2); /// /// Equality comparison diff --git a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteFunctions.cs b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteFunctions.cs index 7e8e3834ae..ed8ef80385 100644 --- a/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteFunctions.cs +++ b/libs/storage/Tsavorite/cs/src/core/VarLen/SpanByteFunctions.cs @@ -61,6 +61,7 @@ public override bool ConcurrentWriter(ref SpanByte key, ref Input input, ref Spa /// /// Utility function for copying, Upsert version. /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static bool DoSafeCopy(ref SpanByte src, ref SpanByte dst, ref UpsertInfo upsertInfo, ref RecordInfo recordInfo) { // First get the full record length and clear it from the extra value space (if there is any). @@ -82,6 +83,7 @@ public static bool DoSafeCopy(ref SpanByte src, ref SpanByte dst, ref UpsertInfo /// /// Utility function for copying, RMW version. /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static bool DoSafeCopy(ref SpanByte src, ref SpanByte dst, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { // See comments in upsertInfo overload of this function. 
@@ -94,41 +96,6 @@ public static bool DoSafeCopy(ref SpanByte src, ref SpanByte dst, ref RMWInfo rm /// /// Avoids the "value = default" for added tombstone record, which do not have space for the payload public override bool SingleDeleter(ref SpanByte key, ref SpanByte value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo) => true; - - /// - public override unsafe void DisposeForRevivification(ref SpanByte key, ref SpanByte value, int newKeySize) - { - var oldKeySize = RoundUp(key.TotalSize, SpanByteAllocator.kRecordAlignment); - - // We don't have to do anything with the Value unless the new key size requires adjusting the key length. - // newKeySize == -1 means we are preserving the existing key (e.g. for in-chain revivification). - if (newKeySize < 0) - return; - - // We are changing the key size (e.g. revivification from the freelist with a new key). - // Our math here uses record alignment of keys as in the allocator, and assumes this will always be at least int alignment. - newKeySize = RoundUp(newKeySize, SpanByteAllocator.kRecordAlignment); - int keySizeChange = newKeySize - oldKeySize; - if (keySizeChange == 0) - return; - - // We are growing or shrinking. We don't care (here or in SingleWriter, InitialUpdater, CopyUpdater) what is inside the Key and Value, - // as long as we don't leave nonzero bytes after the used value space. So we just need to make sure the Value space starts immediately - // after the new key size. SingleWriter et al. will do the ShrinkSerializedLength on Value as needed. - if (keySizeChange < 0) - { - // We are shrinking the key; the Value of the new record will start after key + newKeySize, so set the new value length there. - *(int*)((byte*)Unsafe.AsPointer(ref key) + newKeySize) = value.Length - keySizeChange; // minus negative => plus positive - } - else - { - // We are growing the key; the Value of the new record will start somewhere in the middle of where the old Value was, so set the new value length there. 
- *(int*)((byte*)Unsafe.AsPointer(ref value) + keySizeChange) = value.Length - keySizeChange; - } - - // NewKeySize is (newKey).TotalSize. - key.Length = newKeySize - sizeof(int); - } } /// diff --git a/libs/storage/Tsavorite/cs/test/BasicLockTests.cs b/libs/storage/Tsavorite/cs/test/BasicLockTests.cs index 1d3610c5a5..be1817ad52 100644 --- a/libs/storage/Tsavorite/cs/test/BasicLockTests.cs +++ b/libs/storage/Tsavorite/cs/test/BasicLockTests.cs @@ -11,6 +11,23 @@ namespace Tsavorite.test.LockTests { + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. + internal sealed class LocalIntKeyComparer : IKeyComparer + { + internal int mod; + + internal LocalIntKeyComparer(int mod) => this.mod = mod; + + public bool Equals(ref int k1, ref int k2) => k1 == k2; + + public long GetHashCode64(ref int k) => Utility.GetHashCode(k % mod); + } +} + +namespace Tsavorite.test.LockTests +{ + using StructStoreFunctions = StoreFunctions>; + [TestFixture] public class BasicLockTests { @@ -60,29 +77,27 @@ public override bool SingleDeleter(ref int key, ref int value, ref DeleteInfo de } } - internal class LocalComparer : ITsavoriteEqualityComparer - { - internal int mod = numRecords; - - public bool Equals(ref int k1, ref int k2) => k1 == k2; - - public long GetHashCode64(ref int k) => Utility.GetHashCode(k % mod); - } - - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV> store; + private ClientSession> session; + private BasicContext> bContext; private IDevice log; + private LocalIntKeyComparer keyComparer = new(NumRecords); - const int numRecords = 100; - const int valueMult = 1000000; + const int NumRecords = 100; + const int ValueMult = 1000000; [SetUp] public void Setup() { DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericStringTests.log"), deleteOnClose: true); - store = new TsavoriteKV(1L << 20, 
new LogSettings { LogDevice = log, ObjectLogDevice = null }, comparer: new LocalComparer()); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log + }, StoreFunctions.Create(keyComparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new Functions()); bContext = session.BasicContext; } @@ -105,22 +120,22 @@ public void TearDown() public void FunctionsLockTest([Values(1, 20)] int numThreads) { // Populate - for (int key = 0; key < numRecords; key++) + for (var key = 0; key < NumRecords; key++) { // For this test we should be in-memory, so no pending - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); } // Update const int numIters = 500; - var tasks = Enumerable.Range(0, numThreads).Select(ii => Task.Factory.StartNew(() => UpdateFunc((ii & 1) == 0, numRecords, numIters))).ToArray(); + var tasks = Enumerable.Range(0, numThreads).Select(ii => Task.Factory.StartNew(() => UpdateFunc((ii & 1) == 0, NumRecords, numIters))).ToArray(); Task.WaitAll(tasks); // Verify - for (int key = 0; key < numRecords; key++) + for (var key = 0; key < NumRecords; key++) { - var expectedValue = key * valueMult + numThreads * numIters; - Assert.IsFalse(bContext.Read(key, out int value).IsPending); + var expectedValue = key * ValueMult + numThreads * numIters; + Assert.IsFalse(bContext.Read(key, out var value).IsPending); Assert.AreEqual(expectedValue, value); } } @@ -129,16 +144,13 @@ void UpdateFunc(bool useRMW, int numRecords, int numIters) { for (var key = 0; key < numRecords; ++key) { - for (int iter = 0; iter < numIters; iter++) + for (var iter = 0; iter < numIters; iter++) { if ((iter & 7) == 7) Assert.IsFalse(bContext.Read(key).status.IsPending); // These will both just increment the stored value, ignoring the input argument. - if (useRMW) - bContext.RMW(key, default); - else - bContext.Upsert(key, default); + _ = useRMW ? 
bContext.RMW(key, default) : bContext.Upsert(key, default); } } } @@ -148,22 +160,22 @@ void UpdateFunc(bool useRMW, int numRecords, int numIters) public unsafe void CollidingDeletedRecordTest([Values(UpdateOp.RMW, UpdateOp.Upsert)] UpdateOp updateOp, [Values(FlushMode.NoFlush, FlushMode.OnDisk)] FlushMode flushMode) { // Populate - for (int key = 0; key < numRecords; key++) + for (var key = 0; key < NumRecords; key++) { // For this test we should be in-memory, so no pending - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); } // Insert a colliding key so we don't elide the deleted key from the hash chain. - int deleteKey = numRecords / 2; - int collidingKey = deleteKey + numRecords; - Assert.IsFalse(bContext.Upsert(collidingKey, collidingKey * valueMult).IsPending); + var deleteKey = NumRecords / 2; + var collidingKey = deleteKey + NumRecords; + Assert.IsFalse(bContext.Upsert(collidingKey, collidingKey * ValueMult).IsPending); // Now make sure we did collide - HashEntryInfo hei = new(store.comparer.GetHashCode64(ref deleteKey)); + HashEntryInfo hei = new(store.storeFunctions.GetKeyHashCode64(ref deleteKey)); Assert.IsTrue(store.FindTag(ref hei), "Cannot find deleteKey entry"); Assert.Greater(hei.Address, Constants.kInvalidAddress, "Couldn't find deleteKey Address"); - long physicalAddress = store.hlog.GetPhysicalAddress(hei.Address); + var physicalAddress = store.hlog.GetPhysicalAddress(hei.Address); ref var recordInfo = ref store.hlog.GetInfo(physicalAddress); ref var lookupKey = ref store.hlog.GetKey(physicalAddress); Assert.AreEqual(collidingKey, lookupKey, "Expected collidingKey"); @@ -180,7 +192,7 @@ public unsafe void CollidingDeletedRecordTest([Values(UpdateOp.RMW, UpdateOp.Ups Assert.IsTrue(recordInfo.Tombstone, "Tombstone should be true after Delete"); if (flushMode == FlushMode.ReadOnly) - store.hlog.ShiftReadOnlyAddress(store.Log.TailAddress); + _ = 
store.hlogBase.ShiftReadOnlyAddress(store.Log.TailAddress); var status = updateOp switch { @@ -199,21 +211,21 @@ public unsafe void CollidingDeletedRecordTest([Values(UpdateOp.RMW, UpdateOp.Ups public unsafe void SetInvalidOnException([Values] UpdateOp updateOp) { // Don't modulo the hash codes. - (store.comparer as LocalComparer).mod = int.MaxValue; + keyComparer.mod = int.MaxValue; // Populate - for (int key = 0; key < numRecords; key++) + for (var key = 0; key < NumRecords; key++) { // For this test we should be in-memory, so no pending - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); } - long expectedThrowAddress = store.Log.TailAddress; + var expectedThrowAddress = store.Log.TailAddress; session.functions.throwOnInitialUpdater = true; // Delete must try with an existing key; Upsert and Delete should insert a new key - int deleteKey = numRecords / 2; - var insertKey = numRecords + 1; + var deleteKey = NumRecords / 2; + var insertKey = NumRecords + 1; // Make sure everything will create a new record. 
store.Log.FlushAndEvict(wait: true); @@ -239,7 +251,7 @@ public unsafe void SetInvalidOnException([Values] UpdateOp updateOp) Assert.IsTrue(threw, "Test should have thrown"); Assert.AreEqual(expectedThrowAddress, session.functions.initialUpdaterThrowAddress, "Unexpected throw address"); - long physicalAddress = store.hlog.GetPhysicalAddress(expectedThrowAddress); + var physicalAddress = store.hlog.GetPhysicalAddress(expectedThrowAddress); ref var recordInfo = ref store.hlog.GetInfo(physicalAddress); Assert.IsTrue(recordInfo.Invalid, "Expected Invalid record"); } diff --git a/libs/storage/Tsavorite/cs/test/BasicStorageTests.cs b/libs/storage/Tsavorite/cs/test/BasicStorageTests.cs index 92dec1d946..f693eed2c6 100644 --- a/libs/storage/Tsavorite/cs/test/BasicStorageTests.cs +++ b/libs/storage/Tsavorite/cs/test/BasicStorageTests.cs @@ -8,11 +8,12 @@ namespace Tsavorite.test { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class BasicStorageTests { - private TsavoriteKV store; - [Test] [Category("TsavoriteKV")] public void LocalStorageWriteRead() @@ -45,7 +46,7 @@ public void TieredWriteRead() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir); IDevice tested; - IDevice localDevice = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests.log"), deleteOnClose: true, capacity: 1 << 30); + IDevice localDevice = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests.log"), deleteOnClose: true, capacity: 1L << 30); if (TestUtils.IsRunningAzureTests) { IDevice cloudDevice = new AzureStorageDevice(TestUtils.AzureEmulatedStorageString, TestUtils.AzureTestContainer, TestUtils.AzureTestDirectory, "BasicDiskTests", logger: TestUtils.TestLoggerFactory.CreateLogger("asd")); @@ -54,7 +55,7 @@ public void TieredWriteRead() else { // If no Azure is enabled, just use another disk - IDevice localDevice2 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, 
"BasicDiskTests2.log"), deleteOnClose: true, capacity: 1 << 30); + IDevice localDevice2 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests2.log"), deleteOnClose: true, capacity: 1L << 30); tested = new TieredStorageDevice(1, localDevice, localDevice2); } @@ -66,8 +67,8 @@ public void TieredWriteRead() [Category("Smoke")] public void ShardedWriteRead() { - IDevice localDevice1 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests1.log"), deleteOnClose: true, capacity: 1 << 30); - IDevice localDevice2 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests2.log"), deleteOnClose: true, capacity: 1 << 30); + IDevice localDevice1 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests1.log"), deleteOnClose: true, capacity: 1L << 30); + IDevice localDevice2 = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "BasicDiskTests2.log"), deleteOnClose: true, capacity: 1L << 30); var device = new ShardedStorageDevice(new UniformPartitionScheme(512, localDevice1, localDevice2)); TestDeviceWriteRead(device); } @@ -92,10 +93,18 @@ public void OmitSegmentIdTest([Values] TestUtils.DeviceType deviceType) } } - void TestDeviceWriteRead(IDevice log) + static void TestDeviceWriteRead(IDevice log) { - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10 }); + var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10, + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); var session = store.NewSession(new Functions()); var bContext = session.BasicContext; @@ -106,9 +115,9 @@ void TestDeviceWriteRead(IDevice log) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 
Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); // Update first 100 using RMW from storage for (int i = 0; i < 100; i++) @@ -117,7 +126,7 @@ void TestDeviceWriteRead(IDevice log) input = new InputStruct { ifield1 = 1, ifield2 = 1 }; var status = bContext.RMW(ref key1, ref input, Empty.Default); if (status.IsPending) - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } @@ -129,7 +138,7 @@ void TestDeviceWriteRead(IDevice log) if (bContext.Read(ref key1, ref input, ref output, Empty.Default).IsPending) { - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } else { diff --git a/libs/storage/Tsavorite/cs/test/BasicTests.cs b/libs/storage/Tsavorite/cs/test/BasicTests.cs index 7ee5b2034e..86cebb5ab0 100644 --- a/libs/storage/Tsavorite/cs/test/BasicTests.cs +++ b/libs/storage/Tsavorite/cs/test/BasicTests.cs @@ -5,7 +5,6 @@ using System.Diagnostics; using System.IO; using System.Linq; -using System.Threading.Tasks; using NUnit.Framework; using NUnit.Framework.Internal; using Tsavorite.core; @@ -13,15 +12,21 @@ namespace Tsavorite.test { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + //** NOTE - more detailed / in depth Read tests in ReadAddressTests.cs //** These tests ensure the basics are fully covered [TestFixture] internal class BasicTests { - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; DeviceType deviceType; @@ -29,15 +34,22 @@ internal class BasicTests public void Setup() { // Clean up log files from previous test runs in case they weren't cleaned up - DeleteDirectory(TestUtils.MethodTestDir, wait: true); + 
DeleteDirectory(MethodTestDir, wait: true); } - private void Setup(long size, LogSettings logSettings, DeviceType deviceType, int latencyMs = DefaultLocalMemoryDeviceLatencyMs) + private void Setup(KVSettings kvSettings, DeviceType deviceType, int latencyMs = DefaultLocalMemoryDeviceLatencyMs) { + kvSettings.IndexSize = 1L << 13; + string filename = Path.Join(MethodTestDir, TestContext.CurrentContext.Test.Name + deviceType.ToString() + ".log"); log = CreateTestDevice(deviceType, filename, latencyMs: latencyMs); - logSettings.LogDevice = log; - store = new TsavoriteKV(size, logSettings); + kvSettings.LogDevice = log; + + store = new(kvSettings + , StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + session = store.NewSession(new Functions()); bContext = session.BasicContext; } @@ -63,7 +75,7 @@ private void AssertCompleted(Status expected, Status actual) private (Status status, OutputStruct output) CompletePendingResult() { - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); return GetSinglePendingResult(completedOutputs); } @@ -72,7 +84,7 @@ private void AssertCompleted(Status expected, Status actual) [Category("Smoke")] public void NativeInMemWriteRead([Values] DeviceType deviceType) { - Setup(128, new LogSettings { PageSizeBits = 10, MemorySizeBits = 12, SegmentSizeBits = 22 }, deviceType); + Setup(new() { PageSize = 1L << 10, MemorySize = 1L << 12, SegmentSize = 1L << 22 }, deviceType); InputStruct input = default; OutputStruct output = default; @@ -80,7 +92,7 @@ public void NativeInMemWriteRead([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = 
bContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -93,7 +105,7 @@ public void NativeInMemWriteRead([Values] DeviceType deviceType) [Category("Smoke")] public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) { - Setup(128, new LogSettings { PageSizeBits = 10, MemorySizeBits = 12, SegmentSizeBits = 22 }, deviceType); + Setup(new() { PageSize = 1L << 10, MemorySize = 1L << 12, SegmentSize = 1L << 22 }, deviceType); InputStruct input = default; OutputStruct output = default; @@ -101,11 +113,11 @@ public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); - bContext.Delete(ref key1, Empty.Default); + _ = bContext.Delete(ref key1, Empty.Default); status = bContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.NotFound), status); @@ -113,7 +125,7 @@ public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) var key2 = new KeyStruct { kfield1 = 14, kfield2 = 15 }; var value2 = new ValueStruct { vfield1 = 24, vfield2 = 25 }; - bContext.Upsert(ref key2, ref value2, Empty.Default); + _ = bContext.Upsert(ref key2, ref value2, Empty.Default); status = bContext.Read(ref key2, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -132,27 +144,26 @@ public void NativeInMemWriteReadDelete2() const int count = 10; - // Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); InputStruct input 
= default; OutputStruct output = default; - for (int i = 0; i < 10 * count; i++) + for (var i = 0; i < 10 * count; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; var value = new ValueStruct { vfield1 = i, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } - for (int i = 0; i < 10 * count; i++) + for (var i = 0; i < 10 * count; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; - bContext.Delete(ref key1, Empty.Default); + _ = bContext.Delete(ref key1, Empty.Default); } - for (int i = 0; i < 10 * count; i++) + for (var i = 0; i < 10 * count; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; var value = new ValueStruct { vfield1 = i, vfield2 = 24 }; @@ -160,10 +171,10 @@ public void NativeInMemWriteReadDelete2() var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.NotFound), status); - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } - for (int i = 0; i < 10 * count; i++) + for (var i = 0; i < 10 * count; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); @@ -179,26 +190,25 @@ public unsafe void NativeInMemWriteRead2() // Just use this one instead of all four devices since InMemWriteRead covers all four devices deviceType = DeviceType.MLSD; - int count = 200; + const int count = 200; - // Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); session = store.NewSession(new Functions()); InputStruct input = default; Random r = new(10); - for (int c = 0; c < count; c++) + for (var c = 0; c < count; c++) { var i = r.Next(10000); var key1 = new KeyStruct { kfield1 = i, kfield2 = 
i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } r = new Random(10); - for (int c = 0; c < count; c++) + for (var c = 0; c < count; c++) { var i = r.Next(10000); OutputStruct output = default; @@ -207,7 +217,7 @@ public unsafe void NativeInMemWriteRead2() if (bContext.Read(ref key1, ref input, ref output, Empty.Default).IsPending) { - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } Assert.AreEqual(value.vfield1, output.value.vfield1); @@ -218,7 +228,7 @@ public unsafe void NativeInMemWriteRead2() store.Log.ShiftBeginAddress(store.Log.TailAddress, truncateLog: true); r = new Random(10); - for (int c = 0; c < count; c++) + for (var c = 0; c < count; c++) { var i = r.Next(10000); OutputStruct output = default; @@ -241,20 +251,20 @@ public unsafe void TestShiftHeadAddress([Values] DeviceType deviceType, [Values] var sw = Stopwatch.StartNew(); var latencyMs = batchMode == BatchMode.NoBatch ? 
0 : DefaultLocalMemoryDeviceLatencyMs; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType, latencyMs: latencyMs); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType, latencyMs: latencyMs); - for (int c = 0; c < NumRecs; c++) + for (var c = 0; c < NumRecs; c++) { var i = r.Next(RandRange); var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } r = new Random(RandSeed); sw.Restart(); - for (int c = 0; c < NumRecs; c++) + for (var c = 0; c < NumRecs; c++) { var i = r.Next(RandRange); OutputStruct output = default; @@ -267,7 +277,7 @@ public unsafe void TestShiftHeadAddress([Values] DeviceType deviceType, [Values] Assert.AreEqual(value.vfield2, output.value.vfield2); } } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); // Shift head and retry - should not find in main memory now store.Log.FlushAndEvict(true); @@ -276,17 +286,17 @@ public unsafe void TestShiftHeadAddress([Values] DeviceType deviceType, [Values] sw.Restart(); const int batchSize = 256; - for (int c = 0; c < NumRecs; c++) + for (var c = 0; c < NumRecs; c++) { var i = r.Next(RandRange); OutputStruct output = default; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; - Status foundStatus = bContext.Read(ref key1, ref input, ref output, Empty.Default); + var foundStatus = bContext.Read(ref key1, ref input, ref output, Empty.Default); Assert.IsTrue(foundStatus.IsPending); if (batchMode == BatchMode.NoBatch) { Status status; - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(status.Found, status.ToString()); Assert.AreEqual(key1.kfield1, 
output.value.vfield1); @@ -295,8 +305,8 @@ public unsafe void TestShiftHeadAddress([Values] DeviceType deviceType, [Values] } else if (c > 0 && (c % batchSize) == 0) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); - int count = 0; + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); + var count = 0; while (outputs.Next()) { count++; @@ -317,33 +327,31 @@ public unsafe void NativeInMemRMWRefKeys([Values] DeviceType deviceType) InputStruct input = default; OutputStruct output = default; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); var nums = Enumerable.Range(0, 1000).ToArray(); var rnd = new Random(11); - for (int i = 0; i < nums.Length; ++i) + for (var i = 0; i < nums.Length; ++i) { - int randomIndex = rnd.Next(nums.Length); - int temp = nums[randomIndex]; - nums[randomIndex] = nums[i]; - nums[i] = temp; + var randomIndex = rnd.Next(nums.Length); + (nums[i], nums[randomIndex]) = (nums[randomIndex], nums[i]); } - for (int j = 0; j < nums.Length; ++j) + for (var j = 0; j < nums.Length; ++j) { var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - bContext.RMW(ref key1, ref input, Empty.Default); + _ = bContext.RMW(ref key1, ref input, Empty.Default); } - for (int j = 0; j < nums.Length; ++j) + for (var j = 0; j < nums.Length; ++j) { var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; if (bContext.RMW(ref key1, ref input, ref output, Empty.Default).IsPending) { - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } else { @@ -355,7 +363,7 @@ public unsafe void NativeInMemRMWRefKeys([Values] DeviceType deviceType) Status status; KeyStruct key; - for (int j = 0; j < nums.Length; ++j) + for 
(var j = 0; j < nums.Length; ++j) { var i = nums[j]; @@ -381,41 +389,39 @@ public unsafe void NativeInMemRMWNoRefKeys([Values] DeviceType deviceType) { InputStruct input = default; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); var nums = Enumerable.Range(0, 1000).ToArray(); var rnd = new Random(11); - for (int i = 0; i < nums.Length; ++i) + for (var i = 0; i < nums.Length; ++i) { - int randomIndex = rnd.Next(nums.Length); - int temp = nums[randomIndex]; - nums[randomIndex] = nums[i]; - nums[i] = temp; + var randomIndex = rnd.Next(nums.Length); + (nums[i], nums[randomIndex]) = (nums[randomIndex], nums[i]); } // InitialUpdater - for (int j = 0; j < nums.Length; ++j) + for (var j = 0; j < nums.Length; ++j) { var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - bContext.RMW(ref key1, ref input, Empty.Default); + _ = bContext.RMW(ref key1, ref input, Empty.Default); } // CopyUpdater - for (int j = 0; j < nums.Length; ++j) + for (var j = 0; j < nums.Length; ++j) { var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - bContext.RMW(key1, input); // no ref and do not set any other params + _ = bContext.RMW(key1, input); // no ref and do not set any other params } OutputStruct output = default; Status status; KeyStruct key; - for (int j = 0; j < nums.Length; ++j) + for (var j = 0; j < nums.Length; ++j) { var i = nums[j]; @@ -442,13 +448,13 @@ public void ReadNoRefKeyInputOutput([Values] DeviceType deviceType) { InputStruct input = default; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); var key1 = 
new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); - var status = bContext.Read(key1, input, out OutputStruct output, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); + var status = bContext.Read(key1, input, out var output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); // Verify the read data @@ -463,13 +469,13 @@ public void ReadNoRefKeyInputOutput([Values] DeviceType deviceType) [Category("TsavoriteKV")] public void ReadNoRefKey([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); - var status = bContext.Read(key1, out OutputStruct output, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); + var status = bContext.Read(key1, out var output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); // Verify the read data @@ -486,14 +492,14 @@ public void ReadNoRefKey([Values] DeviceType deviceType) [Category("Smoke")] public void ReadWithoutInput([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); OutputStruct output = default; var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = bContext.Read(ref key1, ref output, Empty.Default); 
AssertCompleted(new(StatusCode.Found), status); @@ -510,12 +516,12 @@ public void ReadWithoutInput([Values] DeviceType deviceType) [Category("Smoke")] public void ReadBareMinParams([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var (status, output) = bContext.Read(key1); AssertCompleted(new(StatusCode.Found), status); @@ -534,7 +540,7 @@ public void ReadAtAddressDefaultOptions() // Just functional test of ReadFlag so one device is enough deviceType = DeviceType.MLSD; - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); InputStruct input = default; OutputStruct output = default; @@ -543,7 +549,7 @@ public void ReadAtAddressDefaultOptions() var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; ReadOptions readOptions = default; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = bContext.ReadAtAddress(store.Log.BeginAddress, ref input, ref output, ref readOptions, out _, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -589,7 +595,7 @@ public void ReadAtAddressIgnoreReadCache() // Another ReadFlag functional test so one device is enough deviceType = DeviceType.MLSD; - Setup(128, new LogSettings { MemorySizeBits = 29, ReadCacheSettings = new ReadCacheSettings() }, deviceType); + Setup(new() { MemorySize = 1L << 29, ReadCacheEnabled = true }, deviceType); SkipReadCacheFunctions functions = new(); using var skipReadCacheSession = store.NewSession(functions); @@ -602,7 +608,7 @@ public void 
ReadAtAddressIgnoreReadCache() var readAtAddress = store.Log.BeginAddress; Status status; - skipReadCachebContext.Upsert(ref key1, ref value, Empty.Default); + _ = skipReadCachebContext.Upsert(ref key1, ref value, Empty.Default); void VerifyOutput() { @@ -617,7 +623,7 @@ void VerifyResult() { if (status.IsPending) { - skipReadCachebContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = skipReadCachebContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, output) = GetSinglePendingResult(completedOutputs); } Assert.IsTrue(status.Found); @@ -664,7 +670,7 @@ void VerifyResult() [Category("Smoke")] public void UpsertDefaultsTest([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); InputStruct input = default; OutputStruct output = default; @@ -674,7 +680,7 @@ public void UpsertDefaultsTest([Values] DeviceType deviceType) Assert.AreEqual(0, store.EntryCount); - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -692,7 +698,7 @@ public void UpsertNoRefNoDefaultsTest() // Just checking more parameter values so one device is enough deviceType = DeviceType.MLSD; - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); InputStruct input = default; OutputStruct output = default; @@ -700,7 +706,7 @@ public void UpsertNoRefNoDefaultsTest() var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(key1, value, Empty.Default); + _ = bContext.Upsert(key1, value, Empty.Default); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); 
AssertCompleted(new(StatusCode.Found), status); @@ -716,17 +722,26 @@ public void UpsertNoRefNoDefaultsTest() public static void KVBasicsSampleEndToEndInDocs() { using var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: false); - using var store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log }); + + using var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + using var s = store.NewSession>(new SimpleSimpleFunctions()); var bContext = s.BasicContext; long key = 1, value = 1, input = 10, output = 0; - bContext.Upsert(ref key, ref value); - bContext.Read(ref key, ref output); + _ = bContext.Upsert(ref key, ref value); + _ = bContext.Read(ref key, ref output); Assert.AreEqual(value, output); - bContext.RMW(ref key, ref input); - bContext.RMW(ref key, ref input); - bContext.Read(ref key, ref output); + _ = bContext.RMW(ref key, ref input); + _ = bContext.RMW(ref key, ref input); + _ = bContext.Read(ref key, ref output); Assert.AreEqual(10, output); } @@ -739,51 +754,24 @@ public static void LogPathtooLong() string testDir = new('x', Native32.WIN32_MAX_PATH - 11); // As in LSD, -11 for "." 
using var log = Devices.CreateLogDevice(testDir, deleteOnClose: true); // Should succeed - Assert.Throws(typeof(TsavoriteException), () => Devices.CreateLogDevice(testDir + "y", deleteOnClose: true)); + _ = Assert.Throws(typeof(TsavoriteException), () => Devices.CreateLogDevice(testDir + "y", deleteOnClose: true)); } [Test] [Category("TsavoriteKV")] - public static void UshortKeyByteValueTest() + public static void BasicSyncOperationsTest() { using var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: false); - using var store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log }); - using var s = store.NewSession>(new SimpleSimpleFunctions()); - var bContext = s.BasicContext; - ushort key = 1024; - byte value = 1, input = 10, output = 0; - - // For blittable types, the records are not 8-byte aligned; RecordSize is sizeof(RecordInfo) + sizeof(ushort) + sizeof(byte) - const int expectedRecordSize = sizeof(long) + sizeof(ushort) + sizeof(byte); - Assert.AreEqual(11, expectedRecordSize); - long prevTailLogicalAddress = store.hlog.GetTailAddress(); - long prevTailPhysicalAddress = store.hlog.GetPhysicalAddress(prevTailLogicalAddress); - for (var ii = 0; ii < 5; ++ii, ++key, ++value, ++input) - { - output = 0; - bContext.Upsert(ref key, ref value); - bContext.Read(ref key, ref output); - Assert.AreEqual(value, output); - bContext.RMW(ref key, ref input); - bContext.Read(ref key, ref output); - Assert.AreEqual(input, output); - - var tailLogicalAddress = store.hlog.GetTailAddress(); - Assert.AreEqual(expectedRecordSize, tailLogicalAddress - prevTailLogicalAddress); - long tailPhysicalAddress = store.hlog.GetPhysicalAddress(tailLogicalAddress); - Assert.AreEqual(expectedRecordSize, tailPhysicalAddress - prevTailPhysicalAddress); - prevTailLogicalAddress = tailLogicalAddress; - prevTailPhysicalAddress = tailPhysicalAddress; - } - } + using var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + }, 
StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); - [Test] - [Category("TsavoriteKV")] - public static void BasicSyncOperationsTest() - { - using var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: false); - using var store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log }); using var session = store.NewSession>(new SimpleSimpleFunctions()); var bContext = session.BasicContext; @@ -794,10 +782,10 @@ public static void BasicSyncOperationsTest() Status status; long output; - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult; - hashes[key] = store.comparer.GetHashCode64(ref key); + var value = key + valueMult; + hashes[key] = store.storeFunctions.GetKeyHashCode64(ref key); status = bContext.Upsert(key, value); Assert.IsTrue(status.Record.Created, status.ToString()); status = bContext.Read(key, out output); @@ -808,9 +796,9 @@ public static void BasicSyncOperationsTest() void doUpdate(bool useRMW) { // Update and Read without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult * 2; + var value = key + valueMult * 2; if (useRMW) { status = bContext.RMW(key, value); @@ -827,9 +815,9 @@ void doUpdate(bool useRMW) } // Update and Read with keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult * 3; + var value = key + valueMult * 3; if (useRMW) { RMWOptions rmwOptions = new() { KeyHash = hashes[key] }; @@ -853,7 +841,7 @@ void doUpdate(bool useRMW) doUpdate(useRMW: true); // Delete without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { status = bContext.Delete(key); Assert.IsTrue(status.Found, status.ToString()); @@ -862,7 +850,7 @@ void doUpdate(bool useRMW) 
} // Update and Read without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { DeleteOptions deleteOptions = new() { KeyHash = hashes[key] }; status = bContext.Delete(key, ref deleteOptions); @@ -877,7 +865,16 @@ void doUpdate(bool useRMW) public static void BasicOperationsTest() { using var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: false); - using var store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log }); + + using var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + using var session = store.NewSession>(new SimpleSimpleFunctions()); var bContext = session.BasicContext; @@ -888,10 +885,10 @@ public static void BasicOperationsTest() Status status; long output; - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult; - hashes[key] = store.comparer.GetHashCode64(ref key); + var value = key + valueMult; + hashes[key] = store.storeFunctions.GetKeyHashCode64(ref key); status = bContext.Upsert(key, value); Assert.IsTrue(status.Record.Created, status.ToString()); (status, output) = bContext.Read(key); @@ -902,9 +899,9 @@ public static void BasicOperationsTest() void doUpdate(bool useRMW) { // Update and Read without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult * 2; + var value = key + valueMult * 2; if (useRMW) { status = bContext.RMW(key, value); @@ -921,9 +918,9 @@ void doUpdate(bool useRMW) } // Update and Read with keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { - long value = key + valueMult * 3; + var value = key + valueMult * 3; if (useRMW) { RMWOptions rmwOptions = new() { 
KeyHash = hashes[key] }; @@ -947,7 +944,7 @@ void doUpdate(bool useRMW) doUpdate(useRMW: true); // Delete without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { status = bContext.Delete(key); Assert.IsTrue(status.Found, status.ToString()); @@ -956,7 +953,7 @@ void doUpdate(bool useRMW) } // Update and Read without keyHash - for (long key = 0; key < numRecords; key++) + for (var key = 0L; key < numRecords; key++) { DeleteOptions deleteOptions = new() { KeyHash = hashes[key] }; status = bContext.Delete(key, ref deleteOptions); diff --git a/libs/storage/Tsavorite/cs/test/BlittableIterationTests.cs b/libs/storage/Tsavorite/cs/test/BlittableIterationTests.cs index fe8ba9738c..2d83413908 100644 --- a/libs/storage/Tsavorite/cs/test/BlittableIterationTests.cs +++ b/libs/storage/Tsavorite/cs/test/BlittableIterationTests.cs @@ -11,10 +11,12 @@ namespace Tsavorite.test { + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class BlittableIterationTests { - private TsavoriteKV store; + private TsavoriteKV> store; private IDevice log; [SetUp] @@ -50,9 +52,9 @@ public bool SingleReader(ref KeyStruct key, ref ValueStruct value, RecordMetadat public bool ConcurrentReader(ref KeyStruct key, ref ValueStruct value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); - public bool OnStart(long beginAddress, long endAddress) => true; - public void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly bool OnStart(long beginAddress, long endAddress) => true; + public readonly void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnStop(bool completed, long numberOfRecords) { } } [Test] @@ -61,8 +63,18 @@ public void OnStop(bool completed, long numberOfRecords) 
{ } public void BlittableIterationBasicTest([Values] DeviceType deviceType, [Values] ScanIteratorType scanIteratorType) { log = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"{deviceType}.log")); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9, SegmentSizeBits = 22 }); + + store = new( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new FunctionsCompaction()); var bContext = session.BasicContext; @@ -80,7 +92,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { using var iter = session.Iterate(); while (iter.GetNext(out var recordInfo)) - scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); + _ = scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); } else Assert.IsTrue(session.Iterate(ref scanIteratorFunctions), $"Failed to complete push iteration; numRecords = {scanIteratorFunctions.numRecords}"); @@ -89,50 +101,50 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) } // Initial population - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(1, totalRecords); - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = 2 * i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(2, 
totalRecords); - for (int i = totalRecords / 2; i < totalRecords; i++) + for (var i = totalRecords / 2; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(0, totalRecords); - for (int i = 0; i < totalRecords; i += 2) + for (var i = 0; i < totalRecords; i += 2) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(0, totalRecords); - for (int i = 0; i < totalRecords; i += 2) + for (var i = 0; i < totalRecords; i += 2) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; - bContext.Delete(ref key1); + _ = bContext.Delete(ref key1); } iterateAndVerify(0, totalRecords / 2); - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = 3 * i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(3, totalRecords); @@ -146,8 +158,18 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) public void BlittableIterationPushStopTest() { log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "stop_test.log")); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9, SegmentSizeBits = 22 }); + + store = new( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new FunctionsCompaction()); var bContext = 
session.BasicContext; @@ -168,11 +190,11 @@ void scanAndVerify(int stopAt, bool useScan) } // Initial population - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } scanAndVerify(42, useScan: true); @@ -185,9 +207,19 @@ void scanAndVerify(int stopAt, bool useScan) public unsafe void BlittableIterationPushLockTest([Values(1, 4)] int scanThreads, [Values(1, 4)] int updateThreads, [Values] ScanMode scanMode) { log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "lock_test.log")); + // Must be large enough to contain all records in memory to exercise locking - store = new TsavoriteKV(1L << 20, - new LogSettings { LogDevice = log, MemorySizeBits = 25, PageSizeBits = 20, SegmentSizeBits = 22 }); + store = new( + new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 25, + PageSize = 1L << 20, + SegmentSize = 1L << 22 + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); const int totalRecords = 2000; var start = store.Log.TailAddress; @@ -209,11 +241,11 @@ void LocalUpdate(int tid) var bContext = session.BasicContext; for (var iteration = 0; iteration < 2; ++iteration) { - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = (tid + 1) * i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } } } @@ -221,17 +253,17 @@ void LocalUpdate(int tid) { // Initial population using var session = store.NewSession(new FunctionsCompaction()); var bContext = session.BasicContext; - for (int i = 0; i < totalRecords; i++) + for (var i = 0; i < totalRecords; 
i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. var numThreads = scanThreads + updateThreads; - for (int t = 0; t < numThreads; t++) + for (var t = 0; t < numThreads; t++) { var tid = t; if (t < scanThreads) @@ -239,7 +271,7 @@ void LocalUpdate(int tid) else tasks.Add(Task.Factory.StartNew(() => LocalUpdate(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/BlittableLogCompactionTests.cs b/libs/storage/Tsavorite/cs/test/BlittableLogCompactionTests.cs index 778859a966..1d9c303f50 100644 --- a/libs/storage/Tsavorite/cs/test/BlittableLogCompactionTests.cs +++ b/libs/storage/Tsavorite/cs/test/BlittableLogCompactionTests.cs @@ -9,30 +9,36 @@ using static Tsavorite.test.TestUtils; #pragma warning disable IDE0060 // Remove unused parameter == Some parameters are just to let [Setup] know what to do - namespace Tsavorite.test { - [TestFixture] - public class BlittableLogCompactionTests + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ struct HashModuloKeyStructComparer : IKeyComparer { - private TsavoriteKV store; - private IDevice log; - - struct HashModuloComparer : ITsavoriteEqualityComparer - { - readonly HashModulo modRange; + readonly HashModulo modRange; - internal HashModuloComparer(HashModulo mod) => modRange = mod; + internal HashModuloKeyStructComparer(HashModulo mod) => modRange = mod; - public bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.kfield1 == k2.kfield1; + public readonly bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.kfield1 == k2.kfield1; - // Force collisions to create a chain - public long GetHashCode64(ref KeyStruct k) - { - var value = Utility.GetHashCode(k.kfield1); - return modRange != HashModulo.NoMod ? value % (long)modRange : value; - } + // Force collisions to create a chain + public readonly long GetHashCode64(ref KeyStruct k) + { + var value = Utility.GetHashCode(k.kfield1); + return modRange != HashModulo.NoMod ? value % (long)modRange : value; } + } +} + +namespace Tsavorite.test +{ + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + + [TestFixture] + public class BlittableLogCompactionTests + { + private TsavoriteKV store; + private IDevice log; [SetUp] public void Setup() @@ -50,8 +56,15 @@ public void Setup() } } - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9 }, comparer: new HashModuloComparer(hashMod)); ; + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9 + }, StoreFunctions.Create(new HashModuloKeyStructComparer(hashMod)) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -64,7 +77,7 @@ public void TearDown() DeleteDirectory(MethodTestDir); } - void VerifyRead(ClientSession session, int totalRecords, Func isDeleted) + static void VerifyRead(ClientSession session, int totalRecords, Func isDeleted) { InputStruct input 
= default; int numPending = 0; @@ -137,7 +150,7 @@ public void BlittableLogCompactionTest1([Values] CompactionType compactionType) var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } store.Log.FlushAndEvict(wait: true); @@ -147,7 +160,7 @@ public void BlittableLogCompactionTest1([Values] CompactionType compactionType) Assert.AreEqual(compactUntil, store.Log.BeginAddress); // Read all keys - all should be present - VerifyRead(session, totalRecords, key => false); + BlittableLogCompactionTests.VerifyRead(session, totalRecords, key => false); } [Test] @@ -169,7 +182,7 @@ public void BlittableLogCompactionTest2([Values] CompactionType compactionType, var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } store.Log.FlushAndEvict(true); @@ -187,7 +200,7 @@ public void BlittableLogCompactionTest2([Values] CompactionType compactionType, { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } compactUntil = session.Compact(compactUntil, compactionType); @@ -195,7 +208,7 @@ public void BlittableLogCompactionTest2([Values] CompactionType compactionType, Assert.AreEqual(compactUntil, store.Log.BeginAddress); // Read all keys - all should be present - VerifyRead(session, totalRecords, key => false); + BlittableLogCompactionTests.VerifyRead(session, totalRecords, key => false); } [Test] @@ -217,13 +230,13 @@ public void BlittableLogCompactionTest3([Values] CompactionType compactionType) var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - 
bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); if (i % 8 == 0) { int j = i / 4; key1 = new KeyStruct { kfield1 = j, kfield2 = j + 1 }; - bContext.Delete(ref key1, 0); + _ = bContext.Delete(ref key1, 0); } } @@ -233,7 +246,7 @@ public void BlittableLogCompactionTest3([Values] CompactionType compactionType) Assert.AreEqual(compactUntil, store.Log.BeginAddress); // Read all keys - all should be present except those we deleted - VerifyRead(session, totalRecords, key => (key < totalRecords / 4) && (key % 2 == 0)); + BlittableLogCompactionTests.VerifyRead(session, totalRecords, key => (key < totalRecords / 4) && (key % 2 == 0)); } [Test] @@ -259,7 +272,7 @@ public void BlittableLogCompactionCustomFunctionsTest1([Values] CompactionType c var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } var tail = store.Log.TailAddress; @@ -314,14 +327,14 @@ public void BlittableLogCompactionCustomFunctionsTest2([Values] CompactionType c var input = default(InputStruct); var output = default(OutputStruct); - bContext.Upsert(ref key, ref value, 0); + _ = bContext.Upsert(ref key, ref value, 0); var status = bContext.Read(ref key, ref input, ref output, 0); Debug.Assert(status.Found); store.Log.Flush(true); value = new ValueStruct { vfield1 = 11, vfield2 = 21 }; - bContext.Upsert(ref key, ref value, 0); + _ = bContext.Upsert(ref key, ref value, 0); status = bContext.Read(ref key, ref input, ref output, 0); Debug.Assert(status.Found); @@ -347,7 +360,7 @@ public void BlittableLogCompactionCustomFunctionsTest2([Values] CompactionType c private struct EvenCompactionFunctions : ICompactionFunctions { - public bool IsDeleted(ref KeyStruct key, ref ValueStruct value) => value.vfield1 % 2 != 0; + public readonly bool IsDeleted(ref KeyStruct key, ref ValueStruct value) => value.vfield1 % 2 != 0; } } 
} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/BlittableLogScanTests.cs b/libs/storage/Tsavorite/cs/test/BlittableLogScanTests.cs index b4963d7493..c3b599d33b 100644 --- a/libs/storage/Tsavorite/cs/test/BlittableLogScanTests.cs +++ b/libs/storage/Tsavorite/cs/test/BlittableLogScanTests.cs @@ -9,51 +9,62 @@ namespace Tsavorite.test { - [TestFixture] - internal class BlittableLogScanTests + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. + struct KeyStructComparerModulo : IKeyComparer { - private TsavoriteKV store; - private IDevice log; - const int totalRecords = 2000; - const int PageSizeBits = 10; - - struct KeyStructComparerModulo : ITsavoriteEqualityComparer - { - readonly long mod; + readonly long mod; - internal KeyStructComparerModulo(long mod) => this.mod = mod; + internal KeyStructComparerModulo(long mod) => this.mod = mod; - public bool Equals(ref KeyStruct k1, ref KeyStruct k2) - { - return k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; - } + public readonly bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; - // Force collisions to create a chain - public long GetHashCode64(ref KeyStruct key) - { - long hash = Utility.GetHashCode(key.kfield1); - return mod > 0 ? hash % mod : hash; - } + // Force collisions to create a chain + public readonly long GetHashCode64(ref KeyStruct key) + { + long hash = Utility.GetHashCode(key.kfield1); + return mod > 0 ? 
hash % mod : hash; } + } +} + +namespace Tsavorite.test +{ + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + + [TestFixture] + internal class BlittableLogScanTests + { + private TsavoriteKV store; + private IDevice log; + const int TotalRecords = 2000; + const int PageSizeBits = 10; [SetUp] public void Setup() { DeleteDirectory(MethodTestDir, wait: true); - ITsavoriteEqualityComparer comparer = null; + KeyStructComparerModulo comparer = new(0); foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is HashModulo mod && mod == HashModulo.Hundred) { - comparer = new KeyStructComparerModulo(100); + comparer = new(100); continue; } } log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log"), deleteOnClose: true); - store = new TsavoriteKV(1L << 20, - new LogSettings { LogDevice = log, MemorySizeBits = 24, PageSizeBits = PageSizeBits }, comparer: comparer); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 24, + PageSize = 1L << PageSizeBits + }, StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -70,7 +81,7 @@ internal struct BlittablePushScanTestFunctions : IScanIteratorFunctions true; + public readonly bool OnStart(long beginAddress, long endAddress) => true; public bool ConcurrentReader(ref KeyStruct key, ref ValueStruct value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); @@ -87,9 +98,9 @@ public bool SingleReader(ref KeyStruct key, ref ValueStruct value, RecordMetadat return true; } - public void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly void 
OnStop(bool completed, long numberOfRecords) { } } [Test] @@ -104,11 +115,11 @@ public void BlittableDiskWriteScan([Values] ScanIteratorType scanIteratorType) using var s = store.Log.Subscribe(new LogObserver()); var start = store.Log.TailAddress; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); } store.Log.FlushAndEvict(true); @@ -121,12 +132,12 @@ void scanAndVerify(ScanBufferingMode sbm) { using var iter = store.Log.Scan(start, store.Log.TailAddress, sbm); while (iter.GetNext(out var recordInfo)) - scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); + _ = scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); } else Assert.IsTrue(store.Log.Scan(ref scanIteratorFunctions, start, store.Log.TailAddress, sbm), "Failed to complete push iteration"); - Assert.AreEqual(totalRecords, scanIteratorFunctions.numRecords); + Assert.AreEqual(TotalRecords, scanIteratorFunctions.numRecords); } scanAndVerify(ScanBufferingMode.SinglePageBuffering); @@ -155,7 +166,7 @@ public void BlittableScanJumpToBeginAddressTest() } var key = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key, ref value, Empty.Default); + _ = bContext.Upsert(ref key, ref value, Empty.Default); } using var iter = store.Log.Scan(store.Log.HeadAddress, store.Log.TailAddress); @@ -199,16 +210,16 @@ public override bool SingleWriter(ref KeyStruct key, ref InputStruct input, ref public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] HashModulo hashMod) { const long PageSize = 1L << PageSizeBits; - var recordSize = BlittableAllocator.RecordSize; + 
var recordSize = BlittableAllocatorImpl.RecordSize; using var session = store.NewSession(new ScanFunctions()); var bContext = session.BasicContext; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } var scanCursorFuncs = new ScanCursorFuncs(); @@ -225,7 +236,7 @@ public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred scanCursorFuncs.Initialize(verifyKeys: true); while (session.ScanCursor(ref cursor, counts[iCount], scanCursorFuncs, endAddresses[iAddr])) ; - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 1"); } } @@ -238,25 +249,25 @@ public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred // Scan and verify we see them all scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 2"); // Add another totalRecords, with keys incremented by totalRecords to remain distinct, and verify we see all keys. 
- for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { - var key1 = new KeyStruct { kfield1 = i + totalRecords, kfield2 = i + totalRecords + 1 }; - var value = new ValueStruct { vfield1 = i + totalRecords, vfield2 = i + totalRecords + 1 }; - bContext.Upsert(ref key1, ref value); + var key1 = new KeyStruct { kfield1 = i + TotalRecords, kfield2 = i + TotalRecords + 1 }; + var value = new ValueStruct { vfield1 = i + TotalRecords, vfield2 = i + TotalRecords + 1 }; + _ = bContext.Upsert(ref key1, ref value); } scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); + Assert.AreEqual(TotalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 3"); // Try an invalid cursor (not a multiple of 8) on-disk and verify we get one correct record. Use 3x page size to make sure page boundaries are tested. 
- Assert.Greater(store.hlog.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); + Assert.Greater(store.hlogBase.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); scanCursorFuncs.Initialize(verifyKeys); - cursor = store.hlog.BeginAddress - 1; + cursor = store.hlogBase.BeginAddress - 1; do { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); @@ -267,7 +278,7 @@ public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred InputStruct input = default; OutputStruct output = default; ReadOptions readOptions = default; - var readStatus = bContext.ReadAtAddress(store.hlog.HeadAddress, ref input, ref output, ref readOptions, out _); + var readStatus = bContext.ReadAtAddress(store.hlogBase.HeadAddress, ref input, ref output, ref readOptions, out _); Assert.IsTrue(readStatus.Found, $"Could not read at HeadAddress; {readStatus}"); scanCursorFuncs.Initialize(verifyKeys); @@ -277,7 +288,7 @@ public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); cursor = scanCursorFuncs.lastAddress + recordSize + 1; - } while (cursor < store.hlog.HeadAddress + PageSize * 3); + } while (cursor < store.hlogBase.HeadAddress + PageSize * 3); } [Test] @@ -286,16 +297,16 @@ public void BlittableScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred public void BlittableScanCursorFilterTest([Values(HashModulo.NoMod, HashModulo.Hundred)] HashModulo hashMod) { - var recordSize = BlittableAllocator.RecordSize; + var recordSize = BlittableAllocatorImpl.RecordSize; using var session = store.NewSession(new ScanFunctions()); var bContext = session.BasicContext; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var key1 = new 
KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } var scanCursorFuncs = new ScanCursorFuncs(); @@ -361,7 +372,7 @@ class LogObserver : IObserver> public void OnCompleted() { - Assert.AreEqual(totalRecords, val); + Assert.AreEqual(TotalRecords, val); } public void OnError(Exception error) diff --git a/libs/storage/Tsavorite/cs/test/CancellationTests.cs b/libs/storage/Tsavorite/cs/test/CancellationTests.cs index 6e6acb84f5..c49623b647 100644 --- a/libs/storage/Tsavorite/cs/test/CancellationTests.cs +++ b/libs/storage/Tsavorite/cs/test/CancellationTests.cs @@ -8,6 +8,9 @@ namespace Tsavorite.test.Cancellation { + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; + [TestFixture] class CancellationTests { @@ -118,9 +121,9 @@ public override bool ConcurrentWriter(ref int key, ref int input, ref int src, r IDevice log; CancellationFunctions functions; - TsavoriteKV store; - ClientSession session; - BasicContext bContext; + TsavoriteKV store; + ClientSession session; + BasicContext bContext; const int NumRecs = 100; @@ -130,10 +133,15 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - new LogSettings { LogDevice = log, MemorySizeBits = 17, PageSizeBits = 12 }, - null, null, null); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 17, + PageSize = 1L << 12 + }, StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); functions = new CancellationFunctions(); session = store.NewSession(functions); @@ -156,9 +164,7 @@ private unsafe void Populate() { // Single alloc outside the loop, to the max length we'll need. 
for (int ii = 0; ii < NumRecs; ii++) - { - bContext.Upsert(ii, ii * NumRecs * 10); - } + _ = bContext.Upsert(ii, ii * NumRecs * 10); } [Test] diff --git a/libs/storage/Tsavorite/cs/test/CheckpointManagerTests.cs b/libs/storage/Tsavorite/cs/test/CheckpointManagerTests.cs index d523ce0fbe..e2816acb85 100644 --- a/libs/storage/Tsavorite/cs/test/CheckpointManagerTests.cs +++ b/libs/storage/Tsavorite/cs/test/CheckpointManagerTests.cs @@ -13,16 +13,19 @@ namespace Tsavorite.test { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + public class CheckpointManagerTests { - private Random random = new Random(0); + private readonly Random random = new(0); [Test] [Category("CheckpointRestore")] [Category("Smoke")] public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) { - ICheckpointManager checkpointManager; + DeviceLogCommitCheckpointManager checkpointManager; if (deviceMode == DeviceMode.Local) { checkpointManager = new DeviceLogCommitCheckpointManager( @@ -41,17 +44,17 @@ public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) { TestUtils.RecreateDirectory(TestUtils.MethodTestDir); - using var store = new TsavoriteKV - (1 << 10, - logSettings: new LogSettings + using var store = new TsavoriteKV( + new() { + IndexSize = 1L << 16, LogDevice = log, MutableFraction = 1, - PageSizeBits = 10, - MemorySizeBits = 20, - ReadCacheSettings = null - }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } + PageSize = 1L << 10, + MemorySize = 1L << 20, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) ); using var s = store.NewSession>(new SimpleSimpleFunctions()); var bContext = s.BasicContext; @@ -63,30 +66,30 @@ public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) for (var i = 0; i < 10; i++) { // Do some dummy 
update - bContext.Upsert(0, random.Next()); + _ = bContext.Upsert(0, random.Next()); var checkpointType = random.Next(5); Guid result = default; switch (checkpointType) { case 0: - store.TryInitiateHybridLogCheckpoint(out result, CheckpointType.FoldOver); + _ = store.TryInitiateHybridLogCheckpoint(out result, CheckpointType.FoldOver); logCheckpoints.Add(result, 0); break; case 1: - store.TryInitiateHybridLogCheckpoint(out result, CheckpointType.Snapshot); + _ = store.TryInitiateHybridLogCheckpoint(out result, CheckpointType.Snapshot); logCheckpoints.Add(result, 0); break; case 2: - store.TryInitiateIndexCheckpoint(out result); + _ = store.TryInitiateIndexCheckpoint(out result); indexCheckpoints.Add(result, 0); break; case 3: - store.TryInitiateFullCheckpoint(out result, CheckpointType.FoldOver); + _ = store.TryInitiateFullCheckpoint(out result, CheckpointType.FoldOver); fullCheckpoints.Add(result, 0); break; case 4: - store.TryInitiateFullCheckpoint(out result, CheckpointType.Snapshot); + _ = store.TryInitiateFullCheckpoint(out result, CheckpointType.Snapshot); fullCheckpoints.Add(result, 0); break; default: @@ -106,7 +109,7 @@ public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) { var guid = logCheckpoints.First().Key; checkpointManager.Purge(guid); - logCheckpoints.Remove(guid); + _ = logCheckpoints.Remove(guid); Assert.AreEqual(checkpointManager.GetLogCheckpointTokens().ToDictionary(guid => guid, _ => 0), logCheckpoints.Union(fullCheckpoints).ToDictionary(e => e.Key, e => e.Value)); Assert.AreEqual(checkpointManager.GetIndexCheckpointTokens().ToDictionary(guid => guid, _ => 0), @@ -117,7 +120,7 @@ public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) { var guid = indexCheckpoints.First().Key; checkpointManager.Purge(guid); - indexCheckpoints.Remove(guid); + _ = indexCheckpoints.Remove(guid); Assert.AreEqual(checkpointManager.GetLogCheckpointTokens().ToDictionary(guid => guid, _ => 0), 
logCheckpoints.Union(fullCheckpoints).ToDictionary(e => e.Key, e => e.Value)); Assert.AreEqual(checkpointManager.GetIndexCheckpointTokens().ToDictionary(guid => guid, _ => 0), @@ -129,7 +132,7 @@ public async Task CheckpointManagerPurgeCheck([Values] DeviceMode deviceMode) { var guid = fullCheckpoints.First().Key; checkpointManager.Purge(guid); - fullCheckpoints.Remove(guid); + _ = fullCheckpoints.Remove(guid); Assert.AreEqual(checkpointManager.GetLogCheckpointTokens().ToDictionary(guid => guid, _ => 0), logCheckpoints.Union(fullCheckpoints).ToDictionary(e => e.Key, e => e.Value)); Assert.AreEqual(checkpointManager.GetIndexCheckpointTokens().ToDictionary(guid => guid, _ => 0), diff --git a/libs/storage/Tsavorite/cs/test/CompletePendingTests.cs b/libs/storage/Tsavorite/cs/test/CompletePendingTests.cs index 6ce01a528d..f4ec31e23e 100644 --- a/libs/storage/Tsavorite/cs/test/CompletePendingTests.cs +++ b/libs/storage/Tsavorite/cs/test/CompletePendingTests.cs @@ -10,26 +10,28 @@ namespace Tsavorite.test { - public struct LocalKeyStructComparer : ITsavoriteEqualityComparer + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. + public class LocalKeyStructComparer : IKeyComparer { internal long? forceCollisionHash; - public long GetHashCode64(ref KeyStruct key) - { - return forceCollisionHash.HasValue ? forceCollisionHash.Value : Utility.GetHashCode(key.kfield1); - } - public bool Equals(ref KeyStruct k1, ref KeyStruct k2) - { - return k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; - } + public long GetHashCode64(ref KeyStruct key) => forceCollisionHash.HasValue ? 
forceCollisionHash.Value : Utility.GetHashCode(key.kfield1); + + public bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; public override string ToString() => $"forceHashCollision: {forceCollisionHash}"; } +} + +namespace Tsavorite.test +{ + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; [TestFixture] class CompletePendingTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; LocalKeyStructComparer comparer = new(); @@ -40,7 +42,14 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "CompletePendingTests.log"), preallocateFile: true, deleteOnClose: true); - store = new TsavoriteKV(128, new LogSettings { LogDevice = log, MemorySizeBits = 29 }, comparer: comparer); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 29 + }, StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -53,26 +62,26 @@ public void TearDown() DeleteDirectory(MethodTestDir, wait: true); } - const int numRecords = 1000; + const int NumRecords = 1000; - static KeyStruct NewKeyStruct(int key) => new() { kfield1 = key, kfield2 = key + numRecords * 10 }; - static ValueStruct NewValueStruct(int key) => new() { vfield1 = key, vfield2 = key + numRecords * 10 }; + static KeyStruct NewKeyStruct(int key) => new() { kfield1 = key, kfield2 = key + NumRecords * 10 }; + static ValueStruct NewValueStruct(int key) => new() { vfield1 = key, vfield2 = key + NumRecords * 10 }; - static InputStruct NewInputStruct(int key) => new() { ifield1 = key + numRecords * 30, ifield2 = key + numRecords * 40 }; - static ContextStruct NewContextStruct(int key) => new() { cfield1 = key + numRecords * 50, cfield2 = key + numRecords * 60 }; + static InputStruct NewInputStruct(int key) => new() { ifield1 = key + 
NumRecords * 30, ifield2 = key + NumRecords * 40 }; + static ContextStruct NewContextStruct(int key) => new() { cfield1 = key + NumRecords * 50, cfield2 = key + NumRecords * 60 }; static void VerifyStructs(int key, ref KeyStruct keyStruct, ref InputStruct inputStruct, ref OutputStruct outputStruct, ref ContextStruct contextStruct, bool useRMW) { Assert.AreEqual(key, keyStruct.kfield1); - Assert.AreEqual(key + numRecords * 10, keyStruct.kfield2); - Assert.AreEqual(key + numRecords * 30, inputStruct.ifield1); - Assert.AreEqual(key + numRecords * 40, inputStruct.ifield2); + Assert.AreEqual(key + NumRecords * 10, keyStruct.kfield2); + Assert.AreEqual(key + NumRecords * 30, inputStruct.ifield1); + Assert.AreEqual(key + NumRecords * 40, inputStruct.ifield2); // RMW causes the InPlaceUpdater to be called, which adds input fields to the value. Assert.AreEqual(key + (useRMW ? inputStruct.ifield1 : 0), outputStruct.value.vfield1); - Assert.AreEqual(key + numRecords * 10 + (useRMW ? inputStruct.ifield2 : 0), outputStruct.value.vfield2); - Assert.AreEqual(key + numRecords * 50, contextStruct.cfield1); - Assert.AreEqual(key + numRecords * 60, contextStruct.cfield2); + Assert.AreEqual(key + NumRecords * 10 + (useRMW ? inputStruct.ifield2 : 0), outputStruct.value.vfield2); + Assert.AreEqual(key + NumRecords * 50, contextStruct.cfield1); + Assert.AreEqual(key + NumRecords * 60, contextStruct.cfield2); } class ProcessPending @@ -80,7 +89,7 @@ class ProcessPending // Get the first chunk of outputs as a group, testing realloc. 
private int deferredPendingMax = CompletedOutputIterator.kInitialAlloc + 1; private int deferredPending = 0; - internal Dictionary keyAddressDict = new(); + internal Dictionary keyAddressDict = []; private bool isFirst = true; internal bool IsFirst() @@ -153,37 +162,37 @@ public async ValueTask ReadAndCompleteWithPendingOutput([Values] bool useRMW) ProcessPending processPending = new(); - for (var key = 0; key < numRecords; ++key) + for (var key = 0; key < NumRecords; ++key) { var keyStruct = NewKeyStruct(key); var valueStruct = NewValueStruct(key); processPending.keyAddressDict[key] = store.Log.TailAddress; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); } // Flush to make reads or RMWs go pending. store.Log.FlushAndEvict(wait: true); - List<(KeyStruct key, long address)> rmwCopyUpdatedAddresses = new(); + List<(KeyStruct key, long address)> rmwCopyUpdatedAddresses = []; - for (var key = 0; key < numRecords; ++key) + for (var key = 0; key < NumRecords; ++key) { var keyStruct = NewKeyStruct(key); var inputStruct = NewInputStruct(key); var contextStruct = NewContextStruct(key); OutputStruct outputStruct = default; - if ((key % (numRecords / 10)) == 0) + if ((key % (NumRecords / 10)) == 0) { var ksUnfound = keyStruct; - ksUnfound.kfield1 += numRecords * 10; + ksUnfound.kfield1 += NumRecords * 10; if (bContext.Read(ref ksUnfound, ref inputStruct, ref outputStruct, contextStruct).IsPending) { CompletedOutputIterator completedOutputs; if ((key & 1) == 0) completedOutputs = await bContext.CompletePendingWithOutputsAsync(); else - bContext.CompletePendingWithOutputs(out completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out completedOutputs, wait: true); ProcessPending.VerifyOneNotFound(completedOutputs, ref ksUnfound); } } @@ -196,7 +205,7 @@ public async ValueTask ReadAndCompleteWithPendingOutput([Values] bool useRMW) { if (processPending.IsFirst()) { - bContext.CompletePending(wait: true); 
// Test that this does not instantiate CompletedOutputIterator + _ = bContext.CompletePending(wait: true); // Test that this does not instantiate CompletedOutputIterator Assert.IsNull(session.completedOutputs); // Do not instantiate until we need it continue; } @@ -207,7 +216,7 @@ public async ValueTask ReadAndCompleteWithPendingOutput([Values] bool useRMW) if ((key & 1) == 0) completedOutputs = await bContext.CompletePendingWithOutputsAsync(); else - bContext.CompletePendingWithOutputs(out completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out completedOutputs, wait: true); processPending.Process(completedOutputs, useRMW ? rmwCopyUpdatedAddresses : null); } continue; @@ -218,7 +227,7 @@ public async ValueTask ReadAndCompleteWithPendingOutput([Values] bool useRMW) // If we are using RMW, then all records were pending and updated their addresses, and we skipped the first one in the loop above. if (useRMW) - Assert.AreEqual(numRecords - 1, rmwCopyUpdatedAddresses.Count); + Assert.AreEqual(NumRecords - 1, rmwCopyUpdatedAddresses.Count); foreach (var (key, address) in rmwCopyUpdatedAddresses) { @@ -268,7 +277,7 @@ public void ReadPendingWithNewSameKey([Values(FlushMode.NoFlush, FlushMode.OnDis var firstValue = 0; // same as key var keyStruct = new KeyStruct { kfield1 = firstValue, kfield2 = firstValue * valueMult }; var valueStruct = new ValueStruct { vfield1 = firstValue, vfield2 = firstValue * valueMult }; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); // Flush to make the Read() go pending. store.Log.FlushAndEvict(wait: true); @@ -279,7 +288,7 @@ public void ReadPendingWithNewSameKey([Values(FlushMode.NoFlush, FlushMode.OnDis // Insert next record with the same key and flush this too if requested. 
var secondValue = firstValue + 1; valueStruct.vfield2 = secondValue * valueMult; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); if (secondRecordFlushMode == FlushMode.OnDisk) store.Log.FlushAndEvict(wait: true); @@ -300,10 +309,10 @@ public void ReadPendingWithNewDifferentKeyInChain([Values(FlushMode.NoFlush, Flu var firstValue = 0; // same as key var keyStruct = new KeyStruct { kfield1 = firstValue, kfield2 = firstValue * valueMult }; var valueStruct = new ValueStruct { vfield1 = firstValue, vfield2 = firstValue * valueMult }; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); // Force collisions to test having another key in the chain - comparer.forceCollisionHash = keyStruct.GetHashCode64(ref keyStruct); + comparer.forceCollisionHash = comparer.GetHashCode64(ref keyStruct); // Flush to make the Read() go pending. store.Log.FlushAndEvict(wait: true); @@ -315,7 +324,7 @@ public void ReadPendingWithNewDifferentKeyInChain([Values(FlushMode.NoFlush, Flu var secondValue = firstValue + 1; keyStruct = new() { kfield1 = secondValue, kfield2 = secondValue * valueMult }; valueStruct = new() { vfield1 = secondValue, vfield2 = secondValue * valueMult }; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); if (secondRecordFlushMode == FlushMode.OnDisk) store.Log.FlushAndEvict(wait: true); @@ -337,7 +346,7 @@ public void ReadPendingWithNoNewKey() var firstValue = 0; // same as key var keyStruct = new KeyStruct { kfield1 = firstValue, kfield2 = firstValue * valueMult }; var valueStruct = new ValueStruct { vfield1 = firstValue, vfield2 = firstValue * valueMult }; - bContext.Upsert(ref keyStruct, ref valueStruct); + _ = bContext.Upsert(ref keyStruct, ref valueStruct); // Flush to make the Read() go pending. 
store.Log.FlushAndEvict(wait: true); diff --git a/libs/storage/Tsavorite/cs/test/ComponentRecoveryTests.cs b/libs/storage/Tsavorite/cs/test/ComponentRecoveryTests.cs index 45782af01c..99f25febb1 100644 --- a/libs/storage/Tsavorite/cs/test/ComponentRecoveryTests.cs +++ b/libs/storage/Tsavorite/cs/test/ComponentRecoveryTests.cs @@ -98,7 +98,7 @@ private static unsafe void Setup_FuzzyIndexRecoveryTest(out int seed, out int si seed = 123; size = 1 << 16; - numAdds = 1 << 18; + numAdds = 1L << 18; ht_device = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "TestFuzzyIndexRecoveryht.dat"), deleteOnClose: true); ofb_device = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "TestFuzzyIndexRecoveryofb.dat"), deleteOnClose: true); hash_table1 = new TsavoriteBase(); diff --git a/libs/storage/Tsavorite/cs/test/ExpirationTests.cs b/libs/storage/Tsavorite/cs/test/ExpirationTests.cs index dcc808a273..1592f66297 100644 --- a/libs/storage/Tsavorite/cs/test/ExpirationTests.cs +++ b/libs/storage/Tsavorite/cs/test/ExpirationTests.cs @@ -9,6 +9,8 @@ namespace Tsavorite.test.Expiration { + using SpanByteStoreFunctions = StoreFunctions; + [TestFixture] internal class ExpirationTests { @@ -513,9 +515,9 @@ public override bool ConcurrentWriter(ref SpanByte key, ref ExpirationInput inpu IDevice log; ExpirationFunctions functions; - TsavoriteKV store; - ClientSession session; - BasicContext bContext; + TsavoriteKV> store; + ClientSession> session; + BasicContext> bContext; [SetUp] public void Setup() @@ -523,10 +525,15 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - new LogSettings { LogDevice = log, MemorySizeBits = 19, PageSizeBits = 14 }, - null, null, null); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 19, + PageSize = 1L << 14 + }, StoreFunctions.Create() + , (allocatorSettings, 
storeFunctions) => new(allocatorSettings, storeFunctions) + ); functions = new ExpirationFunctions(); session = store.NewSession(functions); diff --git a/libs/storage/Tsavorite/cs/test/FunctionPerSessionTests.cs b/libs/storage/Tsavorite/cs/test/FunctionPerSessionTests.cs index 4feeb75ece..a7605471fc 100644 --- a/libs/storage/Tsavorite/cs/test/FunctionPerSessionTests.cs +++ b/libs/storage/Tsavorite/cs/test/FunctionPerSessionTests.cs @@ -3,45 +3,51 @@ using System.IO; using System.Threading; -using System.Threading.Tasks; using NUnit.Framework; using Tsavorite.core; namespace Tsavorite.test { - public struct RefCountedValue + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. + public struct RefCountedValueStruct { public int ReferenceCount; public long Value; } +} - public class RefCountedAdder : SessionFunctionsBase +namespace Tsavorite.test +{ + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + + public class RefCountedAdder : SessionFunctionsBase { public int InitialCount; public int InPlaceCount; public int CopyCount; - public override bool InitialUpdater(ref int key, ref long input, ref RefCountedValue value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InitialUpdater(ref int key, ref long input, ref RefCountedValueStruct value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref InitialCount); + _ = Interlocked.Increment(ref InitialCount); value.Value = input; value.ReferenceCount = 1; return true; } - public override bool InPlaceUpdater(ref int key, ref long input, ref RefCountedValue value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InPlaceUpdater(ref int key, ref long input, ref RefCountedValueStruct value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref InPlaceCount); + 
_ = Interlocked.Increment(ref InPlaceCount); value.Value = input; value.ReferenceCount++; return true; } - public override bool CopyUpdater(ref int key, ref long input, ref RefCountedValue oldValue, ref RefCountedValue newValue, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool CopyUpdater(ref int key, ref long input, ref RefCountedValueStruct oldValue, ref RefCountedValueStruct newValue, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref CopyCount); + _ = Interlocked.Increment(ref CopyCount); newValue.Value = input; newValue.ReferenceCount = oldValue.ReferenceCount + 1; @@ -49,24 +55,24 @@ public override bool CopyUpdater(ref int key, ref long input, ref RefCountedValu } } - public class RefCountedRemover : SessionFunctionsBase + public class RefCountedRemover : SessionFunctionsBase { public int InitialCount; public int InPlaceCount; public int CopyCount; - public override bool InitialUpdater(ref int key, ref Empty input, ref RefCountedValue value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InitialUpdater(ref int key, ref Empty input, ref RefCountedValueStruct value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref InitialCount); + _ = Interlocked.Increment(ref InitialCount); value.Value = 0; value.ReferenceCount = 0; return true; } - public override bool InPlaceUpdater(ref int key, ref Empty input, ref RefCountedValue value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InPlaceUpdater(ref int key, ref Empty input, ref RefCountedValueStruct value, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref InPlaceCount); + _ = Interlocked.Increment(ref InPlaceCount); if (value.ReferenceCount > 0) value.ReferenceCount--; @@ -74,9 +80,9 @@ public override bool InPlaceUpdater(ref int key, ref Empty input, ref 
RefCounted return true; } - public override bool CopyUpdater(ref int key, ref Empty input, ref RefCountedValue oldValue, ref RefCountedValue newValue, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool CopyUpdater(ref int key, ref Empty input, ref RefCountedValueStruct oldValue, ref RefCountedValueStruct newValue, ref Empty output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Increment(ref CopyCount); + _ = Interlocked.Increment(ref CopyCount); newValue.ReferenceCount = oldValue.ReferenceCount; if (newValue.ReferenceCount > 0) @@ -86,15 +92,15 @@ public override bool CopyUpdater(ref int key, ref Empty input, ref RefCountedVal } } - public class RefCountedReader : SessionFunctionsBase + public class RefCountedReader : SessionFunctionsBase { - public override bool SingleReader(ref int key, ref Empty input, ref RefCountedValue value, ref RefCountedValue dst, ref ReadInfo readInfo) + public override bool SingleReader(ref int key, ref Empty input, ref RefCountedValueStruct value, ref RefCountedValueStruct dst, ref ReadInfo readInfo) { dst = value; return true; } - public override bool ConcurrentReader(ref int key, ref Empty input, ref RefCountedValue value, ref RefCountedValue dst, ref ReadInfo readInfo, ref RecordInfo recordInfo) + public override bool ConcurrentReader(ref int key, ref Empty input, ref RefCountedValueStruct value, ref RefCountedValueStruct dst, ref ReadInfo readInfo, ref RecordInfo recordInfo) { dst = value; return true; @@ -105,7 +111,7 @@ public override bool ConcurrentReader(ref int key, ref Empty input, ref RefCount public class FunctionPerSessionTests { private IDevice _log; - private TsavoriteKV _tsavorite; + private TsavoriteKV store; private RefCountedAdder _adder; private RefCountedRemover _remover; private RefCountedReader _reader; @@ -116,10 +122,13 @@ public void Setup() TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); _log = 
Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "FunctionPerSessionTests1.log"), deleteOnClose: true); - _tsavorite = new TsavoriteKV(128, new LogSettings() + store = new(new() { + IndexSize = 1L << 13, LogDevice = _log, - }); + }, StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); _adder = new RefCountedAdder(); _remover = new RefCountedRemover(); @@ -129,8 +138,8 @@ public void Setup() [TearDown] public void TearDown() { - _tsavorite?.Dispose(); - _tsavorite = null; + store?.Dispose(); + store = null; _log?.Dispose(); _log = null; TestUtils.DeleteDirectory(TestUtils.MethodTestDir); @@ -140,35 +149,35 @@ public void TearDown() [Category("TsavoriteKV")] public void Should_create_multiple_sessions_with_different_callbacks() { - using var adderSession = _tsavorite.NewSession(_adder); - using var removerSession = _tsavorite.NewSession(_remover); - using var readerSession = _tsavorite.NewSession(_reader); + using var adderSession = store.NewSession(_adder); + using var removerSession = store.NewSession(_remover); + using var readerSession = store.NewSession(_reader); var key = 101; var input = 1000L; - adderSession.BasicContext.RMW(ref key, ref input); - adderSession.BasicContext.RMW(ref key, ref input); - adderSession.BasicContext.RMW(ref key, ref input); + _ = adderSession.BasicContext.RMW(ref key, ref input); + _ = adderSession.BasicContext.RMW(ref key, ref input); + _ = adderSession.BasicContext.RMW(ref key, ref input); Assert.AreEqual(1, _adder.InitialCount); Assert.AreEqual(2, _adder.InPlaceCount); var empty = default(Empty); - removerSession.BasicContext.RMW(ref key, ref empty); + _ = removerSession.BasicContext.RMW(ref key, ref empty); Assert.AreEqual(1, _remover.InPlaceCount); - RefCountedValue output = new(); - readerSession.BasicContext.Read(ref key, ref output); + RefCountedValueStruct output = new(); + _ = readerSession.BasicContext.Read(ref key, ref output); 
Assert.AreEqual(2, output.ReferenceCount); Assert.AreEqual(1000L, output.Value); - _tsavorite.Log.FlushAndEvict(true); + store.Log.FlushAndEvict(true); - removerSession.BasicContext.RMW(ref key, ref empty); - removerSession.BasicContext.CompletePending(wait: true); - readerSession.BasicContext.Read(ref key, ref empty, ref output); + _ = removerSession.BasicContext.RMW(ref key, ref empty); + _ = removerSession.BasicContext.CompletePending(wait: true); + _ = readerSession.BasicContext.Read(ref key, ref empty, ref output); Assert.AreEqual(1, output.ReferenceCount); Assert.AreEqual(1000L, output.Value); diff --git a/libs/storage/Tsavorite/cs/test/GenericByteArrayTests.cs b/libs/storage/Tsavorite/cs/test/GenericByteArrayTests.cs index 3d4f15c1d4..e94f9b3a92 100644 --- a/libs/storage/Tsavorite/cs/test/GenericByteArrayTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericByteArrayTests.cs @@ -9,13 +9,15 @@ namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; [TestFixture] internal class GenericByteArrayTests { - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log, objlog; [SetUp] @@ -25,11 +27,17 @@ public void Setup() log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "GenericStringTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "GenericStringTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV( - 1L << 20, // size of hash table in #cache lines; 64 bytes per cache line - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 14, PageSizeBits = 9 }, // log device - comparer: new ByteArrayEC() - ); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 
1L << 14, + PageSize = 1L << 9 + }, StoreFunctions.Create(new ByteArrayEC(), () => new ByteArrayBinaryObjectSerializer(), () => new ByteArrayBinaryObjectSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new MyByteArrayFuncs()); bContext = session.BasicContext; @@ -65,9 +73,9 @@ public void ByteArrayBasicTest() { var _key = GetByteArray(i); var _value = GetByteArray(i); - bContext.Upsert(ref _key, ref _value, Empty.Default); + _ = bContext.Upsert(ref _key, ref _value, Empty.Default); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); for (int i = 0; i < totalRecords; i++) { @@ -77,13 +85,9 @@ public void ByteArrayBasicTest() var value = GetByteArray(i); if (bContext.Read(ref key, ref input, ref output, Empty.Default).IsPending) - { - bContext.CompletePending(true); - } + _ = bContext.CompletePending(true); else - { Assert.IsTrue(output.SequenceEqual(value)); - } } } @@ -96,7 +100,7 @@ public override void ReadCompletionCallback(ref byte[] key, ref byte[] input, re } } - class ByteArrayEC : ITsavoriteEqualityComparer + class ByteArrayEC : IKeyComparer { public bool Equals(ref byte[] k1, ref byte[] k2) { diff --git a/libs/storage/Tsavorite/cs/test/GenericDiskDeleteTests.cs b/libs/storage/Tsavorite/cs/test/GenericDiskDeleteTests.cs index 1f89d60725..f02d0d76d3 100644 --- a/libs/storage/Tsavorite/cs/test/GenericDiskDeleteTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericDiskDeleteTests.cs @@ -8,12 +8,15 @@ namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class GenericDiskDeleteTests { - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log, objlog; [SetUp] @@ -23,10 +26,17 @@ public void Setup() log = 
Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericDiskDeleteTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericDiskDeleteTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 14, PageSizeBits = 9 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 14, + PageSize = 1L << 9 + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new MyFunctionsDelete()); bContext = session.BasicContext; } @@ -58,7 +68,7 @@ public void DiskDeleteBasicTest1() { var _key = new MyKey { key = i }; var _value = new MyValue { value = i }; - bContext.Upsert(ref _key, ref _value, 0); + _ = bContext.Upsert(ref _key, ref _value, 0); } for (int i = 0; i < totalRecords; i++) @@ -69,19 +79,15 @@ public void DiskDeleteBasicTest1() var value = new MyValue { value = i }; if (bContext.Read(ref key1, ref input, ref output, 0).IsPending) - { - bContext.CompletePending(true); - } + _ = bContext.CompletePending(true); else - { Assert.AreEqual(value.value, output.value.value); - } } for (int i = 0; i < totalRecords; i++) { var key1 = new MyKey { key = i }; - bContext.Delete(ref key1); + _ = bContext.Delete(ref key1); } for (int i = 0; i < totalRecords; i++) @@ -94,7 +100,7 @@ public void DiskDeleteBasicTest1() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, _) = GetSinglePendingResult(outputs); } 
Assert.IsFalse(status.Found); @@ -121,14 +127,14 @@ public void DiskDeleteBasicTest2() { var _key = new MyKey { key = i }; var _value = new MyValue { value = i }; - bContext.Upsert(ref _key, ref _value, 0); + _ = bContext.Upsert(ref _key, ref _value, 0); } var key100 = new MyKey { key = 100 }; var value100 = new MyValue { value = 100 }; var key200 = new MyKey { key = 200 }; - bContext.Delete(ref key100); + _ = bContext.Delete(ref key100); var input = new MyInput { value = 1000 }; var output = new MyOutput(); @@ -142,8 +148,8 @@ public void DiskDeleteBasicTest2() Assert.IsTrue(status.Found, status.ToString()); Assert.AreEqual(value100.value, output.value.value); - bContext.Delete(ref key100); - bContext.Delete(ref key200); + _ = bContext.Delete(ref key100); + _ = bContext.Delete(ref key200); // This RMW should create new initial value, since item is deleted status = bContext.RMW(ref key200, ref input, 1); @@ -154,23 +160,23 @@ public void DiskDeleteBasicTest2() Assert.AreEqual(input.value, output.value.value); // Delete key 200 again - bContext.Delete(ref key200); + _ = bContext.Delete(ref key200); // Eliminate all records from memory for (int i = 201; i < 2000; i++) { var _key = new MyKey { key = i }; var _value = new MyValue { value = i }; - bContext.Upsert(ref _key, ref _value, 0); + _ = bContext.Upsert(ref _key, ref _value, 0); } status = bContext.Read(ref key100, ref input, ref output, 1); Assert.IsTrue(status.IsPending); - bContext.CompletePending(true); + _ = bContext.CompletePending(true); // This RMW should create new initial value, since item is deleted status = bContext.RMW(ref key200, ref input, 1); Assert.IsTrue(status.IsPending); - bContext.CompletePending(true); + _ = bContext.CompletePending(true); status = bContext.Read(ref key200, ref input, ref output, 0); Assert.IsTrue(status.Found, status.ToString()); diff --git a/libs/storage/Tsavorite/cs/test/GenericIterationTests.cs b/libs/storage/Tsavorite/cs/test/GenericIterationTests.cs index 
befba6d496..66daae8f4d 100644 --- a/libs/storage/Tsavorite/cs/test/GenericIterationTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericIterationTests.cs @@ -11,12 +11,15 @@ namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class GenericIterationTests { - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log, objlog; [SetUp] @@ -26,21 +29,23 @@ public void Setup() // Tests call InternalSetup() } - private void InternalSetup(ScanIteratorType scanIteratorType, bool largeMemory) - { - InternalSetup(largeMemory); - } - private void InternalSetup(bool largeMemory) { // Broke this out as we have different requirements by test. log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericIterationTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericIterationTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = largeMemory ? 25 : 14, PageSizeBits = largeMemory ? 20 : 9 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << (largeMemory ? 25 : 14), + PageSize = 1L << (largeMemory ? 
20 : 9) + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer(), DefaultRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new MyFunctionsDelete()); bContext = session.BasicContext; } @@ -76,9 +81,9 @@ public bool SingleReader(ref MyKey key, ref MyValue value, RecordMetadata record public bool ConcurrentReader(ref MyKey key, ref MyValue value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); - public bool OnStart(long beginAddress, long endAddress) => true; - public void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly bool OnStart(long beginAddress, long endAddress) => true; + public readonly void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnStop(bool completed, long numberOfRecords) { } } [Test] @@ -87,7 +92,7 @@ public void OnStop(bool completed, long numberOfRecords) { } public void GenericIterationBasicTest([Values] ScanIteratorType scanIteratorType) { - InternalSetup(scanIteratorType, largeMemory: false); + InternalSetup(largeMemory: false); GenericPushIterationTestFunctions scanIteratorFunctions = new(); const int totalRecords = 2000; @@ -101,7 +106,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { using var iter = session.Iterate(); while (iter.GetNext(out var recordInfo)) - scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); + _ = scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); } else Assert.IsTrue(session.Iterate(ref scanIteratorFunctions), $"Failed to complete push iteration; numRecords = {scanIteratorFunctions.numRecords}"); @@ 
-114,7 +119,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(1, totalRecords); @@ -122,7 +127,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { var key1 = new MyKey { key = i }; var value = new MyValue { value = 2 * i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(2, totalRecords); @@ -130,7 +135,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(0, totalRecords); @@ -138,14 +143,14 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(0, totalRecords); for (int i = 0; i < totalRecords; i += 2) { var key1 = new MyKey { key = i }; - bContext.Delete(ref key1); + _ = bContext.Delete(ref key1); } iterateAndVerify(0, totalRecords / 2); @@ -153,7 +158,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { var key1 = new MyKey { key = i }; var value = new MyValue { value = 3 * i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } iterateAndVerify(3, totalRecords); @@ -167,7 +172,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) public void GenericIterationPushStopTest() { - InternalSetup(ScanIteratorType.Push, largeMemory: false); + InternalSetup(largeMemory: false); GenericPushIterationTestFunctions scanIteratorFunctions = new(); const int totalRecords = 2000; @@ -189,7 +194,7 @@ void scanAndVerify(int stopAt, bool useScan) { var key1 = new MyKey { key = i }; var value = 
new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } scanAndVerify(42, useScan: true); @@ -225,7 +230,7 @@ void LocalUpdate(int tid) { var key1 = new MyKey { key = i }; var value = new MyValue { value = (tid + 1) * i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } } @@ -234,11 +239,11 @@ void LocalUpdate(int tid) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. var numThreads = scanThreads + updateThreads; for (int t = 0; t < numThreads; t++) { @@ -248,7 +253,7 @@ void LocalUpdate(int tid) else tasks.Add(Task.Factory.StartNew(() => LocalUpdate(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/GenericLogCompactionTests.cs b/libs/storage/Tsavorite/cs/test/GenericLogCompactionTests.cs index bde16a559f..e0a9252a03 100644 --- a/libs/storage/Tsavorite/cs/test/GenericLogCompactionTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericLogCompactionTests.cs @@ -8,12 +8,15 @@ namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class GenericLogCompactionTests { - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log, objlog; [SetUp] @@ -22,17 +25,19 @@ public void Setup() // Clean up log files from previous test runs in case they weren't cleaned up DeleteDirectory(MethodTestDir, wait: true); + var kvSettings = new KVSettings() + { + IndexSize = 1L << 13, + MutableFraction = 
0.1, + MemorySize = 1L << 14, + PageSize = 1L << 9 + }; + if (TestContext.CurrentContext.Test.Arguments.Length == 0) { // Default log creation log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericLogCompactionTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "GenericLogCompactionTests.obj.log"), deleteOnClose: true); - - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 14, PageSizeBits = 9 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } - ); } else { @@ -43,12 +48,17 @@ public void Setup() log = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"LogCompactBasicTest_{deviceType}.log")); objlog = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"LogCompactBasicTest_{deviceType}.obj.log")); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 14, PageSizeBits = 9, SegmentSizeBits = 22 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } - ); + kvSettings.SegmentSize = 1L << 22; } + + kvSettings.LogDevice = log; + kvSettings.ObjectLogDevice = objlog; + + store = new(kvSettings + , StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer(), DefaultRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + session = store.NewSession(new MyFunctionsDelete()); bContext = session.BasicContext; } @@ -86,7 +96,7 @@ public void LogCompactBasicTest([Values] CompactionType compactionType) var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref 
value, 0); } compactUntil = session.Compact(compactUntil, compactionType); @@ -104,7 +114,7 @@ public void LogCompactBasicTest([Values] CompactionType compactionType) var status = bContext.Read(ref key1, ref input, ref output, 0); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); Assert.IsTrue(completedOutputs.Next()); Assert.IsTrue(completedOutputs.Current.Status.Found); output = completedOutputs.Current.Output; @@ -133,7 +143,7 @@ public void LogCompactTestNewEntries([Values] CompactionType compactionType) var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } // Put fresh entries for 1000 records @@ -141,7 +151,7 @@ public void LogCompactTestNewEntries([Values] CompactionType compactionType) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } store.Log.Flush(true); @@ -161,7 +171,7 @@ public void LogCompactTestNewEntries([Values] CompactionType compactionType) var status = bContext.Read(ref key1, ref input, ref output, 0); if (status.IsPending) - bContext.CompletePending(true); + _ = bContext.CompletePending(true); else { Assert.IsTrue(status.Found); @@ -188,13 +198,13 @@ public void LogCompactAfterDeleteTest([Values] CompactionType compactionType) var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); if (i % 8 == 0) { int j = i / 4; key1 = new MyKey { key = j }; - bContext.Delete(ref key1); + _ = bContext.Delete(ref key1); } } @@ -213,7 +223,7 @@ public void LogCompactAfterDeleteTest([Values] CompactionType compactionType) var status = bContext.Read(ref key1, ref input, ref output, ctx); if 
(status.IsPending) - bContext.CompletePending(true); + _ = bContext.CompletePending(true); else { if (ctx == 0) @@ -247,7 +257,7 @@ public void LogCompactBasicCustomFctnTest([Values] CompactionType compactionType var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value, 0); + _ = bContext.Upsert(ref key1, ref value, 0); } compactUntil = session.Compact(compactUntil, compactionType, default(EvenCompactionFunctions)); @@ -266,7 +276,7 @@ public void LogCompactBasicCustomFctnTest([Values] CompactionType compactionType var status = bContext.Read(ref key1, ref input, ref output, ctx); if (status.IsPending) { - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } else { @@ -297,12 +307,12 @@ public void LogCompactCopyInPlaceCustomFctnTest([Values] CompactionType compacti var key = new MyKey { key = 100 }; var value = new MyValue { value = 20 }; - bContext.Upsert(ref key, ref value, 0); + _ = bContext.Upsert(ref key, ref value, 0); store.Log.Flush(true); value = new MyValue { value = 21 }; - bContext.Upsert(ref key, ref value, 0); + _ = bContext.Upsert(ref key, ref value, 0); store.Log.Flush(true); @@ -315,7 +325,7 @@ public void LogCompactCopyInPlaceCustomFctnTest([Values] CompactionType compacti var status = bContext.Read(ref key, ref input, ref output); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + Assert.IsTrue(bContext.CompletePendingWithOutputs(out var outputs, wait: true)); (status, output) = GetSinglePendingResult(outputs); } Assert.IsTrue(status.Found); @@ -329,8 +339,7 @@ private class Test2CompactionFunctions : ICompactionFunctions private struct EvenCompactionFunctions : ICompactionFunctions { - public bool IsDeleted(ref MyKey key, ref MyValue value) => value.value % 2 != 0; + public readonly bool IsDeleted(ref MyKey key, ref MyValue value) => value.value % 2 != 0; } - } } \ No newline at end of file diff --git 
a/libs/storage/Tsavorite/cs/test/GenericLogScanTests.cs b/libs/storage/Tsavorite/cs/test/GenericLogScanTests.cs index 25d3287eb5..28b1bd5178 100644 --- a/libs/storage/Tsavorite/cs/test/GenericLogScanTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericLogScanTests.cs @@ -9,36 +9,43 @@ namespace Tsavorite.test { - [TestFixture] - internal class GenericLogScanTests + // Must be in a separate block so the "using ClassStoreFunctions" is the first line in its namespace declaration. + public class MyObjectComparerModulo : IKeyComparer { - private TsavoriteKV store; - private IDevice log, objlog; - const int totalRecords = 250; + readonly long mod; - ITsavoriteEqualityComparer comparer = null; + internal MyObjectComparerModulo(long mod) => this.mod = mod; - public class MyObjectComparerModulo : ITsavoriteEqualityComparer + public bool Equals(ref MyKey k1, ref MyKey k2) => k1.key == k2.key; + + // Force collisions to create a chain + public long GetHashCode64(ref MyKey key) { - readonly long mod; + long hash = Utility.GetHashCode(key.key); + return mod > 0 ? hash % mod : hash; + } + } +} - internal MyObjectComparerModulo(long mod) => this.mod = mod; +namespace Tsavorite.test +{ + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; - public bool Equals(ref MyKey k1, ref MyKey k2) => k1.key == k2.key; + [TestFixture] + internal class GenericLogScanTests + { + private TsavoriteKV store; + private IDevice log, objlog; + const int TotalRecords = 250; - // Force collisions to create a chain - public long GetHashCode64(ref MyKey key) - { - long hash = Utility.GetHashCode(key.key); - return mod > 0 ? 
hash % mod : hash; - } - } + MyObjectComparerModulo comparer; [SetUp] public void Setup() { // Clean up log files from previous test runs in case they weren't cleaned up - DeleteDirectory(TestUtils.MethodTestDir, wait: true); + DeleteDirectory(MethodTestDir, wait: true); comparer = null; foreach (var arg in TestContext.CurrentContext.Test.Arguments) @@ -49,6 +56,7 @@ public void Setup() continue; } } + comparer ??= new(0); } [TearDown] @@ -68,7 +76,7 @@ internal struct GenericPushScanTestFunctions : IScanIteratorFunctions true; + public readonly bool OnStart(long beginAddress, long endAddress) => true; public bool ConcurrentReader(ref MyKey key, ref MyValue value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); @@ -83,9 +91,9 @@ public bool SingleReader(ref MyKey key, ref MyValue value, RecordMetadata record return true; } - public void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly void OnStop(bool completed, long numberOfRecords) { } } [Test] @@ -95,9 +103,18 @@ public void DiskWriteScanBasicTest([Values] DeviceType deviceType, [Values] Scan { log = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"DiskWriteScanBasicTest_{deviceType}.log")); objlog = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"DiskWriteScanBasicTest_{deviceType}.obj.log")); - store = new(128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 15, PageSizeBits = 9, SegmentSizeBits = 22 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + 
ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 15, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create(comparer, () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new MyFunctions()); var bContext = session.BasicContext; @@ -105,11 +122,11 @@ public void DiskWriteScanBasicTest([Values] DeviceType deviceType, [Values] Scan using var s = store.Log.Subscribe(new LogObserver()); var start = store.Log.TailAddress; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var _key = new MyKey { key = i }; var _value = new MyValue { value = i }; - bContext.Upsert(ref _key, ref _value, Empty.Default); + _ = bContext.Upsert(ref _key, ref _value, Empty.Default); if (i % 100 == 0) store.Log.FlushAndEvict(true); } @@ -125,12 +142,12 @@ void scanAndVerify(ScanBufferingMode sbm) { using var iter = store.Log.Scan(start, store.Log.TailAddress, sbm); while (iter.GetNext(out var recordInfo)) - scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); + _ = scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); } else Assert.IsTrue(store.Log.Scan(ref scanIteratorFunctions, start, store.Log.TailAddress, sbm), "Failed to complete push iteration"); - Assert.AreEqual(totalRecords, scanIteratorFunctions.numRecords); + Assert.AreEqual(TotalRecords, scanIteratorFunctions.numRecords); } scanAndVerify(ScanBufferingMode.SinglePageBuffering); @@ -143,7 +160,7 @@ class LogObserver : IObserver> public void OnCompleted() { - Assert.AreEqual(val == totalRecords, $"LogObserver.OnCompleted: totalRecords"); + Assert.AreEqual(val == TotalRecords, $"LogObserver.OnCompleted: totalRecords"); } public void OnError(Exception error) @@ -169,9 +186,18 @@ public void BlittableScanJumpToBeginAddressTest() { log = 
Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log")); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.obj.log")); - store = new(128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 20, PageSizeBits = 15, SegmentSizeBits = 18 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 20, + PageSize = 1L << 15, + SegmentSize = 1L << 18 + }, StoreFunctions.Create(comparer, () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new MyFunctions()); var bContext = session.BasicContext; @@ -189,7 +215,7 @@ public void BlittableScanJumpToBeginAddressTest() } var key = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key, ref value, Empty.Default); + _ = bContext.Upsert(ref key, ref value, Empty.Default); } using var iter = store.Log.Scan(store.Log.HeadAddress, store.Log.TailAddress); @@ -234,23 +260,33 @@ public void GenericScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] { const int PageSizeBits = 9; const long PageSize = 1L << PageSizeBits; - var recordSize = GenericAllocator.RecordSize; + var recordSize = GenericAllocatorImpl.RecordSize; log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log")); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.obj.log")); - store = new(128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 20, PageSizeBits = 15, SegmentSizeBits = 18 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new 
MyValueSerializer() }, - comparer: comparer); + + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 20, + PageSize = 1L << 15, + SegmentSize = 1L << 18 + }, StoreFunctions.Create(comparer, () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + using var session = store.NewSession(new ScanFunctions()); var bContext = session.BasicContext; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } var scanCursorFuncs = new ScanCursorFuncs(); @@ -267,7 +303,7 @@ public void GenericScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] scanCursorFuncs.Initialize(verifyKeys: true); while (session.ScanCursor(ref cursor, counts[iCount], scanCursorFuncs, endAddresses[iAddr])) ; - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 1"); } } @@ -280,25 +316,25 @@ public void GenericScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] // Scan and verify we see them all scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 2"); // Add another totalRecords, with keys incremented by 
totalRecords to remain distinct, and verify we see all keys. - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { - var key1 = new MyKey { key = i + totalRecords }; - var value = new MyValue { value = i + totalRecords }; - bContext.Upsert(ref key1, ref value); + var key1 = new MyKey { key = i + TotalRecords }; + var value = new MyValue { value = i + TotalRecords }; + _ = bContext.Upsert(ref key1, ref value); } scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); + Assert.AreEqual(TotalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 3"); // Try an invalid cursor (not a multiple of 8) on-disk and verify we get one correct record. Use 3x page size to make sure page boundaries are tested. 
- Assert.Greater(store.hlog.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); + Assert.Greater(store.hlogBase.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); scanCursorFuncs.Initialize(verifyKeys); - cursor = store.hlog.BeginAddress - 1; + cursor = store.hlogBase.BeginAddress - 1; do { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); @@ -309,7 +345,7 @@ public void GenericScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] MyInput input = new(); MyOutput output = new(); ReadOptions readOptions = default; - var readStatus = bContext.ReadAtAddress(store.hlog.HeadAddress, ref input, ref output, ref readOptions, out _); + var readStatus = bContext.ReadAtAddress(store.hlogBase.HeadAddress, ref input, ref output, ref readOptions, out _); Assert.IsTrue(readStatus.Found, $"Could not read at HeadAddress; {readStatus}"); scanCursorFuncs.Initialize(verifyKeys); @@ -319,7 +355,7 @@ public void GenericScanCursorTest([Values(HashModulo.NoMod, HashModulo.Hundred)] { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); cursor = scanCursorFuncs.lastAddress + recordSize + 1; - } while (cursor < store.hlog.HeadAddress + PageSize * 3); + } while (cursor < store.hlogBase.HeadAddress + PageSize * 3); } [Test] @@ -330,19 +366,28 @@ public void GenericScanCursorFilterTest([Values(HashModulo.NoMod, HashModulo.Hun { log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log")); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.obj.log")); - store = new(128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 20, PageSizeBits = 15, SegmentSizeBits = 18 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), 
valueSerializer = () => new MyValueSerializer() }, - comparer: comparer); + + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 20, + PageSize = 1L << 15, + SegmentSize = 1L << 18 + }, StoreFunctions.Create(comparer, () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new ScanFunctions()); var bContext = session.BasicContext; - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var key1 = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key1, ref value); + _ = bContext.Upsert(ref key1, ref value); } var scanCursorFuncs = new ScanCursorFuncs(); diff --git a/libs/storage/Tsavorite/cs/test/GenericStringTests.cs b/libs/storage/Tsavorite/cs/test/GenericStringTests.cs index 2ba8947ba7..ce04b4701f 100644 --- a/libs/storage/Tsavorite/cs/test/GenericStringTests.cs +++ b/libs/storage/Tsavorite/cs/test/GenericStringTests.cs @@ -8,11 +8,14 @@ namespace Tsavorite.test { + using StringAllocator = GenericAllocator>>; + using StringStoreFunctions = StoreFunctions>; + [TestFixture] internal class GenericStringTests { - private TsavoriteKV store; - private ClientSession session; + private TsavoriteKV store; + private ClientSession session; private IDevice log, objlog; [SetUp] @@ -48,10 +51,18 @@ public void StringBasicTest([Values] DeviceType deviceType) log = CreateTestDevice(deviceType, logfilename); objlog = CreateTestDevice(deviceType, objlogfilename); - store = new TsavoriteKV( - 1L << 20, // size of hash table in #cache lines; 64 bytes per cache line - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 14, PageSizeBits = 9, SegmentSizeBits = 22 } // log device - ); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + 
ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 14, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create(StringKeyComparer.Instance, () => new StringBinaryObjectSerializer(), () => new StringBinaryObjectSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new MyFuncs()); var bContext = session.BasicContext; @@ -61,9 +72,9 @@ public void StringBasicTest([Values] DeviceType deviceType) { var _key = $"{i}"; var _value = $"{i}"; ; - bContext.Upsert(ref _key, ref _value, Empty.Default); + _ = bContext.Upsert(ref _key, ref _value, Empty.Default); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); Assert.AreEqual(totalRecords, store.EntryCount); for (int i = 0; i < totalRecords; i++) @@ -76,7 +87,7 @@ public void StringBasicTest([Values] DeviceType deviceType) var status = bContext.Read(ref key, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } Assert.IsTrue(status.Found); diff --git a/libs/storage/Tsavorite/cs/test/InputOutputParameterTests.cs b/libs/storage/Tsavorite/cs/test/InputOutputParameterTests.cs index 35024a1bdf..a6c6e702ef 100644 --- a/libs/storage/Tsavorite/cs/test/InputOutputParameterTests.cs +++ b/libs/storage/Tsavorite/cs/test/InputOutputParameterTests.cs @@ -2,12 +2,14 @@ // Licensed under the MIT license. 
using System.IO; -using System.Threading.Tasks; using NUnit.Framework; using Tsavorite.core; namespace Tsavorite.test.InputOutputParameterTests { + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; + [TestFixture] class InputOutputParameterTests { @@ -15,9 +17,9 @@ class InputOutputParameterTests const int MultValue = 100; const int NumRecs = 10; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; internal class UpsertInputFunctions : SessionFunctionsBase @@ -82,8 +84,16 @@ public void Setup() TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); log = TestUtils.CreateTestDevice(TestUtils.DeviceType.LocalMemory, Path.Combine(TestUtils.MethodTestDir, "Device.log")); - store = new TsavoriteKV - (128, new LogSettings { LogDevice = log, MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 22, + SegmentSize = 1L << 22, + PageSize = 1L << 10 + }, StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new UpsertInputFunctions()); bContext = session.BasicContext; } @@ -116,9 +126,8 @@ void doWrites() for (int key = 0; key < NumRecs; ++key) { var tailAddress = store.Log.TailAddress; - RecordMetadata recordMetadata; status = useRMW - ? bContext.RMW(ref key, ref input, ref output, out recordMetadata) + ? 
bContext.RMW(ref key, ref input, ref output, out var recordMetadata) : bContext.Upsert(ref key, ref input, ref key, ref output, out recordMetadata); if (loading) { @@ -140,7 +149,7 @@ void doReads() { for (int key = 0; key < NumRecs; ++key) { - bContext.Read(ref key, ref input, ref output); + _ = bContext.Read(ref key, ref input, ref output); Assert.AreEqual(key * input + AddValue, output); } } diff --git a/libs/storage/Tsavorite/cs/test/LargeObjectTests.cs b/libs/storage/Tsavorite/cs/test/LargeObjectTests.cs index 9e059f7af1..1e6efb149d 100644 --- a/libs/storage/Tsavorite/cs/test/LargeObjectTests.cs +++ b/libs/storage/Tsavorite/cs/test/LargeObjectTests.cs @@ -10,6 +10,9 @@ namespace Tsavorite.test.largeobjects { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class LargeObjectTests { @@ -30,12 +33,21 @@ public async ValueTask LargeObjectTest([Values] CheckpointType checkpointType) MyLargeOutput output = new MyLargeOutput(); Guid token = default; + // Step 1: Create and populate store. 
using (var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "LargeObjectTest.log"))) using (var objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "LargeObjectTest.obj.log"))) - using (var store = new TsavoriteKV(128, - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, PageSizeBits = 21, MemorySizeBits = 26 }, - new CheckpointSettings { CheckpointDir = MethodTestDir }, - new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyLargeValueSerializer() })) + using (var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + PageSize = 1L << 21, + MemorySize = 1L << 26, + CheckpointDir = MethodTestDir + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyLargeValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions))) using (var session = store.NewSession(new MyLargeFunctions())) { var bContext = session.BasicContext; @@ -45,21 +57,30 @@ public async ValueTask LargeObjectTest([Values] CheckpointType checkpointType) { var mykey = new MyKey { key = key }; var value = new MyLargeValue(1 + r.Next(maxSize)); - bContext.Upsert(ref mykey, ref value, Empty.Default); + _ = bContext.Upsert(ref mykey, ref value, Empty.Default); } - store.TryInitiateFullCheckpoint(out token, checkpointType); + _ = store.TryInitiateFullCheckpoint(out token, checkpointType); await store.CompleteCheckpointAsync(); } + // Step 1: Create and recover store. 
using (var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "LargeObjectTest.log"))) using (var objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "LargeObjectTest.obj.log"))) - using (var store = new TsavoriteKV(128, - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, PageSizeBits = 21, MemorySizeBits = 26 }, - new CheckpointSettings { CheckpointDir = MethodTestDir }, - new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyLargeValueSerializer() })) + using (var store = new TsavoriteKV( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + PageSize = 1L << 21, + MemorySize = 1L << 26, + CheckpointDir = MethodTestDir + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyLargeValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions))) { - store.Recover(token); + _ = store.Recover(token); using (var session = store.NewSession(new MyLargeFunctions())) { diff --git a/libs/storage/Tsavorite/cs/test/LockableUnsafeContextTests.cs b/libs/storage/Tsavorite/cs/test/LockableUnsafeContextTests.cs index faa1a26c66..361ab5a785 100644 --- a/libs/storage/Tsavorite/cs/test/LockableUnsafeContextTests.cs +++ b/libs/storage/Tsavorite/cs/test/LockableUnsafeContextTests.cs @@ -16,6 +16,27 @@ namespace Tsavorite.test.LockableUnsafeContext { + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ internal class LockableUnsafeComparer : IKeyComparer + { + internal int maxSleepMs; + readonly Random rng = new(101); + + public bool Equals(ref long k1, ref long k2) => k1 == k2; + + public long GetHashCode64(ref long k) + { + if (maxSleepMs > 0) + Thread.Sleep(rng.Next(maxSleepMs)); + return Utility.GetHashCode(k); + } + } +} +namespace Tsavorite.test.LockableUnsafeContext +{ + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + // Functions for the "Simple lock transaction" case, e.g.: // - Lock key1, key2, key3, keyResult // - Do some operation on value1, value2, value3 and write the result to valueResult @@ -35,21 +56,6 @@ public override bool ConcurrentDeleter(ref long key, ref long value, ref DeleteI } } - internal class LockableUnsafeComparer : ITsavoriteEqualityComparer - { - internal int maxSleepMs; - readonly Random rng = new(101); - - public bool Equals(ref long k1, ref long k2) => k1 == k2; - - public long GetHashCode64(ref long k) - { - if (maxSleepMs > 0) - Thread.Sleep(rng.Next(maxSleepMs)); - return Utility.GetHashCode(k); - } - } - public enum ResultLockTarget { MutableLock, LockTable } internal struct BucketLockTracker @@ -58,19 +64,19 @@ internal struct BucketLockTracker public BucketLockTracker() { - buckets = new(); + buckets = []; } - internal void Increment(FixedLengthLockableKeyStruct key) => Increment(ref key); // easier with 'foreach' because iteration vars can't be passed by 'ref' - internal void Increment(ref FixedLengthLockableKeyStruct key) + internal readonly void Increment(FixedLengthLockableKeyStruct key) => Increment(ref key); // easier with 'foreach' because iteration vars can't be passed by 'ref' + internal readonly void Increment(ref FixedLengthLockableKeyStruct key) { if (key.LockType == LockType.Exclusive) IncrementX(ref key); else IncrementS(ref key); } - internal void Decrement(FixedLengthLockableKeyStruct key) => Decrement(ref key); - internal void Decrement(ref 
FixedLengthLockableKeyStruct key) + internal readonly void Decrement(FixedLengthLockableKeyStruct key) => Decrement(ref key); + internal readonly void Decrement(ref FixedLengthLockableKeyStruct key) { if (key.LockType == LockType.Exclusive) DecrementX(ref key); @@ -78,12 +84,12 @@ internal void Decrement(ref FixedLengthLockableKeyStruct key) DecrementS(ref key); } - internal void IncrementX(ref FixedLengthLockableKeyStruct key) => AddX(ref key, 1); - internal void DecrementX(ref FixedLengthLockableKeyStruct key) => AddX(ref key, -1); - internal void IncrementS(ref FixedLengthLockableKeyStruct key) => AddS(ref key, 1); - internal void DecrementS(ref FixedLengthLockableKeyStruct key) => AddS(ref key, -1); + internal readonly void IncrementX(ref FixedLengthLockableKeyStruct key) => AddX(ref key, 1); + internal readonly void DecrementX(ref FixedLengthLockableKeyStruct key) => AddX(ref key, -1); + internal readonly void IncrementS(ref FixedLengthLockableKeyStruct key) => AddS(ref key, 1); + internal readonly void DecrementS(ref FixedLengthLockableKeyStruct key) => AddS(ref key, -1); - private void AddX(ref FixedLengthLockableKeyStruct key, int addend) + private readonly void AddX(ref FixedLengthLockableKeyStruct key, int addend) { if (!buckets.TryGetValue(key.KeyHash, out var counts)) counts = default; @@ -92,7 +98,7 @@ private void AddX(ref FixedLengthLockableKeyStruct key, int addend) buckets[key.KeyHash] = counts; } - private void AddS(ref FixedLengthLockableKeyStruct key, int addend) + private readonly void AddS(ref FixedLengthLockableKeyStruct key, int addend) { if (!buckets.TryGetValue(key.KeyHash, out var counts)) counts = default; @@ -101,7 +107,7 @@ private void AddS(ref FixedLengthLockableKeyStruct key, int addend) buckets[key.KeyHash] = counts; } - internal bool GetLockCounts(ref FixedLengthLockableKeyStruct key, out (int x, int s) counts) + internal readonly bool GetLockCounts(ref FixedLengthLockableKeyStruct key, out (int x, int s) counts) { if 
(!buckets.TryGetValue(key.KeyHash, out counts)) { @@ -111,7 +117,7 @@ internal bool GetLockCounts(ref FixedLengthLockableKeyStruct key, out (int return true; } - internal (int x, int s) GetLockCounts() + internal readonly (int x, int s) GetLockCounts() { var xx = 0; var ss = 0; @@ -123,7 +129,7 @@ internal bool GetLockCounts(ref FixedLengthLockableKeyStruct key, out (int return (xx, ss); } - internal void AssertNoLocks() + internal readonly void AssertNoLocks() { foreach (var kvp in buckets) { @@ -136,18 +142,18 @@ internal void AssertNoLocks() [TestFixture] class LockableUnsafeContextTests { - const int numRecords = 1000; - const int useNewKey = 1010; - const int useExistingKey = 200; + const int NumRecords = 1000; + const int UseNewKey = 1010; + const int UseExistingKey = 200; - const int valueMult = 1_000_000; + const int ValueMult = 1_000_000; LockableUnsafeFunctions functions; LockableUnsafeComparer comparer; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; [SetUp] @@ -161,19 +167,29 @@ public void Setup(bool forRecovery) } log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: false, recoverDevice: forRecovery); - ReadCacheSettings readCacheSettings = default; - CheckpointSettings checkpointSettings = default; + var kvSettings = new KVSettings() + { + IndexSize = 1L << 26, + LogDevice = log, + PageSize = 1L << 12, + MemorySize = 1L << 22 + }; + foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is ReadCopyDestination dest) { if (dest == ReadCopyDestination.ReadCache) - readCacheSettings = new() { PageSizeBits = 12, MemorySizeBits = 22 }; + { + kvSettings.ReadCachePageSize = 1L << 12; + kvSettings.ReadCacheMemorySize = 1L << 22; + kvSettings.ReadCacheEnabled = true; + } break; } - if (arg is CheckpointType chktType) + if (arg is CheckpointType) { - 
checkpointSettings = new CheckpointSettings { CheckpointDir = MethodTestDir }; + kvSettings.CheckpointDir = MethodTestDir; break; } } @@ -181,8 +197,11 @@ public void Setup(bool forRecovery) comparer = new LockableUnsafeComparer(); functions = new LockableUnsafeFunctions(); - store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 12, MemorySizeBits = 22, ReadCacheSettings = readCacheSettings }, - checkpointSettings: checkpointSettings, comparer: comparer); + store = new(kvSettings + , StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + session = store.NewSession(functions); bContext = session.BasicContext; } @@ -207,8 +226,8 @@ public void TearDown(bool forRecovery) void Populate() { - for (int key = 0; key < numRecords; key++) - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + for (int key = 0; key < NumRecords; key++) + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); } void AssertIsLocked(FixedLengthLockableKeyStruct key, bool xlock, bool slock) @@ -218,7 +237,7 @@ void AssertIsLocked(ref FixedLengthLockableKeyStruct key, bool xlock, bool void PrepareRecordLocation(FlushMode recordLocation) => PrepareRecordLocation(store, recordLocation); - static void PrepareRecordLocation(TsavoriteKV store, FlushMode recordLocation) + static void PrepareRecordLocation(TsavoriteKV store, FlushMode recordLocation) { if (recordLocation == FlushMode.ReadOnly) store.Log.ShiftReadOnlyAddress(store.Log.TailAddress, wait: true); @@ -226,14 +245,14 @@ static void PrepareRecordLocation(TsavoriteKV store, FlushMode recor store.Log.FlushAndEvict(wait: true); } - static void ClearCountsOnError(ClientSession luContext) + static void ClearCountsOnError(ClientSession luContext) { // If we already have an exception, clear these counts so "Run" will not report them spuriously. 
luContext.sharedLockCount = 0; luContext.exclusiveLockCount = 0; } - static void ClearCountsOnError(ClientSession luContext) + static void ClearCountsOnError(ClientSession luContext) where TFunctions : ISessionFunctions { // If we already have an exception, clear these counts so "Run" will not report them spuriously. @@ -325,7 +344,7 @@ public async Task TestShiftHeadAddressLUC([Values] CompletionSyncMode syncMode) { long input = default; const int RandSeed = 10; - const int RandRange = numRecords; + const int RandRange = NumRecords; const int NumRecs = 200; Random r = new(RandSeed); @@ -346,8 +365,8 @@ public async Task TestShiftHeadAddressLUC([Values] CompletionSyncMode syncMode) luContext.Lock(keyVec); AssertBucketLockCount(ref keyVec[0], 1, 0); - var value = keyVec[0].Key + numRecords; - luContext.Upsert(ref keyVec[0].Key, ref value, Empty.Default); + var value = keyVec[0].Key + NumRecords; + _ = luContext.Upsert(ref keyVec[0].Key, ref value, Empty.Default); luContext.Unlock(keyVec); AssertBucketLockCount(ref keyVec[0], 0, 0); } @@ -360,7 +379,7 @@ public async Task TestShiftHeadAddressLUC([Values] CompletionSyncMode syncMode) for (int c = 0; c < NumRecs; c++) { keyVec[0] = new(r.Next(RandRange), LockType.Shared, luContext); - var value = keyVec[0].Key + numRecords; + var value = keyVec[0].Key + NumRecords; long output = 0; luContext.Lock(keyVec); @@ -375,7 +394,7 @@ public async Task TestShiftHeadAddressLUC([Values] CompletionSyncMode syncMode) if (syncMode == CompletionSyncMode.Sync) { - luContext.CompletePending(true); + _ = luContext.CompletePending(true); } else { @@ -413,7 +432,7 @@ public async Task TestShiftHeadAddressLUC([Values] CompletionSyncMode syncMode) CompletedOutputIterator outputs; if (syncMode == CompletionSyncMode.Sync) { - luContext.CompletePendingWithOutputs(out outputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out outputs, wait: true); } else { @@ -435,7 +454,7 @@ public async Task TestShiftHeadAddressLUC([Values] 
CompletionSyncMode syncMode) while (outputs.Next()) { count++; - Assert.AreEqual(outputs.Current.Key + numRecords, outputs.Current.Output); + Assert.AreEqual(outputs.Current.Key + NumRecords, outputs.Current.Output); } outputs.Dispose(); Assert.AreEqual(expectedS, count); @@ -457,12 +476,12 @@ public void InMemorySimpleLockTxnTest([Values] ResultLockTarget resultLockTarget Populate(); PrepareRecordLocation(flushMode); - // SetUp also reads this to determine whether to supply ReadCacheSettings. If ReadCache is specified it wins over CopyToTail. + // SetUp also reads this to determine whether to supply ReadCache settings. If ReadCache is specified it wins over CopyToTail. var useRMW = updateOp == UpdateOp.RMW; const int readKey24 = 24, readKey51 = 51; - long resultKey = resultLockTarget == ResultLockTarget.LockTable ? numRecords + 1 : readKey24 + readKey51; + long resultKey = resultLockTarget == ResultLockTarget.LockTable ? NumRecords + 1 : readKey24 + readKey51; long resultValue; - long expectedResult = (readKey24 + readKey51) * valueMult; + long expectedResult = (readKey24 + readKey51) * ValueMult; Status status; BucketLockTracker blt = new(); @@ -504,10 +523,10 @@ public void InMemorySimpleLockTxnTest([Values] ResultLockTarget resultLockTarget { if (status.IsPending) { - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); Assert.True(completedOutputs.Next()); readValue24 = completedOutputs.Current.Output; - Assert.AreEqual(24 * valueMult, readValue24); + Assert.AreEqual(24 * ValueMult, readValue24); Assert.False(completedOutputs.Next()); completedOutputs.Dispose(); } @@ -522,10 +541,10 @@ public void InMemorySimpleLockTxnTest([Values] ResultLockTarget resultLockTarget { if (status.IsPending) { - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); 
Assert.True(completedOutputs.Next()); readValue51 = completedOutputs.Current.Output; - Assert.AreEqual(51 * valueMult, readValue51); + Assert.AreEqual(51 * ValueMult, readValue51); Assert.False(completedOutputs.Next()); completedOutputs.Dispose(); } @@ -545,7 +564,7 @@ public void InMemorySimpleLockTxnTest([Values] ResultLockTarget resultLockTarget { if (status.IsPending) { - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); Assert.True(completedOutputs.Next()); resultValue = completedOutputs.Current.Output; Assert.AreEqual(expectedResult, resultValue); @@ -598,9 +617,9 @@ public void InMemoryLongLockTest([Values] ResultLockTarget resultLockTarget, [Va bool initialDestWillBeLockTable = resultLockTarget == ResultLockTarget.LockTable || flushMode == FlushMode.OnDisk; const int readKey24 = 24, readKey51 = 51, valueMult2 = 10; - long resultKey = initialDestWillBeLockTable ? numRecords + 1 : readKey24 + readKey51; + long resultKey = initialDestWillBeLockTable ? 
NumRecords + 1 : readKey24 + readKey51; long resultValue; - int expectedResult = (readKey24 + readKey51) * valueMult * valueMult2; + int expectedResult = (readKey24 + readKey51) * ValueMult * valueMult2; var useRMW = updateOp == UpdateOp.RMW; Status status; BucketLockTracker blt = new(); @@ -643,20 +662,20 @@ public void InMemoryLongLockTest([Values] ResultLockTarget resultLockTarget, [Va if (flushMode == FlushMode.OnDisk) { Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, readValue24) = GetSinglePendingResult(completedOutputs, out var recordMetadata); Assert.IsTrue(status.Found, status.ToString()); } else Assert.IsFalse(status.IsPending, status.ToString()); - Assert.AreEqual(readKey24 * valueMult, readValue24); + Assert.AreEqual(readKey24 * ValueMult, readValue24); // We just locked this above, but for FlushMode.OnDisk it will still be PENDING. 
status = luContext.Read(readKey51, out var readValue51); if (flushMode == FlushMode.OnDisk) { Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); Assert.True(completedOutputs.Next()); readValue51 = completedOutputs.Current.Output; Assert.False(completedOutputs.Next()); @@ -664,7 +683,7 @@ public void InMemoryLongLockTest([Values] ResultLockTarget resultLockTarget, [Va } else Assert.IsFalse(status.IsPending, status.ToString()); - Assert.AreEqual(readKey51 * valueMult, readValue51); + Assert.AreEqual(readKey51 * ValueMult, readValue51); if (!initialDestWillBeLockTable) { @@ -672,13 +691,13 @@ public void InMemoryLongLockTest([Values] ResultLockTarget resultLockTarget, [Va if (flushMode == FlushMode.OnDisk) { Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = luContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, initialResultValue) = GetSinglePendingResult(completedOutputs, out var recordMetadata); Assert.IsTrue(status.Found, status.ToString()); } else Assert.IsFalse(status.IsPending, status.ToString()); - Assert.AreEqual(resultKey * valueMult, initialResultValue); + Assert.AreEqual(resultKey * ValueMult, initialResultValue); } // Set the phase to Phase.INTERMEDIATE to test the non-Phase.REST blocks @@ -730,8 +749,8 @@ public void InMemoryDeleteTest([Values] ResultLockTarget resultLockTarget, [Valu BucketLockTracker blt = new(); - // SetUp also reads this to determine whether to supply ReadCacheSettings. If ReadCache is specified it wins over CopyToTail. - long resultKey = resultLockTarget == ResultLockTarget.LockTable ? numRecords + 1 : 75; + // SetUp also reads this to determine whether to supply ReadCache settings. If ReadCache is specified it wins over CopyToTail. 
+ long resultKey = resultLockTarget == ResultLockTarget.LockTable ? NumRecords + 1 : 75; Status status; var luContext = session.LockableUnsafeContext; @@ -841,15 +860,12 @@ void runLTransientLockOpThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { foreach (var key in enumKeys(rng)) - { - var rand = rng.Next(100); - if (rand < 33) - basicContext.Read(key); - else if (rand < 66) - basicContext.Upsert(key, key * valueMult); - else - basicContext.RMW(key, key * valueMult); - } + _ = rng.Next(100) switch + { + int rand when rand < 33 => basicContext.Read(key).status, + int rand when rand < 66 => basicContext.Upsert(key, key * ValueMult), + _ => basicContext.RMW(key, key * ValueMult) + }; } } @@ -869,7 +885,7 @@ void runLTransientLockOpThread(int tid) AssertTotalLockCounts(0, 0); } - FixedLengthLockableKeyStruct AddLockTableEntry(LockableUnsafeContext luContext, long key) + FixedLengthLockableKeyStruct AddLockTableEntry(LockableUnsafeContext luContext, long key) where TFunctions : ISessionFunctions { var keyVec = new[] { new FixedLengthLockableKeyStruct(key, LockType.Exclusive, luContext) }; @@ -885,7 +901,7 @@ FixedLengthLockableKeyStruct AddLockTableEntry(LockableUnsafeC return keyVec[0]; } - void VerifyAndUnlockSplicedInKey(LockableUnsafeContext luContext, long expectedKey) + void VerifyAndUnlockSplicedInKey(LockableUnsafeContext luContext, long expectedKey) where TFunctions : ISessionFunctions { // Scan to the end of the readcache chain and verify we inserted the value. 
@@ -921,7 +937,7 @@ public void VerifyLocksAfterReadAndCTTTest() var status = luContext.Read(ref key, ref input, ref output, ref readOptions, out _); Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePending(wait: true); + _ = luContext.CompletePending(wait: true); VerifyAndUnlockSplicedInKey(luContext, key); blt.Decrement(ref keyStruct); @@ -1007,12 +1023,12 @@ public void VerifyCountAfterUpsertToTailTest([Values] ChainTests.RecordRegion re FixedLengthLockableKeyStruct keyStruct = default; try { - if (recordRegion == ChainTests.RecordRegion.Immutable || recordRegion == ChainTests.RecordRegion.OnDisk) - keyStruct = AddLockTableEntry(luContext, useExistingKey); + if (recordRegion is ChainTests.RecordRegion.Immutable or ChainTests.RecordRegion.OnDisk) + keyStruct = AddLockTableEntry(luContext, UseExistingKey); else - keyStruct = AddLockTableEntry(luContext, useNewKey); + keyStruct = AddLockTableEntry(luContext, UseNewKey); blt.Increment(ref keyStruct); - var status = luContext.Upsert(keyStruct.Key, keyStruct.Key * valueMult); + var status = luContext.Upsert(keyStruct.Key, keyStruct.Key * ValueMult); Assert.IsTrue(status.Record.Created, status.ToString()); VerifyAndUnlockSplicedInKey(luContext, keyStruct.Key); @@ -1047,17 +1063,17 @@ public void VerifyCountAfterRMWToTailTest([Values] ChainTests.RecordRegion recor FixedLengthLockableKeyStruct keyStruct = default; try { - if (recordRegion == ChainTests.RecordRegion.Immutable || recordRegion == ChainTests.RecordRegion.OnDisk) + if (recordRegion is ChainTests.RecordRegion.Immutable or ChainTests.RecordRegion.OnDisk) { - keyStruct = AddLockTableEntry(luContext, useExistingKey); - var status = luContext.RMW(keyStruct.Key, keyStruct.Key * valueMult); + keyStruct = AddLockTableEntry(luContext, UseExistingKey); + var status = luContext.RMW(keyStruct.Key, keyStruct.Key * ValueMult); Assert.IsTrue(recordRegion == ChainTests.RecordRegion.OnDisk ? 
status.IsPending : status.Found); - luContext.CompletePending(wait: true); + _ = luContext.CompletePending(wait: true); } else { - keyStruct = AddLockTableEntry(luContext, useNewKey); - var status = luContext.RMW(keyStruct.Key, keyStruct.Key * valueMult); + keyStruct = AddLockTableEntry(luContext, UseNewKey); + var status = luContext.RMW(keyStruct.Key, keyStruct.Key * ValueMult); Assert.IsFalse(status.Found, status.ToString()); } blt.Increment(ref keyStruct); @@ -1094,9 +1110,9 @@ public void VerifyCountAfterDeleteToTailTest([Values] ChainTests.RecordRegion re FixedLengthLockableKeyStruct keyStruct = default; try { - if (recordRegion == ChainTests.RecordRegion.Immutable || recordRegion == ChainTests.RecordRegion.OnDisk) + if (recordRegion is ChainTests.RecordRegion.Immutable or ChainTests.RecordRegion.OnDisk) { - keyStruct = AddLockTableEntry(luContext, useExistingKey); + keyStruct = AddLockTableEntry(luContext, UseExistingKey); blt.Increment(ref keyStruct); var status = luContext.Delete(keyStruct.Key); @@ -1105,7 +1121,7 @@ public void VerifyCountAfterDeleteToTailTest([Values] ChainTests.RecordRegion re } else { - keyStruct = AddLockTableEntry(luContext, useNewKey); + keyStruct = AddLockTableEntry(luContext, UseNewKey); blt.Increment(ref keyStruct); var status = luContext.Delete(keyStruct.Key); Assert.IsFalse(status.Found, status.ToString()); @@ -1140,7 +1156,7 @@ public void LockAndUnlockInLockTableOnlyTest() FixedLengthLockableKeyStruct createKey(long key) => new(key, (key & 1) == 0 ? 
LockType.Exclusive : LockType.Shared, luContext); var rng = new Random(101); - var keyVec = Enumerable.Range(0, numRecords).Select(ii => createKey(rng.Next(numRecords))).ToArray(); + var keyVec = Enumerable.Range(0, NumRecords).Select(ii => createKey(rng.Next(NumRecords))).ToArray(); luContext.BeginUnsafe(); luContext.BeginLockable(); @@ -1190,7 +1206,7 @@ public void VerifyCountAfterReadOnlyToUpdateRecordTest([Values] UpdateOp updateO Populate(); store.Log.ShiftReadOnlyAddress(store.Log.TailAddress, wait: true); - static long getValue(long key) => key + valueMult; + static long getValue(long key) => key + ValueMult; var luContext = session.LockableUnsafeContext; luContext.BeginUnsafe(); @@ -1241,13 +1257,13 @@ public void LockNewRecordThenUpdateAndUnlockTest([Values] UpdateOp updateOp) using var session = store.NewSession>(new SimpleSimpleFunctions()); var luContext = session.LockableUnsafeContext; - int getValue(int key) => key + valueMult; + int getValue(int key) => key + ValueMult; // If we are testing Delete, then we need to have the records ON-DISK first; Delete is a no-op for unfound records. 
if (updateOp == UpdateOp.Delete) { - for (var key = numRecords; key < numRecords + numNewRecords; ++key) - Assert.IsFalse(this.bContext.Upsert(key, key * valueMult).IsPending); + for (var key = NumRecords; key < NumRecords + numNewRecords; ++key) + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); store.Log.FlushAndEvict(wait: true); } @@ -1265,7 +1281,7 @@ public void LockNewRecordThenUpdateAndUnlockTest([Values] UpdateOp updateOp) // We don't sleep in this test comparer.maxSleepMs = 0; - for (var key = numRecords; key < numRecords + numNewRecords; ++key) + for (var key = NumRecords; key < NumRecords + numNewRecords; ++key) { keyVec[0] = new(key, LockType.Exclusive, luContext); luContext.Lock(keyVec); @@ -1346,13 +1362,13 @@ public void LockNewRecordThenUnlockThenUpdateTest([Values] UpdateOp updateOp) using var updateSession = store.NewSession>(new SimpleSimpleFunctions()); var basicContext = updateSession.BasicContext; - int getValue(int key) => key + valueMult; + int getValue(int key) => key + ValueMult; // If we are testing Delete, then we need to have the records ON-DISK first; Delete is a no-op for unfound records. 
if (updateOp == UpdateOp.Delete) { - for (var key = numRecords; key < numRecords + numNewRecords; ++key) - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + for (var key = NumRecords; key < NumRecords + numNewRecords; ++key) + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); store.Log.FlushAndEvict(wait: true); } @@ -1373,7 +1389,7 @@ public void LockNewRecordThenUnlockThenUpdateTest([Values] UpdateOp updateOp) try { - for (var key = numRecords; key < numRecords + numNewRecords; ++key) + for (var key = NumRecords; key < NumRecords + numNewRecords; ++key) { for (var iter = 0; iter < 2; ++iter) { diff --git a/libs/storage/Tsavorite/cs/test/LogAndDeviceConfigTests.cs b/libs/storage/Tsavorite/cs/test/LogAndDeviceConfigTests.cs index 6f8459cc6a..fcde7223e7 100644 --- a/libs/storage/Tsavorite/cs/test/LogAndDeviceConfigTests.cs +++ b/libs/storage/Tsavorite/cs/test/LogAndDeviceConfigTests.cs @@ -26,7 +26,7 @@ public void Setup() TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); // Create devices \ log for test - device = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "DeviceConfig"), deleteOnClose: true, recoverDevice: true, preallocateFile: true, capacity: 1 << 30); + device = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "DeviceConfig"), deleteOnClose: true, recoverDevice: true, preallocateFile: true, capacity: 1L << 30); log = new TsavoriteLog(new TsavoriteLogSettings { LogDevice = device, PageSizeBits = 80, MemorySizeBits = 20, GetMemory = null, SegmentSizeBits = 80, MutableFraction = 0.2, LogCommitManager = null }); } diff --git a/libs/storage/Tsavorite/cs/test/LogShiftTailStressTest.cs b/libs/storage/Tsavorite/cs/test/LogShiftTailStressTest.cs index b6c4e31256..b30f750194 100644 --- a/libs/storage/Tsavorite/cs/test/LogShiftTailStressTest.cs +++ b/libs/storage/Tsavorite/cs/test/LogShiftTailStressTest.cs @@ -23,7 +23,7 @@ internal class LogShiftTailStressTest : TsavoriteLogTestBase public void 
TsavoriteLogShiftTailStressTest() { // Get an excruciatingly slow storage device to maximize chance of clogging the flush pipeline - device = new LocalMemoryDevice(1L << 28, 1 << 28, 2, sector_size: 512, latencyMs: 50, fileName: "stress.log"); + device = new LocalMemoryDevice(1L << 28, 1L << 28, 2, sector_size: 512, latencyMs: 50, fileName: "stress.log"); var logSettings = new TsavoriteLogSettings { LogDevice = device, LogChecksum = LogChecksumType.None, LogCommitManager = manager, SegmentSizeBits = 28 }; log = new TsavoriteLog(logSettings); diff --git a/libs/storage/Tsavorite/cs/test/LowMemoryTests.cs b/libs/storage/Tsavorite/cs/test/LowMemoryTests.cs index 8209565488..ab58838f48 100644 --- a/libs/storage/Tsavorite/cs/test/LowMemoryTests.cs +++ b/libs/storage/Tsavorite/cs/test/LowMemoryTests.cs @@ -7,24 +7,34 @@ namespace Tsavorite.test.LowMemory { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + [TestFixture] public class LowMemoryTests { IDevice log; - TsavoriteKV store1; - const int numOps = 2000; + TsavoriteKV store1; + const int NumOps = 2000; [SetUp] public void Setup() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); log = new LocalMemoryDevice(1L << 28, 1L << 25, 1, latencyMs: 20, fileName: Path.Join(TestUtils.MethodTestDir, "test.log")); - Directory.CreateDirectory(TestUtils.MethodTestDir); - store1 = new TsavoriteKV - (1L << 10, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 12, SegmentSizeBits = 26 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + _ = Directory.CreateDirectory(TestUtils.MethodTestDir); + store1 = new(new() + { + IndexSize = 1L << 16, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 12, + SegmentSize = 1L << 26, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, 
storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -37,11 +47,11 @@ public void TearDown() TestUtils.DeleteDirectory(TestUtils.MethodTestDir); } - private static void Populate(ClientSession> s1) + private static void Populate(ClientSession, LongStoreFunctions, LongAllocator> s1) { var bContext1 = s1.BasicContext; - for (long key = 0; key < numOps; key++) - bContext1.Upsert(ref key, ref key); + for (long key = 0; key < NumOps; key++) + _ = bContext1.Upsert(ref key, ref key); } [Test] @@ -56,7 +66,7 @@ public void LowMemConcurrentUpsertReadTest() // Read all keys var numCompleted = 0; - for (long key = 0; key < numOps; key++) + for (long key = 0; key < NumOps; key++) { var (status, output) = bContext1.Read(key); if (!status.IsPending) @@ -67,7 +77,7 @@ public void LowMemConcurrentUpsertReadTest() } } - bContext1.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext1.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) { while (completedOutputs.Next()) @@ -77,7 +87,7 @@ public void LowMemConcurrentUpsertReadTest() Assert.AreEqual(completedOutputs.Current.Key, completedOutputs.Current.Output); } } - Assert.AreEqual(numOps, numCompleted, "numCompleted"); + Assert.AreEqual(NumOps, numCompleted, "numCompleted"); } [Test] @@ -92,21 +102,21 @@ public void LowMemConcurrentUpsertRMWReadTest([Values] bool completeSync) // RMW all keys int numPending = 0; - for (long key = 0; key < numOps; key++) + for (long key = 0; key < NumOps; key++) { var status = bContext1.RMW(ref key, ref key); if (status.IsPending && (++numPending % 256) == 0) { - bContext1.CompletePending(wait: true); + _ = bContext1.CompletePending(wait: true); numPending = 0; } } if (numPending > 0) - bContext1.CompletePending(wait: true); + _ = bContext1.CompletePending(wait: true); // Then Read all keys var numCompleted = 0; - for (long key = 0; key < numOps; key++) + for (long key = 0; key < NumOps; key++) { var (status, 
output) = bContext1.Read(key); if (!status.IsPending) @@ -117,7 +127,7 @@ public void LowMemConcurrentUpsertRMWReadTest([Values] bool completeSync) } } - bContext1.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext1.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) { while (completedOutputs.Next()) @@ -127,7 +137,7 @@ public void LowMemConcurrentUpsertRMWReadTest([Values] bool completeSync) Assert.AreEqual(completedOutputs.Current.Key * 2, completedOutputs.Current.Output); } } - Assert.AreEqual(numOps, numCompleted, "numCompleted"); + Assert.AreEqual(NumOps, numCompleted, "numCompleted"); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/ManagedLocalStorageTests.cs b/libs/storage/Tsavorite/cs/test/ManagedLocalStorageTests.cs index d8e8ce7b84..10522ac03c 100644 --- a/libs/storage/Tsavorite/cs/test/ManagedLocalStorageTests.cs +++ b/libs/storage/Tsavorite/cs/test/ManagedLocalStorageTests.cs @@ -28,7 +28,7 @@ public void Setup() device = new ManagedLocalStorageDevice(Path.Join(TestUtils.MethodTestDir, "ManagedLocalStore.log"), deleteOnClose: true); log = new TsavoriteLog(new TsavoriteLogSettings { LogDevice = device, PageSizeBits = 12, MemorySizeBits = 14 }); - deviceFullParams = new ManagedLocalStorageDevice(Path.Join(TestUtils.MethodTestDir, "ManagedLocalStoreFullParams.log"), deleteOnClose: false, recoverDevice: true, preallocateFile: true, capacity: 1 << 30); + deviceFullParams = new ManagedLocalStorageDevice(Path.Join(TestUtils.MethodTestDir, "ManagedLocalStoreFullParams.log"), deleteOnClose: false, recoverDevice: true, preallocateFile: true, capacity: 1L << 30); logFullParams = new TsavoriteLog(new TsavoriteLogSettings { LogDevice = device, PageSizeBits = 12, MemorySizeBits = 14 }); } diff --git a/libs/storage/Tsavorite/cs/test/MiscTests.cs b/libs/storage/Tsavorite/cs/test/MiscTests.cs index 074a9bab38..3ee072b57c 100644 --- 
a/libs/storage/Tsavorite/cs/test/MiscTests.cs +++ b/libs/storage/Tsavorite/cs/test/MiscTests.cs @@ -9,10 +9,15 @@ namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class MiscTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log, objlog; [SetUp] @@ -22,11 +27,17 @@ public void Setup() log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "MiscTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "MiscTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 15, PageSizeBits = 10 }, - serializerSettings: new SerializerSettings { valueSerializer = () => new MyValueSerializer() } - ); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 15, + PageSize = 1L << 10 + }, StoreFunctions.Create(IntKeyComparer.Instance, null, () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -53,16 +64,16 @@ public void MixedTest1() var input1 = new MyInput { value = 23 }; MyOutput output = new(); - bContext.RMW(ref key, ref input1, Empty.Default); + _ = bContext.RMW(ref key, ref input1, Empty.Default); int key2 = 8999999; var input2 = new MyInput { value = 24 }; - bContext.RMW(ref key2, ref input2, Empty.Default); + _ = bContext.RMW(ref key2, ref input2, Empty.Default); - bContext.Read(ref key, ref input1, ref output, Empty.Default); + _ = bContext.Read(ref key, ref input1, ref output, Empty.Default); Assert.AreEqual(input1.value, output.value.value); - bContext.Read(ref key2, ref input2, ref output, Empty.Default); + _ = 
bContext.Read(ref key2, ref input2, ref output, Empty.Default); Assert.AreEqual(input2.value, output.value.value); } @@ -76,7 +87,7 @@ public void MixedTest2() for (int i = 0; i < 2000; i++) { var value = new MyValue { value = i }; - bContext.Upsert(ref i, ref value, Empty.Default); + _ = bContext.Upsert(ref i, ref value, Empty.Default); } var key2 = 23; @@ -86,7 +97,7 @@ public void MixedTest2() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, _) = GetSinglePendingResult(outputs); } Assert.IsTrue(status.Found); @@ -98,7 +109,7 @@ public void MixedTest2() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, _) = GetSinglePendingResult(outputs); } Assert.IsFalse(status.Found); @@ -112,16 +123,22 @@ public void ForceRCUAndRecover([Values(UpdateOp.Upsert, UpdateOp.Delete)] Update // FunctionsCopyOnWrite var log = default(IDevice); - TsavoriteKV store = default; - ClientSession session = default; + TsavoriteKV store = default; + ClientSession session = default; try { var checkpointDir = Path.Join(MethodTestDir, "checkpoints"); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog1.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, new LogSettings { LogDevice = log, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 29, + CheckpointDir = checkpointDir + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(copyOnWrite); var bContext = session.BasicContext; @@ -162,17 +179,23 @@ public void ForceRCUAndRecover([Values(UpdateOp.Upsert, UpdateOp.Delete)] Update 
status = bContext.Read(ref key, ref output); Assert.IsTrue(status.Found, status.ToString()); - store.TryInitiateFullCheckpoint(out Guid token, CheckpointType.Snapshot); + _ = store.TryInitiateFullCheckpoint(out Guid token, CheckpointType.Snapshot); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); session.Dispose(); store.Dispose(); - store = new TsavoriteKV - (128, new LogSettings { LogDevice = log, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 29, + CheckpointDir = checkpointDir + }, StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); - store.Recover(token); + _ = store.Recover(token); session = store.NewSession(copyOnWrite); using (var iterator = store.Log.Scan(store.Log.BeginAddress, store.Log.TailAddress)) diff --git a/libs/storage/Tsavorite/cs/test/ModifiedBitTests.cs b/libs/storage/Tsavorite/cs/test/ModifiedBitTests.cs index 19af9d173d..b4e92eb476 100644 --- a/libs/storage/Tsavorite/cs/test/ModifiedBitTests.cs +++ b/libs/storage/Tsavorite/cs/test/ModifiedBitTests.cs @@ -10,25 +10,31 @@ namespace Tsavorite.test.ModifiedBit { - internal class ModifiedBitTestComparer : ITsavoriteEqualityComparer + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ internal struct ModifiedBitTestComparer : IKeyComparer { - public bool Equals(ref int k1, ref int k2) => k1 == k2; + public readonly bool Equals(ref int k1, ref int k2) => k1 == k2; - public long GetHashCode64(ref int k) => Utility.GetHashCode(k); + public readonly long GetHashCode64(ref int k) => Utility.GetHashCode(k); } +} + +namespace Tsavorite.test.ModifiedBit +{ + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; [TestFixture] class ModifiedBitTests { - const int numRecords = 1000; - const int valueMult = 1_000_000; - + const int NumRecords = 1000; + const int ValueMult = 1_000_000; ModifiedBitTestComparer comparer; - private TsavoriteKV store; - private ClientSession> session; - private BasicContext> bContext; + private TsavoriteKV store; + private ClientSession, IntStoreFunctions, IntAllocator> session; + private BasicContext, IntStoreFunctions, IntAllocator> bContext; private IDevice log; [SetUp] @@ -36,7 +42,15 @@ public void Setup() { log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: false); comparer = new ModifiedBitTestComparer(); - store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 12, MemorySizeBits = 22 }, comparer: comparer); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + PageSize = 1L << 12, + MemorySize = 1L << 22 + }, StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession>(new SimpleSimpleFunctions()); bContext = session.BasicContext; } @@ -54,25 +68,25 @@ public void TearDown() void Populate() { - for (int key = 0; key < numRecords; key++) - Assert.IsFalse(bContext.Upsert(key, key * valueMult).IsPending); + for (int key = 0; key < NumRecords; key++) + Assert.IsFalse(bContext.Upsert(key, key * ValueMult).IsPending); } - void AssertLockandModified(LockableUnsafeContext> luContext, int key, bool xlock, 
bool slock, bool modified = false) + void AssertLockandModified(LockableUnsafeContext, IntStoreFunctions, IntAllocator> luContext, int key, bool xlock, bool slock, bool modified = false) { OverflowBucketLockTableTests.AssertLockCounts(store, ref key, xlock, slock); var isM = luContext.IsModified(key); Assert.AreEqual(modified, isM, "modified mismatch"); } - void AssertLockandModified(LockableContext> luContext, int key, bool xlock, bool slock, bool modified = false) + void AssertLockandModified(LockableContext, IntStoreFunctions, IntAllocator> luContext, int key, bool xlock, bool slock, bool modified = false) { OverflowBucketLockTableTests.AssertLockCounts(store, ref key, xlock, slock); var isM = luContext.IsModified(key); Assert.AreEqual(modified, isM, "modified mismatch"); } - void AssertLockandModified(ClientSession> session, int key, bool xlock, bool slock, bool modified = false) + void AssertLockandModified(ClientSession, IntStoreFunctions, IntAllocator> session, int key, bool xlock, bool slock, bool modified = false) { var luContext = session.LockableUnsafeContext; luContext.BeginUnsafe(); @@ -90,7 +104,7 @@ public void LockAndNotModify() { Populate(); Random r = new(100); - int key = r.Next(numRecords); + int key = r.Next(NumRecords); bContext.ResetModified(key); var lContext = session.LockableContext; @@ -120,7 +134,7 @@ public void LockAndNotModify() public void ResetModifyForNonExistingKey() { Populate(); - int key = numRecords + 100; + int key = NumRecords + 100; bContext.ResetModified(key); AssertLockandModified(session, key, xlock: false, slock: false, modified: false); } @@ -131,7 +145,7 @@ public void ModifyClientSession([Values(true, false)] bool flushToDisk, [Values] { Populate(); - int key = numRecords - 500; + int key = NumRecords - 500; int value = 14; bContext.ResetModified(key); AssertLockandModified(session, key, xlock: false, slock: false, modified: false); @@ -160,13 +174,13 @@ public void ModifyClientSession([Values(true, false)] bool 
flushToDisk, [Values] { case UpdateOp.RMW: Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); break; default: Assert.IsTrue(status.NotFound); break; } - (status, var _) = bContext.Read(key); + (status, _) = bContext.Read(key); Assert.IsTrue(status.Found || updateOp == UpdateOp.Delete); } @@ -182,7 +196,7 @@ public void ModifyLUC([Values(true, false)] bool flushToDisk, [Values] UpdateOp { Populate(); - int key = numRecords - 500; + int key = NumRecords - 500; int value = 14; bContext.ResetModified(key); var luContext = session.LockableUnsafeContext; @@ -225,7 +239,7 @@ public void ModifyLUC([Values(true, false)] bool flushToDisk, [Values] UpdateOp { case UpdateOp.RMW: Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePending(wait: true); + _ = luContext.CompletePending(wait: true); break; default: Assert.IsTrue(status.NotFound); @@ -239,7 +253,7 @@ public void ModifyLUC([Values(true, false)] bool flushToDisk, [Values] UpdateOp { keyVec[0].LockType = LockType.Shared; luContext.Lock(keyVec); - (status, var _) = luContext.Read(key); + (status, _) = luContext.Read(key); Assert.AreEqual(updateOp != UpdateOp.Delete, status.Found, status.ToString()); luContext.Unlock(keyVec); } @@ -256,7 +270,7 @@ public void ModifyUC([Values(true, false)] bool flushToDisk, [Values] UpdateOp u { Populate(); - int key = numRecords - 500; + int key = NumRecords - 500; int value = 14; bContext.ResetModified(key); AssertLockandModified(session, key, xlock: false, slock: false, modified: false); @@ -288,13 +302,13 @@ public void ModifyUC([Values(true, false)] bool flushToDisk, [Values] UpdateOp u { case UpdateOp.RMW: Assert.IsTrue(status.IsPending, status.ToString()); - unsafeContext.CompletePending(wait: true); + _ = unsafeContext.CompletePending(wait: true); break; default: Assert.IsTrue(status.NotFound); break; } - (status, var _) = unsafeContext.Read(key); + (status, _) = 
unsafeContext.Read(key); Assert.IsTrue(status.Found || updateOp == UpdateOp.Delete); } unsafeContext.EndUnsafe(); @@ -308,7 +322,7 @@ public void ModifyLC([Values(true, false)] bool flushToDisk, [Values] UpdateOp u { Populate(); - int key = numRecords - 500; + int key = NumRecords - 500; int value = 14; bContext.ResetModified(key); var lContext = session.LockableContext; @@ -345,7 +359,7 @@ public void ModifyLC([Values(true, false)] bool flushToDisk, [Values] UpdateOp u { case UpdateOp.RMW: Assert.IsTrue(status.IsPending, status.ToString()); - lContext.CompletePending(wait: true); + _ = lContext.CompletePending(wait: true); break; default: Assert.IsTrue(status.NotFound); @@ -359,7 +373,7 @@ public void ModifyLC([Values(true, false)] bool flushToDisk, [Values] UpdateOp u { keyVec[0].LockType = LockType.Shared; lContext.Lock(keyVec); - (status, var _) = lContext.Read(key); + (status, _) = lContext.Read(key); Assert.AreEqual(updateOp != UpdateOp.Delete, status.Found, status.ToString()); lContext.Unlock(keyVec); } @@ -392,7 +406,7 @@ public void CopyToTailTest() // Check Read Copy to Tail resets the modified var status = luContext.Read(ref key, ref input, ref output, ref readOptions, out _); Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePending(wait: true); + _ = luContext.CompletePending(wait: true); luContext.Unlock(keyVec); AssertLockandModified(luContext, key, xlock: false, slock: false, modified: true); @@ -403,7 +417,7 @@ public void CopyToTailTest() luContext.Lock(keyVec); status = luContext.Read(ref key, ref input, ref output, ref readOptions, out _); Assert.IsTrue(status.IsPending, status.ToString()); - luContext.CompletePending(wait: true); + _ = luContext.CompletePending(wait: true); AssertLockandModified(luContext, key, xlock: true, slock: false, modified: true); luContext.Unlock(keyVec); AssertLockandModified(luContext, key, xlock: false, slock: false, modified: true); diff --git 
a/libs/storage/Tsavorite/cs/test/MoreLogCompactionTests.cs b/libs/storage/Tsavorite/cs/test/MoreLogCompactionTests.cs index f8693b38dd..c8d019ea1d 100644 --- a/libs/storage/Tsavorite/cs/test/MoreLogCompactionTests.cs +++ b/libs/storage/Tsavorite/cs/test/MoreLogCompactionTests.cs @@ -7,10 +7,13 @@ namespace Tsavorite.test { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + [TestFixture] internal class MoreLogCompactionTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; [SetUp] @@ -18,8 +21,15 @@ public void Setup() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "MoreLogCompactionTests.log"), deleteOnClose: true); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9 }); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9 + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -50,11 +60,11 @@ public void DeleteCompactLookup([Values] CompactionType compactionType) { if (i == 1010) compactUntil = store.Log.TailAddress; - bContext.Upsert(i, i); + _ = bContext.Upsert(i, i); } for (int i = 0; i < totalRecords / 2; i++) - bContext.Delete(i); + _ = bContext.Delete(i); compactUntil = session.Compact(compactUntil, compactionType); @@ -69,7 +79,7 @@ public void DeleteCompactLookup([Values] CompactionType compactionType) (var status, var output) = bContext2.Read(i); if (status.IsPending) { - bContext2.CompletePendingWithOutputs(out var completedOutputs, true); + _ = bContext2.CompletePendingWithOutputs(out var completedOutputs, true); Assert.IsTrue(completedOutputs.Next()); (status, output) = (completedOutputs.Current.Status, completedOutputs.Current.Output); Assert.IsFalse(completedOutputs.Next()); 
diff --git a/libs/storage/Tsavorite/cs/test/NativeReadCacheTests.cs b/libs/storage/Tsavorite/cs/test/NativeReadCacheTests.cs index ae517f9f38..5cc3ef1123 100644 --- a/libs/storage/Tsavorite/cs/test/NativeReadCacheTests.cs +++ b/libs/storage/Tsavorite/cs/test/NativeReadCacheTests.cs @@ -7,20 +7,32 @@ namespace Tsavorite.test.ReadCacheTests { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] public class NativeReadCacheTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; [SetUp] public void Setup() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); - var readCacheSettings = new ReadCacheSettings { MemorySizeBits = 15, PageSizeBits = 10 }; log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "NativeReadCacheTests.log"), deleteOnClose: true); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10, ReadCacheSettings = readCacheSettings }); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10, + ReadCacheMemorySize = 1L << 15, + ReadCachePageSize = 1L << 10, + ReadCacheEnabled = true + }, StoreFunctions.Create(new KeyStruct.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] diff --git a/libs/storage/Tsavorite/cs/test/NeedCopyUpdateTests.cs b/libs/storage/Tsavorite/cs/test/NeedCopyUpdateTests.cs index d7db81c2d2..fc3cfbd2b1 100644 --- a/libs/storage/Tsavorite/cs/test/NeedCopyUpdateTests.cs +++ b/libs/storage/Tsavorite/cs/test/NeedCopyUpdateTests.cs @@ -4,14 +4,21 @@ using System.IO; using NUnit.Framework; using Tsavorite.core; +using static Tsavorite.test.NeedCopyUpdateTests; using static Tsavorite.test.TestUtils; namespace Tsavorite.test { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + + using RMWValueAllocator = 
GenericAllocator>>; + using RMWValueStoreFunctions = StoreFunctions>; + [TestFixture] internal class NeedCopyUpdateTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log, objlog; [SetUp] @@ -21,11 +28,17 @@ public void Setup() log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "tests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "tests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 15, PageSizeBits = 10 }, - serializerSettings: new SerializerSettings { valueSerializer = () => new RMWValueSerializer() } - ); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 15, + PageSize = 1L << 10 + }, StoreFunctions.Create(IntKeyComparer.Instance, keySerializerCreator: null, () => new RMWValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -46,13 +59,13 @@ public void TearDown() public void TryAddTest() { TryAddTestFunctions functions = new(); - using var session = store.NewSession(functions); + using var session = store.NewSession(functions); var bContext = session.BasicContext; Status status; var key = 1; - var value1 = new RMWValue { value = 1 }; - var value2 = new RMWValue { value = 2 }; + var value1 = new RMWValueObj { value = 1 }; + var value2 = new RMWValueObj { value = 2 }; functions.noNeedInitialUpdater = true; status = bContext.RMW(ref key, ref value1); // needInitialUpdater false + NOTFOUND @@ -74,71 +87,71 @@ public void TryAddTest() store.Log.FlushAndEvict(true); status = bContext.RMW(ref key, ref value2, new(StatusCode.Found)); // PENDING + NeedCopyUpdate + Found Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePendingWithOutputs(out var outputs, true); + _ = 
bContext.CompletePendingWithOutputs(out var outputs, true); - var output = new RMWValue(); + var output = new RMWValueObj(); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(status.Found, status.ToString()); // NeedCopyUpdate returns false, so RMW returns simply Found // Test stored value. Should be value1 status = bContext.Read(ref key, ref value1, ref output, new(StatusCode.Found)); Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePending(true); + _ = bContext.CompletePending(true); status = bContext.Delete(ref key); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); - bContext.CompletePending(true); + _ = bContext.CompletePending(true); store.Log.FlushAndEvict(true); status = bContext.RMW(ref key, ref value2, new(StatusCode.NotFound | StatusCode.CreatedRecord)); // PENDING + InitialUpdater + NOTFOUND Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } - internal class RMWValue + internal class RMWValueObj { public int value; public bool flag; } - internal class RMWValueSerializer : BinaryObjectSerializer + internal class RMWValueSerializer : BinaryObjectSerializer { - public override void Serialize(ref RMWValue value) + public override void Serialize(ref RMWValueObj value) { writer.Write(value.value); } - public override void Deserialize(out RMWValue value) + public override void Deserialize(out RMWValueObj value) { - value = new RMWValue + value = new RMWValueObj { value = reader.ReadInt32() }; } } - internal class TryAddTestFunctions : TryAddFunctions + internal class TryAddTestFunctions : TryAddFunctions { internal bool noNeedInitialUpdater; - public override bool NeedInitialUpdate(ref int key, ref RMWValue input, ref RMWValue output, ref RMWInfo rmwInfo) + public override bool NeedInitialUpdate(ref int key, ref RMWValueObj input, ref RMWValueObj output, ref RMWInfo rmwInfo) { return !noNeedInitialUpdater && 
base.NeedInitialUpdate(ref key, ref input, ref output, ref rmwInfo); } - public override bool InitialUpdater(ref int key, ref RMWValue input, ref RMWValue value, ref RMWValue output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InitialUpdater(ref int key, ref RMWValueObj input, ref RMWValueObj value, ref RMWValueObj output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { input.flag = true; - base.InitialUpdater(ref key, ref input, ref value, ref output, ref rmwInfo, ref recordInfo); + _ = base.InitialUpdater(ref key, ref input, ref value, ref output, ref rmwInfo, ref recordInfo); return true; } - public override bool CopyUpdater(ref int key, ref RMWValue input, ref RMWValue oldValue, ref RMWValue newValue, ref RMWValue output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool CopyUpdater(ref int key, ref RMWValueObj input, ref RMWValueObj oldValue, ref RMWValueObj newValue, ref RMWValueObj output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { Assert.Fail("CopyUpdater"); return false; } - public override void RMWCompletionCallback(ref int key, ref RMWValue input, ref RMWValue output, Status ctx, Status status, RecordMetadata recordMetadata) + public override void RMWCompletionCallback(ref int key, ref RMWValueObj input, ref RMWValueObj output, Status ctx, Status status, RecordMetadata recordMetadata) { Assert.AreEqual(ctx, status); @@ -146,7 +159,7 @@ public override void RMWCompletionCallback(ref int key, ref RMWValue input, ref Assert.IsTrue(input.flag); // InitialUpdater is called. 
} - public override void ReadCompletionCallback(ref int key, ref RMWValue input, ref RMWValue output, Status ctx, Status status, RecordMetadata recordMetadata) + public override void ReadCompletionCallback(ref int key, ref RMWValueObj input, ref RMWValueObj output, Status ctx, Status status, RecordMetadata recordMetadata) { Assert.AreEqual(output.value, input.value); } @@ -156,11 +169,11 @@ public override void ReadCompletionCallback(ref int key, ref RMWValue input, ref [TestFixture] internal class NeedCopyUpdateTestsSinglePage { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; - const int pageSizeBits = 16; - const int recsPerPage = (1 << pageSizeBits) / 24; // 24 bits in RecordInfo, key, value + const int PageSizeBits = 16; + const int RecsPerPage = (1 << PageSizeBits) / 24; // 24 bits in RecordInfo, key, value [SetUp] public void Setup() @@ -168,8 +181,16 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log"), deleteOnClose: true); - store = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = pageSizeBits, PageSizeBits = pageSizeBits }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << PageSizeBits, + PageSize = 1L << PageSizeBits + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -196,7 +217,7 @@ public void CopyUpdateFromHeadReadOnlyPageTest() // caused the HeadAddress to be moved above logicalAddress in CreateNewRecordRMW. 
const int padding = 2; - for (int key = 0; key < recsPerPage - padding; key++) + for (int key = 0; key < RecsPerPage - padding; key++) { var status = bContext.RMW(key, key << 32 + key); Assert.IsTrue(status.IsCompletedSuccessfully, status.ToString()); @@ -205,11 +226,11 @@ public void CopyUpdateFromHeadReadOnlyPageTest() store.Log.ShiftReadOnlyAddress(store.Log.TailAddress, wait: true); // This should trigger CopyUpdater, after flushing the oldest page (closest to HeadAddress). - for (int key = 0; key < recsPerPage - padding; key++) + for (int key = 0; key < RecsPerPage - padding; key++) { var status = bContext.RMW(key, key << 32 + key); if (status.IsPending) - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); } } diff --git a/libs/storage/Tsavorite/cs/test/ObjectReadCacheTests.cs b/libs/storage/Tsavorite/cs/test/ObjectReadCacheTests.cs index 8ccd83fbf7..dfefaad30b 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectReadCacheTests.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectReadCacheTests.cs @@ -7,25 +7,35 @@ namespace Tsavorite.test.ReadCacheTests { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class ObjectReadCacheTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log, objlog; [SetUp] public void Setup() { TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); - var readCacheSettings = new ReadCacheSettings { MemorySizeBits = 15, PageSizeBits = 10 }; log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "ObjectReadCacheTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "ObjectReadCacheTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MemorySizeBits = 15, PageSizeBits = 10, ReadCacheSettings = readCacheSettings }, - serializerSettings: new SerializerSettings { 
keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } - ); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MemorySize = 1L << 15, + PageSize = 1L << 10, + ReadCacheMemorySize = 1L << 15, + ReadCachePageSize = 1L << 10, + ReadCacheEnabled = true + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer(), DefaultRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] diff --git a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest.cs b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest.cs index afdc0b8802..056d808ec7 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest.cs @@ -2,6 +2,7 @@ // Licensed under the MIT license. using System; +using System.Diagnostics; using System.IO; using System.Threading.Tasks; using NUnit.Framework; @@ -9,6 +10,9 @@ namespace Tsavorite.test.recovery.objects { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + internal struct StructTuple { public T1 Item1; @@ -18,12 +22,12 @@ internal struct StructTuple [TestFixture] internal class ObjectRecoveryTests { - const long numUniqueKeys = (1 << 14); - const long keySpace = (1L << 14); - const long numOps = (1L << 19); - const long completePendingInterval = (1L << 10); - const long checkpointInterval = (1L << 16); - private TsavoriteKV store; + const long NumUniqueKeys = 1L << 14; + const long KeySpace = 1L << 14; + const long NumOps = 1L << 19; + const long CompletePendingInterval = 1L << 10; + const long CheckpointInterval = 1L << 16; + private TsavoriteKV store; private Guid token; private IDevice log, objlog; @@ -38,13 +42,15 @@ public void Setup(bool deleteDir) log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "ObjectRecoveryTests.log"), false); objlog = 
Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "ObjectRecoveryTests.obj.log"), false); - store = new TsavoriteKV - ( - keySpace, - new LogSettings { LogDevice = log, ObjectLogDevice = objlog }, - new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir }, - new SerializerSettings { keySerializer = () => new AdIdSerializer(), valueSerializer = () => new NumClicksSerializer() } - ); + store = new(new() + { + IndexSize = KeySpace, + LogDevice = log, + ObjectLogDevice = objlog, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(new AdIdObj.Comparer(), () => new AdIdObj.Serializer(), () => new NumClicksObj.Serializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -77,9 +83,9 @@ public async ValueTask ObjectRecoveryTest1([Values] bool isAsync) PrepareToRecover(); if (isAsync) - await store.RecoverAsync(token, token); + _ = await store.RecoverAsync(token, token); else - store.Recover(token, token); + _ = store.Recover(token, token); Verify(token, token); } @@ -87,13 +93,13 @@ public async ValueTask ObjectRecoveryTest1([Values] bool isAsync) public unsafe void Populate() { // Prepare the dataset - var inputArray = new StructTuple[numOps]; - for (int i = 0; i < numOps; i++) + var inputArray = new StructTuple[NumOps]; + for (int i = 0; i < NumOps; i++) { - inputArray[i] = new StructTuple + inputArray[i] = new StructTuple { - Item1 = new AdId { adId = i % numUniqueKeys }, - Item2 = new Input { numClicks = new NumClicks { numClicks = 1 } } + Item1 = new AdIdObj { adId = i % NumUniqueKeys }, + Item2 = new Input { numClicks = new NumClicksObj { numClicks = 1 } } }; } @@ -103,11 +109,11 @@ public unsafe void Populate() // Process the batch of input data bool first = true; - for (int i = 0; i < numOps; i++) + for (int i = 0; i < NumOps; i++) { - bContext.RMW(ref inputArray[i].Item1, ref inputArray[i].Item2, Empty.Default); + _ = bContext.RMW(ref inputArray[i].Item1, ref 
inputArray[i].Item2, Empty.Default); - if ((i + 1) % checkpointInterval == 0) + if ((i + 1) % CheckpointInterval == 0) { if (first) while (!store.TryInitiateFullCheckpoint(out token, CheckpointType.Snapshot)) ; @@ -119,28 +125,28 @@ public unsafe void Populate() first = false; } - if (i % completePendingInterval == 0) + if (i % CompletePendingInterval == 0) { - bContext.CompletePending(false, false); + _ = bContext.CompletePending(false, false); } } // Make sure operations are completed - bContext.CompletePending(true); + _ = bContext.CompletePending(true); session.Dispose(); } public unsafe void Verify(Guid cprVersion, Guid indexVersion) { // Create array for reading - var inputArray = new StructTuple[numUniqueKeys]; - for (int i = 0; i < numUniqueKeys; i++) + var inputArray = new StructTuple[NumUniqueKeys]; + for (int i = 0; i < NumUniqueKeys; i++) { - inputArray[i] = new StructTuple + inputArray[i] = new StructTuple { - Item1 = new AdId { adId = i }, - Item2 = new Input { numClicks = new NumClicks { numClicks = 0 } } + Item1 = new AdIdObj { adId = i }, + Item2 = new Input { numClicks = new NumClicksObj { numClicks = 0 } } }; } @@ -149,14 +155,14 @@ public unsafe void Verify(Guid cprVersion, Guid indexVersion) Input input = default; // Issue read requests - for (var i = 0; i < numUniqueKeys; i++) + for (var i = 0; i < NumUniqueKeys; i++) { Output output = new(); - bContext.Read(ref inputArray[i].Item1, ref input, ref output, Empty.Default); + _ = bContext.Read(ref inputArray[i].Item1, ref input, ref output, Empty.Default); } // Complete all pending requests - bContext.CompletePending(true); + _ = bContext.CompletePending(true); // Release session.Dispose(); diff --git a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest2.cs b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest2.cs index 3988891cf1..38eea97234 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest2.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest2.cs @@ -9,6 +9,9 @@ namespace 
Tsavorite.test.recovery.objects { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] public class ObjectRecoveryTests2 { @@ -37,64 +40,61 @@ public async ValueTask ObjectRecoveryTest2( [Values] bool isAsync) { this.iterations = iterations; - Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV h, out MyContext context); + Prepare(out IDevice log, out IDevice objlog, out var store, out MyContext context); - var session1 = h.NewSession(new MyFunctions()); - Write(session1, context, h, checkpointType); + var session1 = store.NewSession(new MyFunctions()); + Write(session1, context, store, checkpointType); Read(session1, context, false); session1.Dispose(); - h.TryInitiateFullCheckpoint(out _, checkpointType); - h.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); + _ = store.TryInitiateFullCheckpoint(out _, checkpointType); + store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); - Destroy(log, objlog, h); + Destroy(log, objlog, store); - Prepare(out log, out objlog, out h, out context); + Prepare(out log, out objlog, out store, out context); if (isAsync) - await h.RecoverAsync(); + _ = await store.RecoverAsync(); else - h.Recover(); + _ = store.Recover(); - var session2 = h.NewSession(new MyFunctions()); + var session2 = store.NewSession(new MyFunctions()); Read(session2, context, true); session2.Dispose(); - Destroy(log, objlog, h); + Destroy(log, objlog, store); } - private void Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV h, out MyContext context) + private static void Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV store, out MyContext context) { log = Devices.CreateLogDevice(Path.Combine(TestUtils.MethodTestDir, "RecoverTests.log")); objlog = Devices.CreateLogDevice(Path.Combine(TestUtils.MethodTestDir, "RecoverTests_HEAP.log")); - h = new TsavoriteKV - (1L << 20, - new LogSettings - { - LogDevice = log, - ObjectLogDevice = objlog, - 
SegmentSizeBits = 12, - MemorySizeBits = 12, - PageSizeBits = 9 - }, - new CheckpointSettings() - { - CheckpointDir = Path.Combine(TestUtils.MethodTestDir, "check-points") - }, - new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + ObjectLogDevice = objlog, + SegmentSize = 1L << 12, + MemorySize = 1L << 12, + PageSize = 1L << 9, + CheckpointDir = Path.Combine(TestUtils.MethodTestDir, "check-points") + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) ); context = new MyContext(); } - private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV h) + private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV store) { // Dispose Tsavorite instance and log - h.Dispose(); + store.Dispose(); log.Dispose(); objlog.Dispose(); } - private void Write(ClientSession session, MyContext context, TsavoriteKV store, CheckpointType checkpointType) + private void Write(ClientSession session, MyContext context, + TsavoriteKV store, CheckpointType checkpointType) { var bContext = session.BasicContext; @@ -102,17 +102,17 @@ private void Write(ClientSession session, MyContext context, bool delete) + private void Read(ClientSession session, MyContext context, bool delete) { var bContext = session.BasicContext; @@ -125,7 +125,7 @@ private void Read(ClientSession + public class MyKey { public int key; public string name; - public long GetHashCode64(ref MyKey key) => Utility.GetHashCode(key.key); - public bool Equals(ref MyKey key1, ref MyKey key2) => key1.key == key2.key && key1.name == key2.name; + public struct Comparer : IKeyComparer + { + public readonly long GetHashCode64(ref MyKey key) => Utility.GetHashCode(key.key); + public readonly bool Equals(ref MyKey key1, ref MyKey key2) => key1.key == 
key2.key && key1.name == key2.name; + } } - public class MyValue { public string value; } public class MyInput { public string value; } public class MyOutput { public MyValue value; } diff --git a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest3.cs b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest3.cs index d05374f3d9..92434d52ec 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest3.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectRecoveryTest3.cs @@ -11,6 +11,9 @@ namespace Tsavorite.test.recovery.objects { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] public class ObjectRecoveryTests3 { @@ -36,67 +39,64 @@ public async ValueTask ObjectRecoveryTest3( [Values] bool isAsync) { this.iterations = iterations; - Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV h, out MyContext context); + ObjectRecoveryTests3.Prepare(out IDevice log, out IDevice objlog, out var store, out MyContext context); - var session1 = h.NewSession(new MyFunctions()); - var tokens = Write(session1, context, h, checkpointType); + var session1 = store.NewSession(new MyFunctions()); + var tokens = Write(session1, context, store, checkpointType); Read(session1, context, false, iterations); session1.Dispose(); - h.TryInitiateHybridLogCheckpoint(out Guid token, checkpointType); - h.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); + _ = store.TryInitiateHybridLogCheckpoint(out Guid token, checkpointType); + store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); tokens.Add((iterations, token)); - Destroy(log, objlog, h); + Destroy(log, objlog, store); foreach (var item in tokens) { - Prepare(out log, out objlog, out h, out context); + ObjectRecoveryTests3.Prepare(out log, out objlog, out store, out context); if (isAsync) - await h.RecoverAsync(default, item.Item2); + _ = await store.RecoverAsync(default, item.Item2); else - h.Recover(default, item.Item2); + _ = store.Recover(default, item.Item2); - 
var session2 = h.NewSession(new MyFunctions()); + var session2 = store.NewSession(new MyFunctions()); Read(session2, context, false, item.Item1); session2.Dispose(); - Destroy(log, objlog, h); + Destroy(log, objlog, store); } } - private void Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV h, out MyContext context) + private static void Prepare(out IDevice log, out IDevice objlog, out TsavoriteKV store, out MyContext context) { log = Devices.CreateLogDevice(Path.Combine(TestUtils.MethodTestDir, "RecoverTests.log")); objlog = Devices.CreateLogDevice(Path.Combine(TestUtils.MethodTestDir, "RecoverTests_HEAP.log")); - h = new TsavoriteKV - (1L << 20, - new LogSettings - { - LogDevice = log, - ObjectLogDevice = objlog, - SegmentSizeBits = 12, - MemorySizeBits = 12, - PageSizeBits = 9 - }, - new CheckpointSettings() - { - CheckpointDir = Path.Combine(TestUtils.MethodTestDir, "check-points") - }, - new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + ObjectLogDevice = objlog, + SegmentSize = 1L << 12, + MemorySize = 1L << 12, + PageSize = 1L << 9, + CheckpointDir = Path.Combine(TestUtils.MethodTestDir, "check-points") + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) ); context = new MyContext(); } - private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV h) + private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV store) { // Dispose Tsavorite instance and log - h.Dispose(); + store.Dispose(); log.Dispose(); objlog.Dispose(); } - private List<(int, Guid)> Write(ClientSession session, MyContext context, TsavoriteKV store, CheckpointType checkpointType) + private List<(int, Guid)> Write(ClientSession session, MyContext context, + TsavoriteKV store, CheckpointType 
checkpointType) { var bContext = session.BasicContext; @@ -105,11 +105,11 @@ private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV 0) { - store.TryInitiateHybridLogCheckpoint(out Guid token, checkpointType); + _ = store.TryInitiateHybridLogCheckpoint(out Guid token, checkpointType); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); tokens.Add((i, token)); } @@ -117,7 +117,7 @@ private static void Destroy(IDevice log, IDevice objlog, TsavoriteKV session, MyContext context, bool delete, int iter) + private static void Read(ClientSession session, MyContext context, bool delete, int iter) { var bContext = session.BasicContext; @@ -130,7 +130,7 @@ private void Read(ClientSession + public class AdIdObj { public long adId; - public long GetHashCode64(ref AdId key) + public partial struct Comparer : IKeyComparer { - return Utility.GetHashCode(key.adId); - } - public bool Equals(ref AdId k1, ref AdId k2) - { - return k1.adId == k2.adId; - } - } + public readonly long GetHashCode64(ref AdIdObj key) => Utility.GetHashCode(key.adId); - public class AdIdSerializer : BinaryObjectSerializer - { - public override void Deserialize(out AdId obj) - { - obj = new AdId - { - adId = reader.ReadInt64() - }; + public readonly bool Equals(ref AdIdObj k1, ref AdIdObj k2) => k1.adId == k2.adId; } - public override void Serialize(ref AdId obj) + public class Serializer : BinaryObjectSerializer { - writer.Write(obj.adId); - } - } + public override void Deserialize(out AdIdObj obj) => obj = new AdIdObj { adId = reader.ReadInt64() }; - public class Input - { - public AdId adId; - public NumClicks numClicks; + public override void Serialize(ref AdIdObj obj) => writer.Write(obj.adId); + } } - public class NumClicks + public class NumClicksObj { public long numClicks; - } - public class NumClicksSerializer : BinaryObjectSerializer - { - public override void Deserialize(out NumClicks obj) + public class Serializer : BinaryObjectSerializer { - obj = new NumClicks - { - 
numClicks = reader.ReadInt64() - }; - } + public override void Deserialize(out NumClicksObj obj) => obj = new NumClicksObj { numClicks = reader.ReadInt64() }; - public override void Serialize(ref NumClicks obj) - { - writer.Write(obj.numClicks); + public override void Serialize(ref NumClicksObj obj) => writer.Write(obj.numClicks); } } + public class Input + { + public AdIdObj adId; + public NumClicksObj numClicks; + } public class Output { - public NumClicks value; + public NumClicksObj value; } - public class Functions : SessionFunctionsBase + public class Functions : SessionFunctionsBase { // Read functions - public override bool SingleReader(ref AdId key, ref Input input, ref NumClicks value, ref Output dst, ref ReadInfo readInfo) + public override bool SingleReader(ref AdIdObj key, ref Input input, ref NumClicksObj value, ref Output dst, ref ReadInfo readInfo) { dst.value = value; return true; } - public override bool ConcurrentReader(ref AdId key, ref Input input, ref NumClicks value, ref Output dst, ref ReadInfo readInfo, ref RecordInfo recordInfo) + public override bool ConcurrentReader(ref AdIdObj key, ref Input input, ref NumClicksObj value, ref Output dst, ref ReadInfo readInfo, ref RecordInfo recordInfo) { dst.value = value; return true; } // RMW functions - public override bool InitialUpdater(ref AdId key, ref Input input, ref NumClicks value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InitialUpdater(ref AdIdObj key, ref Input input, ref NumClicksObj value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { value = input.numClicks; return true; } - public override bool InPlaceUpdater(ref AdId key, ref Input input, ref NumClicks value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InPlaceUpdater(ref AdIdObj key, ref Input input, ref NumClicksObj value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - Interlocked.Add(ref 
value.numClicks, input.numClicks.numClicks); + _ = Interlocked.Add(ref value.numClicks, input.numClicks.numClicks); return true; } - public override bool NeedCopyUpdate(ref AdId key, ref Input input, ref NumClicks oldValue, ref Output output, ref RMWInfo rmwInfo) => true; + public override bool NeedCopyUpdate(ref AdIdObj key, ref Input input, ref NumClicksObj oldValue, ref Output output, ref RMWInfo rmwInfo) => true; - public override bool CopyUpdater(ref AdId key, ref Input input, ref NumClicks oldValue, ref NumClicks newValue, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool CopyUpdater(ref AdIdObj key, ref Input input, ref NumClicksObj oldValue, ref NumClicksObj newValue, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { - newValue = new NumClicks { numClicks = oldValue.numClicks + input.numClicks.numClicks }; + newValue = new NumClicksObj { numClicks = oldValue.numClicks + input.numClicks.numClicks }; return true; } } diff --git a/libs/storage/Tsavorite/cs/test/ObjectTestTypes.cs b/libs/storage/Tsavorite/cs/test/ObjectTestTypes.cs index 0ad63b3e4b..a187a83c82 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectTestTypes.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectTestTypes.cs @@ -6,15 +6,18 @@ namespace Tsavorite.test { - public class MyKey : ITsavoriteEqualityComparer + public class MyKey { public int key; - public long GetHashCode64(ref MyKey key) => Utility.GetHashCode(key.key); + public override string ToString() => key.ToString(); - public bool Equals(ref MyKey k1, ref MyKey k2) => k1.key == k2.key; + public struct Comparer : IKeyComparer + { + public long GetHashCode64(ref MyKey key) => Utility.GetHashCode(key.key); - public override string ToString() => key.ToString(); + public bool Equals(ref MyKey k1, ref MyKey k2) => k1.key == k2.key; + } } public class MyKeySerializer : BinaryObjectSerializer @@ -24,15 +27,18 @@ public class MyKeySerializer : BinaryObjectSerializer public override void 
Serialize(ref MyKey obj) => writer.Write(obj.key); } - public class MyValue : ITsavoriteEqualityComparer + public class MyValue { public int value; - public long GetHashCode64(ref MyValue k) => Utility.GetHashCode(k.value); + public override string ToString() => value.ToString(); - public bool Equals(ref MyValue k1, ref MyValue k2) => k1.value == k2.value; + public struct Comparer : IKeyComparer // This Value comparer is used by a test + { + public long GetHashCode64(ref MyValue k) => Utility.GetHashCode(k.value); - public override string ToString() => value.ToString(); + public bool Equals(ref MyValue k1, ref MyValue k2) => k1.value == k2.value; + } } public class MyValueSerializer : BinaryObjectSerializer diff --git a/libs/storage/Tsavorite/cs/test/ObjectTests.cs b/libs/storage/Tsavorite/cs/test/ObjectTests.cs index 0b6b58bdc2..ab82f783ad 100644 --- a/libs/storage/Tsavorite/cs/test/ObjectTests.cs +++ b/libs/storage/Tsavorite/cs/test/ObjectTests.cs @@ -2,17 +2,19 @@ // Licensed under the MIT license. 
using System.IO; -using System.Threading.Tasks; using NUnit.Framework; using Tsavorite.core; using static Tsavorite.test.TestUtils; namespace Tsavorite.test { + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + [TestFixture] internal class ObjectTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log, objlog; [SetUp] @@ -22,11 +24,17 @@ public void Setup() log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "ObjectTests.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Join(MethodTestDir, "ObjectTests.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 15, PageSizeBits = 10 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } - ); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 15, + PageSize = 1L << 10 + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer(), DefaultRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -55,8 +63,8 @@ public void ObjectInMemWriteRead() MyInput input = null; MyOutput output = new(); - bContext.Upsert(ref key1, ref value, Empty.Default); - bContext.Read(ref key1, ref input, ref output, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Read(ref key1, ref input, ref output, Empty.Default); Assert.AreEqual(value.value, output.value.value); } @@ -71,17 +79,17 @@ public void ObjectInMemWriteRead2() MyInput input1 = new() { value = 23 }; MyOutput output = new(); - bContext.RMW(ref key1, ref input1, Empty.Default); + _ = bContext.RMW(ref key1, ref input1, Empty.Default); MyKey 
key2 = new() { key = 8999999 }; MyInput input2 = new() { value = 24 }; - bContext.RMW(ref key2, ref input2, Empty.Default); + _ = bContext.RMW(ref key2, ref input2, Empty.Default); - bContext.Read(ref key1, ref input1, ref output, Empty.Default); + _ = bContext.Read(ref key1, ref input1, ref output, Empty.Default); Assert.AreEqual(input1.value, output.value.value); - bContext.Read(ref key2, ref input2, ref output, Empty.Default); + _ = bContext.Read(ref key2, ref input2, ref output, Empty.Default); Assert.AreEqual(input2.value, output.value.value); } @@ -99,7 +107,7 @@ public void ObjectDiskWriteRead() { var key = new MyKey { key = i }; var value = new MyValue { value = i }; - bContext.Upsert(ref key, ref value, Empty.Default); + _ = bContext.Upsert(ref key, ref value, Empty.Default); // store.ShiftReadOnlyAddress(store.LogTailAddress); } @@ -110,7 +118,7 @@ public void ObjectDiskWriteRead() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, g1) = GetSinglePendingResult(outputs); } @@ -140,7 +148,7 @@ public void ObjectDiskWriteRead() input = new MyInput { value = 1 }; status = bContext.RMW(ref key1, ref input, Empty.Default); if (status.IsPending) - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } for (int i = 0; i < 2000; i++) diff --git a/libs/storage/Tsavorite/cs/test/OverflowBucketLockTableTests.cs b/libs/storage/Tsavorite/cs/test/OverflowBucketLockTableTests.cs index 96fb6cb047..ab74fd88f4 100644 --- a/libs/storage/Tsavorite/cs/test/OverflowBucketLockTableTests.cs +++ b/libs/storage/Tsavorite/cs/test/OverflowBucketLockTableTests.cs @@ -12,7 +12,9 @@ namespace Tsavorite.test.LockTable { - internal class SingleBucketComparer : ITsavoriteEqualityComparer + using LongStoreFunctions = StoreFunctions>; + + internal class SingleBucketComparer : IKeyComparer { public bool Equals(ref long k1, ref long k2) => k1 == k2; @@ 
-25,11 +27,11 @@ public enum UseSingleBucketComparer { UseSingleBucket } [TestFixture] internal class OverflowBucketLockTableTests { - ITsavoriteEqualityComparer comparer = new LongTsavoriteEqualityComparer(); + IKeyComparer comparer = new LongKeyComparer(); long SingleBucketKey = 1; // We use a single bucket here for most tests so this lets us use 'ref' easily // For OverflowBucketLockTable, we need an instance of TsavoriteKV - private TsavoriteKV store; + private TsavoriteKV> store; private IDevice log; [SetUp] @@ -47,10 +49,17 @@ public void Setup() break; } } - comparer ??= new LongTsavoriteEqualityComparer(); + comparer ??= new LongKeyComparer(); - store = new TsavoriteKV(1L << 20, new LogSettings { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 12, MemorySizeBits = 22 }, - comparer: comparer); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + PageSize = 1L << 12, + MemorySize = 1L << 22 + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -91,7 +100,10 @@ void Unlock(long key, LockType lockType) internal void PopulateHei(ref HashEntryInfo hei) => PopulateHei(store, ref hei); - internal static void PopulateHei(TsavoriteKV store, ref HashEntryInfo hei) => store.FindOrCreateTag(ref hei, store.Log.BeginAddress); + internal static void PopulateHei(TsavoriteKV store, ref HashEntryInfo hei) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => store.FindOrCreateTag(ref hei, store.Log.BeginAddress); internal void AssertLockCounts(ref HashEntryInfo hei, bool expectedX, long expectedS) { @@ -100,24 +112,30 @@ internal void AssertLockCounts(ref HashEntryInfo hei, bool expectedX, long expec Assert.AreEqual(expectedS, lockState.NumLockedShared); } - internal static void AssertLockCounts(TsavoriteKV store, TKey key, bool expectedX, int expectedS) + internal static void AssertLockCounts(TsavoriteKV store, TKey key, bool 
expectedX, int expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => AssertLockCounts(store, ref key, expectedX, expectedS); - internal static void AssertLockCounts(TsavoriteKV store, ref TKey key, bool expectedX, int expectedS) + internal static void AssertLockCounts(TsavoriteKV store, ref TKey key, bool expectedX, int expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - HashEntryInfo hei = new(store.comparer.GetHashCode64(ref key)); + HashEntryInfo hei = new(store.storeFunctions.GetKeyHashCode64(ref key)); PopulateHei(store, ref hei); var lockState = store.LockTable.GetLockState(ref hei); Assert.AreEqual(expectedX, lockState.IsLockedExclusive, "XLock mismatch"); Assert.AreEqual(expectedS, lockState.NumLockedShared, "SLock mismatch"); } - internal static void AssertLockCounts(TsavoriteKV store, ref TKey key, bool expectedX, bool expectedS) + internal static void AssertLockCounts(TsavoriteKV store, ref TKey key, bool expectedX, bool expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { FixedLengthLockableKeyStruct keyStruct = new() { Key = key, - KeyHash = store.comparer.GetHashCode64(ref key), + KeyHash = store.storeFunctions.GetKeyHashCode64(ref key), LockType = LockType.None, // Not used for this call }; keyStruct.KeyHash = store.GetKeyHash(ref key); @@ -125,7 +143,9 @@ internal static void AssertLockCounts(TsavoriteKV st } - internal static void AssertLockCounts(TsavoriteKV store, ref FixedLengthLockableKeyStruct key, bool expectedX, bool expectedS) + internal static void AssertLockCounts(TsavoriteKV store, ref FixedLengthLockableKeyStruct key, bool expectedX, bool expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { HashEntryInfo hei = new(key.KeyHash); PopulateHei(store, ref hei); @@ -137,7 +157,9 @@ internal static void AssertLockCounts(TsavoriteKV st internal unsafe void AssertTotalLockCounts(long expectedX, long 
expectedS) => AssertTotalLockCounts(store, expectedX, expectedS); - internal static unsafe void AssertTotalLockCounts(TsavoriteKV store, long expectedX, long expectedS) + internal static unsafe void AssertTotalLockCounts(TsavoriteKV store, long expectedX, long expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { HashBucket* buckets = store.state[store.resizeInfo.version].tableAligned; var count = store.LockTable.NumBuckets; @@ -154,7 +176,9 @@ internal static unsafe void AssertTotalLockCounts(TsavoriteKV store, internal void AssertBucketLockCount(ref FixedLengthLockableKeyStruct key, long expectedX, long expectedS) => AssertBucketLockCount(store, ref key, expectedX, expectedS); - internal static unsafe void AssertBucketLockCount(TsavoriteKV store, ref FixedLengthLockableKeyStruct key, long expectedX, long expectedS) + internal static unsafe void AssertBucketLockCount(TsavoriteKV store, ref FixedLengthLockableKeyStruct key, long expectedX, long expectedS) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var bucketIndex = store.LockTable.GetBucketIndex(key.KeyHash); var bucket = store.state[store.resizeInfo.version].tableAligned + bucketIndex; @@ -238,21 +262,21 @@ public void ThreeKeyTest([Values] UseSingleBucketComparer /* justToSignalSetup * [Category(LockTestCategory), Category(LockTableTestCategory), Category(SmokeTestCategory)] public void ThreadedLockStressTest1Thread() { - List tasks = new(); + List tasks = []; var lastTid = 0; AddThreads(tasks, ref lastTid, numThreads: 1, maxNumKeys: 5, lowKey: 1, highKey: 5, LockType.Exclusive); - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. 
tasks]); } [Test] [Category(LockTestCategory), Category(LockTableTestCategory), Category(SmokeTestCategory)] public void ThreadedLockStressTestMultiThreadsNoContention([Values(3, 8)] int numThreads) { - List tasks = new(); + List tasks = []; var lastTid = 0; for (var ii = 0; ii < numThreads; ++ii) AddThreads(tasks, ref lastTid, numThreads: 1, maxNumKeys: 5, lowKey: 1 + 10 * ii, highKey: 5 + 10 * ii, LockType.Exclusive); - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); AssertTotalLockCounts(0, 0); } @@ -260,10 +284,10 @@ public void ThreadedLockStressTestMultiThreadsNoContention([Values(3, 8)] int nu [Category(LockTestCategory), Category(LockTableTestCategory), Category(SmokeTestCategory)] public void ThreadedLockStressTestMultiThreadsFullContention([Values(3, 8)] int numThreads, [Values] LockType lockType) { - List tasks = new(); + List tasks = []; var lastTid = 0; AddThreads(tasks, ref lastTid, numThreads: numThreads, maxNumKeys: 5, lowKey: 1, highKey: 5, lockType); - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); AssertTotalLockCounts(0, 0); } @@ -271,10 +295,10 @@ public void ThreadedLockStressTestMultiThreadsFullContention([Values(3, 8)] int [Category(LockTestCategory), Category(LockTableTestCategory), Category(SmokeTestCategory)] public void ThreadedLockStressTestMultiThreadsRandomContention([Values(3, 8)] int numThreads, [Values] LockType lockType) { - List tasks = new(); + List tasks = []; var lastTid = 0; AddThreads(tasks, ref lastTid, numThreads: numThreads, maxNumKeys: 5, lowKey: 1, highKey: 10 * (numThreads / 2), lockType); - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. 
tasks]); AssertTotalLockCounts(0, 0); } @@ -361,7 +385,7 @@ public void PartialArraySortTest() } const int NumTestIterations = 15; - const int maxSleepMs = 5; + const int MaxSleepMs = 5; private void AddThreads(List tasks, ref int lastTid, int numThreads, int maxNumKeys, int lowKey, int highKey, LockType lockType) { @@ -371,7 +395,7 @@ void runThread(int tid) // maxNumKeys < 0 means use random number of keys int numKeys = maxNumKeys < 0 ? rng.Next(1, -maxNumKeys) : maxNumKeys; - FixedLengthLockableKeyStruct[] threadStructs = new FixedLengthLockableKeyStruct[numKeys]; + var threadStructs = new FixedLengthLockableKeyStruct[numKeys]; long getNextKey() { @@ -412,7 +436,7 @@ long getNextKey() } // Pretend to do work - Thread.Sleep(rng.Next(maxSleepMs)); + Thread.Sleep(rng.Next(MaxSleepMs)); // Unlock for (var ii = 0; ii < numKeys; ++ii) diff --git a/libs/storage/Tsavorite/cs/test/PostOperationsTests.cs b/libs/storage/Tsavorite/cs/test/PostOperationsTests.cs index 67767bc145..158fbf5a86 100644 --- a/libs/storage/Tsavorite/cs/test/PostOperationsTests.cs +++ b/libs/storage/Tsavorite/cs/test/PostOperationsTests.cs @@ -8,6 +8,9 @@ namespace Tsavorite.test { + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; + [TestFixture] internal class PostOperationsTests { @@ -52,24 +55,31 @@ public override bool PostCopyUpdater(ref int key, ref int input, ref int oldValu public override bool ConcurrentDeleter(ref int key, ref int value, ref DeleteInfo deleteInfo, ref RecordInfo recordInfo) => false; } - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; - const int numRecords = 100; - const int targetKey = 42; + const int NumRecords = 100; + const int TargetKey = 42; long expectedAddress; [SetUp] public void Setup() { // Clean up log files from previous test runs in case they weren't cleaned up 
- TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); + DeleteDirectory(MethodTestDir, wait: true); - log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "PostOperations.log"), deleteOnClose: true); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10 }); + log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "PostOperations.log"), deleteOnClose: true); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10 + }, StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(new PostFunctions()); bContext = session.BasicContext; Populate(); @@ -84,15 +94,15 @@ public void TearDown() store = null; log?.Dispose(); log = null; - TestUtils.DeleteDirectory(TestUtils.MethodTestDir); + DeleteDirectory(MethodTestDir); } void Populate() { - for (var key = 0; key < numRecords; ++key) + for (var key = 0; key < NumRecords; ++key) { expectedAddress = store.Log.TailAddress; - bContext.Upsert(key, key * 100); + _ = bContext.Upsert(key, key * 100); Assert.AreEqual(expectedAddress, session.functions.pswAddress); } @@ -103,8 +113,8 @@ void Populate() internal void CompletePendingAndVerifyInsertedAddress() { // Note: Only Read and RMW have Pending results. 
- bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); - TestUtils.GetSinglePendingResult(completedOutputs, out var recordMetadata); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = GetSinglePendingResult(completedOutputs, out var recordMetadata); Assert.AreEqual(expectedAddress, recordMetadata.Address); } @@ -117,8 +127,8 @@ public void PostSingleWriterTest() // Execute the ReadOnly (InternalInsert) test store.Log.FlushAndEvict(wait: true); - bContext.Upsert(targetKey, targetKey * 1000); - bContext.CompletePending(wait: true); + _ = bContext.Upsert(TargetKey, TargetKey * 1000); + _ = bContext.CompletePending(wait: true); Assert.AreEqual(expectedAddress, session.functions.pswAddress); } @@ -128,21 +138,21 @@ public void PostSingleWriterTest() public void PostInitialUpdaterTest() { // Execute the not-found test (InternalRMW). - bContext.RMW(numRecords + 1, (numRecords + 1) * 1000); + _ = bContext.RMW(NumRecords + 1, (NumRecords + 1) * 1000); Assert.AreEqual(expectedAddress, session.functions.piuAddress); session.functions.Clear(); // Now cause an attempt at InPlaceUpdater, which we've set to fail, so CopyUpdater is done (InternalInsert). expectedAddress = store.Log.TailAddress; - bContext.RMW(targetKey, targetKey * 1000); + _ = bContext.RMW(TargetKey, TargetKey * 1000); Assert.AreEqual(expectedAddress, session.functions.pcuAddress); // Execute the not-in-memory test (InternalContinuePendingRMW). First delete the record so it has a tombstone; this will go to InitialUpdater. 
- bContext.Delete(targetKey); + _ = bContext.Delete(TargetKey); store.Log.FlushAndEvict(wait: true); expectedAddress = store.Log.TailAddress; - bContext.RMW(targetKey, targetKey * 1000); + _ = bContext.RMW(TargetKey, TargetKey * 1000); CompletePendingAndVerifyInsertedAddress(); Assert.AreEqual(expectedAddress, session.functions.piuAddress); } @@ -154,13 +164,13 @@ public void PostCopyUpdaterTest() { // First try to modify in-memory, readonly (InternalRMW). store.Log.ShiftReadOnlyAddress(store.Log.ReadOnlyAddress, wait: true); - bContext.RMW(targetKey, targetKey * 1000); + _ = bContext.RMW(TargetKey, TargetKey * 1000); Assert.AreEqual(expectedAddress, session.functions.pcuAddress); // Execute the not-in-memory test (InternalContinuePendingRMW). store.Log.FlushAndEvict(wait: true); expectedAddress = store.Log.TailAddress; - bContext.RMW(targetKey, targetKey * 1000); + _ = bContext.RMW(TargetKey, TargetKey * 1000); CompletePendingAndVerifyInsertedAddress(); Assert.AreEqual(expectedAddress, session.functions.pcuAddress); } @@ -171,7 +181,7 @@ public void PostCopyUpdaterTest() public void PostCopyUpdaterFalseTest([Values(FlushMode.ReadOnly, FlushMode.OnDisk)] FlushMode flushMode) { // Verify the key exists - var (status, output) = bContext.Read(targetKey); + var (status, output) = bContext.Read(TargetKey); Assert.IsTrue(status.Found, "Expected the record to exist"); session.functions.returnFalseFromPCU = true; @@ -182,10 +192,10 @@ public void PostCopyUpdaterFalseTest([Values(FlushMode.ReadOnly, FlushMode.OnDis store.Log.FlushAndEvict(wait: true); // Call RMW - bContext.RMW(targetKey, targetKey * 1000); + _ = bContext.RMW(TargetKey, TargetKey * 1000); // Verify the key no longer exists. 
- (status, output) = bContext.Read(targetKey); + (status, output) = bContext.Read(TargetKey); Assert.IsFalse(status.Found, "Expected the record to no longer exist"); } @@ -195,13 +205,13 @@ public void PostCopyUpdaterFalseTest([Values(FlushMode.ReadOnly, FlushMode.OnDis public void PostSingleDeleterTest() { // Execute the not-in-memory test (InternalDelete); ConcurrentDeleter returns false to force a new record to be added. - bContext.Delete(targetKey); + _ = bContext.Delete(TargetKey); Assert.AreEqual(expectedAddress, session.functions.psdAddress); // Execute the not-in-memory test (InternalDelete). store.Log.FlushAndEvict(wait: true); expectedAddress = store.Log.TailAddress; - bContext.Delete(targetKey + 1); + _ = bContext.Delete(TargetKey + 1); Assert.AreEqual(expectedAddress, session.functions.psdAddress); } } diff --git a/libs/storage/Tsavorite/cs/test/ReadAddressTests.cs b/libs/storage/Tsavorite/cs/test/ReadAddressTests.cs index d3dad8bcca..8917c797c3 100644 --- a/libs/storage/Tsavorite/cs/test/ReadAddressTests.cs +++ b/libs/storage/Tsavorite/cs/test/ReadAddressTests.cs @@ -10,55 +10,58 @@ namespace Tsavorite.test.readaddress { - [TestFixture] - internal class ReadAddressTests + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ public struct KeyStruct(long first) { - const int numKeys = 1000; - const int keyMod = 100; - const int maxLap = numKeys / keyMod; - const int deleteLap = maxLap / 2; - const int defaultKeyToScan = 42; + public long key = first; - private static int LapOffset(int lap) => lap * numKeys * 100; + public override readonly string ToString() => key.ToString(); - public struct Key + internal struct Comparer : IKeyComparer { - public long key; - - public Key(long first) => key = first; + public readonly long GetHashCode64(ref KeyStruct key) => Utility.GetHashCode(key.key); - public override string ToString() => key.ToString(); + public readonly bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.key == k2.key; + } + } - internal class Comparer : ITsavoriteEqualityComparer - { - public long GetHashCode64(ref Key key) => Utility.GetHashCode(key.key); + public struct ValueStruct(long value) + { + public long value = value; - public bool Equals(ref Key k1, ref Key k2) => k1.key == k2.key; - } - } + public override readonly string ToString() => value.ToString(); + } +} - public struct Value - { - public long value; +namespace Tsavorite.test.readaddress +{ + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; - public Value(long value) => this.value = value; + [TestFixture] + internal class ReadAddressTests + { + const int NumKeys = 1000; + const int KeyMod = 100; + const int MaxLap = NumKeys / KeyMod; + const int DeleteLap = MaxLap / 2; + const int DefaultKeyToScan = 42; - public override string ToString() => value.ToString(); - } + private static int LapOffset(int lap) => lap * NumKeys * 100; public struct Output { public long value; public long address; - public override string ToString() => $"val {value}; addr {address}"; + public override readonly string ToString() => $"val {value}; addr {address}"; } private static long SetReadOutput(long key, long value) => (key << 32) | value; public enum UseReadCache { NoReadCache, ReadCache 
} - internal class Functions : SessionFunctionsBase + internal class Functions : SessionFunctionsBase { internal long lastWriteAddress = Constants.kInvalidAddress; readonly bool useReadCache; @@ -78,14 +81,14 @@ internal Functions(bool preserveCopyUpdaterSource = false) } } - public override bool ConcurrentReader(ref Key key, ref Value input, ref Value value, ref Output output, ref ReadInfo readInfo, ref RecordInfo recordInfo) + public override bool ConcurrentReader(ref KeyStruct key, ref ValueStruct input, ref ValueStruct value, ref Output output, ref ReadInfo readInfo, ref RecordInfo recordInfo) { output.value = SetReadOutput(key.key, value.value); output.address = readInfo.Address; return true; } - public override bool SingleReader(ref Key key, ref Value input, ref Value value, ref Output output, ref ReadInfo readInfo) + public override bool SingleReader(ref KeyStruct key, ref ValueStruct input, ref ValueStruct value, ref Output output, ref ReadInfo readInfo) { output.value = SetReadOutput(key.key, value.value); output.address = readInfo.Address; @@ -93,12 +96,12 @@ public override bool SingleReader(ref Key key, ref Value input, ref Value value, } // Return false to force a chain of values. 
- public override bool ConcurrentWriter(ref Key key, ref Value input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, ref RecordInfo recordInfo) => false; + public override bool ConcurrentWriter(ref KeyStruct key, ref ValueStruct input, ref ValueStruct src, ref ValueStruct dst, ref Output output, ref UpsertInfo upsertInfo, ref RecordInfo recordInfo) => false; - public override bool InPlaceUpdater(ref Key key, ref Value input, ref Value value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) => false; + public override bool InPlaceUpdater(ref KeyStruct key, ref ValueStruct input, ref ValueStruct value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) => false; // Record addresses - public override bool SingleWriter(ref Key key, ref Value input, ref Value src, ref Value dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason, ref RecordInfo recordInfo) + public override bool SingleWriter(ref KeyStruct key, ref ValueStruct input, ref ValueStruct src, ref ValueStruct dst, ref Output output, ref UpsertInfo upsertInfo, WriteReason reason, ref RecordInfo recordInfo) { dst = src; output.address = upsertInfo.Address; @@ -106,7 +109,7 @@ public override bool SingleWriter(ref Key key, ref Value input, ref Value src, r return true; } - public override bool InitialUpdater(ref Key key, ref Value input, ref Value value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) + public override bool InitialUpdater(ref KeyStruct key, ref ValueStruct input, ref ValueStruct value, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { lastWriteAddress = rmwInfo.Address; output.address = rmwInfo.Address; @@ -114,7 +117,7 @@ public override bool InitialUpdater(ref Key key, ref Value input, ref Value valu return true; } - public override bool CopyUpdater(ref Key key, ref Value input, ref Value oldValue, ref Value newValue, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo 
recordInfo) + public override bool CopyUpdater(ref KeyStruct key, ref ValueStruct input, ref ValueStruct oldValue, ref ValueStruct newValue, ref Output output, ref RMWInfo rmwInfo, ref RecordInfo recordInfo) { lastWriteAddress = rmwInfo.Address; output.address = rmwInfo.Address; @@ -123,7 +126,7 @@ public override bool CopyUpdater(ref Key key, ref Value input, ref Value oldValu return true; } - public override void ReadCompletionCallback(ref Key key, ref Value input, ref Output output, Empty ctx, Status status, RecordMetadata recordMetadata) + public override void ReadCompletionCallback(ref KeyStruct key, ref ValueStruct input, ref Output output, Empty ctx, Status status, RecordMetadata recordMetadata) { if (status.Found) { @@ -134,7 +137,7 @@ public override void ReadCompletionCallback(ref Key key, ref Value input, ref Ou } } - public override void RMWCompletionCallback(ref Key key, ref Value input, ref Output output, Empty ctx, Status status, RecordMetadata recordMetadata) + public override void RMWCompletionCallback(ref KeyStruct key, ref ValueStruct input, ref Output output, Empty ctx, Status status, RecordMetadata recordMetadata) { if (status.Found) Assert.AreEqual(output.address, recordMetadata.Address); @@ -143,11 +146,11 @@ public override void RMWCompletionCallback(ref Key key, ref Value input, ref Out private class TestStore : IDisposable { - internal TsavoriteKV store; + internal TsavoriteKV store; internal IDevice logDevice; private readonly bool flush; - internal long[] InsertAddresses = new long[numKeys]; + internal long[] insertAddresses = new long[NumKeys]; internal TestStore(bool useReadCache, ReadCopyOptions readCopyOptions, bool flush) { @@ -155,24 +158,20 @@ internal TestStore(bool useReadCache, ReadCopyOptions readCopyOptions, bool flus logDevice = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog.log")); this.flush = flush; - var logSettings = new LogSettings + store = new(new() { + IndexSize = 1L << 26, LogDevice = logDevice, - 
ObjectLogDevice = new NullDevice(), - ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null, + ReadCacheEnabled = useReadCache, ReadCopyOptions = readCopyOptions, // Use small-footprint values - PageSizeBits = 12, // (4K pages) - MemorySizeBits = 20 // (1M memory for main log) - }; - - store = new TsavoriteKV( - size: 1L << 20, - logSettings: logSettings, - checkpointSettings: new CheckpointSettings { CheckpointDir = Path.Join(MethodTestDir, "chkpt") }, - serializerSettings: null, - comparer: new Key.Comparer() - ); + PageSize = 1L << 12, // (4K pages) + MemorySize = 1L << 20, // (1M memory for main log) + + CheckpointDir = Path.Join(MethodTestDir, "chkpt") + }, StoreFunctions.Create(new KeyStruct.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } internal async ValueTask Flush() @@ -180,7 +179,7 @@ internal async ValueTask Flush() if (flush) { if (!store.UseReadCache) - await store.TakeFullCheckpointAsync(CheckpointType.FoldOver); + _ = await store.TakeFullCheckpointAsync(CheckpointType.FoldOver); store.Log.FlushAndEvict(wait: true); } } @@ -188,14 +187,14 @@ internal async ValueTask Flush() internal async Task Populate(bool useRMW, bool preserveCopyUpdaterSource = false) { var functions = new Functions(preserveCopyUpdaterSource); - using var session = store.NewSession(functions); + using var session = store.NewSession(functions); var bContext = session.BasicContext; var prevLap = 0; - for (int ii = 0; ii < numKeys; ii++) + for (int ii = 0; ii < NumKeys; ii++) { // lap is used to illustrate the changing values - var lap = ii / keyMod; + var lap = ii / KeyMod; if (lap != prevLap) { @@ -203,8 +202,8 @@ internal async Task Populate(bool useRMW, bool preserveCopyUpdaterSource = false prevLap = lap; } - var key = new Key(ii % keyMod); - var value = new Value(key.key + LapOffset(lap)); + var key = new KeyStruct(ii % KeyMod); + var value = new ValueStruct(key.key + LapOffset(lap)); var status = useRMW ? 
bContext.RMW(ref key, ref value) @@ -213,12 +212,12 @@ internal async Task Populate(bool useRMW, bool preserveCopyUpdaterSource = false if (status.IsPending) await bContext.CompletePendingAsync(); - InsertAddresses[ii] = functions.lastWriteAddress; + insertAddresses[ii] = functions.lastWriteAddress; //Assert.IsTrue(session.ctx.HasNoPendingRequests); // Illustrate that deleted records can be shown as well (unless overwritten by in-place operations, which are not done here) - if (lap == deleteLap) - bContext.Delete(ref key); + if (lap == DeleteLap) + _ = bContext.Delete(ref key); } await Flush(); @@ -228,9 +227,9 @@ internal bool ProcessChainRecord(Status status, RecordMetadata recordMetadata, i { var recordInfo = recordMetadata.RecordInfo; Assert.GreaterOrEqual(lap, 0); - long expectedValue = SetReadOutput(defaultKeyToScan, LapOffset(lap) + defaultKeyToScan); + long expectedValue = SetReadOutput(DefaultKeyToScan, LapOffset(lap) + DefaultKeyToScan); - Assert.AreEqual(lap == deleteLap, recordInfo.Tombstone, $"lap({lap}) == deleteLap({deleteLap}) != Tombstone ({recordInfo.Tombstone})"); + Assert.AreEqual(lap == DeleteLap, recordInfo.Tombstone, $"lap({lap}) == deleteLap({DeleteLap}) != Tombstone ({recordInfo.Tombstone})"); if (!recordInfo.Tombstone) Assert.AreEqual(expectedValue, actualOutput.value, $"lap({lap})"); @@ -242,8 +241,8 @@ internal static void ProcessNoKeyRecord(bool useRMW, Status status, RecordInfo r { if (status.Found) { - var keyToScan = keyOrdinal % keyMod; - var lap = keyOrdinal / keyMod; + var keyToScan = keyOrdinal % KeyMod; + var lap = keyOrdinal / KeyMod; long expectedValue = SetReadOutput(keyToScan, LapOffset(lap) + keyToScan); if (!recordInfo.Tombstone) Assert.AreEqual(expectedValue, actualOutput.value, $"keyToScan {keyToScan}, lap({lap})"); @@ -274,20 +273,20 @@ public void VersionedReadTests(UseReadCache urc, ReadCopyFrom readCopyFrom, Read var readCopyOptions = new ReadCopyOptions(readCopyFrom, readCopyTo); using var testStore = new 
TestStore(useReadCache, readCopyOptions, flushMode == FlushMode.OnDisk); testStore.Populate(updateOp == UpdateOp.RMW).GetAwaiter().GetResult(); - using var session = testStore.store.NewSession(new Functions()); + using var session = testStore.store.NewSession(new Functions()); var bContext = session.BasicContext; // Two iterations to ensure no issues due to read-caching or copying to tail. for (int iteration = 0; iteration < 2; ++iteration) { var output = default(Output); - var input = default(Value); - var key = new Key(defaultKeyToScan); + var input = default(ValueStruct); + var key = new KeyStruct(DefaultKeyToScan); RecordMetadata recordMetadata = default; ReadOptions readOptions = new() { CopyOptions = session.functions.readCopyOptions }; long readAtAddress = 0; - for (int lap = maxLap - 1; /* tested in loop */; --lap) + for (int lap = MaxLap - 1; /* tested in loop */; --lap) { // We need a non-AtAddress read to start the loop of returning the previous address to read at. var status = readAtAddress == 0 @@ -297,7 +296,7 @@ public void VersionedReadTests(UseReadCache urc, ReadCopyFrom readCopyFrom, Read if (status.IsPending) { // This will wait for each retrieved record; not recommended for performance-critical code or when retrieving multiple records unless necessary. 
- bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, output) = GetSinglePendingResult(completedOutputs, out recordMetadata); } if (!testStore.ProcessChainRecord(status, recordMetadata, lap, ref output)) @@ -307,7 +306,7 @@ public void VersionedReadTests(UseReadCache urc, ReadCopyFrom readCopyFrom, Read } } - struct IterateKeyTestScanIteratorFunctions : IScanIteratorFunctions + struct IterateKeyTestScanIteratorFunctions : IScanIteratorFunctions { readonly TestStore testStore; internal int numRecords; @@ -315,24 +314,24 @@ struct IterateKeyTestScanIteratorFunctions : IScanIteratorFunctions internal IterateKeyTestScanIteratorFunctions(TestStore ts) => testStore = ts; - public bool OnStart(long beginAddress, long endAddress) => true; + public readonly bool OnStart(long beginAddress, long endAddress) => true; - public bool ConcurrentReader(ref Key key, ref Value value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) + public bool ConcurrentReader(ref KeyStruct key, ref ValueStruct value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); - public bool SingleReader(ref Key key, ref Value value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) + public bool SingleReader(ref KeyStruct key, ref ValueStruct value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) { cursorRecordResult = CursorRecordResult.Accept; // default; not used here Output output = new() { address = recordMetadata.Address, value = SetReadOutput(key.key, value.value) }; - int lap = maxLap - ++numRecords; + int lap = MaxLap - ++numRecords; Assert.AreEqual(lap != 0, testStore.ProcessChainRecord(new(StatusCode.Found), 
recordMetadata, lap, ref output), $"lap ({lap}) == 0 != ProcessChainRecord(...)"); Assert.AreEqual(numRecords, numberOfRecords, "mismatched record count"); return stopAt != numRecords; } - public void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly void OnStop(bool completed, long numberOfRecords) { } } [Test, Category(TsavoriteKVTestCategory), Category(ReadTestCategory)] @@ -347,10 +346,10 @@ public void IterateKeyTests([Values(FlushMode.NoFlush, FlushMode.OnDisk)] FlushM for (int iteration = 0; iteration < 2; ++iteration) { - var key = new Key(defaultKeyToScan); + var key = new KeyStruct(DefaultKeyToScan); IterateKeyTestScanIteratorFunctions scanFunctions = new(testStore); Assert.IsTrue(testStore.store.Log.IterateKeyVersions(ref scanFunctions, ref key)); - Assert.AreEqual(maxLap, scanFunctions.numRecords); + Assert.AreEqual(MaxLap, scanFunctions.numRecords); } } @@ -366,7 +365,7 @@ public void IterateKeyStopTests([Values(FlushMode.NoFlush, FlushMode.OnDisk)] Fl for (int iteration = 0; iteration < 2; ++iteration) { - var key = new Key(defaultKeyToScan); + var key = new KeyStruct(DefaultKeyToScan); IterateKeyTestScanIteratorFunctions scanFunctions = new(testStore) { stopAt = 4 }; Assert.IsFalse(testStore.store.Log.IterateKeyVersions(ref scanFunctions, ref key)); Assert.AreEqual(scanFunctions.stopAt, scanFunctions.numRecords); @@ -387,20 +386,20 @@ public void ReadAtAddressTests(UseReadCache urc, ReadCopyFrom readCopyFrom, Read var readCopyOptions = new ReadCopyOptions(readCopyFrom, readCopyTo); using var testStore = new TestStore(useReadCache, readCopyOptions, flushMode == FlushMode.OnDisk); testStore.Populate(updateOp == UpdateOp.RMW).GetAwaiter().GetResult(); - using var session = testStore.store.NewSession(new Functions()); + using var session = testStore.store.NewSession(new Functions()); 
var bContext = session.BasicContext; // Two iterations to ensure no issues due to read-caching or copying to tail. for (int iteration = 0; iteration < 2; ++iteration) { var output = default(Output); - var input = default(Value); - var key = new Key(defaultKeyToScan); + var input = default(ValueStruct); + var key = new KeyStruct(DefaultKeyToScan); RecordMetadata recordMetadata = default; ReadOptions readOptions = new() { CopyOptions = session.functions.readCopyOptions }; long readAtAddress = 0; - for (int lap = maxLap - 1; /* tested in loop */; --lap) + for (int lap = MaxLap - 1; /* tested in loop */; --lap) { var status = readAtAddress == 0 ? bContext.Read(ref key, ref input, ref output, ref readOptions, out recordMetadata) @@ -408,7 +407,7 @@ public void ReadAtAddressTests(UseReadCache urc, ReadCopyFrom readCopyFrom, Read if (status.IsPending) { // This will wait for each retrieved record; not recommended for performance-critical code or when retrieving multiple records unless necessary. - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, output) = GetSinglePendingResult(completedOutputs, out recordMetadata); } @@ -433,19 +432,19 @@ public async Task ReadAtAddressCopyOptNoRcTest(UseReadCache urc, ReadCopyFrom re var readCopyOptions = new ReadCopyOptions(readCopyFrom, readCopyTo); using var testStore = new TestStore(useReadCache, readCopyOptions, flushMode == FlushMode.OnDisk); await testStore.Populate(updateOp == UpdateOp.RMW); - using var session = testStore.store.NewSession(new Functions()); + using var session = testStore.store.NewSession(new Functions()); var bContext = session.BasicContext; // Two iterations to ensure no issues due to read-caching or copying to tail. 
for (int iteration = 0; iteration < 2; ++iteration) { - var input = default(Value); - var key = new Key(defaultKeyToScan); + var input = default(ValueStruct); + var key = new KeyStruct(DefaultKeyToScan); RecordMetadata recordMetadata = default; ReadOptions readOptions = new() { CopyOptions = session.functions.readCopyOptions }; long readAtAddress = 0; - for (int lap = maxLap - 1; /* tested in loop */; --lap) + for (int lap = MaxLap - 1; /* tested in loop */; --lap) { Output output = new(); Status status = readAtAddress == 0 @@ -475,7 +474,7 @@ public async ValueTask ReadNoKeyTests(UseReadCache urc, ReadCopyFrom readCopyFro var readCopyOptions = new ReadCopyOptions(readCopyFrom, readCopyTo); using var testStore = new TestStore(useReadCache, readCopyOptions, flushMode == FlushMode.OnDisk); await testStore.Populate(updateOp == UpdateOp.RMW); - using var session = testStore.store.NewSession(new Functions()); + using var session = testStore.store.NewSession(new Functions()); var bContext = session.BasicContext; // Two iterations to ensure no issues due to read-caching or copying to tail. 
@@ -483,21 +482,21 @@ public async ValueTask ReadNoKeyTests(UseReadCache urc, ReadCopyFrom readCopyFro { var rng = new Random(101); var output = default(Output); - var input = default(Value); + var input = default(ValueStruct); - for (int ii = 0; ii < numKeys; ++ii) + for (int ii = 0; ii < NumKeys; ++ii) { - var keyOrdinal = rng.Next(numKeys); + var keyOrdinal = rng.Next(NumKeys); ReadOptions readOptions = new() { CopyOptions = session.functions.readCopyOptions }; - var status = bContext.ReadAtAddress(testStore.InsertAddresses[keyOrdinal], ref input, ref output, ref readOptions, out RecordMetadata recordMetadata); + var status = bContext.ReadAtAddress(testStore.insertAddresses[keyOrdinal], ref input, ref output, ref readOptions, out RecordMetadata recordMetadata); if (status.IsPending) { // This will wait for each retrieved record; not recommended for performance-critical code or when retrieving multiple records unless necessary. - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); (status, output) = GetSinglePendingResult(completedOutputs); } diff --git a/libs/storage/Tsavorite/cs/test/ReadCacheChainTests.cs b/libs/storage/Tsavorite/cs/test/ReadCacheChainTests.cs index 7dc82ac10f..798f36c3f9 100644 --- a/libs/storage/Tsavorite/cs/test/ReadCacheChainTests.cs +++ b/libs/storage/Tsavorite/cs/test/ReadCacheChainTests.cs @@ -13,10 +13,45 @@ using Tsavorite.test.LockTable; using static Tsavorite.test.TestUtils; -#pragma warning disable IDE0060 // Remove unused parameter (used for Setup) +#pragma warning disable // Add parentheses for clarity namespace Tsavorite.test.ReadCacheTests { + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ internal class LongComparerModulo : IKeyComparer + { + readonly long mod; + + internal LongComparerModulo(long mod) => this.mod = mod; + + public bool Equals(ref long k1, ref long k2) => k1 == k2; + + public long GetHashCode64(ref long k) => mod == 0 ? k : k % mod; + } + + internal struct SpanByteComparerModulo : IKeyComparer + { + readonly HashModulo modRange; + + internal SpanByteComparerModulo(HashModulo mod) => modRange = mod; + + public readonly bool Equals(ref SpanByte k1, ref SpanByte k2) => SpanByteComparer.StaticEquals(ref k1, ref k2); + + // Force collisions to create a chain + public readonly long GetHashCode64(ref SpanByte k) + { + var value = SpanByteComparer.StaticGetHashCode64(ref k); + return modRange != HashModulo.NoMod ? value % (long)modRange : value; + } + } +} + +namespace Tsavorite.test.ReadCacheTests +{ + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + using SpanByteStoreFunctions = StoreFunctions; + internal static class RcTestGlobals { internal const int PendingMod = 16; @@ -24,48 +59,48 @@ internal static class RcTestGlobals class ChainTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; - const long lowChainKey = 40; - const long midChainKey = lowChainKey + chainLen * (mod / 2); - const long highChainKey = lowChainKey + chainLen * (mod - 1); - const int mod = 10; - const int chainLen = 10; - const int valueAdd = 1_000_000; + private LongComparerModulo comparer; + + const long LowChainKey = 40; + const long MidChainKey = LowChainKey + ChainLen * (HashMod / 2); + const long HighChainKey = LowChainKey + ChainLen * (HashMod - 1); + const long HashMod = 10; + const int ChainLen = 10; + const int ValueAdd = 1_000_000; // -1 so highChainKey is first in the chain. - const long numKeys = highChainKey + mod - 1; + const long NumKeys = HighChainKey + HashMod - 1; // Insert into chain. 
- const long spliceInNewKey = highChainKey + mod * 2; - const long spliceInExistingKey = highChainKey - mod; - const long immutableSplitKey = numKeys / 2; + const long SpliceInNewKey = HighChainKey + HashMod * 2; + const long SpliceInExistingKey = HighChainKey - HashMod; + const long ImmutableSplitKey = NumKeys / 2; // This is the record after the first readcache record we insert; it lets us limit the range to ReadCacheEvict // so we get outsplicing rather than successively overwriting the hash table entry on ReadCacheEvict. long readCacheBelowMidChainKeyEvictionAddress; - internal class ChainComparer : ITsavoriteEqualityComparer - { - readonly int mod; - - internal ChainComparer(int mod) => this.mod = mod; - - public bool Equals(ref long k1, ref long k2) => k1 == k2; - - public long GetHashCode64(ref long k) => k % mod; - } - [SetUp] public void Setup() { DeleteDirectory(MethodTestDir, wait: true); - var readCacheSettings = new ReadCacheSettings { MemorySizeBits = 15, PageSizeBits = 9 }; log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "NativeReadCacheTests.log"), deleteOnClose: true); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10, ReadCacheSettings = readCacheSettings }, - comparer: new ChainComparer(mod)); + comparer = new LongComparerModulo(HashMod); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10, + ReadCacheMemorySize = 1L << 15, + ReadCachePageSize = 1L << 9, + ReadCacheEnabled = true + }, StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -87,23 +122,23 @@ void PopulateAndEvict(RecordRegion recordRegion = RecordRegion.OnDisk) if (recordRegion != RecordRegion.Immutable) { - for (int key = 0; key < numKeys; key++) - bContext.Upsert(key, key + valueAdd); - bContext.CompletePending(true); + for (int key = 0; key < NumKeys; key++) + _ = 
bContext.Upsert(key, key + ValueAdd); + _ = bContext.CompletePending(true); if (recordRegion == RecordRegion.OnDisk) store.Log.FlushAndEvict(true); return; } // Two parts, so we can have some evicted (and bring them into the readcache), and some in immutable (readonly). - for (int key = 0; key < immutableSplitKey; key++) - bContext.Upsert(key, key + valueAdd); - bContext.CompletePending(true); + for (int key = 0; key < ImmutableSplitKey; key++) + _ = bContext.Upsert(key, key + ValueAdd); + _ = bContext.CompletePending(true); store.Log.FlushAndEvict(true); - for (long key = immutableSplitKey; key < numKeys; key++) - bContext.Upsert(key, key + valueAdd); - bContext.CompletePending(true); + for (long key = ImmutableSplitKey; key < NumKeys; key++) + _ = bContext.Upsert(key, key + ValueAdd); + _ = bContext.CompletePending(true); store.Log.ShiftReadOnlyAddress(store.Log.TailAddress, wait: true); } @@ -113,47 +148,47 @@ void CreateChain(RecordRegion recordRegion = RecordRegion.OnDisk) var bContext = session.BasicContext; long output = -1; - bool expectPending(long key) => recordRegion == RecordRegion.OnDisk || (recordRegion == RecordRegion.Immutable && key < immutableSplitKey); + bool expectPending(long key) => recordRegion == RecordRegion.OnDisk || (recordRegion == RecordRegion.Immutable && key < ImmutableSplitKey); // Pass1: PENDING reads and populate the cache - for (long ii = 0; ii < chainLen; ++ii) + for (long ii = 0; ii < ChainLen; ++ii) { - var key = lowChainKey + ii * mod; + var key = LowChainKey + ii * HashMod; var status = bContext.Read(key, out _); if (expectPending(key)) { Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(status.Record.CopiedToReadCache, status.ToString()); } Assert.IsTrue(status.Found, status.ToString()); - if (key < midChainKey) + if 
(key < MidChainKey) readCacheBelowMidChainKeyEvictionAddress = store.ReadCache.TailAddress; } // Pass2: non-PENDING reads from the cache - for (var ii = 0; ii < chainLen; ++ii) + for (var ii = 0; ii < ChainLen; ++ii) { - var status = bContext.Read(lowChainKey + ii * mod, out _); + var status = bContext.Read(LowChainKey + ii * HashMod, out _); Assert.IsTrue(!status.IsPending && status.Found, status.ToString()); } // Pass 3: Put in bunch of extra keys into the cache so when we FlushAndEvict we get all the ones of interest. - for (var key = 0; key < numKeys; ++key) + for (var key = 0; key < NumKeys; ++key) { - if ((key % mod) != 0) + if ((key % HashMod) != 0) { var status = bContext.Read(key, out _); if (expectPending(key)) { Assert.IsTrue(status.IsPending); - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(status.Record.CopiedToReadCache, status.ToString()); } Assert.IsTrue(status.Found, status.ToString()); - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); } } } @@ -162,7 +197,7 @@ unsafe bool GetRecordInInMemoryHashChain(long key, out bool isReadCache) { // returns whether the key was found before we'd go pending var (la, pa) = GetHashChain(store, key, out long recordKey, out bool invalid, out isReadCache); - while (isReadCache || la >= store.hlog.HeadAddress) + while (isReadCache || la >= store.hlogBase.HeadAddress) { if (recordKey == key && !invalid) return true; @@ -184,7 +219,9 @@ internal bool FindRecordInReadCache(long key, out bool invalid, out long logical return false; } - internal static (long logicalAddress, long physicalAddress) GetHashChain(TsavoriteKV store, long key, out long recordKey, out bool invalid, out bool isReadCache) + internal static (long logicalAddress, long physicalAddress) GetHashChain(TsavoriteKV store, long key, out long recordKey, out bool invalid, 
out bool isReadCache) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var tagExists = store.FindHashBucketEntryForKey(ref key, out var entry); Assert.IsTrue(tagExists); @@ -201,7 +238,9 @@ internal static (long logicalAddress, long physicalAddress) GetHashChain(Tsavori (long logicalAddress, long physicalAddress) NextInChain(long physicalAddress, out long recordKey, out bool invalid, ref bool isReadCache) => NextInChain(store, physicalAddress, out recordKey, out invalid, ref isReadCache); - internal static (long logicalAddress, long physicalAddress) NextInChain(TsavoriteKV store, long physicalAddress, out long recordKey, out bool invalid, ref bool isReadCache) + internal static (long logicalAddress, long physicalAddress) NextInChain(TsavoriteKV store, long physicalAddress, out long recordKey, out bool invalid, ref bool isReadCache) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var log = isReadCache ? store.readcache : store.hlog; var info = log.GetInfo(physicalAddress); @@ -218,13 +257,13 @@ internal static (long logicalAddress, long physicalAddress) NextInChain(Tsavorit (long logicalAddress, long physicalAddress) ScanReadCacheChain(long[] omitted = null, bool evicted = false, bool deleted = false) { - omitted ??= Array.Empty(); + omitted ??= []; - var (la, pa) = GetHashChain(store, lowChainKey, out long actualKey, out bool invalid, out bool isReadCache); - for (var expectedKey = highChainKey; expectedKey >= lowChainKey; expectedKey -= mod) + var (la, pa) = GetHashChain(store, LowChainKey, out long actualKey, out bool invalid, out bool isReadCache); + for (var expectedKey = HighChainKey; expectedKey >= LowChainKey; expectedKey -= HashMod) { // We evict from readcache only to just below midChainKey - if (!evicted || expectedKey >= midChainKey) + if (!evicted || expectedKey >= MidChainKey) Assert.IsTrue(isReadCache); if (isReadCache) @@ -239,7 +278,7 @@ internal static (long logicalAddress, long 
physicalAddress) NextInChain(Tsavorit } (la, pa) = NextInChain(pa, out actualKey, out invalid, ref isReadCache); - if (!isReadCache && la < store.hlog.HeadAddress) + if (!isReadCache && la < store.hlogBase.HeadAddress) break; } Assert.IsFalse(isReadCache); @@ -249,7 +288,9 @@ internal static (long logicalAddress, long physicalAddress) NextInChain(Tsavorit (long logicalAddress, long physicalAddress) SkipReadCacheChain(long key) => SkipReadCacheChain(store, key); - internal static (long logicalAddress, long physicalAddress) SkipReadCacheChain(TsavoriteKV store, long key) + internal static (long logicalAddress, long physicalAddress) SkipReadCacheChain(TsavoriteKV store, long key) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var (la, pa) = GetHashChain(store, key, out _, out _, out bool isReadCache); while (isReadCache) @@ -265,7 +306,7 @@ void VerifySplicedInKey(long expectedKey) Assert.AreEqual(expectedKey, storedKey); } - static void ClearCountsOnError(ClientSession> luContext) + static void ClearCountsOnError(ClientSession, LongStoreFunctions, LongAllocator> luContext) { // If we already have an exception, clear these counts so "Run" will not report them spuriously. 
luContext.sharedLockCount = 0; @@ -282,8 +323,7 @@ public void ChainVerificationTest() { PopulateAndEvict(); CreateChain(); - - ScanReadCacheChain(); + _ = ScanReadCacheChain(); } [Test] @@ -306,13 +346,13 @@ void doTest(long key) Assert.IsFalse(status.Found, status.ToString()); } - doTest(lowChainKey); - doTest(highChainKey); - doTest(midChainKey); - ScanReadCacheChain(new[] { lowChainKey, midChainKey, highChainKey }, evicted: false); + doTest(LowChainKey); + doTest(HighChainKey); + doTest(MidChainKey); + _ = ScanReadCacheChain([LowChainKey, MidChainKey, HighChainKey], evicted: false); store.ReadCacheEvict(store.ReadCache.BeginAddress, readCacheBelowMidChainKeyEvictionAddress); - ScanReadCacheChain(new[] { lowChainKey, midChainKey, highChainKey }, evicted: true, deleted: true); + _ = ScanReadCacheChain([LowChainKey, MidChainKey, HighChainKey], evicted: true, deleted: true); } [Test] @@ -336,23 +376,23 @@ void doTest(long key) } // Should be found in the readcache before deletion - Assert.IsTrue(GetRecordInInMemoryHashChain(lowChainKey, out bool isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(LowChainKey, out bool isReadCache)); Assert.IsTrue(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(midChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(MidChainKey, out isReadCache)); Assert.IsTrue(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(highChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(HighChainKey, out isReadCache)); Assert.IsTrue(isReadCache); // Delete all keys in the readcache chain below midChainKey. - for (var ii = lowChainKey; ii < midChainKey; ++ii) + for (var ii = LowChainKey; ii < MidChainKey; ++ii) doTest(ii); // LowChainKey should not be found in the readcache after deletion to just below midChainKey, but mid- and highChainKey should not be affected. 
- Assert.IsTrue(GetRecordInInMemoryHashChain(lowChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(LowChainKey, out isReadCache)); Assert.IsFalse(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(midChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(MidChainKey, out isReadCache)); Assert.IsTrue(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(highChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(HighChainKey, out isReadCache)); Assert.IsTrue(isReadCache); store.ReadCacheEvict(store.ReadCache.BeginAddress, readCacheBelowMidChainKeyEvictionAddress); @@ -360,11 +400,11 @@ void doTest(long key) // Following deletion to just below midChainKey: // lowChainKey's tombstone should still be found in the mutable portion of the log // midChainKey and highChainKey should be found in the readcache - Assert.IsTrue(GetRecordInInMemoryHashChain(lowChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(LowChainKey, out isReadCache)); Assert.IsFalse(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(midChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(MidChainKey, out isReadCache)); Assert.IsTrue(isReadCache); - Assert.IsTrue(GetRecordInInMemoryHashChain(highChainKey, out isReadCache)); + Assert.IsTrue(GetRecordInInMemoryHashChain(HighChainKey, out isReadCache)); Assert.IsTrue(isReadCache); } @@ -401,7 +441,7 @@ void doTest(long key) if (useRMW) { // RMW will use the readcache entry for its source and then invalidate it. 
- status = bContext.RMW(key, value + valueAdd); + status = bContext.RMW(key, value + ValueAdd); Assert.IsTrue(status.Found && status.Record.CopyUpdated, status.ToString()); Assert.IsTrue(FindRecordInReadCache(key, out bool invalid, out _, out _)); @@ -409,22 +449,22 @@ void doTest(long key) } else { - status = bContext.Upsert(key, value + valueAdd); + status = bContext.Upsert(key, value + ValueAdd); Assert.IsTrue(status.Record.Created, status.ToString()); } status = bContext.Read(key, out value); Assert.IsTrue(status.Found, status.ToString()); - Assert.AreEqual(key + valueAdd * 2, value); + Assert.AreEqual(key + ValueAdd * 2, value); } - doTest(lowChainKey); - doTest(highChainKey); - doTest(midChainKey); - ScanReadCacheChain(new[] { lowChainKey, midChainKey, highChainKey }, evicted: false); + doTest(LowChainKey); + doTest(HighChainKey); + doTest(MidChainKey); + _ = ScanReadCacheChain([LowChainKey, MidChainKey, HighChainKey], evicted: false); store.ReadCacheEvict(store.ReadCache.BeginAddress, readCacheBelowMidChainKeyEvictionAddress); - ScanReadCacheChain(new[] { lowChainKey, midChainKey, highChainKey }, evicted: true); + _ = ScanReadCacheChain([LowChainKey, MidChainKey, HighChainKey], evicted: true); } [Test] @@ -439,12 +479,12 @@ public void SpliceInFromCTTTest() using var session = store.NewSession>(new SimpleSimpleFunctions()); var bContext = session.BasicContext; - long input = 0, output = 0, key = lowChainKey - mod; // key must be in evicted region for this test + long input = 0, output = 0, key = LowChainKey - HashMod; // key must be in evicted region for this test ReadOptions readOptions = new() { CopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog) }; var status = bContext.Read(ref key, ref input, ref output, ref readOptions, out _); Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); VerifySplicedInKey(key); } @@ -463,16 +503,16 @@ public void 
SpliceInFromUpsertTest([Values] RecordRegion recordRegion) long key = -1; - if (recordRegion == RecordRegion.Immutable || recordRegion == RecordRegion.OnDisk) + if (recordRegion is RecordRegion.Immutable or RecordRegion.OnDisk) { - key = spliceInExistingKey; - var status = bContext.Upsert(key, key + valueAdd); + key = SpliceInExistingKey; + var status = bContext.Upsert(key, key + ValueAdd); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } else { - key = spliceInNewKey; - var status = bContext.Upsert(key, key + valueAdd); + key = SpliceInNewKey; + var status = bContext.Upsert(key, key + ValueAdd); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } @@ -492,11 +532,11 @@ public void SpliceInFromRMWTest([Values] RecordRegion recordRegion) var bContext = session.BasicContext; long key = -1, output = -1; - if (recordRegion == RecordRegion.Immutable || recordRegion == RecordRegion.OnDisk) + if (recordRegion is RecordRegion.Immutable or RecordRegion.OnDisk) { // Existing key - key = spliceInExistingKey; - var status = bContext.RMW(key, key + valueAdd); + key = SpliceInExistingKey; + var status = bContext.RMW(key, key + ValueAdd); // If OnDisk, this used the readcache entry for its source and then invalidated it. Assert.IsTrue(status.Found && status.Record.CopyUpdated, status.ToString()); @@ -507,20 +547,20 @@ public void SpliceInFromRMWTest([Values] RecordRegion recordRegion) } { // New key - key = spliceInNewKey; - status = bContext.RMW(key, key + valueAdd); + key = SpliceInNewKey; + status = bContext.RMW(key, key + ValueAdd); // This NOTFOUND key will return PENDING because we have to trace back through the collisions. 
Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } } else { - key = spliceInNewKey; - var status = bContext.RMW(key, key + valueAdd); + key = SpliceInNewKey; + var status = bContext.RMW(key, key + ValueAdd); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } @@ -540,15 +580,15 @@ public void SpliceInFromDeleteTest([Values] RecordRegion recordRegion) var bContext = session.BasicContext; long key = -1; - if (recordRegion == RecordRegion.Immutable || recordRegion == RecordRegion.OnDisk) + if (recordRegion is RecordRegion.Immutable or RecordRegion.OnDisk) { - key = spliceInExistingKey; + key = SpliceInExistingKey; var status = bContext.Delete(key); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } else { - key = spliceInNewKey; + key = SpliceInNewKey; var status = bContext.Delete(key); Assert.IsTrue(!status.Found && status.Record.Created, status.ToString()); } @@ -570,9 +610,9 @@ public void VerifyLockCountsAfterReadCacheEvict() var keys = new[] { - new FixedLengthLockableKeyStruct(lowChainKey, LockType.Exclusive, luContext), - new FixedLengthLockableKeyStruct(midChainKey, LockType.Shared, luContext), - new FixedLengthLockableKeyStruct(highChainKey, LockType.Exclusive, luContext) + new FixedLengthLockableKeyStruct(LowChainKey, LockType.Exclusive, luContext), + new FixedLengthLockableKeyStruct(MidChainKey, LockType.Shared, luContext), + new FixedLengthLockableKeyStruct(HighChainKey, LockType.Exclusive, luContext) }; luContext.BeginUnsafe(); @@ -600,7 +640,7 @@ public void VerifyLockCountsAfterReadCacheEvict() foreach (var idx in LockableUnsafeContextTests.EnumActionKeyIndices(keys, LockableUnsafeContextTests.LockOperationType.Unlock)) { ref var key = ref 
keys[idx]; - HashEntryInfo hei = new(store.comparer.GetHashCode64(ref key.Key)); + HashEntryInfo hei = new(store.storeFunctions.GetKeyHashCode64(ref key.Key)); OverflowBucketLockTableTests.PopulateHei(store, ref hei); var lockState = store.LockTable.GetLockState(ref hei); @@ -632,27 +672,12 @@ public void VerifyLockCountsAfterReadCacheEvict() class LongStressChainTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; - const long valueAdd = 1_000_000_000; + private LongComparerModulo comparer; + const long ValueAdd = 1_000_000_000; - const long numKeys = 2_000; - - struct LongComparerModulo : ITsavoriteEqualityComparer - { - readonly HashModulo modRange; - - internal LongComparerModulo(HashModulo mod) => modRange = mod; - - public bool Equals(ref long k1, ref long k2) => k1 == k2; - - // Force collisions to create a chain - public long GetHashCode64(ref long k) - { - long value = Utility.GetHashCode(k); - return modRange != HashModulo.NoMod ? value % (long)modRange : value; - } - } + const long NumKeys = 2_000; [SetUp] public void Setup() @@ -670,10 +695,6 @@ public void Setup() } log ??= Devices.CreateLogDevice(filename, deleteOnClose: true); - // Make the main log small enough that we force the readcache - ReadCacheSettings readCacheSettings = new() { MemorySizeBits = 15, PageSizeBits = 9 }; - var logSettings = new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10, ReadCacheSettings = readCacheSettings }; - HashModulo modRange = HashModulo.NoMod; foreach (var arg in TestContext.CurrentContext.Test.Arguments) { @@ -684,7 +705,21 @@ public void Setup() } } - store = new TsavoriteKV(1L << 20, logSettings, comparer: new LongComparerModulo(modRange)); + comparer = new LongComparerModulo((long)modRange); + + // Make the main log small enough that we force the readcache + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10, + ReadCacheMemorySize = 1L << 15, + 
ReadCachePageSize = 1L << 9, + ReadCacheEnabled = true + }, StoreFunctions.Create(comparer) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -740,14 +775,14 @@ unsafe void PopulateAndEvict() using var session = store.NewSession>(new SimpleSessionFunctions()); var bContext = session.BasicContext; - for (long ii = 0; ii < numKeys; ii++) + for (long ii = 0; ii < NumKeys; ii++) { long key = ii; var status = bContext.Upsert(ref key, ref key); Assert.IsFalse(status.IsPending); Assert.IsTrue(status.Record.Created, $"key {key}, status {status}"); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); store.Log.FlushAndEvict(true); } @@ -779,7 +814,7 @@ unsafe void runReadThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { var numCompleted = 0; - for (var ii = 0; ii < numKeys; ++ii) + for (var ii = 0; ii < NumKeys; ++ii) { long key = ii, output = 0; var status = bContext.Read(ref key, ref output); @@ -791,12 +826,12 @@ unsafe void runReadThread(int tid) { ++numCompleted; Assert.IsTrue(status.Found, $"key {key}, status {status}, wasPending {false}"); - Assert.AreEqual(ii, output % valueAdd); + Assert.AreEqual(ii, output % ValueAdd); } - if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == numKeys - 1)) + if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == NumKeys - 1)) { - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) { while (completedOutputs.Next()) @@ -808,12 +843,12 @@ unsafe void runReadThread(int tid) key = completedOutputs.Current.Key; Assert.AreEqual(completedOutputs.Current.RecordMetadata.Address == Constants.kInvalidAddress, status.Record.CopiedToReadCache, $"key {key}: {status}"); Assert.IsTrue(status.Found, $"key {key}, status {status}, wasPending {true}"); - Assert.AreEqual(key, 
output % valueAdd); + Assert.AreEqual(key, output % ValueAdd); } } } } - Assert.AreEqual(numKeys, numCompleted, "numCompleted"); + Assert.AreEqual(NumKeys, numCompleted, "numCompleted"); } } @@ -825,9 +860,9 @@ unsafe void runUpdateThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { var numCompleted = 0; - for (var ii = 0; ii < numKeys; ++ii) + for (var ii = 0; ii < NumKeys; ++ii) { - long key = ii, input = ii + valueAdd * tid, output = 0; + long key = ii, input = ii + ValueAdd * tid, output = 0; var status = updateOp == UpdateOp.RMW ? bContext.RMW(ref key, ref input, ref output) : bContext.Upsert(ref key, ref input, ref input, ref output); @@ -843,12 +878,12 @@ unsafe void runUpdateThread(int tid) ++numCompleted; if (updateOp == UpdateOp.RMW) // Upsert will not try to find records below HeadAddress, but it may find them in-memory Assert.IsTrue(status.Found, $"key {key}, status {status}, wasPending {false}"); - Assert.AreEqual(ii + valueAdd * tid, output); + Assert.AreEqual(ii + ValueAdd * tid, output); } - if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == numKeys - 1)) + if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == NumKeys - 1)) { - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) { while (completedOutputs.Next()) @@ -856,17 +891,17 @@ unsafe void runUpdateThread(int tid) ++numCompleted; if (updateOp == UpdateOp.RMW) // Upsert will not try to find records below HeadAddress, but it may find them in-memory Assert.IsTrue(completedOutputs.Current.Status.Found, $"key {completedOutputs.Current.Key}, status {completedOutputs.Current.Status}, wasPending {true}"); - Assert.AreEqual(completedOutputs.Current.Key + valueAdd * tid, completedOutputs.Current.Output); + Assert.AreEqual(completedOutputs.Current.Key + ValueAdd * tid, 
completedOutputs.Current.Output); } } } } - Assert.AreEqual(numKeys, numCompleted, "numCompleted"); + Assert.AreEqual(NumKeys, numCompleted, "numCompleted"); } } - List tasks = new(); // Task rather than Thread for propagation of exceptions. + List tasks = []; // Task rather than Thread for propagation of exceptions. for (int t = 1; t <= numReadThreads + numWriteThreads; t++) { var tid = t; @@ -875,33 +910,19 @@ unsafe void runUpdateThread(int tid) else tasks.Add(Task.Factory.StartNew(() => runUpdateThread(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } } class SpanByteStressChainTests { - private TsavoriteKV store; + private TsavoriteKV> store; private IDevice log; - const long valueAdd = 1_000_000_000; + SpanByteComparerModulo comparer; - const long numKeys = 2_000; - - struct SpanByteComparerModulo : ITsavoriteEqualityComparer - { - readonly HashModulo modRange; + const long ValueAdd = 1_000_000_000; - internal SpanByteComparerModulo(HashModulo mod) => modRange = mod; - - public readonly bool Equals(ref SpanByte k1, ref SpanByte k2) => SpanByteComparer.StaticEquals(ref k1, ref k2); - - // Force collisions to create a chain - public long GetHashCode64(ref SpanByte k) - { - var value = SpanByteComparer.StaticGetHashCode64(ref k); - return modRange != HashModulo.NoMod ? 
value % (long)modRange : value; - } - } + const long NumKeys = 2_000; [SetUp] public void Setup() @@ -919,10 +940,6 @@ public void Setup() } log ??= Devices.CreateLogDevice(filename, deleteOnClose: true); - // Make the main log small enough that we force the readcache - var readCacheSettings = new ReadCacheSettings { MemorySizeBits = 15, PageSizeBits = 9 }; - var logSettings = new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 10, ReadCacheSettings = readCacheSettings }; - HashModulo modRange = HashModulo.NoMod; foreach (var arg in TestContext.CurrentContext.Test.Arguments) { @@ -933,7 +950,21 @@ public void Setup() } } - store = new TsavoriteKV(1L << 20, logSettings, comparer: new SpanByteComparerModulo(modRange)); + comparer = new SpanByteComparerModulo(modRange); + + // Make the main log small enough that we force the readcache + store = new(new() + { + IndexSize = 1L << 20, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 10, + ReadCacheMemorySize = 1L << 15, + ReadCachePageSize = 1L << 9, + ReadCacheEnabled = true + }, StoreFunctions.Create(comparer, SpanByteRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -997,7 +1028,7 @@ unsafe void PopulateAndEvict() Span keyVec = stackalloc byte[sizeof(long)]; var key = SpanByte.FromPinnedSpan(keyVec); - for (long ii = 0; ii < numKeys; ii++) + for (long ii = 0; ii < NumKeys; ii++) { Assert.IsTrue(BitConverter.TryWriteBytes(keyVec, ii)); var status = bContext.Upsert(ref key, ref key); @@ -1036,7 +1067,7 @@ unsafe void runReadThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { var numCompleted = 0; - for (var ii = 0; ii < numKeys; ++ii) + for (var ii = 0; ii < NumKeys; ++ii) { SpanByteAndMemory output = default; @@ -1053,11 +1084,11 @@ unsafe void runReadThread(int tid) Assert.IsTrue(status.Found, $"tid {tid}, key {ii}, {status}, wasPending {false}, pt 1"); 
Assert.IsNotNull(output.Memory, $"tid {tid}, key {ii}, wasPending {false}, pt 2"); long value = BitConverter.ToInt64(output.AsReadOnlySpan()); - Assert.AreEqual(ii, value % valueAdd, $"tid {tid}, key {ii}, wasPending {false}, pt 3"); + Assert.AreEqual(ii, value % ValueAdd, $"tid {tid}, key {ii}, wasPending {false}, pt 3"); output.Memory.Dispose(); } - if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == numKeys - 1)) + if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == NumKeys - 1)) { bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) @@ -1076,13 +1107,13 @@ unsafe void runReadThread(int tid) Assert.IsTrue(status.Found, $"tid {tid}, key {keyLong}, {status}, wasPending {true}, pt 1"); Assert.IsNotNull(output.Memory, $"tid {tid}, key {keyLong}, wasPending {true}, pt 2"); long value = BitConverter.ToInt64(output.AsReadOnlySpan()); - Assert.AreEqual(keyLong, value % valueAdd, $"tid {tid}, key {keyLong}, wasPending {true}, pt 3"); + Assert.AreEqual(keyLong, value % ValueAdd, $"tid {tid}, key {keyLong}, wasPending {true}, pt 3"); output.Memory.Dispose(); } } } } - Assert.AreEqual(numKeys, numCompleted, "numCompleted"); + Assert.AreEqual(NumKeys, numCompleted, "numCompleted"); } } @@ -1099,12 +1130,12 @@ unsafe void runUpdateThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { var numCompleted = 0; - for (var ii = 0; ii < numKeys; ++ii) + for (var ii = 0; ii < NumKeys; ++ii) { SpanByteAndMemory output = default; Assert.IsTrue(BitConverter.TryWriteBytes(keyVec, ii)); - Assert.IsTrue(BitConverter.TryWriteBytes(inputVec, ii + valueAdd)); + Assert.IsTrue(BitConverter.TryWriteBytes(inputVec, ii + ValueAdd)); var status = updateOp == UpdateOp.RMW ? 
bContext.RMW(ref key, ref input, ref output) : bContext.Upsert(ref key, ref input, ref input, ref output); @@ -1122,12 +1153,12 @@ unsafe void runUpdateThread(int tid) Assert.IsTrue(status.Found, $"tid {tid}, key {ii}, {status}"); long value = BitConverter.ToInt64(output.AsReadOnlySpan()); - Assert.AreEqual(ii + valueAdd, value, $"tid {tid}, key {ii}, wasPending {false}"); + Assert.AreEqual(ii + ValueAdd, value, $"tid {tid}, key {ii}, wasPending {false}"); output.Memory?.Dispose(); } - if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == numKeys - 1)) + if (numPending > 0 && ((numPending % RcTestGlobals.PendingMod == 0) || ii == NumKeys - 1)) { bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) @@ -1145,14 +1176,14 @@ unsafe void runUpdateThread(int tid) Assert.IsTrue(status.Found, $"tid {tid}, key {keyLong}, {status}"); long value = BitConverter.ToInt64(output.AsReadOnlySpan()); - Assert.AreEqual(keyLong + valueAdd, value, $"tid {tid}, key {keyLong}, wasPending {true}"); + Assert.AreEqual(keyLong + ValueAdd, value, $"tid {tid}, key {keyLong}, wasPending {true}"); output.Memory?.Dispose(); } } } } - Assert.AreEqual(numKeys, numCompleted, "numCompleted"); + Assert.AreEqual(NumKeys, numCompleted, "numCompleted"); } } diff --git a/libs/storage/Tsavorite/cs/test/RecoveryChecks.cs b/libs/storage/Tsavorite/cs/test/RecoveryChecks.cs index a65a9fe03e..05baa20fa8 100644 --- a/libs/storage/Tsavorite/cs/test/RecoveryChecks.cs +++ b/libs/storage/Tsavorite/cs/test/RecoveryChecks.cs @@ -11,6 +11,9 @@ namespace Tsavorite.test.recovery { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + public enum DeviceMode { Local, @@ -20,13 +23,13 @@ public enum DeviceMode public class RecoveryCheckBase { protected IDevice log; - protected const int numOps = 5000; + protected const int NumOps = 5000; protected AdId[] inputArray; protected void BaseSetup() { - inputArray = new 
AdId[numOps]; - for (int i = 0; i < numOps; i++) + inputArray = new AdId[NumOps]; + for (int i = 0; i < NumOps; i++) { inputArray[i].adId = i; } @@ -83,20 +86,27 @@ public class RecoveryCheck1Tests : RecoveryCheckBase [Category("CheckpointRestore")] [Category("Smoke")] - public async ValueTask RecoveryCheck1([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(128, 1 << 10)] int size) + public async ValueTask RecoveryCheck1([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(1L << 13, 1L << 16)] long indexSize) { - using var store1 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store1 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession(new MyFunctions()); var bc1 = s1.BasicContext; for (long key = 0; key < 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } if (useReadCache) @@ -112,26 +122,33 @@ public async ValueTask RecoveryCheck1([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); } var task = store1.TakeFullCheckpointAsync(checkpointType); - using var store2 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? 
new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); if (isAsync) { var (status, token) = await task; - await store2.RecoverAsync(default, token); + _ = await store2.RecoverAsync(default, token); } else { var (status, token) = task.AsTask().GetAwaiter().GetResult(); - store2.Recover(default, token); + _ = store2.Recover(default, token); } Assert.AreEqual(store1.Log.HeadAddress, store2.Log.HeadAddress); @@ -150,7 +167,7 @@ public async ValueTask RecoveryCheck1([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -166,28 +183,42 @@ public class RecoveryCheck2Tests : RecoveryCheckBase [Test] [Category("TsavoriteKV"), Category("CheckpointRestore")] - public async ValueTask RecoveryCheck2([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(128, 1 << 10)] int size) + public async ValueTask RecoveryCheck2([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(1L << 13, 1L << 16)] long indexSize) { - using var store1 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? 
new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store1 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession>(new SimpleSimpleFunctions()); var bc1 = s1.BasicContext; - using var store2 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); for (int i = 0; i < 5; i++) { for (long key = 1000 * i; key < 1000 * i + 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } if (useReadCache) @@ -203,7 +234,7 @@ public async ValueTask RecoveryCheck2([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); } var task = store1.TakeHybridLogCheckpointAsync(checkpointType); @@ -211,12 +242,12 @@ public async ValueTask RecoveryCheck2([Values] CheckpointType checkpointType, [V if (isAsync) { var (status, token) = await task; - await store2.RecoverAsync(default, token); + _ = await store2.RecoverAsync(default, 
token); } else { var (status, token) = task.AsTask().GetAwaiter().GetResult(); - store2.Recover(default, token); + _ = store2.Recover(default, token); } Assert.AreEqual(store1.Log.HeadAddress, store2.Log.HeadAddress); @@ -235,7 +266,7 @@ public async ValueTask RecoveryCheck2([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -247,21 +278,27 @@ public void RecoveryCheck2Repeated([Values] CheckpointType checkpointType) for (int i = 0; i < 6; i++) { - using var store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store = new TsavoriteKV(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); if (i > 0) - store.Recover(default, token); + _ = store.Recover(default, token); using var s1 = store.NewSession>(new SimpleSimpleFunctions()); var bc1 = s1.BasicContext; for (long key = 1000 * i; key < 1000 * i + 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } var task = store.TakeHybridLogCheckpointAsync(checkpointType); @@ -282,7 +319,7 @@ public void RecoveryCheck2Repeated([Values] CheckpointType checkpointType) Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -290,18 +327,25 @@ public void RecoveryCheck2Repeated([Values] CheckpointType checkpointType) [Category("TsavoriteKV"), Category("CheckpointRestore")] public void RecoveryRollback([Values] CheckpointType checkpointType) { - using var store = new 
TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 11, SegmentSizeBits = 11 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store = new TsavoriteKV(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 11, + SegmentSize = 1L << 11, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store.NewSession>(new SimpleSimpleFunctions()); var bc1 = s1.BasicContext; for (long key = 0; key < 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } var task = store.TakeHybridLogCheckpointAsync(checkpointType); @@ -318,7 +362,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePendingWithOutputs(out var completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out var completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.Found); @@ -328,7 +372,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) for (long key = 1000; key < 2000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } // Reset store to empty state @@ -343,7 +387,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) Assert.IsTrue(status.NotFound, $"status = {status}"); } } - bc1.CompletePendingWithOutputs(out completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.NotFound); @@ -351,7 +395,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) completedOutputs.Dispose(); // Rollback to previous checkpoint 
- store.Recover(default, token); + _ = store.Recover(default, token); for (long key = 0; key < 1000; key++) { @@ -363,7 +407,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePendingWithOutputs(out completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.Found); @@ -380,7 +424,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) Assert.IsTrue(status.NotFound, $"status = {status}"); } } - bc1.CompletePendingWithOutputs(out completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.NotFound); @@ -389,7 +433,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) for (long key = 1000; key < 2000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } for (long key = 0; key < 2000; key++) @@ -403,7 +447,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) } else { - bc1.CompletePendingWithOutputs(out completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.Found); @@ -412,7 +456,7 @@ public void RecoveryRollback([Values] CheckpointType checkpointType) completedOutputs.Dispose(); } } - bc1.CompletePendingWithOutputs(out completedOutputs, true); + _ = bc1.CompletePendingWithOutputs(out completedOutputs, true); while (completedOutputs.Next()) { Assert.IsTrue(completedOutputs.Current.Status.Found); @@ -433,28 +477,42 @@ public class RecoveryCheck3Tests : RecoveryCheckBase [Test] [Category("TsavoriteKV"), Category("CheckpointRestore")] - public async ValueTask RecoveryCheck3([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, 
[Values(128, 1 << 10)] int size) + public async ValueTask RecoveryCheck3([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(1L << 13, 1L << 16)] long indexSize) { - using var store1 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store1 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession>(new SimpleSimpleFunctions()); var bc1 = s1.BasicContext; - using var store2 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? 
new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); for (int i = 0; i < 5; i++) { for (long key = 1000 * i; key < 1000 * i + 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } if (useReadCache) @@ -470,7 +528,7 @@ public async ValueTask RecoveryCheck3([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); } var task = store1.TakeFullCheckpointAsync(checkpointType); @@ -478,12 +536,12 @@ public async ValueTask RecoveryCheck3([Values] CheckpointType checkpointType, [V if (isAsync) { var (status, token) = await task; - await store2.RecoverAsync(default, token); + _ = await store2.RecoverAsync(default, token); } else { var (status, token) = task.AsTask().GetAwaiter().GetResult(); - store2.Recover(default, token); + _ = store2.Recover(default, token); } Assert.AreEqual(store1.Log.HeadAddress, store2.Log.HeadAddress); @@ -502,7 +560,7 @@ public async ValueTask RecoveryCheck3([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -519,28 +577,42 @@ public class RecoveryCheck4Tests : RecoveryCheckBase [Test] [Category("TsavoriteKV"), Category("CheckpointRestore")] - public async ValueTask RecoveryCheck4([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(128, 1 << 10)] int size) + public async ValueTask RecoveryCheck4([Values] 
CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(1L << 13, 1L << 16)] long indexSize) { - using var store1 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store1 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession>(new SimpleSimpleFunctions()); var bc1 = s1.BasicContext; - using var store2 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? 
new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); for (int i = 0; i < 5; i++) { for (long key = 1000 * i; key < 1000 * i + 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } if (useReadCache) @@ -556,23 +628,23 @@ public async ValueTask RecoveryCheck4([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); } if (i == 0) - store1.TakeIndexCheckpointAsync().AsTask().GetAwaiter().GetResult(); + _ = store1.TakeIndexCheckpointAsync().AsTask().GetAwaiter().GetResult(); var task = store1.TakeHybridLogCheckpointAsync(checkpointType); if (isAsync) { var (status, token) = await task; - await store2.RecoverAsync(default, token); + _ = await store2.RecoverAsync(default, token); } else { var (status, token) = task.AsTask().GetAwaiter().GetResult(); - store2.Recover(default, token); + _ = store2.Recover(default, token); } Assert.AreEqual(store1.Log.HeadAddress, store2.Log.HeadAddress); @@ -591,7 +663,7 @@ public async ValueTask RecoveryCheck4([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -609,18 +681,26 @@ public class RecoveryCheck5Tests : RecoveryCheckBase [Test] [Category("TsavoriteKV")] [Category("CheckpointRestore")] - public async ValueTask RecoveryCheck5([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(128, 1 << 10)] 
int size) + public async ValueTask RecoveryCheck5([Values] CheckpointType checkpointType, [Values] bool isAsync, [Values] bool useReadCache, [Values(1L << 13, 1L << 16)] long indexSize) { - using var store1 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 14, ReadCacheSettings = useReadCache ? new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir }); + using var store1 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession(new MyFunctions()); var bc1 = s1.BasicContext; for (long key = 0; key < 1000; key++) { - bc1.Upsert(ref key, ref key); + _ = bc1.Upsert(ref key, ref key); } if (useReadCache) @@ -636,10 +716,10 @@ public async ValueTask RecoveryCheck5([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); } - store1.GrowIndex(); + _ = store1.GrowIndex(); for (long key = 0; key < 1000; key++) { @@ -651,25 +731,32 @@ public async ValueTask RecoveryCheck5([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc1.CompletePending(true); + _ = bc1.CompletePending(true); var task = store1.TakeFullCheckpointAsync(checkpointType); - using var store2 = new TsavoriteKV - (size, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = useReadCache ? 
new ReadCacheSettings() : null }, - checkpointSettings: new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = indexSize, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + ReadCacheEnabled = useReadCache, + CheckpointDir = TestUtils.MethodTestDir + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); if (isAsync) { var (status, token) = await task; - await store2.RecoverAsync(default, token); + _ = await store2.RecoverAsync(default, token); } else { var (status, token) = task.AsTask().GetAwaiter().GetResult(); - store2.Recover(default, token); + _ = store2.Recover(default, token); } Assert.AreEqual(store1.Log.HeadAddress, store2.Log.HeadAddress); @@ -689,7 +776,7 @@ public async ValueTask RecoveryCheck5([Values] CheckpointType checkpointType, [V Assert.AreEqual(key, output, $"output = {output}"); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); } } @@ -730,26 +817,28 @@ public async ValueTask IncrSnapshotRecoveryCheck([Values] DeviceMode deviceMode) private async ValueTask IncrSnapshotRecoveryCheck(ICheckpointManager checkpointManager) { - using var store1 = new TsavoriteKV - (1 << 10, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 20, ReadCacheSettings = null }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); + using var store1 = new TsavoriteKV(new() + { + IndexSize = 1L << 16, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 20, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var s1 = store1.NewSession(new MyFunctions2()); var bc1 = s1.BasicContext; for (long key = 0; 
key < 1000; key++) - { - bc1.Upsert(ref key, ref key); - } + _ = bc1.Upsert(ref key, ref key); var task = store1.TakeHybridLogCheckpointAsync(CheckpointType.Snapshot); var (success, token) = await task; for (long key = 950; key < 1000; key++) - { - bc1.Upsert(key, key + 1); - } + _ = bc1.Upsert(key, key + 1); var version1 = store1.CurrentVersion; var _result1 = store1.TryInitiateHybridLogCheckpoint(out var _token1, CheckpointType.Snapshot, true); @@ -759,9 +848,7 @@ private async ValueTask IncrSnapshotRecoveryCheck(ICheckpointManager checkpointM Assert.AreEqual(token, _token1); for (long key = 1000; key < 2000; key++) - { - bc1.Upsert(key, key + 1); - } + _ = bc1.Upsert(key, key + 1); var version2 = store1.CurrentVersion; var _result2 = store1.TryInitiateHybridLogCheckpoint(out var _token2, CheckpointType.Snapshot, true); @@ -771,11 +858,17 @@ private async ValueTask IncrSnapshotRecoveryCheck(ICheckpointManager checkpointM Assert.AreEqual(token, _token2); // Test that we can recover to latest version - using var store2 = new TsavoriteKV - (1 << 10, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 14, ReadCacheSettings = null }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); + using var store2 = new TsavoriteKV(new() + { + IndexSize = 1L << 16, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 14, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); await store2.RecoverAsync(default, _token2); @@ -793,16 +886,22 @@ private async ValueTask IncrSnapshotRecoveryCheck(ICheckpointManager checkpointM MyFunctions2.Verify(status, key, output); } } - bc2.CompletePending(true); + _ = bc2.CompletePending(true); // Test that we can recover to earlier version - using var store3 = new TsavoriteKV - (1 << 10, - 
logSettings: new LogSettings { LogDevice = log, MutableFraction = 1, PageSizeBits = 10, MemorySizeBits = 14, ReadCacheSettings = null }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } + using var store3 = new TsavoriteKV(new() + { + IndexSize = 1L << 16, + LogDevice = log, + MutableFraction = 1, + PageSize = 1L << 10, + MemorySize = 1L << 14, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(LongKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) ); - await store3.RecoverAsync(recoverTo: version1); + _ = await store3.RecoverAsync(recoverTo: version1); Assert.IsTrue(store3.EntryCount == 1000); using var s3 = store3.NewSession(new MyFunctions2()); @@ -816,7 +915,7 @@ private async ValueTask IncrSnapshotRecoveryCheck(ICheckpointManager checkpointM MyFunctions2.Verify(status, key, output); } } - bc3.CompletePending(true); + _ = bc3.CompletePending(true); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/RecoveryTestTypes.cs b/libs/storage/Tsavorite/cs/test/RecoveryTestTypes.cs index 7a9c4662f9..f5f9da583b 100644 --- a/libs/storage/Tsavorite/cs/test/RecoveryTestTypes.cs +++ b/libs/storage/Tsavorite/cs/test/RecoveryTestTypes.cs @@ -6,15 +6,18 @@ namespace Tsavorite.test.recovery.sumstore { - public struct AdId : ITsavoriteEqualityComparer + public struct AdId { public long adId; - public long GetHashCode64(ref AdId key) => Utility.GetHashCode(key.adId); + public override string ToString() => adId.ToString(); - public bool Equals(ref AdId k1, ref AdId k2) => k1.adId == k2.adId; + public struct Comparer : IKeyComparer + { + public long GetHashCode64(ref AdId key) => Utility.GetHashCode(key.adId); - public override string ToString() => adId.ToString(); + public bool Equals(ref AdId k1, ref AdId k2) => k1.adId == k2.adId; + } } public struct AdInput diff --git a/libs/storage/Tsavorite/cs/test/RecoveryTests.cs 
b/libs/storage/Tsavorite/cs/test/RecoveryTests.cs index 879cc03042..6fd6f75119 100644 --- a/libs/storage/Tsavorite/cs/test/RecoveryTests.cs +++ b/libs/storage/Tsavorite/cs/test/RecoveryTests.cs @@ -7,21 +7,32 @@ using System.Threading.Tasks; using NUnit.Framework; using Tsavorite.core; +using static Tsavorite.test.TestUtils; namespace Tsavorite.test.recovery.sumstore { + using LongAllocator = BlittableAllocator>>; + using LongStoreFunctions = StoreFunctions>; + using MyValueAllocator = GenericAllocator>>; + using MyValueStoreFunctions = StoreFunctions>; + + using SpanByteStoreFunctions = StoreFunctions; + + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class DeviceTypeRecoveryTests { - internal const long numUniqueKeys = (1 << 12); - internal const long keySpace = (1L << 14); - internal const long numOps = (1L << 17); - internal const long completePendingInterval = (1L << 10); - internal const long checkpointInterval = (1L << 14); - - private TsavoriteKV store; - private readonly List logTokens = new(); - private readonly List indexTokens = new(); + internal const long NumUniqueKeys = 1L << 12; + internal const long KeySpace = 1L << 20; + internal const long NumOps = 1L << 17; + internal const long CompletePendingInterval = 1L << 10; + internal const long CheckpointInterval = 1L << 14; + + private TsavoriteKV store; + private readonly List logTokens = []; + private readonly List indexTokens = []; private IDevice log; [SetUp] @@ -30,15 +41,20 @@ public void Setup() // Only clean these in the initial Setup, as tests use the other Setup() overload to recover logTokens.Clear(); indexTokens.Clear(); - TestUtils.DeleteDirectory(TestUtils.MethodTestDir, true); + DeleteDirectory(MethodTestDir, true); } - private void Setup(TestUtils.DeviceType deviceType) + private void Setup(DeviceType deviceType) { - log = TestUtils.CreateTestDevice(deviceType, Path.Join(TestUtils.MethodTestDir, "Test.log")); - 
store = new TsavoriteKV(keySpace, - new LogSettings { LogDevice = log, SegmentSizeBits = 25 }, //new LogSettings { LogDevice = log, MemorySizeBits = 14, PageSizeBits = 9 }, // locks ups at session.RMW line in Populate() for Local Memory - new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir } + log = CreateTestDevice(deviceType, Path.Join(MethodTestDir, "Test.log")); + store = new(new() + { + IndexSize = KeySpace, + LogDevice = log, + SegmentSize = 1L << 25, //MemorySize = 1L << 14, PageSize = 1L << 9, // locks ups at session.RMW line in Populate() for Local Memory + CheckpointDir = MethodTestDir + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) ); } @@ -54,10 +70,10 @@ private void TearDown(bool deleteDir) // Do NOT clean up here unless specified, as tests use this TearDown() to prepare for recovery if (deleteDir) - TestUtils.DeleteDirectory(TestUtils.MethodTestDir); + DeleteDirectory(MethodTestDir); } - private void PrepareToRecover(TestUtils.DeviceType deviceType) + private void PrepareToRecover(DeviceType deviceType) { TearDown(deleteDir: false); Setup(deviceType); @@ -66,7 +82,7 @@ private void PrepareToRecover(TestUtils.DeviceType deviceType) [Test] [Category("TsavoriteKV")] [Category("CheckpointRestore")] - public async ValueTask RecoveryTestSeparateCheckpoint([Values] bool isAsync, [Values] TestUtils.DeviceType deviceType) + public async ValueTask RecoveryTestSeparateCheckpoint([Values] bool isAsync, [Values] DeviceType deviceType) { Setup(deviceType); Populate(SeparateCheckpointAction); @@ -83,7 +99,7 @@ public async ValueTask RecoveryTestSeparateCheckpoint([Values] bool isAsync, [Va [Category("TsavoriteKV")] [Category("CheckpointRestore")] [Category("Smoke")] - public async ValueTask RecoveryTestFullCheckpoint([Values] bool isAsync, [Values] TestUtils.DeviceType deviceType) + public async ValueTask RecoveryTestFullCheckpoint([Values] bool isAsync, [Values] 
DeviceType deviceType) { Setup(deviceType); Populate(FullCheckpointAction); @@ -97,7 +113,7 @@ public async ValueTask RecoveryTestFullCheckpoint([Values] bool isAsync, [Values private void FullCheckpointAction(int opNum) { - if ((opNum + 1) % checkpointInterval == 0) + if ((opNum + 1) % CheckpointInterval == 0) { Guid token; while (!store.TryInitiateFullCheckpoint(out token, CheckpointType.Snapshot)) { } @@ -109,10 +125,10 @@ private void FullCheckpointAction(int opNum) private void SeparateCheckpointAction(int opNum) { - if ((opNum + 1) % checkpointInterval != 0) + if ((opNum + 1) % CheckpointInterval != 0) return; - var checkpointNum = (opNum + 1) / checkpointInterval; + var checkpointNum = (opNum + 1) / CheckpointInterval; Guid token; if (checkpointNum % 2 == 1) { @@ -130,10 +146,10 @@ private void SeparateCheckpointAction(int opNum) private void Populate(Action checkpointAction) { // Prepare the dataset - var inputArray = new AdInput[numOps]; - for (int i = 0; i < numOps; i++) + var inputArray = new AdInput[NumOps]; + for (int i = 0; i < NumOps; i++) { - inputArray[i].adId.adId = i % numUniqueKeys; + inputArray[i].adId.adId = i % NumUniqueKeys; inputArray[i].numClicks.numClicks = 1; } @@ -142,18 +158,18 @@ private void Populate(Action checkpointAction) var bContext = session.BasicContext; // Process the batch of input data - for (int i = 0; i < numOps; i++) + for (int i = 0; i < NumOps; i++) { - bContext.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default); + _ = bContext.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default); checkpointAction(i); - if (i % completePendingInterval == 0) - bContext.CompletePending(false); + if (i % CompletePendingInterval == 0) + _ = bContext.CompletePending(false); } // Make sure operations are completed - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } private async ValueTask RecoverAndTestAsync(int tokenIndex, bool isAsync) @@ -163,13 +179,13 @@ private async ValueTask 
RecoverAndTestAsync(int tokenIndex, bool isAsync) // Recover if (isAsync) - await store.RecoverAsync(indexToken, logToken); + _ = await store.RecoverAsync(indexToken, logToken); else - store.Recover(indexToken, logToken); + _ = store.Recover(indexToken, logToken); // Create array for reading - var inputArray = new AdInput[numUniqueKeys]; - for (int i = 0; i < numUniqueKeys; i++) + var inputArray = new AdInput[NumUniqueKeys]; + for (int i = 0; i < NumUniqueKeys; i++) { inputArray[i].adId.adId = i; inputArray[i].numClicks.numClicks = 0; @@ -183,7 +199,7 @@ private async ValueTask RecoverAndTestAsync(int tokenIndex, bool isAsync) Output output = default; // Issue read requests - for (var i = 0; i < numUniqueKeys; i++) + for (var i = 0; i < NumUniqueKeys; i++) { var status = bContext.Read(ref inputArray[i].adId, ref input, ref output, Empty.Default); Assert.IsTrue(status.Found, $"At tokenIndex {tokenIndex}, keyIndex {i}, AdId {inputArray[i].adId.adId}"); @@ -191,7 +207,7 @@ private async ValueTask RecoverAndTestAsync(int tokenIndex, bool isAsync) } // Complete all pending requests - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } } @@ -200,8 +216,8 @@ public class AllocatorTypeRecoveryTests { const int StackAllocMax = 12; const int RandSeed = 101; - const long expectedValueBase = DeviceTypeRecoveryTests.numUniqueKeys * (DeviceTypeRecoveryTests.numOps / DeviceTypeRecoveryTests.numUniqueKeys - 1); - private static long ExpectedValue(int key) => expectedValueBase + key; + const long ExpectedValueBase = DeviceTypeRecoveryTests.NumUniqueKeys * (DeviceTypeRecoveryTests.NumOps / DeviceTypeRecoveryTests.NumUniqueKeys - 1); + private static long ExpectedValue(int key) => ExpectedValueBase + key; private IDisposable storeDisp; private Guid logToken; @@ -210,32 +226,35 @@ public class AllocatorTypeRecoveryTests private IDevice objlog; private bool smallSector; - // 'object' to avoid generic args - private object serializerSettingsObj; - [SetUp] public void 
Setup() { smallSector = false; - serializerSettingsObj = null; // Only clean these in the initial Setup, as tests use the other Setup() overload to recover logToken = Guid.Empty; indexToken = Guid.Empty; - TestUtils.DeleteDirectory(TestUtils.MethodTestDir, true); + DeleteDirectory(MethodTestDir, true); } - private TsavoriteKV Setup() + private TsavoriteKV Setup(AllocatorType allocatorType, Func storeFunctionsCreator, Func allocatorCreator) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - log = new LocalMemoryDevice(1L << 26, 1L << 22, 2, sector_size: smallSector ? 64 : (uint)512, fileName: Path.Join(TestUtils.MethodTestDir, $"{typeof(TData).Name}.log")); - objlog = serializerSettingsObj is null - ? null - : new LocalMemoryDevice(1L << 26, 1L << 22, 2, fileName: Path.Join(TestUtils.MethodTestDir, $"{typeof(TData).Name}.obj.log")); - - var result = new TsavoriteKV(DeviceTypeRecoveryTests.keySpace, - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, SegmentSizeBits = 25 }, - new CheckpointSettings { CheckpointDir = TestUtils.MethodTestDir }, - serializerSettingsObj as SerializerSettings + log = new LocalMemoryDevice(1L << 26, 1L << 22, 2, sector_size: smallSector ? 64 : (uint)512, fileName: Path.Join(MethodTestDir, $"{typeof(TData).Name}.log")); + objlog = allocatorType == AllocatorType.Generic + ? 
new LocalMemoryDevice(1L << 26, 1L << 22, 2, fileName: Path.Join(MethodTestDir, $"{typeof(TData).Name}.obj.log")) + : null; + + var result = new TsavoriteKV(new() + { + IndexSize = DeviceTypeRecoveryTests.KeySpace, + LogDevice = log, + ObjectLogDevice = objlog, + SegmentSize = 1L << 25, + CheckpointDir = MethodTestDir + }, storeFunctionsCreator() + , allocatorCreator ); storeDisp = result; @@ -256,19 +275,22 @@ private void TearDown(bool deleteDir) // Do NOT clean up here unless specified, as tests use this TearDown() to prepare for recovery if (deleteDir) - TestUtils.DeleteDirectory(TestUtils.MethodTestDir); + DeleteDirectory(MethodTestDir); } - private TsavoriteKV PrepareToRecover() + private TsavoriteKV PrepareToRecover(AllocatorType allocatorType, + Func storeFunctionsCreator, Func allocatorCreator) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { TearDown(deleteDir: false); - return Setup(); + return Setup(allocatorType, storeFunctionsCreator, allocatorCreator); } [Test] [Category("TsavoriteKV")] [Category("CheckpointRestore")] - public async ValueTask RecoveryTestByAllocatorType([Values] TestUtils.AllocatorType allocatorType, [Values] bool isAsync) + public async ValueTask RecoveryTestByAllocatorType([Values] AllocatorType allocatorType, [Values] bool isAsync) { await TestDriver(allocatorType, isAsync); } @@ -276,36 +298,44 @@ public async ValueTask RecoveryTestByAllocatorType([Values] TestUtils.AllocatorT [Test] [Category("TsavoriteKV")] [Category("CheckpointRestore")] - public async ValueTask RecoveryTestFailOnSectorSize([Values] TestUtils.AllocatorType allocatorType, [Values] bool isAsync) + public async ValueTask RecoveryTestFailOnSectorSize([Values] AllocatorType allocatorType, [Values] bool isAsync) { smallSector = true; await TestDriver(allocatorType, isAsync); } - private async ValueTask TestDriver(TestUtils.AllocatorType allocatorType, [Values] bool isAsync) + private async ValueTask TestDriver(AllocatorType allocatorType, 
[Values] bool isAsync) { - ValueTask task; - switch (allocatorType) + var task = allocatorType switch { - case TestUtils.AllocatorType.FixedBlittable: - task = RunTest(Populate, Read, Recover, isAsync); - break; - case TestUtils.AllocatorType.SpanByte: - task = RunTest(Populate, Read, Recover, isAsync); - break; - case TestUtils.AllocatorType.Generic: - serializerSettingsObj = new MyValueSerializer(); - task = RunTest(Populate, Read, Recover, isAsync); - break; - default: - throw new ApplicationException("Unknown allocator type"); + AllocatorType.FixedBlittable => RunTest(allocatorType, + () => StoreFunctions.Create(LongKeyComparer.Instance), + (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions), + Populate, Read, Recover, isAsync), + AllocatorType.SpanByte => RunTest>(allocatorType, + StoreFunctions.Create, + (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions), + Populate, Read, Recover, isAsync), + AllocatorType.Generic => RunTest(allocatorType, + () => StoreFunctions.Create(new MyValue.Comparer(), () => new MyValueSerializer(), () => new MyValueSerializer(), DefaultRecordDisposer.Instance), + (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions), + Populate, Read, Recover, isAsync), + _ => throw new ApplicationException("Unknown allocator type"), }; + ; await task; } - private async ValueTask RunTest(Action> populateAction, Action> readAction, Func, bool, ValueTask> recoverFunc, bool isAsync) + private async ValueTask RunTest(AllocatorType allocatorType, + Func storeFunctionsCreator, Func allocatorCreator, + Action> populateAction, + Action> readAction, + Func, bool, ValueTask> recoverFunc, + bool isAsync) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - var store = Setup(); + var store = Setup(allocatorType, storeFunctionsCreator, allocatorCreator); populateAction(store); readAction(store); if (smallSector) @@ -320,24 +350,24 @@ private async ValueTask 
RunTest(Action> populat Assert.AreNotEqual(Guid.Empty, indexToken); readAction(store); - store = PrepareToRecover(); + store = PrepareToRecover(allocatorType, storeFunctionsCreator, allocatorCreator); await recoverFunc(store, isAsync); readAction(store); } - private void Populate(TsavoriteKV store) + private void Populate(TsavoriteKV store) { using var session = store.NewSession>(new SimpleSimpleFunctions()); var bContext = session.BasicContext; - for (int i = 0; i < DeviceTypeRecoveryTests.numOps; i++) - bContext.Upsert(i % DeviceTypeRecoveryTests.numUniqueKeys, i); - bContext.CompletePending(true); + for (int i = 0; i < DeviceTypeRecoveryTests.NumOps; i++) + _ = bContext.Upsert(i % DeviceTypeRecoveryTests.NumUniqueKeys, i); + _ = bContext.CompletePending(true); } static int GetRandomLength(Random r) => r.Next(StackAllocMax) + 1; // +1 to remain in range 1..StackAllocMax - private unsafe void Populate(TsavoriteKV store) + private unsafe void Populate(TsavoriteKV> store) { using var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -348,10 +378,10 @@ private unsafe void Populate(TsavoriteKV store) Span keySpan = stackalloc int[1]; Span valueSpan = stackalloc int[StackAllocMax]; - for (int i = 0; i < DeviceTypeRecoveryTests.numOps; i++) + for (int i = 0; i < DeviceTypeRecoveryTests.NumOps; i++) { // We must be consistent on length across iterations of each key value - var key0 = i % (int)DeviceTypeRecoveryTests.numUniqueKeys; + var key0 = i % (int)DeviceTypeRecoveryTests.NumUniqueKeys; if (key0 == 0) rng = new(RandSeed); @@ -363,26 +393,28 @@ private unsafe void Populate(TsavoriteKV store) valueSpan[j] = i; var valueSpanByte = valueSpan.Slice(0, len).AsSpanByte(); - bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); + _ = bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } - private unsafe void Populate(TsavoriteKV 
store) + private unsafe void Populate(TsavoriteKV store) { using var session = store.NewSession(new MyFunctions2()); var bContext = session.BasicContext; - for (int i = 0; i < DeviceTypeRecoveryTests.numOps; i++) + for (int i = 0; i < DeviceTypeRecoveryTests.NumOps; i++) { - var key = new MyValue { value = i % (int)DeviceTypeRecoveryTests.numUniqueKeys }; + var key = new MyValue { value = i % (int)DeviceTypeRecoveryTests.NumUniqueKeys }; var value = new MyValue { value = i }; - bContext.Upsert(key, value); + _ = bContext.Upsert(key, value); } - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } - private async ValueTask Checkpoint(TsavoriteKV store, bool isAsync) + private async ValueTask Checkpoint(TsavoriteKV store, bool isAsync) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { if (isAsync) { @@ -398,32 +430,32 @@ private async ValueTask Checkpoint(TsavoriteKV store, bool indexToken = logToken; } - private async ValueTask RecoverAndReadTest(TsavoriteKV store, bool isAsync) + private async ValueTask RecoverAndReadTest(TsavoriteKV store, bool isAsync) { await Recover(store, isAsync); Read(store); } - private static void Read(TsavoriteKV store) + private static void Read(TsavoriteKV store) { using var session = store.NewSession>(new SimpleSimpleFunctions()); var bContext = session.BasicContext; - for (var i = 0; i < DeviceTypeRecoveryTests.numUniqueKeys; i++) + for (var i = 0; i < DeviceTypeRecoveryTests.NumUniqueKeys; i++) { - var status = bContext.Read(i % DeviceTypeRecoveryTests.numUniqueKeys, default, out long output); + var status = bContext.Read(i % DeviceTypeRecoveryTests.NumUniqueKeys, default, out long output); Assert.IsTrue(status.Found, $"keyIndex {i}"); Assert.AreEqual(ExpectedValue(i), output); } } - private async ValueTask RecoverAndReadTest(TsavoriteKV store, bool isAsync) + private async ValueTask RecoverAndReadTest(TsavoriteKV> store, bool isAsync) { await Recover(store, isAsync); Read(store); } - 
private static void Read(TsavoriteKV store) + private static void Read(TsavoriteKV> store) { using var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -433,7 +465,7 @@ private static void Read(TsavoriteKV store) Span keySpan = stackalloc int[1]; var keySpanByte = keySpan.AsSpanByte(); - for (var i = 0; i < DeviceTypeRecoveryTests.numUniqueKeys; i++) + for (var i = 0; i < DeviceTypeRecoveryTests.NumUniqueKeys; i++) { keySpan[0] = i; @@ -448,18 +480,18 @@ private static void Read(TsavoriteKV store) } } - private async ValueTask RecoverAndReadTest(TsavoriteKV store, bool isAsync) + private async ValueTask RecoverAndReadTest(TsavoriteKV store, bool isAsync) { await Recover(store, isAsync); Read(store); } - private static void Read(TsavoriteKV store) + private static void Read(TsavoriteKV store) { using var session = store.NewSession(new MyFunctions2()); var bContext = session.BasicContext; - for (var i = 0; i < DeviceTypeRecoveryTests.numUniqueKeys; i++) + for (var i = 0; i < DeviceTypeRecoveryTests.NumUniqueKeys; i++) { var key = new MyValue { value = i }; var status = bContext.Read(key, default, out MyOutput output); @@ -468,12 +500,14 @@ private static void Read(TsavoriteKV store) } } - private async ValueTask Recover(TsavoriteKV store, bool isAsync = false) + private async ValueTask Recover(TsavoriteKV store, bool isAsync = false) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { if (isAsync) - await store.RecoverAsync(indexToken, logToken); + _ = await store.RecoverAsync(indexToken, logToken); else - store.Recover(indexToken, logToken); + _ = store.Recover(indexToken, logToken); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/ReproReadCacheTest.cs b/libs/storage/Tsavorite/cs/test/ReproReadCacheTest.cs index bedadb84af..35d3553285 100644 --- a/libs/storage/Tsavorite/cs/test/ReproReadCacheTest.cs +++ b/libs/storage/Tsavorite/cs/test/ReproReadCacheTest.cs @@ -14,6 
+14,8 @@ namespace Tsavorite.test.ReadCacheTests { + using SpanByteStoreFunctions = StoreFunctions; + [TestFixture] internal class RandomReadCacheTests { @@ -49,46 +51,47 @@ public override void ReadCompletionCallback(ref SpanByte key, ref SpanByte input } IDevice log = default; - TsavoriteKV store = default; + TsavoriteKV> store = default; [SetUp] public void Setup() { DeleteDirectory(MethodTestDir, wait: true); - ReadCacheSettings readCacheSettings = default; string filename = Path.Join(MethodTestDir, "BasicTests.log"); + var kvSettings = new KVSettings() + { + IndexSize = 1L << 26, + MemorySize = 1L << 15, + PageSize = 1L << 12, + }; + foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is ReadCacheMode rcm) { if (rcm == ReadCacheMode.UseReadCache) - readCacheSettings = new() - { - MemorySizeBits = 15, - PageSizeBits = 12, - SecondChanceFraction = 0.1, - }; + { + kvSettings.ReadCacheMemorySize = 1L << 15; + kvSettings.ReadCachePageSize = 1L << 12; + kvSettings.ReadCacheSecondChanceFraction = 0.1; + kvSettings.ReadCacheEnabled = true; + }; continue; } if (arg is DeviceType deviceType) { - log = CreateTestDevice(deviceType, filename, deleteOnClose: true); + kvSettings.LogDevice = CreateTestDevice(deviceType, filename, deleteOnClose: true); continue; } } - log ??= Devices.CreateLogDevice(filename, deleteOnClose: true); + kvSettings.LogDevice ??= Devices.CreateLogDevice(filename, deleteOnClose: true); - store = new TsavoriteKV( - size: 1L << 20, - new LogSettings - { - LogDevice = log, - MemorySizeBits = 15, - PageSizeBits = 12, - ReadCacheSettings = readCacheSettings, - }); + store = new(kvSettings + , StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -117,7 +120,7 @@ public unsafe void RandomReadCacheTest([Values(1, 2, 8)] int numThreads, [Values const int PendingMod = 16; - void LocalRead(BasicContext sessionContext, int i, ref int numPending, bool isLast) + void 
LocalRead(BasicContext> sessionContext, int i, ref int numPending, bool isLast) { var keyString = $"{i}"; var inputString = $"{i * 2}"; @@ -147,7 +150,7 @@ void LocalRead(BasicContext 0 && ((numPending % PendingMod) == 0 || isLast)) { - sessionContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + _ = sessionContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); using (completedOutputs) { while (completedOutputs.Next()) @@ -213,7 +216,7 @@ void LocalRun(int startKey, int endKey) var numKeysPerThread = MaxKeys / numThreads; - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. for (int t = 0; t < numThreads; t++) { var tid = t; @@ -222,7 +225,7 @@ void LocalRun(int startKey, int endKey) else tasks.Add(Task.Factory.StartNew(() => LocalRun(numKeysPerThread * tid, numKeysPerThread * (tid + 1)))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/RevivificationTests.cs b/libs/storage/Tsavorite/cs/test/RevivificationTests.cs index 2dc5efc9ce..4a62a17192 100644 --- a/libs/storage/Tsavorite/cs/test/RevivificationTests.cs +++ b/libs/storage/Tsavorite/cs/test/RevivificationTests.cs @@ -15,6 +15,35 @@ namespace Tsavorite.test.Revivification { + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. 
+ internal readonly struct RevivificationSpanByteComparer : IKeyComparer + { + private readonly SpanByteComparer defaultComparer; + private readonly int collisionRange; + + internal RevivificationSpanByteComparer(CollisionRange range) + { + defaultComparer = new SpanByteComparer(); + collisionRange = (int)range; + } + + public bool Equals(ref SpanByte k1, ref SpanByte k2) => defaultComparer.Equals(ref k1, ref k2); + + // The hash code ends with 0 so mod Ten isn't so helpful, so shift + public long GetHashCode64(ref SpanByte k) => (defaultComparer.GetHashCode64(ref k) >> 4) % collisionRange; + } +} + +namespace Tsavorite.test.Revivification +{ + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; + + using SpanByteStoreFunctions = StoreFunctions; + public enum DeleteDest { FreeList, InChain } public enum CollisionRange { Ten = 10, None = int.MaxValue } @@ -49,16 +78,25 @@ internal static RMWInfo CopyToRMWInfo(ref UpsertInfo upsertInfo) Action = RMWAction.Default, }; - internal static FreeRecordPool CreateSingleBinFreeRecordPool(TsavoriteKV store, RevivificationBin binDef, int fixedRecordLength = 0) - => new(store, new RevivificationSettings() { FreeRecordBins = new[] { binDef } }, fixedRecordLength); + internal static FreeRecordPool CreateSingleBinFreeRecordPool( + TsavoriteKV store, RevivificationBin binDef, int fixedRecordLength = 0) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => new(store, new RevivificationSettings() { FreeRecordBins = [binDef] }, fixedRecordLength); - internal static bool HasRecords(TsavoriteKV store) + internal static bool HasRecords(TsavoriteKV store) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => HasRecords(store.RevivificationManager.FreeRecordPool); - internal static bool HasRecords(TsavoriteKV store, FreeRecordPool pool) + internal static bool 
HasRecords(TsavoriteKV store, FreeRecordPool pool) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => HasRecords(pool ?? store.RevivificationManager.FreeRecordPool); - internal static bool HasRecords(FreeRecordPool pool) + internal static bool HasRecords(FreeRecordPool pool) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { if (pool is not null) { @@ -71,7 +109,10 @@ internal static bool HasRecords(FreeRecordPool pool) return false; } - internal static FreeRecordPool SwapFreeRecordPool(TsavoriteKV store, FreeRecordPool inPool) + internal static FreeRecordPool SwapFreeRecordPool( + TsavoriteKV store, FreeRecordPool inPool) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { var pool = store.RevivificationManager.FreeRecordPool; store.RevivificationManager.FreeRecordPool = inPool; @@ -80,22 +121,45 @@ internal static FreeRecordPool SwapFreeRecordPool(Ts internal const int DefaultRecordWaitTimeoutMs = 2000; - internal static bool GetBinIndex(FreeRecordPool pool, int recordSize, out int binIndex) => pool.GetBinIndex(recordSize, out binIndex); - - internal static int GetBinCount(FreeRecordPool pool) => pool.bins.Length; - - internal static int GetRecordCount(FreeRecordPool pool, int binIndex) => pool.bins[binIndex].recordCount; - - internal static int GetMaxRecordSize(FreeRecordPool pool, int binIndex) => pool.bins[binIndex].maxRecordSize; - - internal static unsafe bool IsSet(FreeRecordPool pool, int binIndex, int recordIndex) => pool.bins[binIndex].records[recordIndex].IsSet; - - internal static bool TryTakeFromBin(FreeRecordPool pool, int binIndex, int recordSize, long minAddress, TsavoriteKV store, out long address, ref RevivificationStats revivStats) + internal static bool GetBinIndex(FreeRecordPool pool, int recordSize, out int binIndex) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.GetBinIndex(recordSize, out binIndex); + + internal static int 
GetBinCount(FreeRecordPool pool) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.bins.Length; + + internal static int GetRecordCount(FreeRecordPool pool, int binIndex) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.bins[binIndex].recordCount; + + internal static int GetMaxRecordSize(FreeRecordPool pool, int binIndex) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.bins[binIndex].maxRecordSize; + + internal static unsafe bool IsSet(FreeRecordPool pool, int binIndex, int recordIndex) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.bins[binIndex].records[recordIndex].IsSet; + + internal static bool TryTakeFromBin(FreeRecordPool pool, int binIndex, int recordSize, long minAddress, + TsavoriteKV store, out long address, ref RevivificationStats revivStats) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => pool.bins[binIndex].TryTake(recordSize, minAddress, store, out address, ref revivStats); - internal static int GetSegmentStart(FreeRecordPool pool, int binIndex, int recordSize) => pool.bins[binIndex].GetSegmentStart(recordSize); + internal static int GetSegmentStart(FreeRecordPool pool, int binIndex, int recordSize) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => pool.bins[binIndex].GetSegmentStart(recordSize); - internal static void WaitForRecords(TsavoriteKV store, bool want, FreeRecordPool pool = default) + internal static void WaitForRecords(TsavoriteKV store, bool want, FreeRecordPool pool = default) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { pool ??= store.RevivificationManager.FreeRecordPool; @@ -108,18 +172,23 @@ internal static void WaitForRecords(TsavoriteKV stor { if (sw.ElapsedMilliseconds >= DefaultRecordWaitTimeoutMs) Assert.Less(sw.ElapsedMilliseconds, DefaultRecordWaitTimeoutMs, $"Timeout while waiting for 
Pool.WaitForRecords to be {want}"); - Thread.Yield(); + _ = Thread.Yield(); } return; } } - internal static unsafe int GetFreeRecordCount(TsavoriteKV store) => GetFreeRecordCount(store.RevivificationManager.FreeRecordPool); + internal static unsafe int GetFreeRecordCount(TsavoriteKV store) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => GetFreeRecordCount(store.RevivificationManager.FreeRecordPool); - internal static unsafe int GetFreeRecordCount(FreeRecordPool pool) + internal static unsafe int GetFreeRecordCount(FreeRecordPool pool) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { // This returns the count of all records, not just the free ones. - int count = 0; + var count = 0; if (pool is not null) { foreach (var bin in pool.bins) @@ -134,39 +203,32 @@ internal static unsafe int GetFreeRecordCount(FreeRecordPool(TsavoriteKV store, TKey key) => AssertElidable(store, ref key); - internal static void AssertElidable(TsavoriteKV store, ref TKey key) + internal static void AssertElidable(TsavoriteKV store, TKey key) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator + => AssertElidable(store, ref key); + + internal static void AssertElidable(TsavoriteKV store, ref TKey key) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - OperationStackContext stackCtx = new(store.comparer.GetHashCode64(ref key)); + OperationStackContext stackCtx = new(store.storeFunctions.GetKeyHashCode64(ref key)); Assert.IsTrue(store.FindTag(ref stackCtx.hei), $"AssertElidable: Cannot find key {key}"); var recordInfo = store.hlog.GetInfo(store.hlog.GetPhysicalAddress(stackCtx.hei.Address)); - Assert.Less(recordInfo.PreviousAddress, store.hlog.BeginAddress, "AssertElidable: expected elidable key"); + Assert.Less(recordInfo.PreviousAddress, store.hlogBase.BeginAddress, "AssertElidable: expected elidable key"); } - internal static int GetRevivifiableRecordCount(TsavoriteKV store, int 
numRecords) + internal static int GetRevivifiableRecordCount(TsavoriteKV store, int numRecords) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => (int)(numRecords * store.RevivificationManager.revivifiableFraction); - internal static int GetMinRevivifiableKey(TsavoriteKV store, int numRecords) + internal static int GetMinRevivifiableKey(TsavoriteKV store, int numRecords) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => numRecords - GetRevivifiableRecordCount(store, numRecords); } - internal readonly struct RevivificationSpanByteComparer : ITsavoriteEqualityComparer - { - private readonly SpanByteComparer defaultComparer; - private readonly int collisionRange; - - internal RevivificationSpanByteComparer(CollisionRange range) - { - defaultComparer = new SpanByteComparer(); - collisionRange = (int)range; - } - - public bool Equals(ref SpanByte k1, ref SpanByte k2) => defaultComparer.Equals(ref k1, ref k2); - - // The hash code ends with 0 so mod Ten isn't so helpful, so shift - public long GetHashCode64(ref SpanByte k) => (defaultComparer.GetHashCode64(ref k) >> 4) % collisionRange; - } - [TestFixture] class RevivificationFixedLenTests { @@ -174,14 +236,14 @@ internal class RevivificationFixedLenFunctions : SimpleSimpleFunctions { } - const int numRecords = 1000; - internal const int valueMult = 1_000_000; + const int NumRecords = 1000; + internal const int ValueMult = 1_000_000; RevivificationFixedLenFunctions functions; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; [SetUp] @@ -211,8 +273,15 @@ public void Setup() revivificationSettings.RevivifiableFraction = revivifiableFraction.Value; if (recordElision.HasValue) revivificationSettings.RestoreDeletedRecordsIfBinIsFull = recordElision.Value == RecordElision.NoElide; - store = new TsavoriteKV(1L << 18, 
new LogSettings { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 12, MemorySizeBits = 20 }, - revivificationSettings: revivificationSettings); + store = new(new() + { + IndexSize = 1L << 24, + LogDevice = log, + PageSize = 1L << 12, + MemorySize = 1L << 20, + RevivificationSettings = revivificationSettings + }, StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions)); functions = new RevivificationFixedLenFunctions(); session = store.NewSession(functions); bContext = session.BasicContext; @@ -233,9 +302,9 @@ public void TearDown() void Populate() { - for (int key = 0; key < numRecords; key++) + for (int key = 0; key < NumRecords; key++) { - var status = bContext.Upsert(key, key * valueMult); + var status = bContext.Upsert(key, key * ValueMult); Assert.IsTrue(status.Record.Created, status.ToString()); } } @@ -251,16 +320,16 @@ public void SimpleFixedLenTest([Values] DeleteDest deleteDest, [Values(UpdateOp. if (stayInChain) _ = RevivificationTestUtils.SwapFreeRecordPool(store, default); - var deleteKey = RevivificationTestUtils.GetMinRevivifiableKey(store, numRecords); + var deleteKey = RevivificationTestUtils.GetMinRevivifiableKey(store, NumRecords); if (!stayInChain) RevivificationTestUtils.AssertElidable(store, deleteKey); var tailAddress = store.Log.TailAddress; - bContext.Delete(deleteKey); + _ = bContext.Delete(deleteKey); Assert.AreEqual(tailAddress, store.Log.TailAddress); - var updateKey = deleteDest == DeleteDest.InChain ? deleteKey : numRecords + 1; - var updateValue = updateKey + valueMult; + var updateKey = deleteDest == DeleteDest.InChain ? deleteKey : NumRecords + 1; + var updateValue = updateKey + ValueMult; if (!stayInChain) { @@ -268,10 +337,7 @@ public void SimpleFixedLenTest([Values] DeleteDest deleteDest, [Values(UpdateOp. 
RevivificationTestUtils.WaitForRecords(store, want: true); } - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(updateKey, updateValue); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(updateKey, updateValue); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(updateKey, updateValue) : bContext.RMW(updateKey, updateValue); if (!stayInChain) RevivificationTestUtils.WaitForRecords(store, want: false); @@ -288,9 +354,9 @@ public void UnelideTest([Values] RecordElision elision, [Values(UpdateOp.Upsert, var tailAddress = store.Log.TailAddress; // First delete all keys. This will overflow the bin. - for (var key = 0; key < numRecords; ++key) + for (var key = 0; key < NumRecords; ++key) { - bContext.Delete(key); + _ = bContext.Delete(key); Assert.AreEqual(tailAddress, store.Log.TailAddress); } @@ -298,20 +364,17 @@ public void UnelideTest([Values] RecordElision elision, [Values(UpdateOp.Upsert, RevivificationTestUtils.WaitForRecords(store, want: true); // Now re-add the keys. - for (var key = 0; key < numRecords; ++key) + for (var key = 0; key < NumRecords; ++key) { - var value = key + valueMult; - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(key, value); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(key, value); + var value = key + ValueMult; + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(key, value) : bContext.RMW(key, value); } // Now re-add the keys. For the elision case, we should see tailAddress grow sharply as only the records in the bin are available // for revivification. For In-Chain, we will revivify records that were unelided after the bin overflowed. But we have some records // ineligible for revivification due to revivifiableFraction. 
var recordSize = RecordInfo.GetLength() + sizeof(int) * 2; - var numIneligibleRecords = numRecords - RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords); + var numIneligibleRecords = NumRecords - RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords); var noElisionExpectedTailAddress = tailAddress + numIneligibleRecords * recordSize; if (elision == RecordElision.NoElide) @@ -323,7 +386,9 @@ public void UnelideTest([Values] RecordElision elision, [Values(UpdateOp.Upsert, [Test] [Category(RevivificationCategory)] [Category(SmokeTestCategory)] +#pragma warning disable IDE0060 // Remove unused parameter (used by setup) public void SimpleMinAddressAddTest([Values] RevivifiableFraction revivifiableFraction) +#pragma warning restore IDE0060 // Remove unused parameter { Populate(); @@ -332,19 +397,21 @@ public void SimpleMinAddressAddTest([Values] RevivifiableFraction revivifiableFr Assert.AreEqual(0, RevivificationTestUtils.GetFreeRecordCount(store)); // This should go to FreeList because it's above the RevivifiableFraction - Assert.IsTrue(bContext.Delete(numRecords - 1).Found); + Assert.IsTrue(bContext.Delete(NumRecords - 1).Found); Assert.AreEqual(1, RevivificationTestUtils.GetFreeRecordCount(store)); } [Test] [Category(RevivificationCategory)] [Category(SmokeTestCategory)] +#pragma warning disable IDE0060 // Remove unused parameter (used by setup) public void SimpleMinAddressTakeTest([Values] RevivifiableFraction revivifiableFraction, [Values(UpdateOp.Upsert, UpdateOp.RMW)] UpdateOp updateOp) +#pragma warning restore IDE0060 // Remove unused parameter { Populate(); // This should go to FreeList because it's above the RevivifiableFraction - Assert.IsTrue(bContext.Delete(numRecords - 1).Found); + Assert.IsTrue(bContext.Delete(NumRecords - 1).Found); Assert.AreEqual(1, RevivificationTestUtils.GetFreeRecordCount(store)); RevivificationTestUtils.WaitForRecords(store, want: true); @@ -352,22 +419,18 @@ public void 
SimpleMinAddressTakeTest([Values] RevivifiableFraction revivifiableF var pool = RevivificationTestUtils.SwapFreeRecordPool(store, default); // Now add a bunch of records to drop the FreeListed address below the RevivifiableFraction - int maxRecord = numRecords * 2; - for (int key = numRecords; key < maxRecord; key++) + int maxRecord = NumRecords * 2; + for (int key = NumRecords; key < maxRecord; key++) { - var status = bContext.Upsert(key, key * valueMult); + var status = bContext.Upsert(key, key * ValueMult); Assert.IsTrue(status.Record.Created, status.ToString()); } // Restore the pool - RevivificationTestUtils.SwapFreeRecordPool(store, pool); + _ = RevivificationTestUtils.SwapFreeRecordPool(store, pool); var tailAddress = store.Log.TailAddress; - - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(maxRecord, maxRecord * valueMult); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(maxRecord, maxRecord * valueMult); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(maxRecord, maxRecord * ValueMult) : bContext.RMW(maxRecord, maxRecord * ValueMult); Assert.Less(tailAddress, store.Log.TailAddress, "Expected tail address to grow (record was not revivified)"); } @@ -385,10 +448,10 @@ class RevivificationSpanByteTests internal class RevivificationSpanByteFunctions : SpanByteFunctions { - private readonly TsavoriteKV store; + private readonly TsavoriteKV> store; // Must be set after session is created - internal ClientSession session; + internal ClientSession> session; internal int expectedConcurrentDestLength = InitialLength; internal int expectedSingleDestLength = InitialLength; @@ -402,7 +465,7 @@ internal class RevivificationSpanByteFunctions : SpanByteFunctions internal bool readCcCalled, rmwCcCalled; - internal RevivificationSpanByteFunctions(TsavoriteKV store) + internal RevivificationSpanByteFunctions(TsavoriteKV> store) { this.store = store; } @@ -459,15 +522,15 @@ public override bool InitialUpdater(ref SpanByte key, ref SpanByte input, ref Sp if 
(value.Length == 0) { - Assert.AreEqual(expectedUsedValueLength, rmwInfo.UsedValueLength); // for the length header - Assert.AreEqual(SpanByteAllocator.kRecordAlignment, rmwInfo.FullValueLength); // This should be the "added record for Delete" case, so a "default" value + Assert.AreEqual(expectedUsedValueLength, rmwInfo.UsedValueLength); // for the length header + Assert.AreEqual(Constants.kRecordAlignment, rmwInfo.FullValueLength); // This should be the "added record for Delete" case, so a "default" value } else { Assert.AreEqual(expectedSingleDestLength, value.Length); Assert.AreEqual(expectedSingleFullValueLength, rmwInfo.FullValueLength); Assert.AreEqual(expectedUsedValueLength, rmwInfo.UsedValueLength); - Assert.GreaterOrEqual(rmwInfo.Address, store.hlog.ReadOnlyAddress); + Assert.GreaterOrEqual(rmwInfo.Address, store.hlogBase.ReadOnlyAddress); } return base.InitialUpdater(ref key, ref input, ref value, ref output, ref rmwInfo, ref recordInfo); } @@ -481,15 +544,15 @@ public override bool CopyUpdater(ref SpanByte key, ref SpanByte input, ref SpanB if (newValue.Length == 0) { - Assert.AreEqual(sizeof(int), rmwInfo.UsedValueLength); // for the length header - Assert.AreEqual(SpanByteAllocator.kRecordAlignment, rmwInfo.FullValueLength); // This should be the "added record for Delete" case, so a "default" value + Assert.AreEqual(sizeof(int), rmwInfo.UsedValueLength); // for the length header + Assert.AreEqual(Constants.kRecordAlignment, rmwInfo.FullValueLength); // This should be the "added record for Delete" case, so a "default" value } else { Assert.AreEqual(expectedSingleDestLength, newValue.Length); Assert.AreEqual(expectedSingleFullValueLength, rmwInfo.FullValueLength); Assert.AreEqual(expectedUsedValueLength, rmwInfo.UsedValueLength); - Assert.GreaterOrEqual(rmwInfo.Address, store.hlog.ReadOnlyAddress); + Assert.GreaterOrEqual(rmwInfo.Address, store.hlogBase.ReadOnlyAddress); } return base.CopyUpdater(ref key, ref input, ref oldValue, ref newValue, ref 
output, ref rmwInfo, ref recordInfo); } @@ -506,7 +569,7 @@ public override bool InPlaceUpdater(ref SpanByte key, ref SpanByte input, ref Sp var expectedUsedValueLength = expectedUsedValueLengths.Dequeue(); Assert.AreEqual(expectedUsedValueLength, rmwInfo.UsedValueLength); - Assert.GreaterOrEqual(rmwInfo.Address, store.hlog.ReadOnlyAddress); + Assert.GreaterOrEqual(rmwInfo.Address, store.hlogBase.ReadOnlyAddress); return base.InPlaceUpdater(ref key, ref input, ref value, ref output, ref rmwInfo, ref recordInfo); } @@ -523,7 +586,7 @@ public override bool SingleDeleter(ref SpanByte key, ref SpanByte value, ref Del var expectedUsedValueLength = expectedUsedValueLengths.Dequeue(); Assert.AreEqual(expectedUsedValueLength, deleteInfo.UsedValueLength); - Assert.GreaterOrEqual(deleteInfo.Address, store.hlog.ReadOnlyAddress); + Assert.GreaterOrEqual(deleteInfo.Address, store.hlogBase.ReadOnlyAddress); return base.SingleDeleter(ref key, ref value, ref deleteInfo, ref recordInfo); } @@ -537,7 +600,7 @@ public override bool ConcurrentDeleter(ref SpanByte key, ref SpanByte value, ref var expectedUsedValueLength = expectedUsedValueLengths.Dequeue(); Assert.AreEqual(expectedUsedValueLength, deleteInfo.UsedValueLength); - Assert.GreaterOrEqual(deleteInfo.Address, store.hlog.ReadOnlyAddress); + Assert.GreaterOrEqual(deleteInfo.Address, store.hlogBase.ReadOnlyAddress); return base.ConcurrentDeleter(ref key, ref value, ref deleteInfo, ref recordInfo); } @@ -583,20 +646,20 @@ public override void RMWCompletionCallback(ref SpanByte key, ref SpanByte input, static int RoundUpSpanByteFullValueLength(int dataLength) => RoundupTotalSizeFullValue(sizeof(int) + dataLength); - internal static int RoundupTotalSizeFullValue(int length) => (length + SpanByteAllocator.kRecordAlignment - 1) & (~(SpanByteAllocator.kRecordAlignment - 1)); + internal static int RoundupTotalSizeFullValue(int length) => (length + Constants.kRecordAlignment - 1) & (~(Constants.kRecordAlignment - 1)); static int 
RoundUpSpanByteUsedLength(int dataLength) => RoundUp(SpanByteTotalSize(dataLength), sizeof(int)); static int SpanByteTotalSize(int dataLength) => sizeof(int) + dataLength; - const int numRecords = 200; + const int NumRecords = 200; RevivificationSpanByteFunctions functions; RevivificationSpanByteComparer comparer; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV> store; + private ClientSession> session; + private BasicContext> bContext; private IDevice log; [SetUp] @@ -606,8 +669,16 @@ public void Setup() log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: true); CollisionRange collisionRange = CollisionRange.None; - LogSettings logSettings = new() { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 17, MemorySizeBits = 20 }; - var revivificationSettings = RevivificationSettings.PowerOf2Bins; + + var kvSettings = new KVSettings() + { + IndexSize = 1L << 24, + LogDevice = log, + PageSize = 1L << 17, + MemorySize = 1L << 20, + RevivificationSettings = RevivificationSettings.PowerOf2Bins + }; + foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is CollisionRange cr) @@ -617,19 +688,22 @@ public void Setup() } if (arg is PendingOp) { - logSettings.ReadCopyOptions = new(ReadCopyFrom.Device, ReadCopyTo.MainLog); + kvSettings.ReadCopyOptions = new(ReadCopyFrom.Device, ReadCopyTo.MainLog); continue; } if (arg is RevivificationEnabled revivEnabled) { if (revivEnabled == RevivificationEnabled.NoReviv) - revivificationSettings = default; + kvSettings.RevivificationSettings = default; continue; } } comparer = new RevivificationSpanByteComparer(collisionRange); - store = new TsavoriteKV(1L << 16, logSettings, comparer: comparer, revivificationSettings: revivificationSettings); + store = new(kvSettings + , StoreFunctions.Create(comparer, SpanByteRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) 
+ ); functions = new RevivificationSpanByteFunctions(store); session = store.NewSession(functions); @@ -650,7 +724,7 @@ public void TearDown() DeleteDirectory(MethodTestDir); } - void Populate() => Populate(0, numRecords); + void Populate() => Populate(0, NumRecords); void Populate(int from, int to) { @@ -714,11 +788,8 @@ public void SpanByteNoRevivLengthTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] Up functions.expectedUsedValueLengths.Enqueue(sizeof(int) + GrowLength); SpanByteAndMemory output = new(); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); Assert.IsEmpty(functions.expectedUsedValueLengths); if (growth == Growth.Shrink) @@ -736,10 +807,7 @@ public void SpanByteNoRevivLengthTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] Up functions.expectedSingleFullValueLength = RoundUpSpanByteFullValueLength(functions.expectedInputLength); functions.expectedUsedValueLengths.Enqueue(input.TotalSize); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.IsEmpty(functions.expectedUsedValueLengths); } } @@ -778,10 +846,7 @@ public void SpanByteSimpleTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] UpdateOp RevivificationTestUtils.WaitForRecords(store, want: true); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? 
bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.AreEqual(tailAddress, store.Log.TailAddress); } @@ -835,7 +900,7 @@ public void SpanByteIPUGrowAndRevivifyTest([Values(UpdateOp.Upsert, UpdateOp.RMW RevivificationTestUtils.WaitForRecords(store, want: true); // Get a new key and shrink the requested length so we revivify the free record from the failed IPU. - keyVec.Fill(numRecords + 1); + keyVec.Fill(NumRecords + 1); input = SpanByte.FromPinnedSpan(inputVec.Slice(0, InitialLength)); functions.expectedInputLength = InitialLength; @@ -893,30 +958,25 @@ public void SpanByteReadOnlyMinAddressTest([Values(UpdateOp.Upsert, UpdateOp.RMW functions.expectedSingleFullValueLength = functions.expectedConcurrentFullValueLength = RoundUpSpanByteFullValueLength(input); functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.Greater(store.Log.TailAddress, tailAddress); } public enum UpdateKey { Unfound, DeletedAboveRO, DeletedBelowRO, CopiedBelowRO }; - const byte unfound = numRecords + 2; - const byte delBelowRO = numRecords / 2 - 4; - const byte copiedBelowRO = numRecords / 2 - 5; + const byte Unfound = NumRecords + 2; + const byte DelBelowRO = NumRecords / 2 - 4; + const byte CopiedBelowRO = NumRecords / 2 - 5; private long PrepareDeletes(bool stayInChain, byte delAboveRO, FlushMode flushMode, CollisionRange collisionRange) { - Populate(0, numRecords / 2); + Populate(0, NumRecords / 2); - FreeRecordPool pool = default; - if (stayInChain) - pool = RevivificationTestUtils.SwapFreeRecordPool(store, pool); + var pool = stayInChain ? 
RevivificationTestUtils.SwapFreeRecordPool(store, null) : null; // Delete key below (what will be) the readonly line. This is for a target for the test; the record should not be revivified. Span keyVecDelBelowRO = stackalloc byte[KeyLength]; - keyVecDelBelowRO.Fill(delBelowRO); + keyVecDelBelowRO.Fill(DelBelowRO); var delKeyBelowRO = SpanByte.FromPinnedSpan(keyVecDelBelowRO); functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); @@ -928,7 +988,7 @@ private long PrepareDeletes(bool stayInChain, byte delAboveRO, FlushMode flushMo else if (flushMode == FlushMode.OnDisk) store.Log.FlushAndEvict(wait: true); - Populate(numRecords / 2 + 1, numRecords); + Populate(NumRecords / 2 + 1, NumRecords); var tailAddress = store.Log.TailAddress; @@ -976,7 +1036,7 @@ public void SpanByteUpdateRevivifyTest([Values] DeleteDest deleteDest, [Values] bool stayInChain = deleteDest == DeleteDest.InChain || collisionRange != CollisionRange.None; // Collisions make the key inelidable - byte delAboveRO = (byte)(numRecords - (stayInChain + byte delAboveRO = (byte)(NumRecords - (stayInChain ? (int)CollisionRange.Ten + 3 // Will remain in chain : 2)); // Will be sent to free list @@ -991,13 +1051,13 @@ public void SpanByteUpdateRevivifyTest([Values] DeleteDest deleteDest, [Values] var keyToTest = SpanByte.FromPinnedSpan(keyVecToTest); bool expectReviv; - if (updateKey == UpdateKey.Unfound || updateKey == UpdateKey.CopiedBelowRO) + if (updateKey is UpdateKey.Unfound or UpdateKey.CopiedBelowRO) { // Unfound key should be satisfied from the freelist if !stayInChain, else will allocate a new record as it does not match the key chain. // CopiedBelowRO should be satisfied from the freelist if !stayInChain, else will allocate a new record as it does not match the key chain // (but exercises a different code path than Unfound). // CollisionRange.Ten has a valid PreviousAddress so it is not elided from the cache. - byte fillByte = updateKey == UpdateKey.Unfound ? 
unfound : copiedBelowRO; + byte fillByte = updateKey == UpdateKey.Unfound ? Unfound : CopiedBelowRO; keyVecToTest.Fill(fillByte); inputVec.Fill(fillByte); expectReviv = !stayInChain && collisionRange != CollisionRange.Ten; @@ -1006,7 +1066,7 @@ public void SpanByteUpdateRevivifyTest([Values] DeleteDest deleteDest, [Values] { // DeletedBelowRO will not match the key for the in-chain above-RO slot, and we cannot reviv below RO or retrieve below-RO from the // freelist, so we will always allocate a new record unless we're using the freelist. - byte fillByte = delBelowRO; + byte fillByte = DelBelowRO; keyVecToTest.Fill(fillByte); inputVec.Fill(fillByte); expectReviv = !stayInChain && collisionRange != CollisionRange.Ten; @@ -1032,10 +1092,7 @@ public void SpanByteUpdateRevivifyTest([Values] DeleteDest deleteDest, [Values] functions.expectedSingleFullValueLength = functions.expectedConcurrentFullValueLength = RoundUpSpanByteFullValueLength(input); functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref keyToTest, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref keyToTest, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref keyToTest, ref input, ref input, ref output) : bContext.RMW(ref keyToTest, ref input); if (expectReviv) Assert.AreEqual(tailAddress, store.Log.TailAddress); @@ -1055,7 +1112,7 @@ public void SimpleRevivifyTest([Values] DeleteDest deleteDest, [Values(UpdateOp. _ = RevivificationTestUtils.SwapFreeRecordPool(store, default); // This freed record stays in the hash chain. - byte chainKey = numRecords / 2 - 1; + byte chainKey = NumRecords / 2 - 1; Span keyVec = stackalloc byte[KeyLength]; keyVec.Fill(chainKey); var key = SpanByte.FromPinnedSpan(keyVec); @@ -1079,10 +1136,7 @@ public void SimpleRevivifyTest([Values] DeleteDest deleteDest, [Values(UpdateOp. // Revivify in the chain. 
Because this stays in the chain, the expectedFullValueLength is roundup(InitialLength) functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.AreEqual(tailAddress, store.Log.TailAddress); } @@ -1101,8 +1155,8 @@ public void DeleteEntireChainAndRevivifyTest([Values(CollisionRange.Ten)] Collis var key = SpanByte.FromPinnedSpan(keyVec); var hash = comparer.GetHashCode64(ref key); - List deletedSlots = new(); - for (int ii = chainKey + 1; ii < numRecords; ++ii) + List deletedSlots = []; + for (int ii = chainKey + 1; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); if (comparer.GetHashCode64(ref key) != hash) @@ -1111,12 +1165,12 @@ public void DeleteEntireChainAndRevivifyTest([Values(CollisionRange.Ten)] Collis functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); var status = bContext.Delete(ref key); Assert.IsTrue(status.Found, status.ToString()); - if (ii > RevivificationTestUtils.GetMinRevivifiableKey(store, numRecords)) + if (ii > RevivificationTestUtils.GetMinRevivifiableKey(store, NumRecords)) deletedSlots.Add((byte)ii); } // For this test we're still limiting to byte repetition - Assert.Greater(255 - numRecords, deletedSlots.Count); + Assert.Greater(255 - NumRecords, deletedSlots.Count); RevivificationTestUtils.WaitForRecords(store, want: false); Assert.IsFalse(RevivificationTestUtils.HasRecords(store), "Expected empty pool"); Assert.Greater(deletedSlots.Count, 5); // should be about Ten @@ -1134,10 +1188,7 @@ public void DeleteEntireChainAndRevivifyTest([Values(CollisionRange.Ten)] Collis keyVec.Fill(deletedSlots[ii]); functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - if (updateOp == 
UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.AreEqual(tailAddress, store.Log.TailAddress); } } @@ -1158,7 +1209,7 @@ public void DeleteAllRecordsAndRevivifyTest([Values(CollisionRange.None)] Collis var recordSize = RecordInfo.GetLength() + RoundUp(sizeof(int) + keyVec.Length, 8) + RoundUp(sizeof(int) + InitialLength, 8); // Delete - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); @@ -1167,7 +1218,7 @@ public void DeleteAllRecordsAndRevivifyTest([Values(CollisionRange.None)] Collis Assert.IsTrue(status.Found, status.ToString()); } Assert.AreEqual(tailAddress, store.Log.TailAddress); - Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({numRecords}) free records"); + Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({NumRecords}) free records"); Span inputVec = stackalloc byte[InitialLength]; var input = SpanByte.FromPinnedSpan(inputVec); @@ -1182,17 +1233,14 @@ public void DeleteAllRecordsAndRevivifyTest([Values(CollisionRange.None)] Collis functions.expectedSingleFullValueLength = functions.expectedConcurrentFullValueLength = RoundUpSpanByteFullValueLength(InitialLength); // Revivify - var revivifiableKeyCount = RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords); - for (var ii = 0; ii < numRecords; ++ii) + var revivifiableKeyCount = RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords); + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); 
functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); if (ii < revivifiableKeyCount) Assert.AreEqual(tailAddress, store.Log.TailAddress, $"unexpected new record for key {ii}"); else @@ -1206,7 +1254,7 @@ public void DeleteAllRecordsAndRevivifyTest([Values(CollisionRange.None)] Collis RevivificationTestUtils.WaitForRecords(store, want: false); // Confirm - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); var status = bContext.Read(ref key, ref output); @@ -1225,7 +1273,7 @@ public void DeleteAllRecordsAndTakeSnapshotTest() var key = SpanByte.FromPinnedSpan(keyVec); // Delete - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); @@ -1233,7 +1281,7 @@ public void DeleteAllRecordsAndTakeSnapshotTest() var status = bContext.Delete(ref key); Assert.IsTrue(status.Found, status.ToString()); } - Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({numRecords}) free records"); + Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({NumRecords}) free records"); _ = store.TakeHybridLogCheckpointAsync(CheckpointType.Snapshot).GetAwaiter().GetResult(); } @@ -1249,7 +1297,7 @@ public void DeleteAllRecordsAndIterateTest() var key = SpanByte.FromPinnedSpan(keyVec); // Delete - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); @@ -1259,7 +1307,7 @@ public void 
DeleteAllRecordsAndIterateTest() var status = bContext.Delete(ref key); Assert.IsTrue(status.Found, status.ToString()); } - Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({numRecords}) free records"); + Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords), RevivificationTestUtils.GetFreeRecordCount(store), $"Expected numRecords ({NumRecords}) free records"); using var iterator = session.Iterate(); while (iterator.GetNext(out _)) @@ -1271,7 +1319,7 @@ public void DeleteAllRecordsAndIterateTest() [Category(SmokeTestCategory)] public void BinSelectionTest() { - FreeRecordPool pool = store.RevivificationManager.FreeRecordPool; + var pool = store.RevivificationManager.FreeRecordPool; int expectedBin = 0, recordSize = RevivificationTestUtils.GetMaxRecordSize(pool, expectedBin); while (true) { @@ -1298,7 +1346,7 @@ public void BinSelectionTest() //[Repeat(30)] public unsafe void ArtificialBinWrappingTest() { - FreeRecordPool pool = store.RevivificationManager.FreeRecordPool; + var pool = store.RevivificationManager.FreeRecordPool; if (TestContext.CurrentContext.CurrentRepeatCount > 0) Debug.WriteLine($"*** Current test iteration: {TestContext.CurrentContext.CurrentRepeatCount + 1} ***"); @@ -1339,7 +1387,7 @@ public unsafe void ArtificialBinWrappingTest() revivStats.Reset(); for (var ii = 0; ii < recordCount; ++ii) Assert.IsTrue(RevivificationTestUtils.TryTakeFromBin(pool, binIndex, recordSize, minAddress, store, out _, ref revivStats), $"ArtificialBinWrappingTest: failed to Take at ii == {ii}"); - var statsString = revivStats.Dump(); + _ = revivStats.Dump(); } [Test] @@ -1355,7 +1403,7 @@ public unsafe void LiveBinWrappingTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U // Note: this test assumes no collisions (every delete goes to the FreeList) - FreeRecordPool pool = store.RevivificationManager.FreeRecordPool; + var pool = 
store.RevivificationManager.FreeRecordPool; Span keyVec = stackalloc byte[KeyLength]; var key = SpanByte.FromPinnedSpan(keyVec); @@ -1373,7 +1421,7 @@ public unsafe void LiveBinWrappingTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U // Delete functions.expectedInputLength = InitialLength; - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); @@ -1387,14 +1435,14 @@ public unsafe void LiveBinWrappingTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U if (deleteDest == DeleteDest.FreeList && waitMode == WaitMode.Wait) { var actualNumRecords = RevivificationTestUtils.GetFreeRecordCount(store); - Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords), actualNumRecords, $"mismatched free record count"); + Assert.AreEqual(RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords), actualNumRecords, $"mismatched free record count"); } // Revivify functions.expectedInputLength = InitialLength; functions.expectedSingleDestLength = InitialLength; functions.expectedConcurrentDestLength = InitialLength; - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); @@ -1403,15 +1451,12 @@ public unsafe void LiveBinWrappingTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U long tailAddress = store.Log.TailAddress; SpanByteAndMemory output = new(); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? 
bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); output.Memory?.Dispose(); if (deleteDest == DeleteDest.FreeList && waitMode == WaitMode.Wait && tailAddress != store.Log.TailAddress) { - var expectedReviv = ii < RevivificationTestUtils.GetRevivifiableRecordCount(store, numRecords); + var expectedReviv = ii < RevivificationTestUtils.GetRevivifiableRecordCount(store, NumRecords); if (expectedReviv != (tailAddress == store.Log.TailAddress)) { var freeRecs = RevivificationTestUtils.GetFreeRecordCount(store); @@ -1448,7 +1493,7 @@ public void LiveBinWrappingNoRevivTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U { // Delete functions.expectedInputLength = InitialLength; - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); @@ -1458,7 +1503,7 @@ public void LiveBinWrappingNoRevivTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U Assert.IsTrue(status.Found, $"{status} for key {ii}, iter {iter}"); } - for (var ii = 0; ii < numRecords; ++ii) + for (var ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); @@ -1466,10 +1511,7 @@ public void LiveBinWrappingNoRevivTest([Values(UpdateOp.Upsert, UpdateOp.RMW)] U functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); SpanByteAndMemory output = new(); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? 
bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); output.Memory?.Dispose(); } } @@ -1488,7 +1530,7 @@ public void SimpleOversizeRevivifyTest([Values] DeleteDest deleteDest, [Values(U if (stayInChain) _ = RevivificationTestUtils.SwapFreeRecordPool(store, default); - byte chainKey = numRecords + 1; + byte chainKey = NumRecords + 1; Span keyVec = stackalloc byte[KeyLength]; var key = SpanByte.FromPinnedSpan(keyVec); @@ -1508,10 +1550,7 @@ public void SimpleOversizeRevivifyTest([Values] DeleteDest deleteDest, [Values(U functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(OversizeLength)); // Initial insert of the oversize record - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); // Delete it functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(OversizeLength)); @@ -1524,10 +1563,7 @@ public void SimpleOversizeRevivifyTest([Values] DeleteDest deleteDest, [Values(U // Revivify in the chain. Because this is oversize, the expectedFullValueLength remains the same functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(OversizeLength)); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(ref key, ref input, ref input, ref output); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(ref key, ref input); + _ = updateOp == UpdateOp.Upsert ? 
bContext.Upsert(ref key, ref input, ref input, ref output) : bContext.RMW(ref key, ref input); Assert.AreEqual(tailAddress, store.Log.TailAddress); } @@ -1539,13 +1575,13 @@ public enum PendingOp { Read, RMW }; [Category(SmokeTestCategory)] public void SimplePendingOpsRevivifyTest([Values(CollisionRange.None)] CollisionRange collisionRange, [Values] PendingOp pendingOp) { - byte delAboveRO = numRecords - 2; // Will be sent to free list - byte targetRO = numRecords / 2 - 15; + byte delAboveRO = NumRecords - 2; // Will be sent to free list + byte targetRO = NumRecords / 2 - 15; long tailAddress = PrepareDeletes(stayInChain: false, delAboveRO, FlushMode.OnDisk, collisionRange); // We always want freelist for this test. - FreeRecordPool pool = store.RevivificationManager.FreeRecordPool; + var pool = store.RevivificationManager.FreeRecordPool; Assert.IsTrue(RevivificationTestUtils.HasRecords(pool)); SpanByteAndMemory output = new(); @@ -1577,7 +1613,7 @@ public void SimplePendingOpsRevivifyTest([Values(CollisionRange.None)] Collision functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); var status = bContext.Read(ref key, ref inputSlice, ref output); Assert.IsTrue(status.IsPending, status.ToString()); - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); Assert.IsTrue(functions.readCcCalled); } else if (pendingOp == PendingOp.RMW) @@ -1590,8 +1626,8 @@ public void SimplePendingOpsRevivifyTest([Values(CollisionRange.None)] Collision functions.expectedUsedValueLengths.Enqueue(SpanByteTotalSize(InitialLength)); - bContext.RMW(ref key, ref input); - bContext.CompletePending(wait: true); + _ = bContext.RMW(ref key, ref input); + _ = bContext.CompletePending(wait: true); Assert.IsTrue(functions.rmwCcCalled); } Assert.AreEqual(tailAddress, store.Log.TailAddress); @@ -1601,13 +1637,13 @@ public void SimplePendingOpsRevivifyTest([Values(CollisionRange.None)] Collision [TestFixture] class RevivificationObjectTests { - const 
int numRecords = 1000; - internal const int valueMult = 1_000_000; + const int NumRecords = 1000; + internal const int ValueMult = 1_000_000; private MyFunctions functions; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; private IDevice objlog; @@ -1618,11 +1654,18 @@ public void Setup() log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: true); objlog = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.obj.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MutableFraction = 0.1, MemorySizeBits = 22, PageSizeBits = 12 }, - serializerSettings: new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() }, - revivificationSettings: RevivificationSettings.DefaultFixedLength); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + ObjectLogDevice = objlog, + MutableFraction = 0.1, + MemorySize = 1L << 22, + PageSize = 1L << 12, + RevivificationSettings = RevivificationSettings.DefaultFixedLength + }, StoreFunctions.Create(new MyKey.Comparer(), () => new MyKeySerializer(), () => new MyValueSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); functions = new MyFunctions(); session = store.NewSession(functions); @@ -1646,10 +1689,10 @@ public void TearDown() void Populate() { - for (int key = 0; key < numRecords; key++) + for (int key = 0; key < NumRecords; key++) { var keyObj = new MyKey { key = key }; - var valueObj = new MyValue { value = key + valueMult }; + var valueObj = new MyValue { value = key + ValueMult }; var status = bContext.Upsert(keyObj, valueObj); Assert.IsTrue(status.Record.Created, status.ToString()); } @@ -1662,24 +1705,21 @@ public void 
SimpleObjectTest([Values] DeleteDest deleteDest, [Values(UpdateOp.Up { Populate(); - var deleteKey = RevivificationTestUtils.GetMinRevivifiableKey(store, numRecords); + var deleteKey = RevivificationTestUtils.GetMinRevivifiableKey(store, NumRecords); var tailAddress = store.Log.TailAddress; - bContext.Delete(new MyKey { key = deleteKey }); + _ = bContext.Delete(new MyKey { key = deleteKey }); Assert.AreEqual(tailAddress, store.Log.TailAddress); - var updateKey = deleteDest == DeleteDest.InChain ? deleteKey : numRecords + 1; + var updateKey = deleteDest == DeleteDest.InChain ? deleteKey : NumRecords + 1; var key = new MyKey { key = updateKey }; - var value = new MyValue { value = key.key + valueMult }; + var value = new MyValue { value = key.key + ValueMult }; var input = new MyInput { value = value.value }; RevivificationTestUtils.WaitForRecords(store, want: true); Assert.IsTrue(RevivificationTestUtils.HasRecords(store.RevivificationManager.FreeRecordPool), "Expected a free record after delete and WaitForRecords"); - if (updateOp == UpdateOp.Upsert) - bContext.Upsert(key, value); - else if (updateOp == UpdateOp.RMW) - bContext.RMW(key, input); + _ = updateOp == UpdateOp.Upsert ? 
bContext.Upsert(key, value) : bContext.RMW(key, input); RevivificationTestUtils.WaitForRecords(store, want: false); Assert.AreEqual(tailAddress, store.Log.TailAddress, "Expected tail address not to grow (record was revivified)"); @@ -1694,11 +1734,11 @@ class RevivificationSpanByteStressTests internal class RevivificationStressFunctions : SpanByteFunctions { - internal ITsavoriteEqualityComparer keyComparer; // non-null if we are doing key comparisons (and thus expectedKey is non-default) + internal IKeyComparer keyComparer; // non-null if we are doing key comparisons (and thus expectedKey is non-default) internal SpanByte expectedKey = default; // Set for each operation by the calling thread internal bool isFirstLap = true; // For first - internal RevivificationStressFunctions(ITsavoriteEqualityComparer keyComparer) => this.keyComparer = keyComparer; + internal RevivificationStressFunctions(IKeyComparer keyComparer) => this.keyComparer = keyComparer; [MethodImpl(MethodImplOptions.AggressiveInlining)] private void VerifyKey(ref SpanByte functionsKey) @@ -1763,15 +1803,15 @@ public override unsafe bool ConcurrentDeleter(ref SpanByte key, ref SpanByte val => base.ConcurrentDeleter(ref key, ref value, ref deleteInfo, ref recordInfo); } - const int numRecords = 200; + const int NumRecords = 200; const int DefaultMaxRecsPerBin = 1024; RevivificationStressFunctions functions; RevivificationSpanByteComparer comparer; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV> store; + private ClientSession> session; + private BasicContext> bContext; private IDevice log; [SetUp] @@ -1781,7 +1821,6 @@ public void Setup() log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: true); CollisionRange collisionRange = CollisionRange.None; - LogSettings logSettings = new() { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 17, MemorySizeBits = 20 }; foreach (var arg in 
TestContext.CurrentContext.Test.Arguments) { if (arg is CollisionRange cr) @@ -1792,7 +1831,16 @@ public void Setup() } comparer = new RevivificationSpanByteComparer(collisionRange); - store = new TsavoriteKV(1L << 16, logSettings, comparer: comparer, revivificationSettings: RevivificationSettings.PowerOf2Bins); + store = new(new() + { + IndexSize = 1L << 24, + LogDevice = log, + PageSize = 1L << 17, + MemorySize = 1L << 20, + RevivificationSettings = RevivificationSettings.PowerOf2Bins + }, StoreFunctions.Create(comparer, SpanByteRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); functions = new RevivificationStressFunctions(keyComparer: null); session = store.NewSession(functions); @@ -1822,7 +1870,7 @@ unsafe void Populate() SpanByteAndMemory output = new(); - for (int ii = 0; ii < numRecords; ++ii) + for (int ii = 0; ii < NumRecords; ++ii) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); @@ -1860,7 +1908,7 @@ public void ArtificialFreeBinThreadStressTest(int numIterations, int numAddThrea NumberOfRecords = maxRecords }; var flags = new long[maxRecords]; - List strayFlags = new(0), strayRecords = new(); + List strayFlags = [], strayRecords = []; using var freeRecordPool = RevivificationTestUtils.CreateSingleBinFreeRecordPool(store, binDef); @@ -1927,13 +1975,13 @@ void runTakeThread(int tid) var addressBase = address - AddressIncrement; var prevFlag = Interlocked.CompareExchange(ref flags[addressBase], RemovedBase + tid, Added); Assert.AreEqual(1, prevFlag, $"Take() found unexpected addressBase {addressBase} (flag {prevFlag}), tid {tid}, iteration {iteration}"); - Interlocked.Increment(ref totalTaken); + _ = Interlocked.Increment(ref totalTaken); } } } // Task rather than Thread for propagation of exception. 
- List tasks = new(); + List tasks = []; // Make iteration 1-based to make the termination check easier in the threadprocs for (iteration = 1; iteration <= numIterations; ++iteration) @@ -1955,7 +2003,7 @@ void runTakeThread(int tid) try { var timeoutSec = 5; // 5s per iteration should be plenty - Assert.IsTrue(Task.WaitAll(tasks.ToArray(), TimeSpan.FromSeconds(timeoutSec)), $"Task timeout at {timeoutSec} sec, maxRec/taken {maxRecords}/{totalTaken}, iteration {iteration}"); + Assert.IsTrue(Task.WaitAll([.. tasks], TimeSpan.FromSeconds(timeoutSec)), $"Task timeout at {timeoutSec} sec, maxRec/taken {maxRecords}/{totalTaken}, iteration {iteration}"); endIteration(); } finally @@ -1993,13 +2041,13 @@ public unsafe void ArtificialSimpleTest() Assert.AreEqual(AddressIncrement + 1, address, "out address"); Assert.AreEqual(1, revivStats.successfulAdds, "Successful Adds"); Assert.AreEqual(1, revivStats.successfulTakes, "Successful Takes"); - var statsString = revivStats.Dump(); + _ = revivStats.Dump(); } public enum WrapMode { Wrap, NoWrap }; const int TakeSize = 40; - private FreeRecordPool CreateBestFitTestPool(int scanLimit, WrapMode wrapMode, ref RevivificationStats revivStats) + private FreeRecordPool> CreateBestFitTestPool(int scanLimit, WrapMode wrapMode, ref RevivificationStats revivStats) { var binDef = new RevivificationBin() { @@ -2136,13 +2184,13 @@ unsafe void runThread(int tid) } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. for (int t = 0; t < 8; t++) { var tid = t + 1; tasks.Add(Task.Factory.StartNew(() => runThread(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. 
tasks]); Assert.IsTrue(counter == 0); } @@ -2171,11 +2219,11 @@ unsafe void runDeleteThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numDeleteThreads) + for (var ii = tid; ii < NumRecords; ii += numDeleteThreads) { var kk = rng.Next(keyRange); keyVec.Fill((byte)kk); - localbContext.Delete(key); + _ = localbContext.Delete(key); } } } @@ -2190,34 +2238,31 @@ unsafe void runUpdateThread(int tid) Random rng = new(tid * 101); - RevivificationStressFunctions localFunctions = new(keyComparer: store.comparer); + RevivificationStressFunctions localFunctions = new(keyComparer: comparer); using var localSession = store.NewSession(localFunctions); var localbContext = localSession.BasicContext; for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numUpdateThreads) + for (var ii = tid; ii < NumRecords; ii += numUpdateThreads) { var kk = rng.Next(keyRange); keyVec.Fill((byte)kk); inputVec.Fill((byte)kk); localSession.functions.expectedKey = key; - if (updateOp == UpdateOp.Upsert) - localbContext.Upsert(key, input); - else - localbContext.RMW(key, input); + _ = updateOp == UpdateOp.Upsert ? localbContext.Upsert(key, input) : localbContext.RMW(key, input); localSession.functions.expectedKey = default; } // Clear keyComparer so it does not try to validate during CompletePending (when it doesn't have an expectedKey) localFunctions.keyComparer = null; - localbContext.CompletePending(wait: true); - localFunctions.keyComparer = store.comparer; + _ = localbContext.CompletePending(wait: true); + localFunctions.keyComparer = comparer; } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. 
for (int t = 0; t < numDeleteThreads; t++) { var tid = t + 1; @@ -2228,7 +2273,7 @@ unsafe void runUpdateThread(int tid) var tid = t + 1; tasks.Add(Task.Factory.StartNew(() => runUpdateThread(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } public enum ThreadingPattern { SameKeys, RandomKeys }; @@ -2257,11 +2302,11 @@ unsafe void runDeleteThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numDeleteThreads) + for (var ii = tid; ii < NumRecords; ii += numDeleteThreads) { - var kk = threadingPattern == ThreadingPattern.RandomKeys ? rng.Next(numRecords) : ii; + var kk = threadingPattern == ThreadingPattern.RandomKeys ? rng.Next(NumRecords) : ii; keyVec.Fill((byte)kk); - localbContext.Delete(key); + _ = localbContext.Delete(key); } } } @@ -2276,34 +2321,31 @@ unsafe void runUpdateThread(int tid) Random rng = new(tid * 101); - RevivificationStressFunctions localFunctions = new(keyComparer: store.comparer); + RevivificationStressFunctions localFunctions = new(keyComparer: comparer); using var localSession = store.NewSession(localFunctions); var localbContext = localSession.BasicContext; for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numUpdateThreads) + for (var ii = tid; ii < NumRecords; ii += numUpdateThreads) { - var kk = threadingPattern == ThreadingPattern.RandomKeys ? rng.Next(numRecords) : ii; + var kk = threadingPattern == ThreadingPattern.RandomKeys ? rng.Next(NumRecords) : ii; keyVec.Fill((byte)kk); inputVec.Fill((byte)kk); localSession.functions.expectedKey = key; - if (updateOp == UpdateOp.Upsert) - localbContext.Upsert(key, input); - else - localbContext.RMW(key, input); + _ = updateOp == UpdateOp.Upsert ? 
localbContext.Upsert(key, input) : localbContext.RMW(key, input); localSession.functions.expectedKey = default; } // Clear keyComparer so it does not try to validate during CompletePending (when it doesn't have an expectedKey) localFunctions.keyComparer = null; - localbContext.CompletePending(wait: true); - localFunctions.keyComparer = store.comparer; + _ = localbContext.CompletePending(wait: true); + localFunctions.keyComparer = comparer; } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. for (int t = 0; t < numDeleteThreads; t++) { var tid = t + 1; @@ -2314,7 +2356,7 @@ unsafe void runUpdateThread(int tid) var tid = t + 1; tasks.Add(Task.Factory.StartNew(() => runUpdateThread(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } [Test] @@ -2341,10 +2383,10 @@ unsafe void runDeleteThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numDeleteThreads) + for (var ii = tid; ii < NumRecords; ii += numDeleteThreads) { keyVec.Fill((byte)ii); - localbContext.Delete(key); + _ = localbContext.Delete(key); } } } @@ -2363,27 +2405,24 @@ unsafe void runUpdateThread(int tid) for (var iteration = 0; iteration < numIterations; ++iteration) { - for (var ii = tid; ii < numRecords; ii += numUpdateThreads) + for (var ii = tid; ii < NumRecords; ii += numUpdateThreads) { keyVec.Fill((byte)ii); inputVec.Fill((byte)ii); localSession.functions.expectedKey = key; - if (updateOp == UpdateOp.Upsert) - localbContext.Upsert(key, input); - else - localbContext.RMW(key, input); + _ = updateOp == UpdateOp.Upsert ? 
localbContext.Upsert(key, input) : localbContext.RMW(key, input); localSession.functions.expectedKey = default; } // Clear keyComparer so it does not try to validate during CompletePending (when it doesn't have an expectedKey) localFunctions.keyComparer = null; - localbContext.CompletePending(wait: true); - localFunctions.keyComparer = store.comparer; + _ = localbContext.CompletePending(wait: true); + localFunctions.keyComparer = comparer; } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. for (int t = 0; t < numDeleteThreads; t++) { var tid = t + 1; @@ -2394,7 +2433,7 @@ unsafe void runUpdateThread(int tid) var tid = t + 1; tasks.Add(Task.Factory.StartNew(() => runUpdateThread(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. tasks]); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/SessionTests.cs b/libs/storage/Tsavorite/cs/test/SessionTests.cs index f3b6dd3d3d..4577f74977 100644 --- a/libs/storage/Tsavorite/cs/test/SessionTests.cs +++ b/libs/storage/Tsavorite/cs/test/SessionTests.cs @@ -9,10 +9,13 @@ namespace Tsavorite.test.Session { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class SessionTests { - private TsavoriteKV store; + private TsavoriteKV store; private IDevice log; [SetUp] @@ -20,8 +23,14 @@ public void Setup() { DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog1.log"), deleteOnClose: true); - store = new TsavoriteKV - (128, new LogSettings { LogDevice = log, MemorySizeBits = 29 }); + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 29, + }, StoreFunctions.Create(new KeyStruct.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -48,12 +57,12 @@ public void SessionTest1() var 
key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -78,14 +87,14 @@ public void SessionTest2() var key2 = new KeyStruct { kfield1 = 15, kfield2 = 16 }; var value2 = new ValueStruct { vfield1 = 25, vfield2 = 26 }; - bContext1.Upsert(ref key1, ref value1, Empty.Default); - bContext2.Upsert(ref key2, ref value2, Empty.Default); + _ = bContext1.Upsert(ref key1, ref value1, Empty.Default); + _ = bContext2.Upsert(ref key2, ref value2, Empty.Default); var status = bContext1.Read(ref key1, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext1.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext1.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -97,7 +106,7 @@ public void SessionTest2() if (status.IsPending) { - bContext2.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext2.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -121,12 +130,12 @@ public void SessionTest3() var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - bContext.Upsert(ref key1, ref value, Empty.Default); + _ = bContext.Upsert(ref key1, ref value, Empty.Default); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: 
true); (status, output) = GetSinglePendingResult(outputs); } @@ -152,12 +161,12 @@ public void SessionTest4() var key1 = new KeyStruct { kfield1 = 14, kfield2 = 15 }; var value1 = new ValueStruct { vfield1 = 24, vfield2 = 25 }; - bContext1.Upsert(ref key1, ref value1, Empty.Default); + _ = bContext1.Upsert(ref key1, ref value1, Empty.Default); var status = bContext1.Read(ref key1, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext1.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext1.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -174,13 +183,13 @@ public void SessionTest4() var key2 = new KeyStruct { kfield1 = 15, kfield2 = 16 }; var value2 = new ValueStruct { vfield1 = 25, vfield2 = 26 }; - bContext2.Upsert(ref key2, ref value2, Empty.Default); + _ = bContext2.Upsert(ref key2, ref value2, Empty.Default); var status = bContext2.Read(ref key2, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext2.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext2.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -207,12 +216,12 @@ public void SessionTest5() var key1 = new KeyStruct { kfield1 = 16, kfield2 = 17 }; var value1 = new ValueStruct { vfield1 = 26, vfield2 = 27 }; - bContext.Upsert(ref key1, ref value1, Empty.Default); + _ = bContext.Upsert(ref key1, ref value1, Empty.Default); var status = bContext.Read(ref key1, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -228,13 +237,13 @@ public void SessionTest5() var key2 = new KeyStruct { kfield1 = 17, kfield2 = 18 }; var value2 = new ValueStruct { vfield1 = 27, vfield2 = 28 }; - bContext.Upsert(ref key2, 
ref value2, Empty.Default); + _ = bContext.Upsert(ref key2, ref value2, Empty.Default); status = bContext.Read(ref key2, ref input, ref output, Empty.Default); if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } Assert.IsTrue(status.Found); @@ -243,7 +252,7 @@ public void SessionTest5() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } diff --git a/libs/storage/Tsavorite/cs/test/SharedDirectoryTests.cs b/libs/storage/Tsavorite/cs/test/SharedDirectoryTests.cs index 4b7d6f7483..29d8dcf628 100644 --- a/libs/storage/Tsavorite/cs/test/SharedDirectoryTests.cs +++ b/libs/storage/Tsavorite/cs/test/SharedDirectoryTests.cs @@ -14,13 +14,16 @@ namespace Tsavorite.test.recovery.sumstore { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] internal class SharedDirectoryTests { - const long numUniqueKeys = (1 << 5); - const long keySpace = (1L << 5); - const long numOps = (1L << 10); - const long completePendingInterval = (1L << 10); + const long NumUniqueKeys = 1L << 5; + const long KeySpace = 1L << 11; + const long NumOps = 1L << 10; + const long CompletePendingInterval = 1L << 10; private string sharedLogDirectory; TsavoriteTestInstance original; TsavoriteTestInstance clone; @@ -30,7 +33,7 @@ public void Setup() { TestUtils.RecreateDirectory(TestUtils.MethodTestDir); sharedLogDirectory = Path.Join(TestUtils.MethodTestDir, "SharedLogs"); - Directory.CreateDirectory(sharedLogDirectory); + _ = Directory.CreateDirectory(sharedLogDirectory); original = new TsavoriteTestInstance(); clone = new TsavoriteTestInstance(); @@ -51,16 +54,16 @@ public void TearDown() public async ValueTask 
SharedLogDirectory([Values] bool isAsync) { original.Initialize(Path.Join(TestUtils.MethodTestDir, "OriginalCheckpoint"), sharedLogDirectory); - Assert.IsTrue(IsDirectoryEmpty(sharedLogDirectory)); // sanity check - Populate(original.Store); + Assert.IsTrue(SharedDirectoryTests.IsDirectoryEmpty(sharedLogDirectory)); // sanity check + SharedDirectoryTests.Populate(original.Store); // Take checkpoint from original to start the clone from Assert.IsTrue(original.Store.TryInitiateFullCheckpoint(out var checkpointGuid, CheckpointType.FoldOver)); original.Store.CompleteCheckpointAsync().GetAwaiter().GetResult(); // Sanity check against original - Assert.IsFalse(IsDirectoryEmpty(sharedLogDirectory)); - Test(original, checkpointGuid); + Assert.IsFalse(SharedDirectoryTests.IsDirectoryEmpty(sharedLogDirectory)); + SharedDirectoryTests.Test(original, checkpointGuid); // Copy checkpoint directory var cloneCheckpointDirectory = Path.Join(TestUtils.MethodTestDir, "CloneCheckpoint"); @@ -75,8 +78,8 @@ public async ValueTask SharedLogDirectory([Values] bool isAsync) clone.Store.Recover(checkpointGuid); // Both sessions should work concurrently - Test(original, checkpointGuid); - Test(clone, checkpointGuid); + SharedDirectoryTests.Test(original, checkpointGuid); + SharedDirectoryTests.Test(clone, checkpointGuid); // Dispose original, files should not be deleted on Windows original.TearDown(); @@ -84,21 +87,21 @@ public async ValueTask SharedLogDirectory([Values] bool isAsync) if (RuntimeInformation.IsOSPlatform(System.Runtime.InteropServices.OSPlatform.Windows)) { // Clone should still work on Windows - Assert.IsFalse(IsDirectoryEmpty(sharedLogDirectory)); - Test(clone, checkpointGuid); + Assert.IsFalse(SharedDirectoryTests.IsDirectoryEmpty(sharedLogDirectory)); + SharedDirectoryTests.Test(clone, checkpointGuid); } clone.TearDown(); // Files should be deleted after both instances are closed - Assert.IsTrue(IsDirectoryEmpty(sharedLogDirectory)); + 
Assert.IsTrue(SharedDirectoryTests.IsDirectoryEmpty(sharedLogDirectory)); } private struct TsavoriteTestInstance { public string CheckpointDirectory { get; private set; } public string LogDirectory { get; private set; } - public TsavoriteKV Store { get; private set; } + public TsavoriteKV Store { get; private set; } public IDevice LogDevice { get; private set; } public void Initialize(string checkpointDirectory, string logDirectory, bool populateLogHandles = false) @@ -140,10 +143,14 @@ public void Initialize(string checkpointDirectory, string logDirectory, bool pop LogDevice = new LocalStorageDevice(deviceFileName, deleteOnClose: true, disableFileBuffering: false, initialLogFileHandles: initialHandles); } - Store = new TsavoriteKV( - keySpace, - new LogSettings { LogDevice = LogDevice }, - new CheckpointSettings { CheckpointDir = CheckpointDirectory }); + Store = new(new() + { + IndexSize = KeySpace, + LogDevice = LogDevice, + CheckpointDir = CheckpointDirectory + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } public void TearDown() @@ -155,35 +162,35 @@ public void TearDown() } } - private void Populate(TsavoriteKV store) + private static void Populate(TsavoriteKV store) { using var session = store.NewSession(new Functions()); var bContext = session.BasicContext; // Prepare the dataset - var inputArray = new AdInput[numOps]; - for (int i = 0; i < numOps; i++) + var inputArray = new AdInput[NumOps]; + for (int i = 0; i < NumOps; i++) { - inputArray[i].adId.adId = i % numUniqueKeys; + inputArray[i].adId.adId = i % NumUniqueKeys; inputArray[i].numClicks.numClicks = 1; } // Process the batch of input data - for (int i = 0; i < numOps; i++) + for (int i = 0; i < NumOps; i++) { - bContext.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default); + _ = bContext.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default); - if (i % completePendingInterval == 0) + if (i % 
CompletePendingInterval == 0) { - bContext.CompletePending(false); + _ = bContext.CompletePending(false); } } // Make sure operations are completed - bContext.CompletePending(true); + _ = bContext.CompletePending(true); } - private void Test(TsavoriteTestInstance tsavoriteInstance, Guid checkpointToken) + private static void Test(TsavoriteTestInstance tsavoriteInstance, Guid checkpointToken) { var checkpointInfo = default(HybridLogRecoveryInfo); checkpointInfo.Recover(checkpointToken, @@ -193,8 +200,8 @@ private void Test(TsavoriteTestInstance tsavoriteInstance, Guid checkpointToken) new DirectoryInfo(tsavoriteInstance.CheckpointDirectory).FullName))); // Create array for reading - var inputArray = new AdInput[numUniqueKeys]; - for (int i = 0; i < numUniqueKeys; i++) + var inputArray = new AdInput[NumUniqueKeys]; + for (int i = 0; i < NumUniqueKeys; i++) { inputArray[i].adId.adId = i; inputArray[i].numClicks.numClicks = 0; @@ -207,7 +214,7 @@ private void Test(TsavoriteTestInstance tsavoriteInstance, Guid checkpointToken) var bContext = session.BasicContext; // Issue read requests - for (var i = 0; i < numUniqueKeys; i++) + for (var i = 0; i < NumUniqueKeys; i++) { var status = bContext.Read(ref inputArray[i].adId, ref input, ref output, Empty.Default); Assert.IsTrue(status.Found); @@ -215,18 +222,18 @@ private void Test(TsavoriteTestInstance tsavoriteInstance, Guid checkpointToken) } // Complete all pending requests - bContext.CompletePending(true); + _ = bContext.CompletePending(true); session.Dispose(); } - private bool IsDirectoryEmpty(string path) => !Directory.Exists(path) || !Directory.EnumerateFileSystemEntries(path).Any(); + private static bool IsDirectoryEmpty(string path) => !Directory.Exists(path) || !Directory.EnumerateFileSystemEntries(path).Any(); private static void CopyDirectory(DirectoryInfo source, DirectoryInfo target) { // Copy each file foreach (var file in source.GetFiles()) { - file.CopyTo(Path.Combine(target.FullName, file.Name), true); + _ 
= file.CopyTo(Path.Combine(target.FullName, file.Name), true); } // Copy each subdirectory diff --git a/libs/storage/Tsavorite/cs/test/SimpleRecoveryTest.cs b/libs/storage/Tsavorite/cs/test/SimpleRecoveryTest.cs index 3ab15f6af4..7e12384629 100644 --- a/libs/storage/Tsavorite/cs/test/SimpleRecoveryTest.cs +++ b/libs/storage/Tsavorite/cs/test/SimpleRecoveryTest.cs @@ -13,18 +13,21 @@ namespace Tsavorite.test.recovery.sumstore { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] class RecoveryTests { - const int numOps = 5000; + const int NumOps = 5000; AdId[] inputArray; private byte[] commitCookie; string checkpointDir; ICheckpointManager checkpointManager; - private TsavoriteKV store1; - private TsavoriteKV store2; + private TsavoriteKV store1; + private TsavoriteKV store2; private IDevice log; @@ -34,8 +37,8 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); checkpointManager = default; checkpointDir = default; - inputArray = new AdId[numOps]; - for (int i = 0; i < numOps; i++) + inputArray = new AdId[NumOps]; + for (int i = 0; i < NumOps; i++) inputArray[i].adId = i; } @@ -99,15 +102,29 @@ private async ValueTask SimpleRecoveryTest1_Worker(CheckpointType checkpointType log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "SimpleRecoveryTest1.log"), deleteOnClose: true); - store1 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir, CheckpointManager = checkpointManager } - ); - - store2 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir, CheckpointManager = checkpointManager } - ); + store1 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + 
CheckpointDir = checkpointDir, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + + store2 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointDir = checkpointDir, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); NumClicks value; AdInput inputArg = default; @@ -116,15 +133,15 @@ private async ValueTask SimpleRecoveryTest1_Worker(CheckpointType checkpointType var session1 = store1.NewSession(new AdSimpleFunctions()); var bContext1 = session1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); } if (testCommitCookie) store1.CommitCookie = commitCookie; - store1.TryInitiateFullCheckpoint(out Guid token, checkpointType); + _ = store1.TryInitiateFullCheckpoint(out Guid token, checkpointType); if (completionSyncMode == CompletionSyncMode.Sync) store1.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); else @@ -132,9 +149,9 @@ private async ValueTask SimpleRecoveryTest1_Worker(CheckpointType checkpointType session1.Dispose(); if (completionSyncMode == CompletionSyncMode.Sync) - store2.Recover(token); + _ = store2.Recover(token); else - await store2.RecoverAsync(token); + _ = await store2.RecoverAsync(token); if (testCommitCookie) Assert.IsTrue(store2.RecoveredCommitCookie.SequenceEqual(commitCookie)); @@ -145,13 +162,13 @@ private async ValueTask SimpleRecoveryTest1_Worker(CheckpointType checkpointType var bContext2 = session2.BasicContext; Assert.AreEqual(1, session2.ID); // This is the first session on the recovered store - for (int key = 0; 
key < numOps; key++) + for (int key = 0; key < NumOps; key++) { var status = bContext2.Read(ref inputArray[key], ref inputArg, ref output, Empty.Default); if (status.IsPending) { - bContext2.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext2.CompletePendingWithOutputs(out var outputs, wait: true); Assert.IsTrue(outputs.Next()); output = outputs.Current.Output; Assert.IsFalse(outputs.Next()); @@ -171,16 +188,27 @@ public async ValueTask SimpleRecoveryTest2([Values] CheckpointType checkpointTyp checkpointManager = new DeviceLogCommitCheckpointManager(new LocalStorageNamedDeviceFactory(), new DefaultCheckpointNamingScheme(Path.Join(MethodTestDir, "checkpoints4")), false); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "SimpleRecoveryTest2.log"), deleteOnClose: true); - store1 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); - - store2 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); - + store1 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + + store2 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); NumClicks value; AdInput inputArg = default; @@ -189,33 +217,31 @@ public async ValueTask SimpleRecoveryTest2([Values] CheckpointType checkpointTyp var session1 = 
store1.NewSession(new AdSimpleFunctions()); var bContext1 = session1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); } - store1.TryInitiateFullCheckpoint(out Guid token, checkpointType); + _ = store1.TryInitiateFullCheckpoint(out Guid token, checkpointType); store1.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); session1.Dispose(); if (completionSyncMode == CompletionSyncMode.Sync) - store2.Recover(token); + _ = store2.Recover(token); else - await store2.RecoverAsync(token); + _ = await store2.RecoverAsync(token); var session2 = store2.NewSession(new AdSimpleFunctions()); var bContext2 = session1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { var status = bContext2.Read(ref inputArray[key], ref inputArg, ref output, Empty.Default); if (status.IsPending) - bContext2.CompletePending(true); + _ = bContext2.CompletePending(true); else - { Assert.AreEqual(key, output.value.numClicks); - } } session2.Dispose(); } @@ -227,15 +253,27 @@ public async ValueTask ShouldRecoverBeginAddress([Values] CompletionSyncMode com log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "SimpleRecoveryTest2.log"), deleteOnClose: true); checkpointDir = Path.Join(MethodTestDir, "checkpoints6"); - store1 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir } - ); - - store2 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir } - ); + store1 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L 
<< 29, + CheckpointDir = checkpointDir + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + + store2 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointDir = checkpointDir + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); NumClicks value; @@ -243,10 +281,10 @@ public async ValueTask ShouldRecoverBeginAddress([Values] CompletionSyncMode com var bContext1 = session1.BasicContext; var address = 0L; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); if (key == 2999) address = store1.Log.TailAddress; @@ -254,7 +292,7 @@ public async ValueTask ShouldRecoverBeginAddress([Values] CompletionSyncMode com store1.Log.ShiftBeginAddress(address); - store1.TryInitiateFullCheckpoint(out Guid token, CheckpointType.FoldOver); + _ = store1.TryInitiateFullCheckpoint(out Guid token, CheckpointType.FoldOver); if (completionSyncMode == CompletionSyncMode.Sync) store1.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); else @@ -262,9 +300,9 @@ public async ValueTask ShouldRecoverBeginAddress([Values] CompletionSyncMode com session1.Dispose(); if (completionSyncMode == CompletionSyncMode.Sync) - store2.Recover(token); + _ = store2.Recover(token); else - await store2.RecoverAsync(token); + _ = await store2.RecoverAsync(token); Assert.AreEqual(address, store2.Log.BeginAddress); } @@ -276,16 +314,27 @@ public async ValueTask SimpleReadAndUpdateInfoTest([Values] CompletionSyncMode c checkpointManager = new DeviceLogCommitCheckpointManager(new LocalStorageNamedDeviceFactory(), new DefaultCheckpointNamingScheme(Path.Join(MethodTestDir, "checkpoints")), false); 
log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "SimpleReadAndUpdateInfoTest.log"), deleteOnClose: true); - store1 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); - - store2 = new TsavoriteKV(128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, MemorySizeBits = 29 }, - checkpointSettings: new CheckpointSettings { CheckpointManager = checkpointManager } - ); - + store1 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + + store2 = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + MemorySize = 1L << 29, + CheckpointManager = checkpointManager + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); NumClicks value; AdInput inputArg = default; @@ -296,18 +345,18 @@ public async ValueTask SimpleReadAndUpdateInfoTest([Values] CompletionSyncMode c var session1 = store1.NewSession(functions1); var bContext1 = session1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; if ((key & 1) > 0) - bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bContext1.Upsert(ref inputArray[key], ref value, Empty.Default); else { AdInput input = new() { adId = inputArray[key], numClicks = value }; - bContext1.RMW(ref inputArray[key], ref input); + _ = bContext1.RMW(ref inputArray[key], ref input); } } - store1.TryInitiateFullCheckpoint(out Guid token, CheckpointType.FoldOver); + _ = store1.TryInitiateFullCheckpoint(out Guid token, CheckpointType.FoldOver); if 
(completionSyncMode == CompletionSyncMode.Sync) store1.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); else @@ -315,9 +364,9 @@ public async ValueTask SimpleReadAndUpdateInfoTest([Values] CompletionSyncMode c session1.Dispose(); if (completionSyncMode == CompletionSyncMode.Sync) - store2.Recover(token); + _ = store2.Recover(token); else - await store2.RecoverAsync(token); + _ = await store2.RecoverAsync(token); var session2 = store2.NewSession(functions2); var bContext2 = session2.BasicContext; @@ -342,7 +391,7 @@ public async ValueTask SimpleReadAndUpdateInfoTest([Values] CompletionSyncMode c inputArg.numClicks = new() { numClicks = lastKey }; status = bContext2.Read(ref inputArray[lastKey], ref inputArg, ref output, Empty.Default); Assert.IsTrue(status.IsPending, status.ToString()); - bContext2.CompletePending(wait: true); + _ = bContext2.CompletePending(wait: true); // Upsert does not go pending so is skipped here @@ -351,11 +400,10 @@ public async ValueTask SimpleReadAndUpdateInfoTest([Values] CompletionSyncMode c inputArg.numClicks = new() { numClicks = lastKey }; status = bContext2.RMW(ref inputArray[lastKey], ref inputArg); Assert.IsTrue(status.IsPending, status.ToString()); - bContext2.CompletePending(wait: true); + _ = bContext2.CompletePending(wait: true); session2.Dispose(); } - } public class AdSimpleFunctions : SessionFunctionsBase @@ -400,7 +448,7 @@ public override bool InPlaceUpdater(ref AdId key, ref AdInput input, ref NumClic { if (expectedVersion >= 0) Assert.AreEqual(expectedVersion, rmwInfo.Version); - Interlocked.Add(ref value.numClicks, input.numClicks.numClicks); + _ = Interlocked.Add(ref value.numClicks, input.numClicks.numClicks); return true; } diff --git a/libs/storage/Tsavorite/cs/test/SingleWriterTests.cs b/libs/storage/Tsavorite/cs/test/SingleWriterTests.cs index 3fd34704f3..77179c87f8 100644 --- a/libs/storage/Tsavorite/cs/test/SingleWriterTests.cs +++ b/libs/storage/Tsavorite/cs/test/SingleWriterTests.cs @@ -8,6 +8,9 @@ 
namespace Tsavorite.test.SingleWriter { + using IntAllocator = BlittableAllocator>>; + using IntStoreFunctions = StoreFunctions>; + internal class SingleWriterTestFunctions : SimpleSimpleFunctions { internal WriteReason actualReason; @@ -28,15 +31,15 @@ public override void PostSingleWriter(ref int key, ref int input, ref int src, r class SingleWriterTests { - const int numRecords = 1000; - const int valueMult = 1_000_000; - const WriteReason NoReason = (WriteReason)(255); + const int NumRecords = 1000; + const int ValueMult = 1_000_000; + const WriteReason NoReason = (WriteReason)255; SingleWriterTestFunctions functions; - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; + private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; private IDevice log; [SetUp] @@ -45,22 +48,35 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: false); - functions = new SingleWriterTestFunctions(); - LogSettings logSettings = new LogSettings { LogDevice = log, ObjectLogDevice = null, PageSizeBits = 12, MemorySizeBits = 22, ReadCopyOptions = new(ReadCopyFrom.Device, ReadCopyTo.MainLog) }; + functions = new(); + KVSettings kvSettings = new() + { + IndexSize = 1L << 26, + LogDevice = log, + PageSize = 1L << 12, + MemorySize = 1L << 22, + ReadCopyOptions = new(ReadCopyFrom.Device, ReadCopyTo.MainLog), + CheckpointDir = MethodTestDir + }; foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is ReadCopyDestination dest) { if (dest == ReadCopyDestination.ReadCache) { - logSettings.ReadCacheSettings = new() { PageSizeBits = 12, MemorySizeBits = 22 }; - logSettings.ReadCopyOptions = default; + kvSettings.ReadCachePageSize = 1L << 12; + kvSettings.ReadCacheMemorySize = 1L << 22; + kvSettings.ReadCacheEnabled = true; + kvSettings.ReadCopyOptions = default; } break; } } - store = new TsavoriteKV(1L 
<< 20, logSettings, new CheckpointSettings { CheckpointDir = MethodTestDir }); + store = new(kvSettings + , StoreFunctions.Create(IntKeyComparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); session = store.NewSession(functions); bContext = session.BasicContext; } @@ -81,8 +97,8 @@ void Populate() { int input = (int)WriteReason.Upsert; int output = 0; - for (int key = 0; key < numRecords; key++) - Assert.False(bContext.Upsert(key, input, key * valueMult, ref output).IsPending); + for (int key = 0; key < NumRecords; key++) + Assert.False(bContext.Upsert(key, input, key * ValueMult, ref output).IsPending); } [Test] @@ -102,7 +118,7 @@ public void SingleWriterReasonsTest([Values] ReadCopyDestination readCopyDestina int input = (int)expectedReason; var status = bContext.Read(key, input, out int output); Assert.IsTrue(status.IsPending); - bContext.CompletePending(wait: true); + _ = bContext.CompletePending(wait: true); Assert.AreEqual(expectedReason, functions.actualReason); functions.actualReason = NoReason; @@ -112,7 +128,7 @@ public void SingleWriterReasonsTest([Values] ReadCopyDestination readCopyDestina ReadOptions readOptions = new() { CopyOptions = new(ReadCopyFrom.AllImmutable, ReadCopyTo.MainLog) }; status = bContext.Read(ref key, ref input, ref output, ref readOptions, out _); Assert.IsTrue(status.IsPending && !status.IsCompleted); - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); Assert.IsTrue(!status.IsPending && status.IsCompleted && status.IsCompletedSuccessfully); Assert.IsTrue(status.Found && !status.NotFound && status.Record.Copied); @@ -121,156 +137,8 @@ public void SingleWriterReasonsTest([Values] ReadCopyDestination readCopyDestina functions.actualReason = NoReason; expectedReason = WriteReason.Compaction; input = (int)expectedReason; - 
store.Log.Compact(functions, ref input, ref output, store.Log.SafeReadOnlyAddress, CompactionType.Scan); + _ = store.Log.Compact(functions, ref input, ref output, store.Log.SafeReadOnlyAddress, CompactionType.Scan); Assert.AreEqual(expectedReason, functions.actualReason); } } - - [TestFixture] - public class StructWithStringTests - { - public struct StructWithString - { - public int intField; - public string stringField; - - public StructWithString(int intValue, string prefix) - { - intField = intValue; - stringField = prefix + intValue.ToString(); - } - - public override string ToString() => stringField; - - public class Comparer : ITsavoriteEqualityComparer - { - public long GetHashCode64(ref StructWithString k) => Utility.GetHashCode(k.intField); - - public bool Equals(ref StructWithString k1, ref StructWithString k2) - => k1.intField == k2.intField && k1.stringField == k2.stringField; - } - - public class Serializer : BinaryObjectSerializer - { - public override void Deserialize(out StructWithString obj) - { - var intField = reader.ReadInt32(); - var stringField = reader.ReadString(); - obj = new() { intField = intField, stringField = stringField }; - } - - public override void Serialize(ref StructWithString obj) - { - writer.Write(obj.intField); - writer.Write(obj.stringField); - } - } - } - - internal class StructWithStringTestFunctions : SimpleSimpleFunctions - { - } - - const int numRecords = 1_000; - const string keyPrefix = "key_"; - string valuePrefix = "value_"; - - StructWithStringTestFunctions functions; - - private TsavoriteKV store; - private ClientSession session; - private BasicContext bContext; - private IDevice log, objlog; - - [SetUp] - public void Setup() - { - // create a string of size 1024 bytes - valuePrefix = new string('a', 1024); - - DeleteDirectory(MethodTestDir, wait: true); - log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: false); - objlog = Devices.CreateLogDevice(Path.Combine(MethodTestDir, 
"test.obj.log"), deleteOnClose: false); - SerializerSettings serializerSettings = new() - { - keySerializer = () => new StructWithString.Serializer(), - valueSerializer = () => new StructWithString.Serializer() - }; - store = new TsavoriteKV(1L << 20, - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, PageSizeBits = 10, MemorySizeBits = 22, SegmentSizeBits = 16 }, - new CheckpointSettings { CheckpointDir = MethodTestDir }, - serializerSettings: serializerSettings, comparer: new StructWithString.Comparer()); - - functions = new(); - session = store.NewSession(functions); - bContext = session.BasicContext; - } - - [TearDown] - public void TearDown() - { - session?.Dispose(); - session = null; - store?.Dispose(); - store = null; - objlog?.Dispose(); - objlog = null; - log?.Dispose(); - log = null; - DeleteDirectory(MethodTestDir); - } - - void Populate() - { - for (int ii = 0; ii < numRecords; ii++) - { - StructWithString key = new(ii, keyPrefix); - StructWithString value = new(ii, valuePrefix); - bContext.Upsert(ref key, ref value); - if (ii % 3_000 == 0) - { - store.TakeHybridLogCheckpointAsync(CheckpointType.FoldOver).GetAwaiter().GetResult(); - store.Recover(); - } - } - } - - [Test] - [Category(TsavoriteKVTestCategory)] - [Category(SmokeTestCategory)] - public void StructWithStringCompactTest([Values] CompactionType compactionType, [Values] bool flush) - { - void readKey(int keyInt) - { - StructWithString key = new(keyInt, keyPrefix); - var (status, output) = bContext.Read(key); - if (status.IsPending) - { - bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); - using (completedOutputs) - (status, output) = GetSinglePendingResult(completedOutputs); - } - Assert.IsTrue(status.Found, status.ToString()); - Assert.AreEqual(key.intField, output.intField); - } - - Populate(); - readKey(12); - if (flush) - { - store.Log.FlushAndEvict(wait: true); - readKey(24); - } - int count = 0; - using var iter = store.Log.Scan(0, 
store.Log.TailAddress); - while (iter.GetNext(out var _)) - { - count++; - } - Assert.AreEqual(count, numRecords); - - store.Log.Compact(functions, store.Log.SafeReadOnlyAddress, compactionType); - readKey(48); - } - } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/SpanByteIterationTests.cs b/libs/storage/Tsavorite/cs/test/SpanByteIterationTests.cs index 04ff89e26c..bb0618e53d 100644 --- a/libs/storage/Tsavorite/cs/test/SpanByteIterationTests.cs +++ b/libs/storage/Tsavorite/cs/test/SpanByteIterationTests.cs @@ -11,10 +11,12 @@ namespace Tsavorite.test { + using SpanByteStoreFunctions = StoreFunctions; + [TestFixture] internal class SpanByteIterationTests { - private TsavoriteKV store; + private TsavoriteKV> store; private IDevice log; // Note: We always set value.length to 2, which includes both VLValue members; we are not exercising the "Variable Length" aspect here. @@ -24,7 +26,7 @@ internal class SpanByteIterationTests public void Setup() { // Clean up log files from previous test runs in case they weren't cleaned up - DeleteDirectory(TestUtils.MethodTestDir, wait: true); + DeleteDirectory(MethodTestDir, wait: true); } [TearDown] @@ -34,7 +36,7 @@ public void TearDown() store = null; log?.Dispose(); log = null; - DeleteDirectory(TestUtils.MethodTestDir); + DeleteDirectory(MethodTestDir); } internal struct SpanBytePushIterationTestFunctions : IScanIteratorFunctions @@ -58,9 +60,9 @@ public unsafe bool SingleReader(ref SpanByte key, ref SpanByte value, RecordMeta public bool ConcurrentReader(ref SpanByte key, ref SpanByte value, RecordMetadata recordMetadata, long numberOfRecords, out CursorRecordResult cursorRecordResult) => SingleReader(ref key, ref value, recordMetadata, numberOfRecords, out cursorRecordResult); - public bool OnStart(long beginAddress, long endAddress) => true; - public void OnException(Exception exception, long numberOfRecords) { } - public void OnStop(bool completed, long numberOfRecords) { } + public readonly bool 
OnStart(long beginAddress, long endAddress) => true; + public readonly void OnException(Exception exception, long numberOfRecords) { } + public readonly void OnStop(bool completed, long numberOfRecords) { } } [Test] @@ -69,8 +71,16 @@ public void OnStop(bool completed, long numberOfRecords) { } public unsafe void SpanByteIterationBasicTest([Values] DeviceType deviceType, [Values] ScanIteratorType scanIteratorType) { log = CreateTestDevice(deviceType, $"{MethodTestDir}{deviceType}.log"); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9, SegmentSizeBits = 22 }); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -88,7 +98,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { using var iter = session.Iterate(); while (iter.GetNext(out var recordInfo)) - scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); + _ = scanIteratorFunctions.SingleReader(ref iter.GetKey(), ref iter.GetValue(), default, default, out _); } else Assert.IsTrue(session.Iterate(ref scanIteratorFunctions), $"Failed to complete push iteration; numRecords = {scanIteratorFunctions.numRecords}"); @@ -107,7 +117,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { keySpan[0] = i; valueSpan[0] = i; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } iterateAndVerify(1, totalRecords); @@ -115,7 +125,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { keySpan[0] = i; valueSpan[0] = i * 2; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } iterateAndVerify(2, totalRecords); @@ -123,7 +133,7 @@ 
void iterateAndVerify(int keyMultToValue, int expectedRecs) { keySpan[0] = i; valueSpan[0] = i; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } iterateAndVerify(0, totalRecords); @@ -131,14 +141,14 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { keySpan[0] = i; valueSpan[0] = i; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } iterateAndVerify(0, totalRecords); for (int i = 0; i < totalRecords; i += 2) { keySpan[0] = i; - bContext.Delete(ref key); + _ = bContext.Delete(ref key); } iterateAndVerify(0, totalRecords / 2); @@ -146,7 +156,7 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) { keySpan[0] = i; valueSpan[0] = i * 3; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } iterateAndVerify(3, totalRecords); @@ -160,8 +170,16 @@ void iterateAndVerify(int keyMultToValue, int expectedRecs) public void SpanByteIterationPushStopTest([Values] DeviceType deviceType) { log = CreateTestDevice(deviceType, Path.Join(MethodTestDir, $"{deviceType}.log")); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 9, SegmentSizeBits = 22 }); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 9, + SegmentSize = 1L << 22 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -192,7 +210,7 @@ void scanAndVerify(int stopAt, bool useScan) { keySpan[0] = i; valueSpan[0] = i; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } scanAndVerify(42, useScan: true); @@ -205,9 +223,18 @@ void scanAndVerify(int stopAt, bool useScan) public unsafe void SpanByteIterationPushLockTest([Values(1, 4)] int scanThreads, [Values(1, 4)] int updateThreads, [Values] 
ScanMode scanMode) { log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "lock_test.log")); + // Must be large enough to contain all records in memory to exercise locking - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 25, PageSizeBits = 19, SegmentSizeBits = 22 }); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 25, + PageSize = 1L << 19, + SegmentSize = 1L << 22 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); const int totalRecords = 2000; var start = store.Log.TailAddress; @@ -237,7 +264,7 @@ void LocalUpdate(int tid) { keySpan[0] = i; valueSpan[0] = i * (tid + 1); - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } } @@ -255,11 +282,11 @@ void LocalUpdate(int tid) { keySpan[0] = i; valueSpan[0] = i; - bContext.Upsert(ref key, ref value); + _ = bContext.Upsert(ref key, ref value); } } - List tasks = new(); // Task rather than Thread for propagation of exception. + List tasks = []; // Task rather than Thread for propagation of exception. var numThreads = scanThreads + updateThreads; for (int t = 0; t < numThreads; t++) { @@ -269,7 +296,7 @@ void LocalUpdate(int tid) else tasks.Add(Task.Factory.StartNew(() => LocalUpdate(tid))); } - Task.WaitAll(tasks.ToArray()); + Task.WaitAll([.. 
tasks]); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/SpanByteLogScanTests.cs b/libs/storage/Tsavorite/cs/test/SpanByteLogScanTests.cs index cf5412b3cc..c4d34e6729 100644 --- a/libs/storage/Tsavorite/cs/test/SpanByteLogScanTests.cs +++ b/libs/storage/Tsavorite/cs/test/SpanByteLogScanTests.cs @@ -7,39 +7,46 @@ using System.Threading.Tasks; using NUnit.Framework; using Tsavorite.core; +using Tsavorite.test.Revivification; using static Tsavorite.core.Utility; using static Tsavorite.test.TestUtils; -namespace Tsavorite.test +namespace Tsavorite.test.spanbyte { - [TestFixture] - internal class SpanByteLogScanTests + // Must be in a separate block so the "using SpanByteStoreFunctions" is the first line in its namespace declaration. + struct SpanByteComparerModulo : IKeyComparer { - private TsavoriteKV store; - private IDevice log; - const int totalRecords = 2000; - const int PageSizeBits = 15; - - struct SpanByteComparerModulo : ITsavoriteEqualityComparer - { - readonly long mod; + readonly long mod; - internal SpanByteComparerModulo(long mod) => this.mod = mod; + internal SpanByteComparerModulo(long mod) => this.mod = mod; - public bool Equals(ref SpanByte k1, ref SpanByte k2) => SpanByteComparer.StaticEquals(ref k1, ref k2); + public bool Equals(ref SpanByte k1, ref SpanByte k2) => SpanByteComparer.StaticEquals(ref k1, ref k2); - // Force collisions to create a chain - public long GetHashCode64(ref SpanByte k) - { - long hash = SpanByteComparer.StaticGetHashCode64(ref k); - return mod > 0 ? hash % mod : hash; - } + // Force collisions to create a chain + public long GetHashCode64(ref SpanByte k) + { + long hash = SpanByteComparer.StaticGetHashCode64(ref k); + return mod > 0 ? 
hash % mod : hash; } + } +} + +namespace Tsavorite.test.spanbyte +{ + using SpanByteStoreFunctions = StoreFunctions; + + [TestFixture] + internal class SpanByteLogScanTests + { + private TsavoriteKV> store; + private IDevice log; + const int TotalRecords = 2000; + const int PageSizeBits = 15; [SetUp] public void Setup() { - ITsavoriteEqualityComparer comparer = null; + SpanByteComparerModulo comparer = new(0); foreach (var arg in TestContext.CurrentContext.Test.Arguments) { if (arg is HashModulo mod && mod == HashModulo.Hundred) @@ -51,8 +58,15 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log"), deleteOnClose: true); - store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 25, PageSizeBits = PageSizeBits }, comparer: comparer); + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 25, + PageSize = 1L << PageSizeBits + }, StoreFunctions.Create(comparer, SpanByteRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -89,7 +103,7 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H Random rng = new(101); - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var valueFill = new string('x', rng.Next(120)); // Make the record lengths random var key = MemoryMarshal.Cast($"key_{i}".AsSpan()); @@ -98,7 +112,7 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } } @@ -116,7 +130,7 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H 
scanCursorFuncs.Initialize(verifyKeys: true); while (session.ScanCursor(ref cursor, counts[iCount], scanCursorFuncs, endAddresses[iAddr])) ; - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, $"count: {counts[iCount]}, endAddress {endAddresses[iAddr]}"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 1"); } } @@ -129,31 +143,31 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H // Scan and verify we see them all scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, "Unexpected count for all on-disk"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 2"); // Add another totalRecords, with keys incremented by totalRecords to remain distinct, and verify we see all keys. 
- for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var valueFill = new string('x', rng.Next(120)); // Make the record lengths random - var key = MemoryMarshal.Cast($"key_{i + totalRecords}".AsSpan()); - var value = MemoryMarshal.Cast($"v{valueFill}_{i + totalRecords}".AsSpan()); + var key = MemoryMarshal.Cast($"key_{i + TotalRecords}".AsSpan()); + var value = MemoryMarshal.Cast($"v{valueFill}_{i + TotalRecords}".AsSpan()); fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } } scanCursorFuncs.Initialize(verifyKeys); Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 2"); - Assert.AreEqual(totalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); + Assert.AreEqual(TotalRecords * 2, scanCursorFuncs.numRecords, "Unexpected count for on-disk + in-mem"); Assert.AreEqual(0, cursor, "Expected cursor to be 0, pt 3"); // Try an invalid cursor (not a multiple of 8) on-disk and verify we get one correct record. Use 3x page size to make sure page boundaries are tested. 
- Assert.Greater(store.hlog.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); + Assert.Greater(store.hlogBase.GetTailAddress(), PageSize * 10, "Need enough space to exercise this"); scanCursorFuncs.Initialize(verifyKeys); - cursor = store.hlog.BeginAddress - 1; + cursor = store.hlogBase.BeginAddress - 1; do { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); @@ -164,7 +178,7 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H SpanByte input = default; SpanByteAndMemory output = default; ReadOptions readOptions = default; - var readStatus = bContext.ReadAtAddress(store.hlog.HeadAddress, ref input, ref output, ref readOptions, out _); + var readStatus = bContext.ReadAtAddress(store.hlogBase.HeadAddress, ref input, ref output, ref readOptions, out _); Assert.IsTrue(readStatus.Found, $"Could not read at HeadAddress; {readStatus}"); var keyString = new string(MemoryMarshal.Cast(output.AsReadOnlySpan())); var keyOrdinal = int.Parse(keyString.Substring(keyString.IndexOf('_') + 1)); @@ -177,7 +191,7 @@ public unsafe void SpanByteScanCursorTest([Values(HashModulo.NoMod, HashModulo.H { Assert.IsTrue(session.ScanCursor(ref cursor, 1, scanCursorFuncs, long.MaxValue, validateCursor: true), "Expected scan to finish and return false, pt 1"); cursor = scanCursorFuncs.lastAddress + scanCursorFuncs.lastRecordSize + 1; - } while (cursor < store.hlog.HeadAddress + PageSize * 3); + } while (cursor < store.hlogBase.HeadAddress + PageSize * 3); } [Test] @@ -190,7 +204,7 @@ public unsafe void SpanByteScanCursorFilterTest([Values(HashModulo.NoMod, HashMo Random rng = new(101); - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var valueFill = new string('x', rng.Next(120)); // Make the record lengths random var key = MemoryMarshal.Cast($"key_{i}".AsSpan()); @@ -199,7 +213,7 @@ public unsafe void 
SpanByteScanCursorFilterTest([Values(HashModulo.NoMod, HashMo fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } } @@ -231,7 +245,7 @@ public unsafe void SpanByteScanCursorWithRCUTest([Values(RCULocation.RCUBefore, Random rng = new(101); - for (int i = 0; i < totalRecords; i++) + for (int i = 0; i < TotalRecords; i++) { var valueFill = new string('x', rng.Next(120)); // Make the record lengths random var key = MemoryMarshal.Cast($"key_{i}".AsSpan()); @@ -240,14 +254,14 @@ public unsafe void SpanByteScanCursorWithRCUTest([Values(RCULocation.RCUBefore, fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } } var scanCursorFuncs = new ScanCursorFuncs(store) { rcuLocation = rcuLocation, - rcuRecord = totalRecords - 10 + rcuRecord = TotalRecords - 10 }; long cursor = 0; @@ -256,20 +270,20 @@ public unsafe void SpanByteScanCursorWithRCUTest([Values(RCULocation.RCUBefore, { // RCU before we hit the record - verify we see it once; the original record is Sealed, and we see the one at the Tail. 
Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords, scanCursorFuncs.numRecords, "Unexpected count for RCU before we hit the scan value"); + Assert.AreEqual(TotalRecords, scanCursorFuncs.numRecords, "Unexpected count for RCU before we hit the scan value"); } else { // RCU after we hit the record - verify we see it twice; once before we update, of course, then once again after it's added at the Tail. Assert.IsFalse(session.ScanCursor(ref cursor, long.MaxValue, scanCursorFuncs, long.MaxValue), "Expected scan to finish and return false, pt 1"); - Assert.AreEqual(totalRecords + 1, scanCursorFuncs.numRecords, "Unexpected count for RCU after we hit the scan value"); + Assert.AreEqual(TotalRecords + 1, scanCursorFuncs.numRecords, "Unexpected count for RCU after we hit the scan value"); } Assert.IsTrue(scanCursorFuncs.rcuDone, "RCU was not done"); } internal sealed class ScanCursorFuncs : IScanIteratorFunctions { - readonly TsavoriteKV store; + readonly TsavoriteKV> store; internal int numRecords; internal long lastAddress; @@ -279,7 +293,7 @@ internal sealed class ScanCursorFuncs : IScanIteratorFunctions filter; - internal ScanCursorFuncs(TsavoriteKV store) + internal ScanCursorFuncs(TsavoriteKV> store) { this.store = store; Initialize(verifyKeys: true); @@ -316,7 +330,7 @@ unsafe void CheckForRCU() fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } }).Wait(); @@ -338,7 +352,7 @@ public bool ConcurrentReader(ref SpanByte key, ref SpanByte value, RecordMetadat if (verifyKeys) { - if (rcuLocation != RCULocation.RCUNone && numRecords == totalRecords - rcuOffset) + if (rcuLocation != RCULocation.RCUNone && 
numRecords == TotalRecords - rcuOffset) Assert.AreEqual(rcuRecord, kfield1, "Expected to find the rcuRecord value at end of RCU-testing enumeration"); else Assert.AreEqual(numRecords + rcuOffset, kfield1, "Mismatched key field on Scan"); @@ -372,8 +386,17 @@ public unsafe void SpanByteJumpToBeginAddressTest() { DeleteDirectory(MethodTestDir, wait: true); using var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "test.log"), deleteOnClose: true); - using var store = new TsavoriteKV - (1L << 20, new LogSettings { LogDevice = log, MemorySizeBits = 20, PageSizeBits = 15 }); + + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + MemorySize = 1L << 20, + PageSize = 1L << PageSizeBits + }, StoreFunctions.Create(new SpanByteComparerModulo(0), SpanByteRecordDisposer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + using var session = store.NewSession>(new SpanByteFunctions()); var bContext = session.BasicContext; @@ -395,7 +418,7 @@ public unsafe void SpanByteJumpToBeginAddressTest() fixed (byte* keyPtr = key) fixed (byte* valuePtr = value) { - bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); + _ = bContext.Upsert(SpanByte.FromPinnedPointer(keyPtr, key.Length), SpanByte.FromPinnedPointer(valuePtr, value.Length)); } } diff --git a/libs/storage/Tsavorite/cs/test/SpanByteTests.cs b/libs/storage/Tsavorite/cs/test/SpanByteTests.cs index 3b15d03ad9..b14cdb12d8 100644 --- a/libs/storage/Tsavorite/cs/test/SpanByteTests.cs +++ b/libs/storage/Tsavorite/cs/test/SpanByteTests.cs @@ -12,6 +12,8 @@ namespace Tsavorite.test.spanbyte { + using SpanByteStoreFunctions = StoreFunctions; + [TestFixture] internal class SpanByteTests { @@ -27,8 +29,16 @@ public unsafe void SpanByteTest1() try { using var log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "hlog1.log"), deleteOnClose: true); - using var store = new TsavoriteKV - (128, new 
LogSettings { LogDevice = log, MemorySizeBits = 17, PageSizeBits = 12 }); + using var store = new TsavoriteKV>( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 17, + PageSize = 1L << 12 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession>(new SpanByteFunctions()); var bContext = session.BasicContext; @@ -42,8 +52,8 @@ public unsafe void SpanByteTest1() var key1SpanByte = SpanByte.FromPinnedPointer(key1Ptr, key1.Length); var value1SpanByte = SpanByte.FromPinnedPointer(value1Ptr, value1.Length); - bContext.Upsert(key1SpanByte, value1SpanByte); - bContext.Read(ref key1SpanByte, ref input, ref output1); + _ = bContext.Upsert(key1SpanByte, value1SpanByte); + _ = bContext.Read(ref key1SpanByte, ref input, ref output1); } Assert.IsTrue(output1.IsSpanByte); @@ -59,8 +69,8 @@ public unsafe void SpanByteTest1() var key2SpanByte = SpanByte.FromPinnedPointer(key2Ptr, key2.Length); var value2SpanByte = SpanByte.FromPinnedPointer(value2Ptr, value2.Length); - bContext.Upsert(key2SpanByte, value2SpanByte); - bContext.Read(ref key2SpanByte, ref input, ref output2); + _ = bContext.Upsert(key2SpanByte, value2SpanByte); + _ = bContext.Read(ref key2SpanByte, ref input, ref output2); } Assert.IsTrue(!output2.IsSpanByte); @@ -83,9 +93,16 @@ public unsafe void MultiRead_SpanByte_Test() try { using var log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "test.log"), deleteOnClose: true); - using var store = new TsavoriteKV( - size: 1L << 10, - new LogSettings { LogDevice = log, MemorySizeBits = 15, PageSizeBits = 12 }); + using var store = new TsavoriteKV>( + new() + { + IndexSize = 1L << 16, + LogDevice = log, + MemorySize = 1L << 15, + PageSize = 1L << 12 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession>(new SpanByteFunctions()); var bContext = 
session.BasicContext; @@ -94,7 +111,7 @@ public unsafe void MultiRead_SpanByte_Test() var key = MemoryMarshal.Cast($"{i}".AsSpan()); var value = MemoryMarshal.Cast($"{i + 1000}".AsSpan()); fixed (byte* k = key, v = value) - bContext.Upsert(SpanByte.FromPinnedSpan(key), SpanByte.FromPinnedSpan(value)); + _ = bContext.Upsert(SpanByte.FromPinnedSpan(key), SpanByte.FromPinnedSpan(value)); } // Read, evict all records to disk, read again @@ -199,9 +216,16 @@ public unsafe void ShouldSkipEmptySpaceAtEndOfPage() TestUtils.DeleteDirectory(TestUtils.MethodTestDir, wait: true); using var log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "vl-iter.log"), deleteOnClose: true); - using var store = new TsavoriteKV - (128, - new LogSettings { LogDevice = log, MemorySizeBits = 17, PageSizeBits = 10 }); // 1KB page + using var store = new TsavoriteKV>( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 17, + PageSize = 1L << 10 // 1KB page + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); using var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -219,8 +243,8 @@ public unsafe void ShouldSkipEmptySpaceAtEndOfPage() // the value space for the first record, and the length header for the second record. This is the space available for the second record's value. 
var p2value2len = PageSize - 2 * RecordInfo.GetLength() - - 2 * RoundUp(key.TotalSize, SpanByteAllocator.kRecordAlignment) - - RoundUp(value.TotalSize, SpanByteAllocator.kRecordAlignment) + - 2 * RoundUp(key.TotalSize, Constants.kRecordAlignment) + - RoundUp(value.TotalSize, Constants.kRecordAlignment) - sizeof(int); Set(ref keySpan, 3L, ref valueSpan, p2value2len, 3); // Inserted on page#1 Assert.AreEqual(PageSize * 2, store.Log.TailAddress, "TailAddress should be at the end of page#2"); @@ -253,7 +277,7 @@ void Set(ref Span keySpan, long keyValue, ref Span valueSpan, int va keySpan[0] = keyValue; value.Length = valueLength; valueSpan[0] = tag; - bContext.Upsert(ref key, ref value, Empty.Default); + _ = bContext.Upsert(ref key, ref value, Empty.Default); } } } diff --git a/libs/storage/Tsavorite/cs/test/SpanByteVLVectorTests.cs b/libs/storage/Tsavorite/cs/test/SpanByteVLVectorTests.cs index 455c20515a..4636e5f02c 100644 --- a/libs/storage/Tsavorite/cs/test/SpanByteVLVectorTests.cs +++ b/libs/storage/Tsavorite/cs/test/SpanByteVLVectorTests.cs @@ -9,6 +9,8 @@ namespace Tsavorite.test.spanbyte { + using SpanByteStoreFunctions = StoreFunctions; + [TestFixture] internal class SpanByteVLVectorTests { @@ -24,10 +26,16 @@ public unsafe void VLVectorSingleKeyTest() DeleteDirectory(MethodTestDir, wait: true); var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog1.log"), deleteOnClose: true); - var store = new TsavoriteKV - (128, - new LogSettings { LogDevice = log, MemorySizeBits = 17, PageSizeBits = 12 }, - null, null, null); + var store = new TsavoriteKV>( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 17, + PageSize = 1L << 12 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -46,7 +54,7 @@ public unsafe void VLVectorSingleKeyTest() valueSpan[j] = len; var valueSpanByte = 
valueSpan.Slice(0, len).AsSpanByte(); - bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); + _ = bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); } // Reset rng to get the same sequence of value lengths @@ -62,7 +70,7 @@ public unsafe void VLVectorSingleKeyTest() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } @@ -85,10 +93,16 @@ public unsafe void VLVectorMultiKeyTest() DeleteDirectory(MethodTestDir, wait: true); var log = Devices.CreateLogDevice(Path.Join(MethodTestDir, "hlog1.log"), deleteOnClose: true); - var store = new TsavoriteKV - (128, - new LogSettings { LogDevice = log, MemorySizeBits = 17, PageSizeBits = 12 }, - null, null, null); + var store = new TsavoriteKV>( + new() + { + IndexSize = 1L << 13, + LogDevice = log, + MemorySize = 1L << 17, + PageSize = 1L << 12 + }, StoreFunctions.Create() + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); var session = store.NewSession(new VLVectorFunctions()); var bContext = session.BasicContext; @@ -109,7 +123,7 @@ public unsafe void VLVectorMultiKeyTest() valueSpan[j] = valueLen; var valueSpanByte = valueSpan.Slice(0, valueLen).AsSpanByte(); - bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); + _ = bContext.Upsert(ref keySpanByte, ref valueSpanByte, Empty.Default); } // Reset rng to get the same sequence of key and value lengths @@ -127,7 +141,7 @@ public unsafe void VLVectorMultiKeyTest() if (status.IsPending) { - bContext.CompletePendingWithOutputs(out var outputs, wait: true); + _ = bContext.CompletePendingWithOutputs(out var outputs, wait: true); (status, output) = GetSinglePendingResult(outputs); } diff --git a/libs/storage/Tsavorite/cs/test/StateMachineBarrierTests.cs b/libs/storage/Tsavorite/cs/test/StateMachineBarrierTests.cs index 
75c516d0a7..3145e26d71 100644 --- a/libs/storage/Tsavorite/cs/test/StateMachineBarrierTests.cs +++ b/libs/storage/Tsavorite/cs/test/StateMachineBarrierTests.cs @@ -8,31 +8,40 @@ namespace Tsavorite.test.statemachine { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] public class StateMachineBarrierTests { IDevice log; - TsavoriteKV store; - const int numOps = 5000; + private TsavoriteKV store; + const int NumOps = 5000; AdId[] inputArray; [SetUp] public void Setup() { - inputArray = new AdId[numOps]; - for (int i = 0; i < numOps; i++) - { + inputArray = new AdId[NumOps]; + for (int i = 0; i < NumOps; i++) inputArray[i].adId = i; - } log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "StateMachineTest1.log"), deleteOnClose: true); string checkpointDir = Path.Join(TestUtils.MethodTestDir, "statemachinetest"); - Directory.CreateDirectory(checkpointDir); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, PageSizeBits = 10, MemorySizeBits = 13 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir, CheckpointVersionSwitchBarrier = true } - ); + _ = Directory.CreateDirectory(checkpointDir); + + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + PageSize = 1L << 10, + MemorySize = 1L << 13, + CheckpointDir = checkpointDir, + CheckpointVersionSwitchBarrier = true + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -89,9 +98,9 @@ public void StateMachineBarrierTest1() } void Prepare(out SimpleFunctions f, - out ClientSession s1, - out UnsafeContext uc1, - out ThreadSession s2, + out ClientSession s1, + out UnsafeContext uc1, + out ThreadSession s2, long toVersion = -1) { f = new SimpleFunctions(); @@ -100,7 +109,7 @@ void Prepare(out SimpleFunctions f, 
Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); // Take index checkpoint for recovery purposes - store.TryInitiateIndexCheckpoint(out _); + _ = store.TryInitiateIndexCheckpoint(out _); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); // Index checkpoint does not update version, so @@ -112,10 +121,10 @@ void Prepare(out SimpleFunctions f, s1 = store.NewSession(f, "foo"); var bc1 = s1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bc1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bc1.Upsert(ref inputArray[key], ref value, Empty.Default); } // Ensure state machine needs no I/O wait during WAIT_FLUSH @@ -126,12 +135,12 @@ void Prepare(out SimpleFunctions f, uc1.BeginUnsafe(); // Start session s2 on another thread for testing - s2 = store.CreateThreadSession(f); + s2 = store.CreateThreadSession(f); // We should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver, targetVersion: toVersion); + _ = store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver, targetVersion: toVersion); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); diff --git a/libs/storage/Tsavorite/cs/test/StateMachineTests.cs b/libs/storage/Tsavorite/cs/test/StateMachineTests.cs index 72c75c5ff7..db58101c82 100644 --- a/libs/storage/Tsavorite/cs/test/StateMachineTests.cs +++ b/libs/storage/Tsavorite/cs/test/StateMachineTests.cs @@ -9,31 +9,39 @@ namespace Tsavorite.test.statemachine { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + [TestFixture] public class StateMachineTests { IDevice log; - TsavoriteKV store; - const int numOps = 5000; + TsavoriteKV store; + const int NumOps = 5000; AdId[] inputArray; [SetUp] 
public void Setup() { - inputArray = new AdId[numOps]; - for (int i = 0; i < numOps; i++) - { + inputArray = new AdId[NumOps]; + for (int i = 0; i < NumOps; i++) inputArray[i].adId = i; - } log = Devices.CreateLogDevice(Path.Join(TestUtils.MethodTestDir, "StateMachineTest1.log"), deleteOnClose: true); string checkpointDir = Path.Join(TestUtils.MethodTestDir, "statemachinetest"); - Directory.CreateDirectory(checkpointDir); - store = new TsavoriteKV - (128, - logSettings: new LogSettings { LogDevice = log, MutableFraction = 0.1, PageSizeBits = 10, MemorySizeBits = 13 }, - checkpointSettings: new CheckpointSettings { CheckpointDir = checkpointDir } - ); + _ = Directory.CreateDirectory(checkpointDir); + + store = new(new() + { + IndexSize = 1L << 13, + LogDevice = log, + MutableFraction = 0.1, + PageSize = 1L << 10, + MemorySize = 1L << 13, + CheckpointDir = checkpointDir + }, StoreFunctions.Create(new AdId.Comparer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); } [TearDown] @@ -52,7 +60,7 @@ public void TearDown() [Category("Smoke")] public void StateMachineTest1() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -96,7 +104,7 @@ public void StateMachineTest1() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void StateMachineTest2() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -134,7 +142,7 @@ public void StateMachineTest2() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void StateMachineTest3() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We 
should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -167,7 +175,7 @@ public void StateMachineTest3() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void StateMachineTest4() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -209,7 +217,7 @@ public void StateMachineTest4() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void StateMachineTest5() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -263,7 +271,7 @@ public void StateMachineTest5() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void StateMachineTest6() { - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // Suspend s1 uc1.EndUnsafe(); @@ -283,7 +291,7 @@ public void StateMachineTest6() s2.Dispose(); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); + _ = store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); // We should be in REST, 3 @@ -299,14 +307,14 @@ public void StateMachineTest6() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void LUCScenario1() { - CreateSessions(out var f, out var s1, out var ts, out var lts); + CreateSessions(out _, out var s1, out var ts, out var lts); // System should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); lts.getLUC(); Assert.IsTrue(lts.isProtected); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); + _ = 
store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); // System should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -334,7 +342,7 @@ public void LUCScenario1() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void LUCScenario2() { - CreateSessions(out var f, out var s1, out var ts, out var lts); + CreateSessions(out _, out var s1, out var ts, out var lts); // System should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); @@ -342,7 +350,7 @@ public void LUCScenario2() var uc1 = s1.UnsafeContext; uc1.BeginUnsafe(); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); + _ = store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); // should not succeed since checkpoint is in progress lts.getLUC(); @@ -379,7 +387,7 @@ public void LUCScenario2() [Category("TsavoriteKV"), Category("CheckpointRestore")] public void LUCScenario3() { - CreateSessions(out var f, out var s1, out var ts, out var lts); + CreateSessions(out _, out var s1, out var ts, out var lts); // System should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); @@ -389,7 +397,7 @@ public void LUCScenario3() luc1.BeginUnsafe(); luc1.BeginLockable(); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); + _ = store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver); // System should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -438,7 +446,7 @@ public void StateMachineCallbackTest1() { var callback = new TestCallback(); store.UnsafeRegisterCallback(callback); - Prepare(out var f, out var s1, out var uc1, out var s2); + Prepare(out _, out var s1, out var uc1, out var s2); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -483,7 
+491,7 @@ public void StateMachineCallbackTest1() public void VersionChangeTest() { var toVersion = 1 + (1 << 14); - Prepare(out var f, out var s1, out var uc1, out var s2, toVersion); + Prepare(out _, out var s1, out var uc1, out var s2, toVersion); // We should be in PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -518,9 +526,9 @@ public void VersionChangeTest() } void Prepare(out SimpleFunctions f, - out ClientSession s1, - out UnsafeContext uc1, - out ThreadSession s2, + out ClientSession s1, + out UnsafeContext uc1, + out ThreadSession s2, long toVersion = -1) { f = new SimpleFunctions(); @@ -529,7 +537,7 @@ void Prepare(out SimpleFunctions f, Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); // Take index checkpoint for recovery purposes - store.TryInitiateIndexCheckpoint(out _); + _ = store.TryInitiateIndexCheckpoint(out _); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); // Index checkpoint does not update version, so @@ -541,10 +549,10 @@ void Prepare(out SimpleFunctions f, s1 = store.NewSession(f, "foo"); var bc1 = s1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bc1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bc1.Upsert(ref inputArray[key], ref value, Empty.Default); } // Ensure state machine needs no I/O wait during WAIT_FLUSH @@ -555,12 +563,12 @@ void Prepare(out SimpleFunctions f, uc1.BeginUnsafe(); // Start session s2 on another thread for testing - s2 = store.CreateThreadSession(f); + s2 = store.CreateThreadSession(f); // We should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); - store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver, targetVersion: toVersion); + _ = store.TryInitiateHybridLogCheckpoint(out _, CheckpointType.FoldOver, targetVersion: toVersion); // We should be in 
PREPARE, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.PREPARE, 1), store.SystemState)); @@ -568,9 +576,9 @@ void Prepare(out SimpleFunctions f, void CreateSessions(out SimpleFunctions f, - out ClientSession s1, - out ThreadSession ts, - out LUCThreadSession lts) + out ClientSession s1, + out ThreadSession ts, + out LUCThreadSession lts) { f = new SimpleFunctions(); NumClicks value; @@ -578,24 +586,24 @@ void CreateSessions(out SimpleFunctions f, s1 = store.NewSession(f, "foo"); var bc1 = s1.BasicContext; - for (int key = 0; key < numOps; key++) + for (int key = 0; key < NumOps; key++) { value.numClicks = key; - bc1.Upsert(ref inputArray[key], ref value, Empty.Default); + _ = bc1.Upsert(ref inputArray[key], ref value, Empty.Default); } // Ensure state machine needs no I/O wait during WAIT_FLUSH store.Log.ShiftReadOnlyAddress(store.Log.TailAddress, true); // Start session s2 on another thread for testing - ts = store.CreateThreadSession(f); - lts = store.CreateLUCThreadSession(f); + ts = store.CreateThreadSession(f); + lts = store.CreateLUCThreadSession(f); // We should be in REST, 1 Assert.IsTrue(SystemState.Equal(SystemState.Make(Phase.REST, 1), store.SystemState)); // Take index checkpoint for recovery purposes - store.TryInitiateIndexCheckpoint(out _); + _ = store.TryInitiateIndexCheckpoint(out _); store.CompleteCheckpointAsync().AsTask().GetAwaiter().GetResult(); } } @@ -609,15 +617,14 @@ public override void ReadCompletionCallback(ref AdId key, ref NumClicks input, r } } - public class TestCallback : IStateMachineCallback + public class TestCallback : IStateMachineCallback { - private readonly HashSet invokedStates = new(); - + private readonly HashSet invokedStates = []; - public void BeforeEnteringState(SystemState next, TsavoriteKV tsavorite) + public void BeforeEnteringState(SystemState next, TsavoriteKV tsavorite) { Assert.IsFalse(invokedStates.Contains(next)); - invokedStates.Add(next); + _ = invokedStates.Add(next); } public void 
CheckInvoked(SystemState state) diff --git a/libs/storage/Tsavorite/cs/test/StructWithStringTests.cs b/libs/storage/Tsavorite/cs/test/StructWithStringTests.cs new file mode 100644 index 0000000000..1e51f5fb0a --- /dev/null +++ b/libs/storage/Tsavorite/cs/test/StructWithStringTests.cs @@ -0,0 +1,163 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +using System.IO; +using NUnit.Framework; +using Tsavorite.core; +using static Tsavorite.test.TestUtils; + +namespace Tsavorite.test.StructWithString +{ + // Must be in a separate block so the "using StructStoreFunctions" is the first line in its namespace declaration. + public struct StructWithString(int intValue, string prefix) + { + public int intField = intValue; + public string stringField = prefix + intValue.ToString(); + + public override readonly string ToString() => stringField; + + public class Comparer : IKeyComparer + { + public long GetHashCode64(ref StructWithString k) => Utility.GetHashCode(k.intField); + + public bool Equals(ref StructWithString k1, ref StructWithString k2) => k1.intField == k2.intField && k1.stringField == k2.stringField; + } + + public class Serializer : BinaryObjectSerializer + { + public override void Deserialize(out StructWithString obj) + { + var intField = reader.ReadInt32(); + var stringField = reader.ReadString(); + obj = new() { intField = intField, stringField = stringField }; + } + + public override void Serialize(ref StructWithString obj) + { + writer.Write(obj.intField); + writer.Write(obj.stringField); + } + } + } +} + +namespace Tsavorite.test.StructWithString +{ + using ClassAllocator = GenericAllocator>>; + using ClassStoreFunctions = StoreFunctions>; + + [TestFixture] + public class StructWithStringTests + { + internal class StructWithStringTestFunctions : SimpleSimpleFunctions + { + } + + const int NumRecords = 1_000; + const string KeyPrefix = "key_"; + string valuePrefix = "value_"; + + StructWithStringTestFunctions functions; + + 
private TsavoriteKV store; + private ClientSession session; + private BasicContext bContext; + private IDevice log, objlog; + + [SetUp] + public void Setup() + { + // create a string of size 1024 bytes + valuePrefix = new string('a', 1024); + + DeleteDirectory(MethodTestDir, wait: true); + log = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.log"), deleteOnClose: false); + objlog = Devices.CreateLogDevice(Path.Combine(MethodTestDir, "test.obj.log"), deleteOnClose: false); + + store = new(new() + { + IndexSize = 1L << 26, + LogDevice = log, + ObjectLogDevice = objlog, + PageSize = 1L << 10, + MemorySize = 1L << 22, + SegmentSize = 1L << 16, + CheckpointDir = MethodTestDir + }, StoreFunctions.Create(new StructWithString.Comparer(), () => new StructWithString.Serializer(), () => new StructWithString.Serializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); + + functions = new(); + session = store.NewSession(functions); + bContext = session.BasicContext; + } + + [TearDown] + public void TearDown() + { + session?.Dispose(); + session = null; + store?.Dispose(); + store = null; + objlog?.Dispose(); + objlog = null; + log?.Dispose(); + log = null; + DeleteDirectory(MethodTestDir); + } + + void Populate() + { + for (int ii = 0; ii < NumRecords; ii++) + { + StructWithString key = new(ii, KeyPrefix); + StructWithString value = new(ii, valuePrefix); + bContext.Upsert(ref key, ref value); + if (ii % 3_000 == 0) + { + store.TakeHybridLogCheckpointAsync(CheckpointType.FoldOver).GetAwaiter().GetResult(); + store.Recover(); + } + } + } + + [Test] + [Category(TsavoriteKVTestCategory)] + [Category(SmokeTestCategory)] + public void StructWithStringCompactTest([Values] CompactionType compactionType, [Values] bool flush) + { + void readKey(int keyInt) + { + StructWithString key = new(keyInt, KeyPrefix); + var (status, output) = bContext.Read(key); + bool wasPending = status.IsPending; + if (status.IsPending) + { + 
bContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); + using (completedOutputs) + (status, output) = GetSinglePendingResult(completedOutputs); + } + + Assert.IsTrue(status.Found, $"{status.ToString()}; wasPending = {wasPending}"); + Assert.AreEqual(key.intField, output.intField); + } + + Populate(); + readKey(12); + if (flush) + { + store.Log.FlushAndEvict(wait: true); + readKey(24); + } + int count = 0; + using var iter = store.Log.Scan(0, store.Log.TailAddress); + while (iter.GetNext(out var _)) + count++; + Assert.AreEqual(count, NumRecords); + + readKey(48); + store.Log.Compact(functions, store.Log.SafeReadOnlyAddress, compactionType); + readKey(48); + } + } +} \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/TestTypes.cs b/libs/storage/Tsavorite/cs/test/TestTypes.cs index 0e48a1b724..7ada47061c 100644 --- a/libs/storage/Tsavorite/cs/test/TestTypes.cs +++ b/libs/storage/Tsavorite/cs/test/TestTypes.cs @@ -8,21 +8,20 @@ namespace Tsavorite.test { - public struct KeyStruct : ITsavoriteEqualityComparer + public struct KeyStruct { public long kfield1; public long kfield2; - public long GetHashCode64(ref KeyStruct key) - { - return Utility.GetHashCode(key.kfield1); - } - public bool Equals(ref KeyStruct k1, ref KeyStruct k2) + public override string ToString() => $"kfield1 {kfield1}, kfield2 {kfield2}"; + + public struct Comparer : IKeyComparer { - return k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; - } + public long GetHashCode64(ref KeyStruct key) => Utility.GetHashCode(key.kfield1); + public bool Equals(ref KeyStruct k1, ref KeyStruct k2) => k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; - public override string ToString() => $"kfield1 {kfield1}, kfield2 {kfield2}"; + public static Comparer Instance = new(); + } } public struct ValueStruct diff --git a/libs/storage/Tsavorite/cs/test/TestUtils.cs b/libs/storage/Tsavorite/cs/test/TestUtils.cs index 4b2df20954..9d0ba353aa 100644 --- 
a/libs/storage/Tsavorite/cs/test/TestUtils.cs +++ b/libs/storage/Tsavorite/cs/test/TestUtils.cs @@ -257,9 +257,11 @@ internal static async ValueTask DoTwoThreadRandomKeyTest(int count, bool doRando } } - internal static unsafe bool FindHashBucketEntryForKey(this TsavoriteKV store, ref Key key, out HashBucketEntry entry) + internal static unsafe bool FindHashBucketEntryForKey(this TsavoriteKV store, ref TKey key, out HashBucketEntry entry) + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { - HashEntryInfo hei = new(store.Comparer.GetHashCode64(ref key)); + HashEntryInfo hei = new(store.storeFunctions.GetKeyHashCode64(ref key)); var success = store.FindTag(ref hei); entry = hei.entry; return success; @@ -268,12 +270,18 @@ internal static unsafe bool FindHashBucketEntryForKey(this Tsavorite static class StaticTestUtils { - internal static (Status status, TOutput output) GetSinglePendingResult(this ITsavoriteContext sessionContext) + internal static (Status status, TOutput output) GetSinglePendingResult( + this ITsavoriteContext sessionContext) where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator => sessionContext.GetSinglePendingResult(out _); - internal static (Status status, TOutput output) GetSinglePendingResult(this ITsavoriteContext sessionContext, out RecordMetadata recordMetadata) + internal static (Status status, TOutput output) GetSinglePendingResult( + this ITsavoriteContext sessionContext, out RecordMetadata recordMetadata) where Functions : ISessionFunctions + where TStoreFunctions : IStoreFunctions + where TAllocator : IAllocator { sessionContext.CompletePendingWithOutputs(out var completedOutputs, wait: true); return TestUtils.GetSinglePendingResult(completedOutputs, out recordMetadata); diff --git a/libs/storage/Tsavorite/cs/test/ThreadSession.cs b/libs/storage/Tsavorite/cs/test/ThreadSession.cs index 387de94b03..7d50f6c7ab 100644 --- 
a/libs/storage/Tsavorite/cs/test/ThreadSession.cs +++ b/libs/storage/Tsavorite/cs/test/ThreadSession.cs @@ -9,32 +9,44 @@ namespace Tsavorite.test.statemachine { internal static class Extension { - public static ThreadSession CreateThreadSession(this TsavoriteKV store, F f) - where K : new() where V : new() where F : ISessionFunctions - => new ThreadSession(store, f); - - public static LUCThreadSession CreateLUCThreadSession(this TsavoriteKV store, F f) - where K : new() where V : new() where F : ISessionFunctions - => new LUCThreadSession(store, f); + public static ThreadSession CreateThreadSession(this TsavoriteKV store, F f) + where K : new() + where V : new() + where F : ISessionFunctions + where SF : IStoreFunctions + where A : IAllocator + => new(store, f); + + public static LUCThreadSession CreateLUCThreadSession(this TsavoriteKV store, F f) + where K : new() + where V : new() + where F : ISessionFunctions + where SF : IStoreFunctions + where A : IAllocator + => new(store, f); } - internal class ThreadSession - where K : new() where V : new() where F : ISessionFunctions + internal class ThreadSession + where K : new() + where V : new() + where F : ISessionFunctions + where SF : IStoreFunctions + where A : IAllocator { - readonly TsavoriteKV store; - ClientSession s2; - UnsafeContext uc2; + readonly TsavoriteKV store; + ClientSession s2; + UnsafeContext uc2; readonly F f; readonly AutoResetEvent ev = new(false); readonly AsyncQueue q = new(); - public ThreadSession(TsavoriteKV store, F f) + public ThreadSession(TsavoriteKV store, F f) { this.store = store; this.f = f; var ss = new Thread(SecondSession); ss.Start(); - ev.WaitOne(); + _ = ev.WaitOne(); } public void Refresh(bool waitComplete = true) @@ -44,7 +56,7 @@ public void Refresh(bool waitComplete = true) public void CompleteOp() { - ev.WaitOne(); + _ = ev.WaitOne(); } public void Dispose() @@ -58,7 +70,7 @@ private void SecondSession() uc2 = s2.UnsafeContext; uc2.BeginUnsafe(); - ev.Set(); + _ = 
ev.Set(); while (true) { @@ -67,12 +79,12 @@ private void SecondSession() { case "refresh": uc2.Refresh(); - ev.Set(); + _ = ev.Set(); break; case "dispose": uc2.EndUnsafe(); s2.Dispose(); - ev.Set(); + _ = ev.Set(); return; default: throw new Exception("Unsupported command"); @@ -83,28 +95,33 @@ private void SecondSession() private void OtherSession(string command, bool waitComplete = true) { q.Enqueue(command); - if (waitComplete) ev.WaitOne(); + if (waitComplete) + _ = ev.WaitOne(); } } - internal class LUCThreadSession - where K : new() where V : new() where F : ISessionFunctions + internal class LUCThreadSession + where K : new() + where V : new() + where F : ISessionFunctions + where SF : IStoreFunctions + where A : IAllocator { - readonly TsavoriteKV store; - ClientSession session; - LockableUnsafeContext luc; + readonly TsavoriteKV store; + ClientSession session; + LockableUnsafeContext luc; readonly F f; readonly AutoResetEvent ev = new(false); readonly AsyncQueue q = new(); public bool isProtected = false; - public LUCThreadSession(TsavoriteKV store, F f) + public LUCThreadSession(TsavoriteKV store, F f) { this.store = store; this.f = f; var ss = new Thread(LUCThread); ss.Start(); - ev.WaitOne(); + _ = ev.WaitOne(); } public void Refresh() { @@ -128,7 +145,7 @@ public void getLUC() private void LUCThread() { session = store.NewSession(f, null); - ev.Set(); + _ = ev.Set(); while (true) { @@ -140,7 +157,7 @@ private void LUCThread() luc.Refresh(); else session.BasicContext.Refresh(); - ev.Set(); + _ = ev.Set(); break; case "dispose": if (isProtected) @@ -148,7 +165,7 @@ private void LUCThread() luc.EndUnsafe(); } session.Dispose(); - ev.Set(); + _ = ev.Set(); return; case "getLUC": luc = session.LockableUnsafeContext; @@ -162,13 +179,13 @@ private void LUCThread() luc.BeginLockable(); isProtected = true; } - ev.Set(); + _ = ev.Set(); break; case "DisposeLUC": luc.EndLockable(); luc.EndUnsafe(); isProtected = false; - ev.Set(); + _ = ev.Set(); break; 
default: throw new Exception("Unsupported command"); @@ -178,7 +195,7 @@ private void LUCThread() private void queue(string command) { q.Enqueue(command); - ev.WaitOne(); + _ = ev.WaitOne(); } } } \ No newline at end of file diff --git a/libs/storage/Tsavorite/cs/test/Tsavorite.test.csproj b/libs/storage/Tsavorite/cs/test/Tsavorite.test.csproj index 3e0ab33ae3..0c61c95648 100644 --- a/libs/storage/Tsavorite/cs/test/Tsavorite.test.csproj +++ b/libs/storage/Tsavorite/cs/test/Tsavorite.test.csproj @@ -7,13 +7,13 @@ - 1701;1702;1591 + 1701;1702;1591;IDE0130;IDE0065;IDE0007;IDE0048 - + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/libs/storage/Tsavorite/cs/test/UnsafeContextTests.cs b/libs/storage/Tsavorite/cs/test/UnsafeContextTests.cs index 6938ce1722..157a8e8138 100644 --- a/libs/storage/Tsavorite/cs/test/UnsafeContextTests.cs +++ b/libs/storage/Tsavorite/cs/test/UnsafeContextTests.cs @@ -12,14 +12,17 @@ namespace Tsavorite.test.UnsafeContext { + using StructAllocator = BlittableAllocator>>; + using StructStoreFunctions = StoreFunctions>; + //** These tests ensure the basics are fully covered - taken from BasicTests [TestFixture] internal class BasicUnsafeContextTests { - private TsavoriteKV store; - private ClientSession fullSession; - private UnsafeContext uContext; + private TsavoriteKV store; + private ClientSession fullSession; + private UnsafeContext uContext; private IDevice log; DeviceType deviceType; @@ -30,12 +33,17 @@ public void Setup() DeleteDirectory(MethodTestDir, wait: true); } - private void Setup(long size, LogSettings logSettings, DeviceType deviceType) + private void Setup(KVSettings kvSettings, DeviceType deviceType) { string filename = Path.Join(MethodTestDir, TestContext.CurrentContext.Test.Name + deviceType.ToString() + ".log"); log = CreateTestDevice(deviceType, filename); - logSettings.LogDevice = log; - store = new TsavoriteKV(size, logSettings); + kvSettings.LogDevice = log; + kvSettings.IndexSize = 1L 
<< 13; + + store = new(kvSettings + , StoreFunctions.Create(KeyStruct.Comparer.Instance) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions) + ); fullSession = store.NewSession(new Functions()); uContext = fullSession.UnsafeContext; } @@ -62,7 +70,7 @@ private void AssertCompleted(Status expected, Status actual) private (Status status, OutputStruct output) CompletePendingResult() { - uContext.CompletePendingWithOutputs(out var completedOutputs); + _ = uContext.CompletePendingWithOutputs(out var completedOutputs); return GetSinglePendingResult(completedOutputs); } @@ -71,7 +79,7 @@ private void AssertCompleted(Status expected, Status actual) [Category("Smoke")] public void NativeInMemWriteRead([Values] DeviceType deviceType) { - Setup(128, new LogSettings { PageSizeBits = 10, MemorySizeBits = 12, SegmentSizeBits = 22 }, deviceType); + Setup(new() { PageSize = 1L << 10, MemorySize = 1L << 12, SegmentSize = 1L << 22 }, deviceType); uContext.BeginUnsafe(); try @@ -82,7 +90,7 @@ public void NativeInMemWriteRead([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var status = uContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -100,7 +108,7 @@ public void NativeInMemWriteRead([Values] DeviceType deviceType) [Category("Smoke")] public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) { - Setup(128, new LogSettings { PageSizeBits = 10, MemorySizeBits = 12, SegmentSizeBits = 22 }, deviceType); + Setup(new() { PageSize = 1L << 10, MemorySize = 1L << 12, SegmentSize = 1L << 22 }, deviceType); uContext.BeginUnsafe(); try @@ -111,11 +119,11 @@ public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 
}; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var status = uContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); - uContext.Delete(ref key1, Empty.Default); + _ = uContext.Delete(ref key1, Empty.Default); status = uContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.NotFound), status); @@ -123,7 +131,7 @@ public void NativeInMemWriteReadDelete([Values] DeviceType deviceType) var key2 = new KeyStruct { kfield1 = 14, kfield2 = 15 }; var value2 = new ValueStruct { vfield1 = 24, vfield2 = 25 }; - uContext.Upsert(ref key2, ref value2, Empty.Default); + _ = uContext.Upsert(ref key2, ref value2, Empty.Default); status = uContext.Read(ref key2, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -147,8 +155,8 @@ public void NativeInMemWriteReadDelete2() const int count = 10; - // Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + // Setup(new () { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); uContext.BeginUnsafe(); try @@ -161,13 +169,13 @@ public void NativeInMemWriteReadDelete2() var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; var value = new ValueStruct { vfield1 = i, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); } for (int i = 0; i < 10 * count; i++) { var key1 = new KeyStruct { kfield1 = i, kfield2 = 14 }; - uContext.Delete(ref key1, Empty.Default); + _ = uContext.Delete(ref key1, Empty.Default); } for (int i = 0; i < 10 * count; i++) @@ -178,7 +186,7 @@ public void NativeInMemWriteReadDelete2() var 
status = uContext.Read(ref key1, ref input, ref output, Empty.Default); AssertCompleted(new(StatusCode.NotFound), status); - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); } for (int i = 0; i < 10 * count; i++) @@ -204,8 +212,8 @@ public unsafe void NativeInMemWriteRead2() int count = 200; - // Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); - Setup(128, new LogSettings { MemorySizeBits = 29 }, deviceType); + // Setup(128, new () { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); + Setup(new() { MemorySize = 1L << 29 }, deviceType); uContext.BeginUnsafe(); try @@ -218,7 +226,7 @@ public unsafe void NativeInMemWriteRead2() var i = r.Next(10000); var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); } r = new Random(10); @@ -231,9 +239,7 @@ public unsafe void NativeInMemWriteRead2() var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; if (uContext.Read(ref key1, ref input, ref output, Empty.Default).IsPending) - { - uContext.CompletePending(true); - } + _ = uContext.CompletePending(true); Assert.AreEqual(value.vfield1, output.value.vfield1); Assert.AreEqual(value.vfield2, output.value.vfield2); @@ -270,7 +276,7 @@ public async Task TestShiftHeadAddressUC([Values] DeviceType deviceType, [Values Random r = new(RandSeed); var sw = Stopwatch.StartNew(); - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -280,7 +286,7 @@ public async Task TestShiftHeadAddressUC([Values] DeviceType deviceType, [Values var i = r.Next(RandRange); var key1 = new 
KeyStruct { kfield1 = i, kfield2 = i + 1 }; var value = new ValueStruct { vfield1 = i, vfield2 = i + 1 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); } r = new Random(RandSeed); @@ -302,7 +308,7 @@ public async Task TestShiftHeadAddressUC([Values] DeviceType deviceType, [Values } if (syncMode == CompletionSyncMode.Sync) { - uContext.CompletePending(true); + _ = uContext.CompletePending(true); } else { @@ -329,7 +335,7 @@ public async Task TestShiftHeadAddressUC([Values] DeviceType deviceType, [Values CompletedOutputIterator outputs; if (syncMode == CompletionSyncMode.Sync) { - uContext.CompletePendingWithOutputs(out outputs, wait: true); + _ = uContext.CompletePendingWithOutputs(out outputs, wait: true); } else { @@ -362,7 +368,7 @@ public unsafe void NativeInMemRMWRefKeys([Values] DeviceType deviceType) InputStruct input = default; OutputStruct output = default; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -382,7 +388,7 @@ public unsafe void NativeInMemRMWRefKeys([Values] DeviceType deviceType) var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - uContext.RMW(ref key1, ref input, Empty.Default); + _ = uContext.RMW(ref key1, ref input, Empty.Default); } for (int j = 0; j < nums.Length; ++j) { @@ -391,7 +397,7 @@ public unsafe void NativeInMemRMWRefKeys([Values] DeviceType deviceType) input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; if (uContext.RMW(ref key1, ref input, ref output, Empty.Default).IsPending) { - uContext.CompletePending(true); + _ = uContext.CompletePending(true); } else { @@ -434,7 +440,7 @@ public unsafe void NativeInMemRMWNoRefKeys([Values] DeviceType deviceType) { InputStruct input = default; - 
Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -454,14 +460,14 @@ public unsafe void NativeInMemRMWNoRefKeys([Values] DeviceType deviceType) var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - uContext.RMW(ref key1, ref input, Empty.Default); + _ = uContext.RMW(ref key1, ref input, Empty.Default); } for (int j = 0; j < nums.Length; ++j) { var i = nums[j]; var key1 = new KeyStruct { kfield1 = i, kfield2 = i + 1 }; input = new InputStruct { ifield1 = i, ifield2 = i + 1 }; - uContext.RMW(key1, input); // no ref and do not set any other params + _ = uContext.RMW(key1, input); // no ref and do not set any other params } OutputStruct output = default; @@ -500,7 +506,7 @@ public void ReadNoRefKeyInputOutput([Values] DeviceType deviceType) { InputStruct input = default; - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -508,7 +514,7 @@ public void ReadNoRefKeyInputOutput([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var status = uContext.Read(key1, input, out OutputStruct output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -529,7 +535,7 @@ public void ReadNoRefKeyInputOutput([Values] DeviceType deviceType) [Category("TsavoriteKV")] public void ReadNoRefKey([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, 
deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -537,7 +543,7 @@ public void ReadNoRefKey([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var status = uContext.Read(key1, out OutputStruct output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -560,7 +566,7 @@ public void ReadNoRefKey([Values] DeviceType deviceType) [Category("Smoke")] public void ReadWithoutInput([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -570,7 +576,7 @@ public void ReadWithoutInput([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var status = uContext.Read(ref key1, ref output, Empty.Default); AssertCompleted(new(StatusCode.Found), status); @@ -592,7 +598,7 @@ public void ReadWithoutInput([Values] DeviceType deviceType) [Category("Smoke")] public void ReadBareMinParams([Values] DeviceType deviceType) { - Setup(128, new LogSettings { MemorySizeBits = 22, SegmentSizeBits = 22, PageSizeBits = 10 }, deviceType); + Setup(new() { MemorySize = 1L << 22, SegmentSize = 1L << 22, PageSize = 1L << 10 }, deviceType); uContext.BeginUnsafe(); try @@ -600,7 +606,7 @@ public void ReadBareMinParams([Values] DeviceType deviceType) var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; - 
uContext.Upsert(ref key1, ref value, Empty.Default); + _ = uContext.Upsert(ref key1, ref value, Empty.Default); var (status, output) = uContext.Read(key1); AssertCompleted(new(StatusCode.Found), status); diff --git a/test/Garnet.test/CacheSizeTrackerTests.cs b/test/Garnet.test/CacheSizeTrackerTests.cs index 92a3756136..3cd7c0de13 100644 --- a/test/Garnet.test/CacheSizeTrackerTests.cs +++ b/test/Garnet.test/CacheSizeTrackerTests.cs @@ -10,11 +10,14 @@ namespace Garnet.test { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + [TestFixture] public class CacheSizeTrackerTests { GarnetServer server; - TsavoriteKV objStore; + TsavoriteKV objStore; CacheSizeTracker cacheSizeTracker; [SetUp] @@ -30,7 +33,7 @@ public void Setup() [TearDown] public void TearDown() { - server.Dispose(); + server?.Dispose(); TestUtils.DeleteDirectory(TestUtils.MethodTestDir); } @@ -77,7 +80,7 @@ public void IncreaseEmptyPageCountTest() // Wait for the resizing to happen bool eventSignaled = epcEvent.Wait( - TimeSpan.FromSeconds(3 * LogSizeTracker.resizeTaskDelaySeconds)); // Wait for 3x resize task delay + TimeSpan.FromSeconds(3 * LogSizeTracker.resizeTaskDelaySeconds)); // Wait for 3x resize task delay if (!eventSignaled) { diff --git a/test/Garnet.test/GarnetObjectTests.cs b/test/Garnet.test/GarnetObjectTests.cs index 663f0ec249..77863951e4 100644 --- a/test/Garnet.test/GarnetObjectTests.cs +++ b/test/Garnet.test/GarnetObjectTests.cs @@ -8,10 +8,13 @@ namespace Garnet.test { + using ObjectStoreAllocator = GenericAllocator>>; + using ObjectStoreFunctions = StoreFunctions>; + [TestFixture] public class GarnetObjectTests { - TsavoriteKV store; + TsavoriteKV store; IDevice logDevice, objectLogDevice; [SetUp] @@ -145,27 +148,20 @@ public override bool CopyUpdater(ref byte[] key, ref IGarnetObject input, ref IG private void CreateStore() { - if (logDevice == null) - logDevice = Devices.CreateLogDevice(TestUtils.MethodTestDir + "/hlog.log"); - if 
(objectLogDevice == null) - objectLogDevice = Devices.CreateLogDevice(TestUtils.MethodTestDir + "/hlog.obj.log"); - var log = new LogSettings + logDevice ??= Devices.CreateLogDevice(TestUtils.MethodTestDir + "/hlog.log"); + objectLogDevice ??= Devices.CreateLogDevice(TestUtils.MethodTestDir + "/hlog.obj.log"); + + var kvSettings = new KVSettings { + IndexSize = 1L << 13, LogDevice = logDevice, ObjectLogDevice = objectLogDevice, - }; - - var ckpt = new CheckpointSettings - { CheckpointDir = TestUtils.MethodTestDir }; - var serializer = new SerializerSettings - { - valueSerializer = () => new MyGarnetObjectSerializer() - }; - - store = new TsavoriteKV(128, log, ckpt, serializer); + store = new(kvSettings + , StoreFunctions.Create(new ByteArrayKeyComparer(), () => new Tsavorite.core.ByteArrayBinaryObjectSerializer(), () => new MyGarnetObjectSerializer()) + , (allocatorSettings, storeFunctions) => new(allocatorSettings, storeFunctions)); } } diff --git a/test/Garnet.test/RespSortedSetTests.cs b/test/Garnet.test/RespSortedSetTests.cs index f0cc2959ec..e440c5d777 100644 --- a/test/Garnet.test/RespSortedSetTests.cs +++ b/test/Garnet.test/RespSortedSetTests.cs @@ -16,7 +16,12 @@ namespace Garnet.test { - using TestBasicGarnetApi = GarnetApi, BasicContext>; + using TestBasicGarnetApi = GarnetApi, + SpanByteAllocator>>, + BasicContext>, + GenericAllocator>>>>; [TestFixture] public class RespSortedSetTests diff --git a/website/docs/dev/tsavorite/storefunctions.md b/website/docs/dev/tsavorite/storefunctions.md new file mode 100644 index 0000000000..82ebf4c7ac --- /dev/null +++ b/website/docs/dev/tsavorite/storefunctions.md @@ -0,0 +1,54 @@ +--- +id: storefunctions +sidebar_label: StoreFunctions +title: StoreFunctions and Allocator Wrapper +--- + +# StoreFunctions and Allocator Struct Wrapper + +This section discusses both of these because they were part of a change to add two additional type args, `TStoreFunctions` and `TAllocator`, to `TsavoriteKV` as well as the various 
sessions and `*Context` (e.g. `BasicContext`). The purpose of both of these is to provide better performance by inlining calls. StoreFunctions also provides better logical design for the location of the operations that are store-level rather than session-level, as described below. + +From the caller point of view, we have two new type parameters on `TsavoriteKV`. The `TStoreFunctions` and `TAllocator` are also on `*.Context` (e.g. `BasicContext`). C# allows the 'using' alias only as the first lines of a namespace declaration, and the alias is file-local and recognized by subsequent 'using' aliases, so the "Api" aliases such as `BasicGarnetApi` in multiple files are much longer now. + +`TsavoriteKV` constructor has been changed to take 3 parameters: +- `KVSettings`. This replaces the previous long list of parameters. `LogSettings`, `ReadCacheSettings`, and `CheckpointSettings` have become internal classes, used only by `TsavoriteKV` (created from `TsavoriteKVSettings`) when instantiating the Allocators (e.g. the new `AllocatorSettings` has a `LogSettings` member). `SerializerSettings` has been removed in favor of methods on `IStoreFunctions`. +- An instance of `TStoreFunctions`. This is usually obtained by a call to a static `StoreFunctions` factory method to create it, passing the individual components to be contained. +- A factory `Func<>` for the `TAllocator` instantiation. + +These are described in more detail below. + +## StoreFunctions overview +`StoreFunctions` refers to the set of callback functions that reside at the `TsavoriteKV` level, analogous to `ISessionFunctions` at the session level. Similar to `ISessionFunctions`, there is an `IStoreFunctions`. However, the `ISessionFunctions` implementation can be either a struct or a class; Tsavorite provides the `SessionFunctionsBase` class, which may be inherited from, as a utility. Type parameters implemented by classes, however, do not generate inlined code.
 + +Because `IStoreFunctions` is intended to provide maximum inlining, Tsavorite does not provide a `StoreFunctionsBase`. Instead, Tsavorite provides a `StoreFunctions` struct implementation, with optional implementations passed in, for: +- Key Comparison (previously passed as an `ITsavoriteKeyComparer` interface, which is not inlined) +- Key and Value Serializers. Due to limitations on type arguments, these must be passed as `Func<>` which creates the implementation instance, and because serialization is an expensive operation, we stay with the `IObjectSerializer<Key>` and `IObjectSerializer<Value>` interfaces rather than clutter the `IStoreFunctions` interface with the Key and Value Serializer type args. +- Record disposal (previously on `ISessionFunctions` as multiple methods, and now only a single method with a "reason" parameter). +- Checkpoint completion callback (previously on `ISessionFunctions`). + +Of course, because `TsavoriteKV` has the `TStoreFunctions` type parameter, this can be any type implemented by the caller, including a class instance (which would be slower). + +## Allocator Wrapper overview + +As with `StoreFunctions`, the Allocator Wrapper is intended to provide maximal inlining. As noted above, type parameters implemented by classes do not generate inlined code; the JITted code is general, for a single `IntPtr`-sized reference. For structs, however, the JITter generates code specific to that specific struct type, in part because the size can vary (e.g. when pushed on the stack as a parameter). + +There is a hack that allows a type parameter implemented by a class to be inlined: the generic type must be for a struct that wraps the class type and makes calls on that class type in a non-generic way. This is the approach the Allocator Wrapper takes: +- The `BlittableAllocator`, `GenericAllocator`, and `SpanByteAllocator` classes are now the wrapper structs, with `Key`, `Value`, and `TStoreFunctions` type args. These implement the `IAllocator` interface.
+- There are new `BlittableAllocatorImpl`, `GenericAllocatorImpl`, and `SpanByteAllocatorImpl` classes that implement most of the functionality as previously, including inheriting from `AllocatorBase`. These also have `Key`, `Value`, and `TStoreFunctions` type args; the `TAllocator` is not needed as a type arg because it is known to be the `XxxAllocator` Wrapper struct. The wrapper structs contain an instance of the `XxxAllocatorImpl` class. +- `AllocatorBase` itself now contains a `_wrapper` field that is a struct-wrapper instance (which contains the instance pointer of the fully-derived allocator class) that is constrained to the `IAllocator` interface. `AllocatorBase` itself is templated on `TStoreFunctions` and `TAllocator`. + +The new Allocator definition supports two interfaces: +- `IAllocatorCallbacks`, which is inherited by `IAllocator`. This contains the derived-Allocator methods called by `AllocatorBase` that we want to inline rather than virtcall. The struct wrapper `AllocatorBase._wrapper` implements `IAllocatorCallbacks`, so the call on `_wrapper` inlines the call to `IAllocatorCallbacks`, which then calls down to the derived `*AllocatorImpl` class implementation. +- `IAllocator : IAllocatorCallbacks`. This is all inlined calls on the Allocator, including `IAllocatorCallbacks`. + - It turns out not to be possible to keep `IAllocatorCallbacks` as a separate type arg because `IAllocator` must propagate, but `IAllocatorCallbacks` remains as a separate interface (instead of combining it all into `IAllocator`) as the organization may be useful. + +There are still a number of abstract `AllocatorBase` methods, for which inlining of the method call is not important due to the time for the overall call. These are mostly IO and Scan/Iteration methods. + +Within `TsavoriteKV`, we have: +- `hlog` remains, but is now of type `TAllocator` (the wrapper struct). +- `hlogBase` is new; it is the `AllocatorBase`.
All the calls on the allocator that we don’t need to inline (or are not virtual, such as *Address) are now called on hlogBase. + - It might be cleaner to rename these to `allocator` and `allocatorBase`. + +There is a new `AllocatorSettings` class as well that is created by `TsavoriteKV` when instantiating the allocator. Allocator instantiation is done by a factory `Func` rather than being passed in as an object, because: +- The caller would have to know more internal stuff such as the epoch, whether to create readcache, and so on. +- We create temporary `TsavoriteKV`s, such as when Scanning or Compacting, so there is no way to pass these instances in. diff --git a/website/sidebars.js b/website/sidebars.js index 21c854e8e9..aa5ea43d27 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -24,7 +24,7 @@ const sidebars = { {type: 'category', label: 'Server Extensions', items: ["extensions/transactions", "extensions/raw-strings", "extensions/objects"]}, {type: 'category', label: 'Cluster Mode', items: ["cluster/overview", "cluster/replication", "cluster/key-migration"]}, {type: 'category', label: 'Developer Guide', items: ["dev/onboarding", "dev/code-structure", "dev/configuration", "dev/network", "dev/processing", "dev/garnet-api", - {type: 'category', label: 'Tsavorite - Storage Layer', collapsed: true, items: ["dev/tsavorite/intro", "dev/tsavorite/reviv", "dev/tsavorite/locking"]}, + {type: 'category', label: 'Tsavorite - Storage Layer', collapsed: true, items: ["dev/tsavorite/intro", "dev/tsavorite/reviv", "dev/tsavorite/locking", "dev/tsavorite/storefunctions"]}, "dev/transactions", "dev/custom-commands", "dev/collection-broker",