package sturdyc

import "time"

// Option configures the cache when it is created.
type Option func(*Config)

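// A minimal usage sketch from a consumer package. The sturdyc.New constructor
// and its (capacity, numShards, ttl, evictionPercentage, ...Option) signature
// are assumed from the library's README and from validateConfig below; they
// are not defined in this file:
//
//	capacity := 10_000
//	numShards := 10
//	ttl := 2 * time.Hour
//	evictionPercentage := 10
//
//	cache := sturdyc.New[string](capacity, numShards, ttl, evictionPercentage,
//		sturdyc.WithMissingRecordStorage(),
//		sturdyc.WithEvictionInterval(30*time.Second),
//	)
//	_ = cache
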
// WithMetrics is used to make the cache report metrics.
func WithMetrics(recorder MetricsRecorder) Option {
	return func(c *Config) {
		recorder.ObserveCacheSize(c.getSize)
		c.metricsRecorder = &distributedMetricsRecorder{recorder}
	}
}

// WithClock can be used to change the clock that the cache uses. This is useful for testing.
func WithClock(clock Clock) Option {
	return func(c *Config) {
		c.clock = clock
	}
}

// WithEvictionInterval sets the interval at which the cache scans a shard to
// evict expired entries. Setting this to a higher value will increase cache
// performance and is advised if you don't think you'll exceed the capacity.
// If the capacity is reached, the cache will still trigger an eviction.
func WithEvictionInterval(interval time.Duration) Option {
	return func(c *Config) {
		c.evictionInterval = interval
	}
}

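// A usage sketch (constructor signature assumed as above): scan each shard for
// expired entries once per minute instead of the default interval.
//
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithEvictionInterval(time.Minute),
//	)
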
// WithNoContinuousEvictions improves cache performance when the cache capacity
// is unlikely to be exceeded. While this setting disables the continuous
// eviction job, it still allows for the eviction of the least recently used
// items once the cache reaches its full capacity.
func WithNoContinuousEvictions() Option {
	return func(c *Config) {
		c.disableContinuousEvictions = true
	}
}

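// A usage sketch for a workload where the key space is known to fit within the
// configured capacity, so the background eviction job is unnecessary:
//
//	cache := sturdyc.New[string](100_000, 10, time.Hour, 10,
//		sturdyc.WithNoContinuousEvictions(),
//	)
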
// WithMissingRecordStorage allows the cache to mark keys as missing from the
// underlying data source. This lets you stop streams of outgoing requests for
// keys that don't exist. Missing keys still get the same TTL and refresh
// durations as any other record in the cache.
func WithMissingRecordStorage() Option {
	return func(c *Config) {
		c.storeMissingRecords = true
	}
}

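// A hedged sketch of how this is typically used with GetOrFetch (context and
// errors imports assumed). The sentinel errors sturdyc.ErrNotFound and
// sturdyc.ErrMissingRecord are assumed from the library's README; they are not
// defined in this file, and api.FetchUser is a hypothetical data source call:
//
//	ctx := context.Background()
//	value, err := cache.GetOrFetch(ctx, "user-123", func(ctx context.Context) (string, error) {
//		user, err := api.FetchUser(ctx, "123")
//		if errors.Is(err, api.ErrUserNotFound) {
//			// Telling the cache that the record is missing prevents repeated
//			// outgoing requests for the same non-existent key.
//			return "", sturdyc.ErrNotFound
//		}
//		return user, err
//	})
//	if errors.Is(err, sturdyc.ErrMissingRecord) {
//		// The key is cached as missing; no outgoing request was made.
//	}
//	_ = value
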
// WithEarlyRefreshes instructs the cache to refresh the keys that are in
// active rotation, thereby preventing them from ever expiring. This can have a
// significant impact on your application's latency as you're able to
// continuously serve frequently used keys from memory. An asynchronous
// background refresh gets scheduled when a key is requested again after a
// random time between minAsyncRefreshTime and maxAsyncRefreshTime has passed.
// This is an important distinction because it means that the cache won't just
// naively refresh every key it's ever seen. The third argument lets you
// provide a duration after which a refresh should become synchronous. If any
// of the refreshes fail, you'll continue to get the latest data from the
// cache for the duration of the TTL.
func WithEarlyRefreshes(minAsyncRefreshTime, maxAsyncRefreshTime, syncRefreshTime, retryBaseDelay time.Duration) Option {
	return func(c *Config) {
		c.earlyRefreshes = true
		c.minAsyncRefreshTime = minAsyncRefreshTime
		c.maxAsyncRefreshTime = maxAsyncRefreshTime
		c.syncRefreshTime = syncRefreshTime
		c.retryBaseDelay = retryBaseDelay
	}
}

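// A usage sketch with concrete durations (constructor signature assumed as
// above). Keys requested again between 10 and 30 seconds after being cached
// are refreshed in the background; keys not requested for over an hour are
// refreshed synchronously on the next request; failed refreshes are retried
// with delays derived from the base delay:
//
//	cache := sturdyc.New[string](10_000, 10, 2*time.Hour, 10,
//		sturdyc.WithEarlyRefreshes(
//			10*time.Second, // minAsyncRefreshTime
//			30*time.Second, // maxAsyncRefreshTime
//			time.Hour,      // syncRefreshTime
//			time.Second,    // retryBaseDelay
//		),
//	)
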
// WithRefreshCoalescing will make the cache refresh data from batchable
// endpoints more efficiently. It is going to create a buffer for each cache
// key permutation, and gather IDs until the bufferSize is reached, or the
// bufferDuration has passed.
//
// NOTE: This requires the WithEarlyRefreshes functionality to be enabled.
func WithRefreshCoalescing(bufferSize int, bufferDuration time.Duration) Option {
	return func(c *Config) {
		c.bufferRefreshes = true
		c.bufferSize = bufferSize
		c.bufferTimeout = bufferDuration
		c.permutationBufferMap = make(map[string]*buffer)
	}
}

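// A usage sketch: coalesce background refreshes into batches of up to 50 IDs,
// flushing a partial batch after 100ms. Note that validateConfig below panics
// if this option is used without WithEarlyRefreshes:
//
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithEarlyRefreshes(10*time.Second, 30*time.Second, time.Hour, time.Second),
//		sturdyc.WithRefreshCoalescing(50, 100*time.Millisecond),
//	)
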
// WithRelativeTimeKeyFormat allows you to control how time.Time values that
// are passed to the cache key functions are truncated.
func WithRelativeTimeKeyFormat(truncation time.Duration) Option {
	return func(c *Config) {
		c.useRelativeTimeKeyFormat = true
		c.keyTruncation = truncation
	}
}

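// A usage sketch: with minute-level truncation, time.Time parameters are
// truncated to the given duration before being formatted into the key, so
// values that truncate to the same instant produce the same cache key and
// share a cache entry (the key-generation helpers themselves are defined
// elsewhere in the library, not in this file):
//
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithRelativeTimeKeyFormat(time.Minute),
//	)
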
// WithLog allows you to set a custom logger for the cache. The cache isn't chatty,
// and will only log warnings and errors that would otherwise be a nightmare to
// debug. If you absolutely don't want any logs, you can pass in the sturdyc.NoopLogger.
func WithLog(log Logger) Option {
	return func(c *Config) {
		c.log = log
	}
}

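// A hedged sketch, assuming the Logger interface is satisfied by *slog.Logger
// from the standard library (log/slog import assumed); if it isn't, a small
// adapter around your logger of choice is needed:
//
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithLog(slog.Default()),
//	)
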
// WithDistributedStorage allows you to use the cache with a distributed
// key-value store. The "GetOrFetch" and "GetOrFetchBatch" functions will check
// this store first and only proceed to the underlying data source if the key
// is missing. When a record is retrieved from the underlying data source, it
// is written both to memory and to the distributed storage. You are
// responsible for setting TTL and eviction policies for the distributed
// storage. Sturdyc will only read and write records.
func WithDistributedStorage(storage DistributedStorage) Option {
	return func(c *Config) {
		c.distributedStorage = &distributedStorage{storage}
		c.distributedEarlyRefreshes = false
	}
}

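// A usage sketch with a hypothetical adapter (newRedisStorage is not part of
// the library; it stands in for your own type that implements the
// DistributedStorage interface, e.g. on top of Redis):
//
//	storage := newRedisStorage(redisClient) // hypothetical constructor
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithDistributedStorage(storage),
//	)
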
// WithDistributedStorageEarlyRefreshes is the distributed equivalent of the
// "WithEarlyRefreshes" option. It allows distributed records to be refreshed
// before their TTL expires. If a refresh fails, the cache will fall back to
// what was returned by the distributed storage. This ensures that data can be
// served for the duration of the TTL even if an upstream system goes down. To
// use this functionality, you need to implement an interface with two
// additional methods for deleting records compared to the simpler
// "WithDistributedStorage" option. This is because a distributed cache that is
// used with this option might have low refresh durations but high TTLs. If a
// record is deleted from the underlying data source, it needs to be propagated
// to the distributed storage before the TTL expires. However, please note that
// you are still responsible for managing the TTL and eviction policies for the
// distributed storage. Sturdyc will only delete records that have been removed
// at the underlying data source.
func WithDistributedStorageEarlyRefreshes(storage DistributedStorageWithDeletions, refreshAfter time.Duration) Option {
	return func(c *Config) {
		c.distributedStorage = storage
		c.distributedEarlyRefreshes = true
		c.distributedRefreshAfterDuration = refreshAfter
	}
}

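// A usage sketch: refresh distributed records once they are older than one
// minute, while the distributed store keeps them for a much longer TTL that
// you manage yourself (the adapter is hypothetical, as above, and must also
// implement the deletion methods of DistributedStorageWithDeletions):
//
//	storage := newRedisStorageWithDeletions(redisClient) // hypothetical constructor
//	cache := sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithDistributedStorageEarlyRefreshes(storage, time.Minute),
//	)
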
// WithDistributedMetrics instructs the cache to report additional metrics
// regarding its interaction with the distributed storage.
func WithDistributedMetrics(metricsRecorder DistributedMetricsRecorder) Option {
	return func(c *Config) {
		metricsRecorder.ObserveCacheSize(c.getSize)
		c.metricsRecorder = metricsRecorder
	}
}

// validateConfig is a helper function that panics if the cache has been configured incorrectly.
func validateConfig(capacity, numShards int, ttl time.Duration, evictionPercentage int, cfg *Config) {
	if capacity <= 0 {
		panic("capacity must be greater than 0")
	}
	if numShards <= 0 {
		panic("numShards must be greater than 0")
	}
	if ttl <= 0 {
		panic("ttl must be greater than 0")
	}
	if evictionPercentage < 0 || evictionPercentage > 100 {
		panic("evictionPercentage must be between 0 and 100")
	}
	if !cfg.earlyRefreshes && cfg.bufferRefreshes {
		panic("refresh buffering requires early refreshes to be enabled")
	}
	if cfg.bufferRefreshes && cfg.bufferSize < 1 {
		panic("bufferSize must be greater than 0")
	}
	if cfg.bufferRefreshes && cfg.bufferTimeout < 1 {
		panic("bufferTimeout must be greater than 0")
	}
	if cfg.evictionInterval < 1 {
		panic("evictionInterval must be greater than 0")
	}
	if cfg.minAsyncRefreshTime > cfg.maxAsyncRefreshTime {
		panic("minAsyncRefreshTime must be less than or equal to maxAsyncRefreshTime")
	}
	if cfg.maxAsyncRefreshTime > cfg.syncRefreshTime {
		panic("maxAsyncRefreshTime must be less than or equal to syncRefreshTime")
	}
	if cfg.retryBaseDelay < 0 {
		panic("retryBaseDelay must be greater than or equal to 0")
	}
}

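// A quick illustration of the duration invariants enforced above
// (minAsyncRefreshTime <= maxAsyncRefreshTime <= syncRefreshTime), assuming
// the constructor calls validateConfig as its name suggests. The following
// configuration would panic at construction time because the async window is
// inverted:
//
//	// panics: "minAsyncRefreshTime must be less than or equal to maxAsyncRefreshTime"
//	_ = sturdyc.New[string](10_000, 10, time.Hour, 10,
//		sturdyc.WithEarlyRefreshes(30*time.Second, 10*time.Second, time.Hour, time.Second),
//	)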