-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLRUCache.java
More file actions
277 lines (243 loc) · 9.26 KB
/
LRUCache.java
File metadata and controls
277 lines (243 loc) · 9.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
package lrucache;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
 * Thread-safe LRU cache with optional TTL expiry.
 *
 * Design decisions worth discussing in an interview:
 *
 * 1. ReadWriteLock instead of synchronized:
 *    Be precise about what it buys: get() must take the WRITE lock because a
 *    cache hit mutates the recency list (the node is promoted to the front).
 *    Only operations that never touch the list — size() — share the read
 *    lock. Under a get-heavy workload this therefore behaves like a single
 *    mutex; truly read-scalable designs defer recency updates (e.g. a
 *    buffered access queue drained under the write lock).
 *
 * 2. Sentinel head/tail nodes:
 *    Avoids null checks on every linked-list operation.
 *    addToFront() and removeNode() never touch null pointers.
 *
 * 3. Key stored in Node:
 *    When evicting the LRU node we need to remove it from the HashMap too.
 *    Storing the key in the node avoids a costly reverse lookup.
 *
 * 4. TTL checked lazily on get() + eagerly by background sweeper:
 *    Lazy check = zero overhead when TTL is not used.
 *    Background sweeper = prevents unbounded memory growth from dead entries
 *    that are never accessed again.
 */
public class LRUCache<K, V> {

    // -----------------------------------------------------------------------
    // Fields
    // -----------------------------------------------------------------------

    private final int capacity;
    private final long ttlMs;                       // 0 = no TTL
    private final Map<K, Node<K, V>> map;
    private final Node<K, V> head;                  // sentinel — most recently used end
    private final Node<K, V> tail;                  // sentinel — least recently used end
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final CacheStats stats = new CacheStats();
    private ScheduledExecutorService sweeper;       // non-null only if a TTL sweeper was started

    // -----------------------------------------------------------------------
    // Constructor (use Builder for cleaner construction)
    // -----------------------------------------------------------------------

    /**
     * @param capacity        maximum number of entries; must be &gt; 0
     * @param ttlMs           per-entry time-to-live in milliseconds; 0 disables TTL
     * @param sweepIntervalMs period of the background expiry sweep; the sweeper
     *                        only starts when both this and ttlMs are &gt; 0
     * @throws IllegalArgumentException if capacity is not positive
     */
    LRUCache(int capacity, long ttlMs, long sweepIntervalMs) {
        if (capacity <= 0) throw new IllegalArgumentException("capacity must be > 0");
        this.capacity = capacity;
        this.ttlMs = ttlMs;
        // Presize so the map never rehashes: HashMap grows once size exceeds
        // initialCapacity * 0.75, and we hold up to `capacity` live entries.
        this.map = new HashMap<>((int) (capacity / 0.75f) + 1);
        // Sentinel nodes — never hold real data
        head = new Node<>(null, null, 0);
        tail = new Node<>(null, null, 0);
        head.next = tail;
        tail.prev = head;
        if (ttlMs > 0 && sweepIntervalMs > 0) {
            // NOTE(review): this publishes `this` to the scheduler thread before
            // the constructor returns. It works in practice only because the
            // first sweep is delayed by sweepIntervalMs; a cleaner design would
            // start the sweeper from Builder.build() after construction.
            startSweeper(sweepIntervalMs);
        }
    }

    // -----------------------------------------------------------------------
    // Public API
    // -----------------------------------------------------------------------

    /**
     * Returns the value for key, or null on miss / expiry.
     * Acquires a WRITE lock because a cache hit moves the node to the front.
     *
     * Interview note: you could use a read lock here and upgrade to write
     * only on hit, but Java's ReentrantReadWriteLock does not support
     * lock upgrading — you'd have to release the read lock first, creating
     * a TOCTOU window. Keeping a single write lock is simpler and correct.
     */
    public V get(K key) {
        lock.writeLock().lock();
        try {
            Node<K, V> node = map.get(key);
            if (node == null) {
                stats.recordMiss();
                return null;
            }
            if (node.isExpired()) {
                // Lazily reap a dead entry: counts as both a miss (caller sees
                // null) and an eviction (the entry left the cache).
                removeNode(node);
                map.remove(key);
                stats.recordMiss();
                stats.recordEviction();
                return null;
            }
            // Cache hit — promote to MRU position
            moveToFront(node);
            stats.recordHit();
            return node.value;
        } finally {
            lock.writeLock().unlock();
        }
    }

    /**
     * Inserts or updates a key. Evicts the LRU entry if at capacity.
     * Always acquires a write lock.
     */
    public void put(K key, V value) {
        lock.writeLock().lock();
        try {
            Node<K, V> existing = map.get(key);
            if (existing != null) {
                // Update in place and promote. The write also refreshes the TTL;
                // -1 marks "never expires" when TTL is disabled (assumes
                // Node.isExpired() treats non-positive expiry as immortal —
                // TODO confirm against Node's implementation).
                existing.value = value;
                existing.expiryMs = (ttlMs > 0) ? System.currentTimeMillis() + ttlMs : -1L;
                moveToFront(existing);
                return;
            }
            if (map.size() >= capacity) {
                evictLRU();
            }
            Node<K, V> node = new Node<>(key, value, ttlMs);
            map.put(key, node);
            addToFront(node);
        } finally {
            lock.writeLock().unlock();
        }
    }

    /** Explicitly removes a key. Returns true if the key existed. */
    public boolean remove(K key) {
        lock.writeLock().lock();
        try {
            Node<K, V> node = map.remove(key);
            if (node == null) return false;
            removeNode(node);
            return true;
        } finally {
            lock.writeLock().unlock();
        }
    }

    /** Current number of entries (including potentially expired ones). */
    public int size() {
        lock.readLock().lock();
        try { return map.size(); }
        finally { lock.readLock().unlock(); }
    }

    /**
     * Returns the live stats object; callers see counter updates as they
     * happen (this is the cache's internal instance, not a snapshot).
     */
    public CacheStats stats() { return stats; }

    /**
     * Shuts down the background TTL sweeper if one was started.
     * Idempotent: ExecutorService.shutdown() is safe to call repeatedly.
     */
    public void shutdown() {
        if (sweeper != null) sweeper.shutdown();
    }

    // -----------------------------------------------------------------------
    // Linked-list helpers (must be called under write lock)
    // -----------------------------------------------------------------------

    /** Inserts node right after head (MRU position). */
    private void addToFront(Node<K, V> node) {
        node.prev = head;
        node.next = head.next;
        head.next.prev = node;
        head.next = node;
    }

    /** Unlinks node from wherever it currently sits. */
    private void removeNode(Node<K, V> node) {
        node.prev.next = node.next;
        node.next.prev = node.prev;
    }

    /** Removes then re-inserts at front — O(1) promotion. */
    private void moveToFront(Node<K, V> node) {
        removeNode(node);
        addToFront(node);
    }

    /** Evicts the node just before the tail (LRU position). */
    private void evictLRU() {
        Node<K, V> lru = tail.prev;
        if (lru == head) return; // cache is empty
        removeNode(lru);
        map.remove(lru.key);
        stats.recordEviction();
    }

    // -----------------------------------------------------------------------
    // Background TTL sweeper
    // -----------------------------------------------------------------------

    /**
     * Schedules a periodic task that removes expired entries.
     * Without this, entries that are never accessed again would stay in
     * memory until a put() triggers capacity eviction — potentially forever.
     *
     * Uses a daemon thread so it doesn't block JVM shutdown.
     */
    private void startSweeper(long intervalMs) {
        sweeper = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "lru-cache-sweeper");
            t.setDaemon(true);
            return t;
        });
        sweeper.scheduleAtFixedRate(this::sweepExpired, intervalMs, intervalMs, TimeUnit.MILLISECONDS);
    }

    /**
     * One sweep pass: collect expired keys, then unlink them. Two phases so
     * we never mutate the list while walking it. Holds the write lock for
     * the whole pass — O(n), acceptable for a periodic background task.
     */
    private void sweepExpired() {
        lock.writeLock().lock();
        try {
            List<K> toRemove = new ArrayList<>();
            // Walk the linked list from LRU end — more likely to find expired
            // entries there. (Cannot early-exit: ordering is by access time,
            // not by expiry time, so expired nodes may sit anywhere.)
            Node<K, V> cur = tail.prev;
            while (cur != head) {
                if (cur.isExpired()) toRemove.add(cur.key);
                cur = cur.prev;
            }
            for (K key : toRemove) {
                Node<K, V> node = map.remove(key);
                if (node != null) {
                    removeNode(node);
                    stats.recordEviction();
                }
            }
        } finally {
            lock.writeLock().unlock();
        }
    }

    // -----------------------------------------------------------------------
    // Builder
    // -----------------------------------------------------------------------

    /** Entry point for fluent construction: {@code LRUCache.<K, V>builder()...build()}. */
    public static <K, V> Builder<K, V> builder() { return new Builder<>(); }

    /** Fluent builder; all settings have sensible defaults. */
    public static class Builder<K, V> {
        private int capacity = 100;
        private long ttlMs = 0;                 // 0 = TTL disabled
        private long sweepIntervalMs = 30_000;  // 30 s default

        /** Maximum entry count; validated (must be &gt; 0) at build time. */
        public Builder<K, V> capacity(int capacity) {
            this.capacity = capacity;
            return this;
        }

        /** Sets TTL in milliseconds. Pass 0 to disable. */
        public Builder<K, V> ttlMs(long ttlMs) {
            this.ttlMs = ttlMs;
            return this;
        }

        /** Convenience method using java.time.Duration style. */
        public Builder<K, V> ttlSeconds(long seconds) {
            this.ttlMs = seconds * 1000L;
            return this;
        }

        /** Period of the background expiry sweep; ignored unless TTL is enabled. */
        public Builder<K, V> sweepIntervalMs(long ms) {
            this.sweepIntervalMs = ms;
            return this;
        }

        public LRUCache<K, V> build() {
            return new LRUCache<>(capacity, ttlMs, sweepIntervalMs);
        }
    }
}