132
|
1 /*******************************************************************************
|
|
2
|
|
3 copyright: Copyright (c) 2004 Kris Bell. All rights reserved
|
|
4
|
|
5 license: BSD style: $(LICENSE)
|
|
6
|
|
7 version: April 2004: Initial release
|
|
8
|
|
9 author: Kris
|
|
10
|
|
11 *******************************************************************************/
|
|
12
|
|
13 module tango.net.cluster.QueuedCache;
|
|
14
|
|
15 private import tango.time.Time;
|
|
16
|
|
17 private import tango.net.cluster.model.ICache;
|
|
18
|
|
19 /******************************************************************************
|
|
20
|
|
21 QueuedCache extends the basic cache type by adding a limit to
|
|
22 the number of items contained at any given time. In addition,
|
|
23 QueuedCache sorts the cache entries such that those entries
|
|
24 frequently accessed are at the head of the queue, and those
|
|
25 least frequently accessed are at the tail. When the queue
|
|
26 becomes full, old entries are dropped from the tail and are
|
|
27 reused to house new cache entries.
|
|
28
|
|
29 This is great for keeping commonly accessed items around, while
|
|
30 limiting the amount of memory used. Typically, the queue size
|
|
31 would be set in the hundreds (perhaps thousands).
|
|
32
|
|
33 Note that key.init cannot be used as a valid key
|
|
34
|
|
35 ******************************************************************************/
|
|
36
|
|
class QueuedCache (K, V) : ICache!(K, V)
{
        // maps each key to its slot within the linked queue
        private QueuedEntry*[K] map;

        // head (most-recently referenced) and tail (least-recently
        // referenced) of the queue
        private QueuedEntry* head,
                             tail;

        /**********************************************************************

                Construct a cache with the specified maximum number of
                entries. Additions to the cache beyond this number will
                reuse the slot of the least-recently-referenced cache
                entry. The capacity must be greater than zero, since new
                additions always recycle a slot from the queue tail.

        **********************************************************************/

        this (uint capacity)
        {
                // a zero capacity would leave 'tail' null and 'head'
                // pointing at an empty array, making the first put()
                // dereference null inside addEntry() — reject it here
                assert (capacity > 0);

                // pre-allocate every slot up front and chain them into
                // a doubly-linked list; slots are reused, never freed
                auto set = new QueuedEntry [capacity];

                foreach (inout entry; set)
                        {
                        if (tail)
                            tail.next = &entry;
                        entry.prev = tail;
                        tail = &entry;
                        }
                head = set.ptr;
        }

        /**********************************************************************

                Get the cache entry identified by the given key. On a
                hit, 'value' is filled in, the entry is promoted to the
                most-recently-used position, and true is returned.

        **********************************************************************/

        synchronized bool get (K key, inout V value)
        {
                // if we find 'key' then move it to the list head
                auto e = lookup (key);
                if (e)
                   {
                   value = reReference(e).value;
                   return true;
                   }
                return false;
        }

        /**********************************************************************

                Get the cache entry identified by the given key. Returns
                V.init on a miss; a hit promotes the entry to the head
                of the queue.

        **********************************************************************/

        synchronized V get (K key)
        {
                // if we find 'key' then move it to the list head
                auto e = lookup (key);
                if (e)
                    return reReference(e).value;
                return V.init;
        }

        /**********************************************************************

                Place an entry into the cache and associate it with the
                provided key. Note that there can be only one entry for
                any particular key. If two entries are added with the
                same key, the second effectively overwrites the first.

                An optional time value allows for testing whether an
                existing entry is newer than our provided one. Where
                the provided time value is lesser, the put() operation
                will be abandoned and false is returned.

                Returns true if the cache was updated.

        **********************************************************************/

        synchronized bool put (K key, V value, Time time = Time.init)
        {
                // key.init marks an unused slot in addEntry(), so it
                // can never be a valid key
                assert (key !is key.init);

                auto e = lookup (key);
                if (e is null)
                    map[key] = e = addEntry();
                else
                   if (time < e.time)
                       return false;

                reReference(e).set (key, value, time);
                return true;
        }

        /**********************************************************************

                Same as above, but being careful to avoid heap activity
                where the provided key and value are potentially aliased:
                the key() and value() delegates are only invoked when a
                copy is actually needed.

        **********************************************************************/

        synchronized bool put (K peek, K delegate() key, V delegate() value, Time time = Time.init)
        {
                // key.init marks an unused slot in addEntry(), so it
                // can never be a valid key
                assert (peek !is peek.init);

                auto e = lookup (peek);
                if (e is null)
                    // new entry: materialize a private copy of the key
                    map[peek = key()] = e = addEntry();
                else
                   if (time < e.time)
                       return false;
                   else
                      // reuse the key already held by the entry so the
                      // map key and entry key remain the same instance
                      peek = e.key;

                reReference(e).set (peek, value(), time);
                return true;
        }

        /**********************************************************************

                Remove (and return) the cache entry associated with the
                provided key, where that entry is older than 'time'.
                Returns V.init if there is no such entry.

        **********************************************************************/

        synchronized V remove (K key, Time time = Time.max)
        {
                auto e = lookup (key);
                if (e && (e.time < time))
                   {
                   auto value = e.value;

                   // don't actually kill the list entry -- just place
                   // it at the list 'tail' ready for subsequent reuse
                   deReference(e).set (K.init, V.init, Time.min);

                   map.remove (key);
                   return value;
                   }

                return V.init;
        }

        /**********************************************************************

                Iterate over elements

                Note that this needs to be synchronized, and can therefore
                be very costly in terms of blocking other threads. Use with
                caution

        **********************************************************************/

        synchronized int opApply (int delegate(inout K key, inout V value) dg)
        {
                int ret;
                foreach (k, v; map)
                         if ((ret = dg(k, v.value)) != 0)
                              break;
                return ret;
        }

        /**********************************************************************

                Locate the queue entry for the given key, or null when
                the key is not present.

        **********************************************************************/

        private final QueuedEntry* lookup (K key)
        {
                auto p = key in map;
                return (p ? *p : null);
        }

        /**********************************************************************

                Place a cache entry at the tail of the queue. This makes
                it the least-recently referenced.

        **********************************************************************/

        private final QueuedEntry* deReference (QueuedEntry* entry)
        {
                if (entry !is tail)
                   {
                   // adjust head
                   if (entry is head)
                       head = entry.next;

                   // move to tail
                   entry.extract;
                   tail = entry.append (tail);
                   }
                return entry;
        }

        /**********************************************************************

                Move a cache entry to the head of the queue. This makes
                it the most-recently referenced.

        **********************************************************************/

        private final QueuedEntry* reReference (QueuedEntry* entry)
        {
                if (entry !is head)
                   {
                   // adjust tail
                   if (entry is tail)
                       tail = entry.prev;

                   // move to head
                   entry.extract;
                   head = entry.prepend (head);
                   }
                return entry;
        }

        /**********************************************************************

                Add an entry into the queue. If the queue is full, the
                least-recently-referenced entry is reused for the new
                addition.

        **********************************************************************/

        private final QueuedEntry* addEntry ()
        {
                // steal from tail ...
                auto entry = tail;

                // we're re-using an old QueuedEntry, so remove
                // the old name from the hash-table first
                if (entry.key !is entry.key.init)
                    map.remove (entry.key);

                // place at head of list
                return reReference (entry);
        }

        /**********************************************************************

                A doubly-linked list entry, used as a wrapper for queued
                cache entries.

        **********************************************************************/

        private static struct QueuedEntry
        {
                K            key;
                QueuedEntry* prev,
                             next;
                Time         time;
                V            value;

                /**************************************************************

                        Set this entry with the given arguments.

                **************************************************************/

                QueuedEntry* set (K key, V value, Time time)
                {
                        this.value = value;
                        this.time = time;
                        this.key = key;
                        return this;
                }

                /**************************************************************

                        Insert this entry into the linked-list just in front
                        of the given entry.

                **************************************************************/

                QueuedEntry* prepend (QueuedEntry* before)
                {
                        assert (before);

                        prev = before.prev;

                        // patch 'prev' to point at me
                        if (prev)
                            prev.next = this;

                        // patch 'before' to point at me
                        next = before;
                        return before.prev = this;
                }

                /**************************************************************

                        Add this entry into the linked-list just after the
                        given entry.

                **************************************************************/

                QueuedEntry* append (QueuedEntry* after)
                {
                        assert (after);

                        next = after.next;

                        // patch 'next' to point at me
                        if (next)
                            next.prev = this;

                        // patch 'after' to point at me
                        prev = after;
                        return after.next = this;
                }

                /**************************************************************

                        Remove this entry from the linked-list. The previous
                        and next entries are patched together appropriately.

                **************************************************************/

                QueuedEntry* extract ()
                {
                        // make 'prev' and 'next' entries see each other
                        if (prev)
                            prev.next = next;

                        if (next)
                            next.prev = prev;

                        // Murphy's law
                        next = prev = null;
                        return this;
                }
        }
}
|
|
373
|
|
374
|
|
375
|
|
version (QueuedCache)
{
        import tango.io.Stdout;

        void main()
        {
                // instantiate with a second type pair to exercise the template
                new QueuedCache!(int, char[])(100);

                // a two-slot cache: the third put must evict the LRU entry
                auto cache = new QueuedCache!(char[], int)(2);

                cache.put ("one", 1);
                cache.put ("two", 2);

                // touch "one" so that "two" becomes least-recently used
                int hit;
                cache.get ("one", hit);

                // this recycles the slot holding "two"
                cache.put ("three", 3);

                // iterate via the public opApply ...
                foreach (k, v; cache)
                         Stdout.formatln ("{}:{}", k, v);

                // ... and via the raw backing map for comparison
                foreach (k, v; cache.map)
                         Stdout.formatln ("{}:{}", k, v.value);
        }
}
|