132
|
1 module FileBucket;
|
|
2
|
|
3 private import tango.io.FilePath,
|
|
4 tango.io.FileConduit;
|
|
5
|
|
6 private import tango.core.Exception;
|
|
7
|
|
/******************************************************************************

        FileBucket implements a simple mechanism to store and recover a
        large quantity of data for the duration of the hosting process.
        It is intended to act as a local-cache for a remote data-source,
        or as a spillover area for large in-memory cache instances.

        Note that any and all stored data is rendered invalid the moment
        a FileBucket object is garbage-collected.

        The implementation follows a fixed-capacity record scheme, where
        content can be rewritten in-place until said capacity is reached.
        At such time, the altered content is moved to a larger capacity
        record at end-of-file, and a hole remains at the prior location.
        These holes are not collected, since the lifespan of a FileBucket
        is limited to that of the host process.

        All index keys must be unique. Writing to the FileBucket with an
        existing key will overwrite any previous content. What follows
        is a contrived example:

        ---
        char[] text = "this is a test";

        auto bucket = new FileBucket (new FilePath("bucket.bin"), FileBucket.HalfK);

        // insert some data, and retrieve it again
        bucket.put ("a key", text);
        char[] b = cast(char[]) bucket.get ("a key");

        assert (b == text);
        bucket.close;
        ---

******************************************************************************/
|
|
43
|
|
class FileBucket
{
        /**********************************************************************

                Define the capacity (block-size) of each record

        **********************************************************************/

        struct BlockSize
        {
                int capacity;
        }

        // path of the backing storage file
        private FilePath path;

        // basic capacity for each record
        private BlockSize block;

        // where content is stored
        private FileConduit file;

        // pointers to file records, indexed by key
        private Record[char[]] map;

        // current (allocated) file size
        private long fileSize;

        // current file usage; high-water mark of allocated records
        private long waterLine;

        // supported block sizes. Each is a power-of-two minus one, so
        // the capacity doubles as an alignment mask in createBucket()
        public static const BlockSize EighthK    = {128-1},
                                      HalfK      = {512-1},
                                      OneK       = {1024*1-1},
                                      TwoK       = {1024*2-1},
                                      FourK      = {1024*4-1},
                                      EightK     = {1024*8-1},
                                      SixteenK   = {1024*16-1},
                                      ThirtyTwoK = {1024*32-1},
                                      SixtyFourK = {1024*64-1};


        /**********************************************************************

                Construct a FileBucket with the provided path and record-
                size. Selecting a record size that roughly matches the
                serialized content will limit 'thrashing'.

        **********************************************************************/

        this (char[] path, BlockSize block)
        {
                this (new FilePath(path), block);
        }

        /**********************************************************************

                Construct a FileBucket with the provided path, record-size,
                and initial record count. The latter causes records to be
                pre-allocated, saving a certain amount of growth activity.
                Selecting a record size that roughly matches the serialized
                content will limit 'thrashing'.

        **********************************************************************/

        this (FilePath path, BlockSize block, uint initialRecords = 100)
        {
                this.path = path;
                this.block = block;

                // open a storage file
                file = new FileConduit (path, FileConduit.ReadWriteCreate);

                // set initial file size (can be zero)
                fileSize = initialRecords * block.capacity;
                file.seek (fileSize);
                file.truncate ();
        }

        /**********************************************************************

                Return the block-size in use for this FileBucket

        **********************************************************************/

        int getBufferSize ()
        {
                // capacity is stored as 2^n - 1 (a mask); +1 restores
                // the actual power-of-two block size
                return block.capacity+1;
        }

        /**********************************************************************

                Return where the FileBucket is located

        **********************************************************************/

        FilePath getFilePath ()
        {
                return path;
        }

        /**********************************************************************

                Return the currently populated size of this FileBucket

        **********************************************************************/

        synchronized long length ()
        {
                return waterLine;
        }

        /**********************************************************************

                Return the serialized data for the provided key. Returns
                null if the key was not found.

        **********************************************************************/

        synchronized void[] get (char[] key)
        {
                Record* p = key in map;

                if (p)
                    return (*p).read (this);
                return null;
        }

        /**********************************************************************

                Remove the provided key from this FileBucket. Note that
                the record's file space is not reclaimed; only the index
                entry is dropped.

        **********************************************************************/

        synchronized void remove (char[] key)
        {
                map.remove (key);
        }

        /**********************************************************************

                Write a serialized block of data, and associate it with
                the provided key. All keys must be unique, and it is the
                responsibility of the programmer to ensure this. Reusing
                an existing key will overwrite previous data.

                Note that data is allowed to grow within the occupied
                bucket until it becomes larger than the allocated space.
                When this happens, the data is moved to a larger bucket
                at the file tail.

        **********************************************************************/

        synchronized void put (char[] key, void[] data)
        {
                // Record is a class (reference type), so we can work
                // with the reference directly rather than a pointer to
                // a function-local (which the prior code relied upon)
                Record* p = key in map;
                Record  r;

                if (p)
                    r = *p;
                else
                   {
                   r = new Record;
                   map [key] = r;
                   }
                r.write (this, data, block);
        }

        /**********************************************************************

                Close this FileBucket -- all content is lost.

        **********************************************************************/

        synchronized void close ()
        {
                if (file)
                   {
                   file.detach;
                   file = null;
                   map = null;
                   }
        }

        /**********************************************************************

                Each Record takes up a number of 'pages' within the file.
                The size of these pages is determined by the BlockSize
                provided during FileBucket construction. Additional space
                at the end of each block is potentially wasted, but enables
                content to grow in size without creating a myriad of holes.

        **********************************************************************/

        private static class Record
        {
                // file position of this record's content
                private long offset;

                // bytes currently stored, and allocated slot size.
                // capacity of -1 forces slot creation on first write
                private int length,
                            capacity = -1;

                /**************************************************************

                        Throw an IOException noting which bucket file
                        suffered the short read/write.

                **************************************************************/

                private static void eof (FileBucket bucket)
                {
                        throw new IOException ("Unexpected EOF in FileBucket '"~bucket.path.toString()~"'");
                }

                /**************************************************************

                        Read the content of this record. This should be
                        protected from thread-contention at a higher level.

                **************************************************************/

                void[] read (FileBucket bucket)
                {
                        void[] data = new ubyte [length];

                        bucket.file.seek (offset);
                        if (bucket.file.read (data) != length)
                            eof (bucket);

                        return data;
                }

                /**************************************************************

                        Write content into this record, relocating it to
                        a larger slot at end-of-file when it no longer
                        fits. This should be protected from thread-
                        contention at a higher level.

                **************************************************************/

                void write (FileBucket bucket, void[] data, BlockSize block)
                {
                        // explicit narrowing: record sizes are int-ranged
                        length = cast(int) data.length;

                        // create new slot if we exceed capacity
                        if (length > capacity)
                            createBucket (bucket, length, block);

                        // locate to start of content
                        bucket.file.seek (offset);

                        // write content
                        if (bucket.file.write (data) != length)
                            eof (bucket);
                }

                /**************************************************************

                        Allocate a fresh slot at the waterline, rounded
                        up to the block size, growing the physical file
                        (with doubling) when the waterline passes the
                        current file size.

                **************************************************************/

                void createBucket (FileBucket bucket, int bytes, BlockSize block)
                {
                        offset = bucket.waterLine;

                        // block.capacity is 2^n - 1, so this rounds bytes
                        // up to the next multiple of the block size
                        capacity = (bytes + block.capacity) & ~block.capacity;

                        bucket.waterLine += capacity;
                        if (bucket.waterLine > bucket.fileSize)
                           {
                           // grow the filesize
                           bucket.fileSize = bucket.waterLine * 2;

                           // expand the physical file size
                           bucket.file.seek (bucket.fileSize);
                           bucket.file.truncate ();
                           }
                }
        }
}
|
|
318
|
|
319
|