/*-----------------------------------------------------------------------------
 * (c) The AQUA Project, Glasgow University, 1995-1998
 * (c) The GHC Team, 1999
 * Dynamically expanding linear hash tables, as described in
 * Per-Åke Larson, "Dynamic Hash Tables," CACM 31(4), April 1988,
 * -------------------------------------------------------------------------- */

#include "PosixSource.h"

#define HSEGSIZE    1024    /* Size of a single hash table segment */
                            /* Also the minimum size of a hash table */
#define HDIRSIZE    1024    /* Size of the segment directory */
                            /* Maximum hash table size is HSEGSIZE * HDIRSIZE */
#define HLOAD       5       /* Maximum average load of a single hash bucket */

#define HCHUNK      (1024 * sizeof(W_) / sizeof(HashList))
                            /* Number of HashList cells to allocate in one go */

/* Linked list of (key, data) pairs for separate chaining */
    struct hashlist *next;    /* Next cell in bucket chain (same hash value) */

typedef struct hashlist HashList;

    int split;                /* Next bucket to split when expanding */
    int max;                  /* Max bucket of smaller table */
    int mask1;                /* Mask for doing the mod of h_1 (smaller table) */
    int mask2;                /* Mask for doing the mod of h_2 (larger table) */
    int kcount;               /* Number of keys */
    int bcount;               /* Number of buckets */
    HashList **dir[HDIRSIZE]; /* Directory of segments */
    HashFunction *hash;       /* Hash function */
    CompareFunction *compare; /* Key comparison function */

/* -----------------------------------------------------------------------------
 * Hash first using the smaller table. If the bucket is less than the
 * next bucket to be split, re-hash using the larger table.
 * -------------------------------------------------------------------------- */
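/*
 * Illustration (assumed values, not taken from a live table): suppose
 * mask1 = 0x3ff (1024 buckets), mask2 = 0x7ff and split = 5.  A key whose
 * low bits give bucket 3 lands below the split point, so bucket 3 has
 * already been divided between buckets 3 and 1027; re-hashing with mask2
 * picks the right one.  A key giving bucket 700 has not been split yet,
 * so the smaller-table address is used directly.
 */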
hashWord(HashTable *table, StgWord key)

    /* Strip the boring zero bits */
    key /= sizeof(StgWord);

    /* Mod the size of the hash table (a power of 2) */
    bucket = key & table->mask1;

    if (bucket < table->split) {
        /* Mod the size of the expanded hash table (also a power of 2) */
        bucket = key & table->mask2;

hashStr(HashTable *table, char *key)

    h = h % 1048583;    /* some random large prime */

    /* Mod the size of the hash table (a power of 2) */
    bucket = h & table->mask1;

    if (bucket < table->split) {
        /* Mod the size of the expanded hash table (also a power of 2) */
        bucket = h & table->mask2;

compareWord(StgWord key1, StgWord key2)

    return (key1 == key2);

compareStr(StgWord key1, StgWord key2)

    return (strcmp((char *)key1, (char *)key2) == 0);

/* -----------------------------------------------------------------------------
 * Allocate a new segment of the dynamically growing hash table.
 * -------------------------------------------------------------------------- */

allocSegment(HashTable *table, int segment)

    table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),

/* -----------------------------------------------------------------------------
 * Expand the larger hash table by one bucket, and split one bucket
 * from the smaller table into two parts. Only the bucket referenced
 * by @table->split@ is affected by the expansion.
 * -------------------------------------------------------------------------- */
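/*
 * For example (assumed values): with max = 1024 and split = 5, the next
 * call splits bucket 5.  Every entry chained there is re-hashed with
 * mask2 and either stays in bucket 5 or moves to the new bucket
 * 1029 (= max + split).  Once split catches up with max, the table has
 * doubled and the masks are shifted up by one bit.
 */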
expand(HashTable *table)

    if (table->split + table->max >= HDIRSIZE * HSEGSIZE)
        /* Wow! That's big. Too big, so don't expand. */

    /* Calculate indices of bucket to split */
    oldsegment = table->split / HSEGSIZE;
    oldindex = table->split % HSEGSIZE;

    newbucket = table->max + table->split;

    /* And the indices of the new bucket */
    newsegment = newbucket / HSEGSIZE;
    newindex = newbucket % HSEGSIZE;

    allocSegment(table, newsegment);

    if (++table->split == table->max) {
        table->mask1 = table->mask2;
        table->mask2 = table->mask2 << 1 | 1;

    /* Split the bucket, paying no attention to the original order */

    for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
        if (table->hash(table, hl->key) == newbucket) {

    table->dir[oldsegment][oldindex] = old;
    table->dir[newsegment][newindex] = new;

lookupHashTable(HashTable *table, StgWord key)

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
        if (table->compare(hl->key, key))

/* -----------------------------------------------------------------------------
 * We allocate the hashlist cells in large chunks to cut down on malloc
 * overhead. Although we keep a free list of hashlist cells, we make
 * no effort to actually return the space to the malloc arena.
 * -------------------------------------------------------------------------- */
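/*
 * Concretely: allocHashList() first tries to pop a cell from freeList;
 * failing that it mallocs HCHUNK cells in one go, hands out the first
 * cell and threads the remaining HCHUNK - 1 cells onto freeList.  Each
 * chunk is also remembered on the chunk list below so that the whole
 * arena can be released in one sweep later on.
 */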
static HashList *freeList = NULL;

static struct chunkList {
    struct chunkList *next;

    struct chunkList *cl;

    if ((hl = freeList) != NULL) {

        hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");
        cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");

        for (p = freeList; p < hl + HCHUNK - 1; p++)

freeHashList(HashList *hl)

insertHashTable(HashTable *table, StgWord key, void *data)

    // Disable this assert; sometimes it's useful to be able to
    // overwrite entries in the hash table.
    // ASSERT(lookupHashTable(table, key) == NULL);

    /* When the average load gets too high, we expand the table */
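    /* For example, with HLOAD = 5 and the initial 1024 buckets, the
     * first expansion is triggered by the 5120th insertion. */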
    if (++table->kcount >= HLOAD * table->bcount)

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    hl = allocHashList();

    hl->next = table->dir[segment][index];
    table->dir[segment][index] = hl;

removeHashTable(HashTable *table, StgWord key, void *data)

    HashList *prev = NULL;

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
        if (table->compare(hl->key, key) && (data == NULL || hl->data == data)) {
            table->dir[segment][index] = hl->next;

            prev->next = hl->next;

    ASSERT(data == NULL);
/* -----------------------------------------------------------------------------
 * When we free a hash table, we are also good enough to free the
 * data part of each (key, data) pair, as long as our caller can tell
 * us how to do it.
 * -------------------------------------------------------------------------- */
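/* Passing NULL as freeDataFun releases the buckets and segment arrays
 * but leaves every data pointer untouched. */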
freeHashTable(HashTable *table, void (*freeDataFun)(void *) )

    /* The last bucket with something in it is table->max + table->split - 1 */
    segment = (table->max + table->split - 1) / HSEGSIZE;
    index = (table->max + table->split - 1) % HSEGSIZE;

    while (segment >= 0) {
        for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
            if (freeDataFun != NULL)
                (*freeDataFun)(hl->data);

        stgFree(table->dir[segment]);
        index = HSEGSIZE - 1;

/* -----------------------------------------------------------------------------
 * When we initialize a hash table, we set up the first segment as well,
 * initializing all of the first segment's hash buckets to NULL.
 * -------------------------------------------------------------------------- */

allocHashTable_(HashFunction *hash, CompareFunction *compare)

    table = stgMallocBytes(sizeof(HashTable), "allocHashTable");

    allocSegment(table, 0);

    for (hb = table->dir[0]; hb < table->dir[0] + HSEGSIZE; hb++)

    table->max = HSEGSIZE;
    table->mask1 = HSEGSIZE - 1;
    table->mask2 = 2 * HSEGSIZE - 1;
    table->bcount = HSEGSIZE;
    table->compare = compare;

    return allocHashTable_(hashWord, compareWord);

allocStrHashTable(void)

    return allocHashTable_((HashFunction *)hashStr,
                           (CompareFunction *)compareStr);
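/*
 * A minimal usage sketch (illustrative names; assumes the declarations
 * from the accompanying header):
 *
 *     HashTable *t = allocHashTable();             // word-keyed table
 *     insertHashTable(t, (StgWord)key, data);
 *     void *d = lookupHashTable(t, (StgWord)key);  // NULL if not found
 *     removeHashTable(t, (StgWord)key, NULL);      // NULL matches any data
 *     freeHashTable(t, NULL);                      // keep the data alive
 *
 * allocStrHashTable() builds the same structure keyed on C strings.
 */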
    struct chunkList *cl;

    while ((cl = chunks) != NULL) {