1 /*-----------------------------------------------------------------------------
3 * (c) The AQUA Project, Glasgow University, 1995-1998
4 * (c) The GHC Team, 1999
6 * Dynamically expanding linear hash tables, as described in
7 * Per-Åke Larson, ``Dynamic Hash Tables,'' CACM 31(4), April 1988,
9 * -------------------------------------------------------------------------- */
11 #include "PosixSource.h"
/* Tuning constants for the linear hash table.  HSEGSIZE must be a
 * power of 2: bucket indices are computed by masking with (size - 1)
 * in the hash functions below. */
19 #define HSEGSIZE 1024 /* Size of a single hash table segment */
20 /* Also the minimum size of a hash table */
21 #define HDIRSIZE 1024 /* Size of the segment directory */
22 /* Maximum hash table size is HSEGSIZE * HDIRSIZE */
23 #define HLOAD 5 /* Maximum average load of a single hash bucket */
25 #define HCHUNK (1024 * sizeof(W_) / sizeof(HashList))
26 /* Number of HashList cells to allocate in one go */
29 /* Linked list of (key, data) pairs for separate chaining */
/* NOTE(review): the key and data members of struct hashlist are not
 * visible in this excerpt; only the chain link is shown. */
33 struct hashlist *next; /* Next cell in bucket chain (same hash value) */
36 typedef struct hashlist HashList;
/* A hash function maps a key to a bucket index for a given table. */
38 typedef int HashFunction(HashTable *table, StgWord key);
/* A comparison function returns nonzero iff the two keys are equal
 * (see compareWord / compareStr below). */
39 typedef int CompareFunction(StgWord key1, StgWord key2);
/* Fields of the hash table (the struct header itself is elided from
 * this listing).  This is a Larson-style linear hash table: it grows
 * one bucket at a time by splitting bucket 'split' of the smaller
 * (mask1-sized) table into two buckets of the larger (mask2-sized)
 * table. */
42 int split; /* Next bucket to split when expanding */
43 int max; /* Max bucket of smaller table */
44 int mask1; /* Mask for doing the mod of h_1 (smaller table) */
45 int mask2; /* Mask for doing the mod of h_2 (larger table) */
46 int kcount; /* Number of keys */
47 int bcount; /* Number of buckets */
48 HashList **dir[HDIRSIZE]; /* Directory of segments */
49 HashFunction *hash; /* hash function */
50 CompareFunction *compare; /* key comparison function */
53 /* -----------------------------------------------------------------------------
54 * Hash first using the smaller table. If the bucket is less than the
55 * next bucket to be split, re-hash using the larger table.
56 * -------------------------------------------------------------------------- */
/* NOTE(review): the return type, the declaration of 'bucket' and the
 * final return statement are elided from this listing. */
59 hashWord(HashTable *table, StgWord key)
63 /* Strip the boring zero bits */
/* Word-aligned keys (e.g. pointers) have zero low-order bits; divide
 * them out so they contribute to the bucket index. */
64 key /= sizeof(StgWord);
66 /* Mod the size of the hash table (a power of 2) */
67 bucket = key & table->mask1;
/* Buckets below 'split' have already been split into the larger
 * table, so re-hash with the wider mask to pick the right half. */
69 if (bucket < table->split) {
70 /* Mod the size of the expanded hash table (also a power of 2) */
71 bucket = key & table->mask2;
/* String hash.  NOTE(review): the accumulation of 'h' over the
 * characters of 'key' is elided from this listing; what remains is
 * the reduction mod a large prime, then the same mask-and-resplit
 * logic as hashWord. */
77 hashStr(HashTable *table, char *key)
86 h = h % 1048583; /* some random large prime */
89 /* Mod the size of the hash table (a power of 2) */
90 bucket = h & table->mask1;
/* As in hashWord: already-split buckets use the larger table. */
92 if (bucket < table->split) {
93 /* Mod the size of the expanded hash table (also a power of 2) */
94 bucket = h & table->mask2;
/* Key equality for word keys: nonzero iff the words are identical. */
101 compareWord(StgWord key1, StgWord key2)
103 return (key1 == key2);
/* Key equality for string keys: the StgWords actually hold char
 * pointers, so compare the pointed-to strings, not the pointers. */
107 compareStr(StgWord key1, StgWord key2)
109 return (strcmp((char *)key1, (char *)key2) == 0);
113 /* -----------------------------------------------------------------------------
114 * Allocate a new segment of the dynamically growing hash table.
115 * -------------------------------------------------------------------------- */
118 allocSegment(HashTable *table, int segment)
/* One segment = HSEGSIZE bucket headers.  NOTE(review): the trailing
 * argument(s) of stgMallocBytes and the closing brace are elided
 * from this listing. */
120 table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),
125 /* -----------------------------------------------------------------------------
126 * Expand the larger hash table by one bucket, and split one bucket
127 * from the smaller table into two parts. Only the bucket referenced
128 * by @table->split@ is affected by the expansion.
129 * -------------------------------------------------------------------------- */
132 expand(HashTable *table)
/* Refuse to grow past the directory's capacity; max + split is the
 * current number of buckets. */
143 if (table->split + table->max >= HDIRSIZE * HSEGSIZE)
144 /* Wow! That's big. Too big, so don't expand. */
147 /* Calculate indices of bucket to split */
148 oldsegment = table->split / HSEGSIZE;
149 oldindex = table->split % HSEGSIZE;
/* The new bucket is the image of 'split' in the doubled table. */
151 newbucket = table->max + table->split;
153 /* And the indices of the new bucket */
154 newsegment = newbucket / HSEGSIZE;
155 newindex = newbucket % HSEGSIZE;
/* NOTE(review): presumably this allocation is guarded so a segment
 * is only allocated when the new bucket is the first of its segment;
 * the guard is elided from this listing — confirm in the full file. */
158 allocSegment(table, newsegment);
/* Once every bucket of the smaller table has been split, the table
 * has doubled: promote mask2 to mask1 and widen mask2 by one bit.
 * (The reset of 'split' and doubling of 'max' are elided.) */
160 if (++table->split == table->max) {
163 table->mask1 = table->mask2;
164 table->mask2 = table->mask2 << 1 | 1;
168 /* Split the bucket, paying no attention to the original order */
/* Re-hash each cell of the old chain: cells now hashing to the new
 * bucket move to 'new', the rest stay on 'old' (the list-building
 * statements are elided from this listing). */
171 for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
173 if (table->hash(table, hl->key) == newbucket) {
181 table->dir[oldsegment][oldindex] = old;
182 table->dir[newsegment][newindex] = new;
/* Look up 'key' in the table.  NOTE(review): the return statements
 * are elided from this listing — presumably the matching cell's data
 * on success, NULL otherwise; confirm in the full file. */
188 lookupHashTable(HashTable *table, StgWord key)
195 bucket = table->hash(table, key);
196 segment = bucket / HSEGSIZE;
197 index = bucket % HSEGSIZE;
/* Walk this bucket's separate chain looking for an equal key. */
199 for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
200 if (table->compare(hl->key, key))
207 /* -----------------------------------------------------------------------------
208 * We allocate the hashlist cells in large chunks to cut down on malloc
209 * overhead. Although we keep a free list of hashlist cells, we make
210 * no effort to actually return the space to the malloc arena.
211 * -------------------------------------------------------------------------- */
/* Free list of HashList cells available for reuse. */
213 static HashList *freeList = NULL;
/* Record of each malloc'd chunk of HashList cells, kept so the
 * chunk-freeing loop at the end of the file can release them. */
215 static struct chunkList {
217 struct chunkList *next;
/* NOTE(review): the allocHashList signature is elided from this
 * listing; these lines are its body. */
224 struct chunkList *cl;
/* Fast path: take a cell from the free list (the unlink is elided). */
226 if ((hl = freeList) != NULL) {
/* Slow path: malloc a chunk of HCHUNK cells, plus a chunkList record
 * to remember the chunk for later freeing. */
229 hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");
230 cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");
/* Thread the chunk's remaining cells into the free list (the
 * initialisation of freeList and the link statements are elided). */
236 for (p = freeList; p < hl + HCHUNK - 1; p++)
/* Return a cell to the free list for reuse (body elided from this
 * listing). */
244 freeHashList(HashList *hl)
/* Insert a (key, data) pair.  Duplicate keys are deliberately
 * permitted (see the disabled assert below). */
251 insertHashTable(HashTable *table, StgWord key, void *data)
258 // Disable this assert; sometimes it's useful to be able to
259 // overwrite entries in the hash table.
260 // ASSERT(lookupHashTable(table, key) == NULL);
262 /* When the average load gets too high, we expand the table */
/* Keeps the average chain length at most HLOAD. */
263 if (++table->kcount >= HLOAD * table->bcount)
266 bucket = table->hash(table, key);
267 segment = bucket / HSEGSIZE;
268 index = bucket % HSEGSIZE;
270 hl = allocHashList();
/* Fill in the cell (key/data assignments elided) and push it onto
 * the head of the bucket's chain. */
274 hl->next = table->dir[segment][index];
275 table->dir[segment][index] = hl;
/* Remove a pair matching 'key'; when 'data' is non-NULL the entry's
 * data must match too (disambiguates duplicate keys).  NOTE(review):
 * the return statements, kcount decrement, freeHashList call and the
 * advancing of 'prev' are elided from this listing. */
280 removeHashTable(HashTable *table, StgWord key, void *data)
286 HashList *prev = NULL;
288 bucket = table->hash(table, key);
289 segment = bucket / HSEGSIZE;
290 index = bucket % HSEGSIZE;
292 for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
293 if (table->compare(hl->key,key) && (data == NULL || hl->data == data)) {
/* Unlink the cell: head-of-chain vs. interior cases. */
295 table->dir[segment][index] = hl->next;
297 prev->next = hl->next;
/* Not found: only legitimate when matching on data as well — a
 * key-only removal of a missing key indicates a caller bug. */
306 ASSERT(data == NULL);
310 /* -----------------------------------------------------------------------------
311 * When we free a hash table, we are also good enough to free the
312 * data part of each (key, data) pair, as long as our caller can tell
314 * -------------------------------------------------------------------------- */
/* Free the whole table.  'freeDataFun' (may be NULL) is applied to
 * each entry's data before the entry is released. */
317 freeHashTable(HashTable *table, void (*freeDataFun)(void *) )
324 /* The last bucket with something in it is table->max + table->split - 1 */
325 segment = (table->max + table->split - 1) / HSEGSIZE;
326 index = (table->max + table->split - 1) % HSEGSIZE;
/* Walk buckets from the last down to bucket 0, freeing each chain
 * and then each segment. */
328 while (segment >= 0) {
330 for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
332 if (freeDataFun != NULL)
333 (*freeDataFun)(hl->data);
338 stgFree(table->dir[segment]);
/* Wrap to the last bucket of the previous segment.  NOTE(review):
 * the segment decrement and the final free of the table struct are
 * elided from this listing. */
340 index = HSEGSIZE - 1;
345 /* -----------------------------------------------------------------------------
346 * When we initialize a hash table, we set up the first segment as well,
347 * initializing all of the first segment's hash buckets to NULL.
348 * -------------------------------------------------------------------------- */
/* Build a fresh table using the given hash and comparison functions. */
351 allocHashTable_(HashFunction *hash, CompareFunction *compare)
356 table = stgMallocBytes(sizeof(HashTable),"allocHashTable");
/* Start with a single segment of buckets (cleared to NULL below). */
358 allocSegment(table, 0);
360 for (hb = table->dir[0]; hb < table->dir[0] + HSEGSIZE; hb++)
/* Initially the "smaller" table is the whole first segment and no
 * buckets have been split (initialisation of 'split', 'kcount' and
 * 'hash' is elided from this listing). */
364 table->max = HSEGSIZE;
365 table->mask1 = HSEGSIZE - 1;
366 table->mask2 = 2 * HSEGSIZE - 1;
368 table->bcount = HSEGSIZE;
370 table->compare = compare;
/* Standard word-keyed table (the enclosing allocHashTable signature
 * is elided from this listing). */
378 return allocHashTable_(hashWord, compareWord);
/* String-keyed table.  NOTE(review): the casts adapt the char*-taking
 * hashStr/compareStr to the StgWord-taking typedefs; calling through
 * an incompatible function-pointer type is technically undefined
 * behaviour, though it works on the platforms GHC targets. */
382 allocStrHashTable(void)
384 return allocHashTable_((HashFunction *)hashStr,
385 (CompareFunction *)compareStr);
/* Release every chunk recorded on the 'chunks' list at shutdown.
 * NOTE(review): the enclosing function header and the loop body
 * (advancing 'chunks' and freeing 'cl') are elided from this
 * listing. */
391 struct chunkList *cl;
393 while ((cl = chunks) != NULL) {