X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FHash.c;h=d3e4cf47e985acf5c3f7bc352e4a8d791db18530;hb=4bd153f2dd93745183584054e17c6ff169691a49;hp=96713d827ba4f2ee56d67366f27b63cf073a6321;hpb=f3bed25cb37981ef391f750cae58280e71cd80bc;p=ghc-hetmet.git

diff --git a/ghc/rts/Hash.c b/ghc/rts/Hash.c
index 96713d8..d3e4cf4 100644
--- a/ghc/rts/Hash.c
+++ b/ghc/rts/Hash.c
@@ -1,5 +1,4 @@
 /*-----------------------------------------------------------------------------
- * $Id: Hash.c,v 1.1 1999/01/27 12:11:25 simonm Exp $
  *
  * (c) The AQUA Project, Glasgow University, 1995-1998
  * (c) The GHC Team, 1999
@@ -9,10 +8,14 @@
  * pp. 446 -- 457.
  * -------------------------------------------------------------------------- */
 
+#include "PosixSource.h"
 #include "Rts.h"
 #include "Hash.h"
 #include "RtsUtils.h"
 
+#include <stdlib.h>
+#include <string.h>
+
 #define HSEGSIZE    1024    /* Size of a single hash table segment */
                             /* Also the minimum size of a hash table */
 #define HDIRSIZE    1024    /* Size of the segment directory */
@@ -32,6 +35,9 @@ struct hashlist {
 
 typedef struct hashlist HashList;
 
+typedef int HashFunction(HashTable *table, StgWord key);
+typedef int CompareFunction(StgWord key1, StgWord key2);
+
 struct hashtable {
     int split;                /* Next bucket to split when expanding */
     int max;                  /* Max bucket of smaller table */
@@ -40,6 +46,8 @@ struct hashtable {
    int kcount;               /* Number of keys */
    int bcount;               /* Number of buckets */
    HashList **dir[HDIRSIZE]; /* Directory of segments */
+   HashFunction *hash;       /* hash function */
+   CompareFunction *compare; /* key comparison function */
 };
 
 /* -----------------------------------------------------------------------------
@@ -48,7 +56,7 @@ struct hashtable {
  * -------------------------------------------------------------------------- */
 
 static int
-hash(HashTable *table, W_ key)
+hashWord(HashTable *table, StgWord key)
 {
     int bucket;
 
@@ -65,6 +73,43 @@
     return bucket;
 }
 
+static int
+hashStr(HashTable *table, char *key)
+{
+    int h, bucket;
+    char *s;
+
+    s = key;
+    for (h=0; *s; s++) {
+        h *= 128;
+        h += *s;
+        h = h % 1048583;        /* some random large prime */
+    }
+
+    /* Mod the size of the hash table (a power of 2) */
+    bucket = h & table->mask1;
+
+    if (bucket < table->split) {
+        /* Mod the size of the expanded hash table (also a power of 2) */
+        bucket = h & table->mask2;
+    }
+
+    return bucket;
+}
+
+static int
+compareWord(StgWord key1, StgWord key2)
+{
+    return (key1 == key2);
+}
+
+static int
+compareStr(StgWord key1, StgWord key2)
+{
+    return (strcmp((char *)key1, (char *)key2) == 0);
+}
+
+
 /* -----------------------------------------------------------------------------
  * Allocate a new segment of the dynamically growing hash table.
  * -------------------------------------------------------------------------- */
@@ -125,7 +170,7 @@ expand(HashTable *table)
     old = new = NULL;
     for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
         next = hl->next;
-        if (hash(table, hl->key) == newbucket) {
+        if (table->hash(table, hl->key) == newbucket) {
             hl->next = new;
             new = hl;
         } else {
@@ -147,12 +192,12 @@ lookupHashTable(HashTable *table, StgWord key)
     int index;
     HashList *hl;
 
-    bucket = hash(table, key);
+    bucket = table->hash(table, key);
     segment = bucket / HSEGSIZE;
     index = bucket % HSEGSIZE;
 
     for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
-        if (hl->key == key)
+        if (table->compare(hl->key, key))
             return hl->data;
 
     /* It's not there */
@@ -202,12 +247,12 @@ insertHashTable(HashTable *table, StgWord key, void *data)
 
     /* We want no duplicates */
     ASSERT(lookupHashTable(table, key) == NULL);
-    
+
     /* When the average load gets too high, we expand the table */
     if (++table->kcount >= HLOAD * table->bcount)
         expand(table);
 
-    bucket = hash(table, key);
+    bucket = table->hash(table, key);
     segment = bucket / HSEGSIZE;
     index = bucket % HSEGSIZE;
 
@@ -229,16 +274,17 @@ removeHashTable(HashTable *table, StgWord key, void *data)
     HashList *hl;
     HashList *prev = NULL;
 
-    bucket = hash(table, key);
+    bucket = table->hash(table, key);
     segment = bucket / HSEGSIZE;
     index = bucket % HSEGSIZE;
 
     for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
-        if (hl->key == key && (data == NULL || hl->data == data)) {
+        if (table->compare(hl->key,key) && (data == NULL || hl->data == data)) {
             if (prev == NULL)
                 table->dir[segment][index] = hl->next;
             else
                 prev->next = hl->next;
+            freeHashList(hl);
             table->kcount--;
             return hl->data;
         }
@@ -278,11 +324,11 @@ freeHashTable(HashTable *table, void (*freeDataFun)(void *) )
            }
            index--;
        }
-       free(table->dir[segment]);
+       stgFree(table->dir[segment]);
        segment--;
        index = HSEGSIZE - 1;
     }
-    free(table);
+    stgFree(table);
 }
 
 /* -----------------------------------------------------------------------------
@@ -290,8 +336,8 @@ freeHashTable(HashTable *table, void (*freeDataFun)(void *) )
  * initializing all of the first segment's hash buckets to NULL.
  * -------------------------------------------------------------------------- */
 
-HashTable *
-allocHashTable(void)
+static HashTable *
+allocHashTable_(HashFunction *hash, CompareFunction *compare)
 {
     HashTable *table;
     HashList **hb;
@@ -309,6 +355,21 @@
     table->mask2 = 2 * HSEGSIZE - 1;
     table->kcount = 0;
     table->bcount = HSEGSIZE;
+    table->hash = hash;
+    table->compare = compare;
 
     return table;
 }
+
+HashTable *
+allocHashTable(void)
+{
+    return allocHashTable_(hashWord, compareWord);
+}
+
+HashTable *
+allocStrHashTable(void)
+{
+    return allocHashTable_((HashFunction *)hashStr,
+                           (CompareFunction *)compareStr);
+}
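
Note (not part of the patch): a minimal usage sketch of the string-keyed interface this diff introduces. It assumes allocStrHashTable(), insertHashTable(), lookupHashTable(), removeHashTable() and freeHashTable() are declared in Hash.h as shown above, and that string keys are passed by casting the char* to StgWord, as compareStr()'s casts suggest. The key strings, data values and the helper function name are purely illustrative.

#include "Rts.h"
#include "Hash.h"

static void exampleStrTable(void)
{
    HashTable *tbl;
    void *val;

    tbl = allocStrHashTable();       /* buckets chosen via hashStr/compareStr */

    /* compareStr() uses strcmp(), so any pointer to an equal string
       finds the entry again, not just the original pointer. */
    insertHashTable(tbl, (StgWord)"getenv", (void *)1);

    val = lookupHashTable(tbl, (StgWord)"getenv");   /* yields (void *)1 */
    (void)val;

    removeHashTable(tbl, (StgWord)"getenv", NULL);   /* NULL data: match on key only */
    freeHashTable(tbl, NULL);   /* NULL freeDataFun: assumed to leave data untouched */
}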