{-# OPTIONS -fno-implicit-prelude #-}
-----------------------------------------------------------------------------
-- |
-- Module      :  Data.HashTable
-- Copyright   :  (c) The University of Glasgow 2003
-- License     :  BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer  :  libraries@haskell.org
-- Stability   :  provisional
-- Portability :  portable
--
-- An implementation of extensible hash tables, as described in
-- Per-Ake Larson, /Dynamic Hash Tables/, CACM 31(4), April 1988,
-- pp. 446--457.  The implementation is also derived from the one
-- in GHC's runtime system (@ghc\/rts\/Hash.{c,h}@).
--
-----------------------------------------------------------------------------
module Data.HashTable (
        -- * Basic hash table operations
        HashTable, new, insert, delete, lookup,
        -- * Converting to and from lists
        fromList, toList,
        -- * Hash functions
        -- $hash_functions
        hashInt, hashString, prime,
        -- * Diagnostics
        longestChain
 ) where
-- This module is imported by Data.Dynamic, which is pretty low down in the
-- module hierarchy, so don't import "high-level" modules
#ifdef __GLASGOW_HASKELL__
import GHC.Base
#else
import Prelude          hiding ( lookup )
#endif

import Data.Tuple       ( fst )
import Data.Bits
import Data.Maybe
import Data.List        ( maximumBy, filter, length, concat, foldl )
import Data.Int         ( Int32 )
#if defined(__GLASGOW_HASKELL__)
import GHC.Num
import GHC.Real         ( Integral(..), fromIntegral )

import GHC.IOBase       ( IO, IOArray, newIOArray, readIOArray, writeIOArray,
                          unsafeReadIOArray, unsafeWriteIOArray,
                          IORef, newIORef, readIORef, writeIORef )
import GHC.Err          ( undefined )
#else
import Data.Char        ( ord )
import Data.IORef       ( IORef, newIORef, readIORef, writeIORef )
# if defined(__HUGS__)
import Hugs.IOArray     ( IOArray, newIOArray, readIOArray, writeIOArray,
                          unsafeReadIOArray, unsafeWriteIOArray )
# elif defined(__NHC__)
import NHC.IOExtras     ( IOArray, newIOArray, readIOArray, writeIOArray )
# endif
#endif
import Control.Monad    ( when, mapM, sequence_ )
-----------------------------------------------------------------------
myReadArray  :: IOArray Int32 a -> Int32 -> IO a
myWriteArray :: IOArray Int32 a -> Int32 -> a -> IO ()
#if defined(DEBUG) || defined(__NHC__)
myReadArray  = readIOArray
myWriteArray = writeIOArray
#else
myReadArray  arr i   = unsafeReadIOArray  arr (fromIntegral i)
myWriteArray arr i x = unsafeWriteIOArray arr (fromIntegral i) x
#endif
-- | A hash table mapping keys of type @key@ to values of type @val@.
--
-- The implementation will grow the hash table as necessary, trying to
-- maintain a reasonable average load per bucket in the table.
--
newtype HashTable key val = HashTable (IORef (HT key val))
-- TODO: the IORef should really be an MVar.
data HT key val
  = HT {
        split      :: !Int32,   -- Next bucket to split when expanding
        max_bucket :: !Int32,   -- Max bucket of smaller table
        mask1      :: !Int32,   -- Mask for doing the mod of h_1 (smaller table)
        mask2      :: !Int32,   -- Mask for doing the mod of h_2 (larger table)
        kcount     :: !Int32,   -- Number of keys
        bcount     :: !Int32,   -- Number of buckets
        dir        :: !(IOArray Int32 (IOArray Int32 [(key,val)])),
        hash_fn    :: key -> Int32,
        cmp        :: key -> key -> Bool
   }
{-
ALTERNATIVE IMPLEMENTATION:

This works out slightly slower, because there's a tradeoff between
allocating a complete new HT structure each time a modification is
made (in the version above), and allocating new Int32s each time one
of them is modified, as below.  Using FastMutInt instead of IORef
Int32 helps, but yields an implementation which has about the same
performance as the version above (and is more complex).

data HashTable key val
  = HashTable {
        split      :: !(IORef Int32),   -- Next bucket to split when expanding
        max_bucket :: !(IORef Int32),   -- Max bucket of smaller table
        mask1      :: !(IORef Int32),   -- Mask for doing the mod of h_1 (smaller table)
        mask2      :: !(IORef Int32),   -- Mask for doing the mod of h_2 (larger table)
        kcount     :: !(IORef Int32),   -- Number of keys
        bcount     :: !(IORef Int32),   -- Number of buckets
        dir        :: !(IOArray Int32 (IOArray Int32 [(key,val)])),
        hash_fn    :: key -> Int32,
        cmp        :: key -> key -> Bool
   }
-}
-- -----------------------------------------------------------------------------
-- Sample hash functions
-- $hash_functions
--
-- This implementation of hash tables uses the low-order /n/ bits of the hash
-- value for a key, where /n/ varies as the hash table grows.  A good hash
-- function therefore will give an even distribution regardless of /n/.
--
-- If your keyspace is integrals such that the low-order bits between
-- keys are highly variable, then you could get away with using 'id'
-- as the hash function.
--
-- We provide some sample hash functions for 'Int' and 'String' below.
-- | A sample hash function for 'Int', implemented as simply @(x `rem` P)@
-- where P is a suitable prime (currently 1500007).  Should give
-- reasonable results for most distributions of 'Int' values, except
-- when the keys are all multiples of the prime!
--
hashInt :: Int -> Int32
hashInt = (`rem` prime) . fromIntegral
-- | A sample hash function for 'String's.  The implementation is:
--
-- >    hashString = fromIntegral . foldl f 0
-- >      where f m c = ord c + (m * 128) `rem` fromIntegral prime
--
-- which seems to give reasonable results.
--
hashString :: String -> Int32
hashString = fromIntegral . foldl f 0
  where f m c = ord c + (m * 128) `rem` fromIntegral prime
-- | A prime larger than the maximum hash table size
prime :: Int32
prime = 1500007
-- -----------------------------------------------------------------------------
-- Parameters
sEGMENT_SIZE  = 1024  :: Int32  -- Size of a single hash table segment
sEGMENT_SHIFT = 10    :: Int    -- derived
sEGMENT_MASK  = 0x3ff :: Int32  -- derived

dIR_SIZE = 1024 :: Int32  -- Size of the segment directory
        -- Maximum hash table size is sEGMENT_SIZE * dIR_SIZE

hLOAD = 4 :: Int32  -- Maximum average load of a single hash bucket
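
-- The "derived" constants above follow from sEGMENT_SIZE: 2^10 = 1024, so
-- sEGMENT_SHIFT = 10 and sEGMENT_MASK = 0x3ff = 1023.  The largest possible
-- table is therefore sEGMENT_SIZE * dIR_SIZE = 1024 * 1024 = 1048576 buckets,
-- which is why 'prime' (1500007) is safely larger than any table size.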
-- -----------------------------------------------------------------------------
-- Creating a new hash table

-- | Creates a new hash table.  The following property should hold for the @eq@
-- and @hash@ functions passed to 'new':
--
-- >   eq A B  =>  hash A == hash B
--
new
  :: (key -> key -> Bool)    -- ^ @eq@: An equality comparison on keys
  -> (key -> Int32)          -- ^ @hash@: A hash function on keys
  -> IO (HashTable key val)  -- ^ Returns: an empty hash table
new cmp hash_fn = do
  -- make a new hash table with a single, empty, segment
  dir     <- newIOArray (0,dIR_SIZE) undefined
  segment <- newIOArray (0,sEGMENT_SIZE-1) []
  myWriteArray dir 0 segment

  let
    split  = 0
    max    = sEGMENT_SIZE
    mask1  = (sEGMENT_SIZE - 1)
    mask2  = (2 * sEGMENT_SIZE - 1)
    kcount = 0
    bcount = sEGMENT_SIZE

    ht = HT { dir=dir, split=split, max_bucket=max, mask1=mask1, mask2=mask2,
              kcount=kcount, bcount=bcount, hash_fn=hash_fn, cmp=cmp
            }

  table <- newIORef ht
  return (HashTable table)
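
-- A rough usage sketch (not part of the library): build a table keyed by
-- String, insert one binding, and look it up.  The names @example@, @table@,
-- @"foo"@ and @42@ are illustrative only.
--
-- > example :: IO (Maybe Int)
-- > example = do
-- >   table <- new (==) hashString
-- >   insert table "foo" (42 :: Int)
-- >   lookup table "foo"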
-- -----------------------------------------------------------------------------
-- Inserting a key\/value pair into the hash table

-- | Inserts a key\/value mapping into the hash table.
insert :: HashTable key val -> key -> val -> IO ()

insert (HashTable ref) key val = do
  table@HT{ kcount=k, bcount=b, dir=dir } <- readIORef ref
  let table1 = table{ kcount = k+1 }
  table2 <-
        if (k > hLOAD * b)
           then expandHashTable table1
           else return table1
  writeIORef ref table2
  (segment_index,segment_offset) <- tableLocation table2 key
  segment <- myReadArray dir segment_index
  bucket  <- myReadArray segment segment_offset
  myWriteArray segment segment_offset ((key,val):bucket)
  return ()
bucketIndex :: HT key val -> key -> IO Int32
bucketIndex HT{ hash_fn=hash_fn,
                split=split,
                mask1=mask1,
                mask2=mask2 } key = do
  let
    h = fromIntegral (hash_fn key)
    small_bucket = h .&. mask1
    large_bucket = h .&. mask2
  --
  if small_bucket < split
        then return large_bucket
        else return small_bucket
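
-- Worked example of the bucket choice above (the numbers are illustrative):
-- with mask1 = 1023 and mask2 = 2047, a key hashing to h = 3075 gives
-- small_bucket = 3075 .&. 1023 = 3 and large_bucket = 3075 .&. 2047 = 1027.
-- If split > 3 then bucket 3 has already been split, so the key lives in
-- bucket 1027 of the larger table; otherwise it is still in bucket 3.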
tableLocation :: HT key val -> key -> IO (Int32,Int32)
tableLocation table key = do
  bucket_index <- bucketIndex table key
  let
    segment_index  = bucket_index `shiftR` sEGMENT_SHIFT
    segment_offset = bucket_index .&. sEGMENT_MASK
  --
  return (segment_index,segment_offset)
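
-- For example, bucket_index 2050 decomposes as 2050 `shiftR` 10 = 2 (the
-- third segment) and 2050 .&. 0x3ff = 2 (the third slot in that segment).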
expandHashTable :: HT key val -> IO (HT key val)
expandHashTable
      table@HT{ dir=dir,
                split=split,
                max_bucket=max,
                mask2=mask2 } = do
  let
    oldsegment = split `shiftR` sEGMENT_SHIFT
    oldindex   = split .&. sEGMENT_MASK

    newbucket  = max + split
    newsegment = newbucket `shiftR` sEGMENT_SHIFT
    newindex   = newbucket .&. sEGMENT_MASK
  --
  when (newindex == 0) $
        do segment <- newIOArray (0,sEGMENT_SIZE-1) []
           myWriteArray dir newsegment segment
  --
  let table' =
        if (split+1) < max
            then table{ split = split+1 }
                -- we've expanded all the buckets in this table, so start from
                -- the beginning again.
            else table{ split = 0,
                        max_bucket = max * 2,
                        mask1 = mask2,
                        mask2 = mask2 `shiftL` 1 .|. 1 }
  let
    split_bucket old new [] = do
        segment <- myReadArray dir oldsegment
        myWriteArray segment oldindex old
        segment <- myReadArray dir newsegment
        myWriteArray segment newindex new
    split_bucket old new ((k,v):xs) = do
        h <- bucketIndex table' k
        if h == newbucket
                then split_bucket old ((k,v):new) xs
                else split_bucket ((k,v):old) new xs
  --
  segment <- myReadArray dir oldsegment
  bucket  <- myReadArray segment oldindex
  split_bucket [] [] bucket
  return table'
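
-- One expansion step, concretely (illustrative numbers): if max_bucket = 1024
-- and split = 5, the keys in bucket 5 are rehashed with the larger mask and
-- redistributed between bucket 5 and the new bucket 1029 (= max + split);
-- split then advances to 6.  Only after all 1024 original buckets have been
-- split does the table double its masks and start again from split = 0.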
-- -----------------------------------------------------------------------------
-- Deleting a mapping from the hash table

-- | Remove an entry from the hash table.
delete :: HashTable key val -> key -> IO ()

delete (HashTable ref) key = do
  table@HT{ dir=dir, cmp=cmp } <- readIORef ref
  (segment_index,segment_offset) <- tableLocation table key
  segment <- myReadArray dir segment_index
  bucket  <- myReadArray segment segment_offset
  myWriteArray segment segment_offset (filter (not.(key `cmp`).fst) bucket)
-- -----------------------------------------------------------------------------
-- Looking up an entry in the hash table

-- | Looks up the value of a key in the hash table.
lookup :: HashTable key val -> key -> IO (Maybe val)

lookup (HashTable ref) key = do
  table@HT{ dir=dir, cmp=cmp } <- readIORef ref
  (segment_index,segment_offset) <- tableLocation table key
  segment <- myReadArray dir segment_index
  bucket  <- myReadArray segment segment_offset
  case [ val | (key',val) <- bucket, cmp key key' ] of
        []    -> return Nothing
        (v:_) -> return (Just v)
-- -----------------------------------------------------------------------------
-- Converting to/from lists

-- | Convert a list of key\/value pairs into a hash table.  Equality on keys
-- is taken from the Eq instance for the key type.
--
fromList :: Eq key => (key -> Int32) -> [(key,val)] -> IO (HashTable key val)
fromList hash_fn list = do
  table <- new (==) hash_fn
  sequence_ [ insert table k v | (k,v) <- list ]
  return table
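
-- A rough round-trip sketch (illustrative only; the order of pairs returned
-- by 'toList' is not specified):
--
-- > roundTrip :: IO [(String,Int)]
-- > roundTrip = do
-- >   table <- fromList hashString [("one",1),("two",2)]
-- >   toList table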
-- | Converts a hash table to a list of key\/value pairs.
--
toList :: HashTable key val -> IO [(key,val)]
toList (HashTable ref) = do
  HT{ dir=dir, max_bucket=max, split=split } <- readIORef ref
  --
  let
    max_segment = (max + split - 1) `quot` sEGMENT_SIZE
  --
  segments <- mapM (segmentContents dir) [0 .. max_segment]
  return (concat segments)
 where
   segmentContents dir seg_index = do
     segment <- myReadArray dir seg_index
     bs <- mapM (myReadArray segment) [0 .. sEGMENT_SIZE-1]
     return (concat bs)
-- -----------------------------------------------------------------------------
-- Diagnostics

-- | This function is useful for determining whether your hash function
-- is working well for your data set.  It returns the longest chain
-- of key\/value pairs in the hash table for which all the keys hash to
-- the same bucket.  If this chain is particularly long (say, longer
-- than 10 elements), then it might be a good idea to try a different
-- hash function.
--
longestChain :: HashTable key val -> IO [(key,val)]
longestChain (HashTable ref) = do
  HT{ dir=dir, max_bucket=max, split=split } <- readIORef ref
  --
  let
    max_segment = (max + split - 1) `quot` sEGMENT_SIZE
  --
  --trace ("maxChainLength: max = " ++ show max ++ ", split = " ++ show split ++ ", max_segment = " ++ show max_segment) $ do
  segments <- mapM (segmentMaxChainLength dir) [0 .. max_segment]
  return (maximumBy lengthCmp segments)
 where
   segmentMaxChainLength dir seg_index = do
     segment <- myReadArray dir seg_index
     bs <- mapM (myReadArray segment) [0 .. sEGMENT_SIZE-1]
     return (maximumBy lengthCmp bs)

   lengthCmp x y = length x `compare` length y
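
-- A rough diagnostic sketch (illustrative only; the name @checkTable@ and the
-- threshold of 10 are not part of the library, and client code is assumed to
-- have the Prelude's @putStrLn@ in scope):
--
-- > checkTable :: HashTable key val -> IO ()
-- > checkTable table = do
-- >   chain <- longestChain table
-- >   when (length chain > 10) $
-- >     putStrLn "longest chain exceeds 10; consider a different hash function"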