{-# OPTIONS -fno-implicit-prelude #-}
-----------------------------------------------------------------------------
-- |
-- Module      :  Data.HashTable
-- Copyright   :  (c) The University of Glasgow 2003
-- License     :  BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer  :  libraries@haskell.org
-- Stability   :  provisional
-- Portability :  portable
--
-- An implementation of extensible hash tables, as described in
-- Per-Ake Larson, /Dynamic Hash Tables/, CACM 31(4), April 1988,
-- pp. 446--457.  The implementation is also derived from the one
-- in GHC's runtime system (@ghc\/rts\/Hash.{c,h}@).
--
-----------------------------------------------------------------------------
module Data.HashTable (
        -- * Basic hash table operations
        HashTable, new, insert, delete, lookup,
        -- * Converting to and from lists
        fromList, toList,
        -- * Hash functions
        hashInt, hashString, prime,
        -- * Diagnostics
        longestChain
 ) where

-- This module is imported by Data.Dynamic, which is pretty low down in the
-- module hierarchy, so don't import "high-level" modules
#ifdef __GLASGOW_HASKELL__
import GHC.Base
#else
import Prelude hiding ( lookup )
#endif

import Data.Tuple ( fst )
import Data.Bits
import Data.Maybe
import Data.List ( maximumBy, filter, length, concat )
import Data.Int ( Int32 )

#if defined(__GLASGOW_HASKELL__)
import GHC.Num
import GHC.Real ( Integral(..), fromIntegral )

import GHC.IOBase ( IO, IOArray, newIOArray, readIOArray, writeIOArray,
                    unsafeReadIOArray, unsafeWriteIOArray,
                    IORef, newIORef, readIORef, writeIORef )
import GHC.Err ( undefined )
#else
import Data.Char ( ord )
import Data.IORef ( IORef, newIORef, readIORef, writeIORef )
# if defined(__HUGS__)
import Hugs.IOArray ( IOArray, newIOArray, readIOArray, writeIOArray,
                      unsafeReadIOArray, unsafeWriteIOArray )
# elif defined(__NHC__)
import NHC.IOExtras ( IOArray, newIOArray, readIOArray, writeIOArray )
# endif
#endif

import Control.Monad ( when, mapM, sequence_ )
-----------------------------------------------------------------------
-- Array access: bounds-checked in debug mode (and for nhc98, which has
-- no unsafe array operations); unchecked otherwise.

myReadArray  :: IOArray Int32 a -> Int32 -> IO a
myWriteArray :: IOArray Int32 a -> Int32 -> a -> IO ()
#if defined(DEBUG) || defined(__NHC__)
myReadArray  = readIOArray
myWriteArray = writeIOArray
#else
myReadArray arr i = unsafeReadIOArray arr (fromIntegral i)
myWriteArray arr i x = unsafeWriteIOArray arr (fromIntegral i) x
#endif
-- | A hash table mapping keys of type @key@ to values of type @val@.
--
-- The implementation will grow the hash table as necessary, trying to
-- maintain a reasonable average load per bucket in the table.
newtype HashTable key val = HashTable (IORef (HT key val))
-- TODO: the IORef should really be an MVar.
data HT key val
  = HT {
        split      :: !Int32, -- Next bucket to split when expanding
        max_bucket :: !Int32, -- Max bucket of smaller table
        mask1      :: !Int32, -- Mask for doing the mod of h_1 (smaller table)
        mask2      :: !Int32, -- Mask for doing the mod of h_2 (larger table)
        kcount     :: !Int32, -- Number of keys
        bcount     :: !Int32, -- Number of buckets
        dir        :: !(IOArray Int32 (IOArray Int32 [(key,val)])),
        hash_fn    :: key -> Int32,
        cmp        :: key -> key -> Bool
    }
{-
ALTERNATIVE IMPLEMENTATION:

This works out slightly slower, because there's a tradeoff between
allocating a complete new HT structure each time a modification is
made (in the version above), and allocating new Int32s each time one
of them is modified, as below.  Using FastMutInt instead of IORef
Int32 helps, but yields an implementation which has about the same
performance as the version above (and is more complex).

data HashTable key val
  = HashTable {
        split      :: !(IORef Int32), -- Next bucket to split when expanding
        max_bucket :: !(IORef Int32), -- Max bucket of smaller table
        mask1      :: !(IORef Int32), -- Mask for doing the mod of h_1 (smaller table)
        mask2      :: !(IORef Int32), -- Mask for doing the mod of h_2 (larger table)
        kcount     :: !(IORef Int32), -- Number of keys
        bcount     :: !(IORef Int32), -- Number of buckets
        dir        :: !(IOArray Int32 (IOArray Int32 [(key,val)])),
        hash_fn    :: key -> Int32,
        cmp        :: key -> key -> Bool
    }
-}
-- -----------------------------------------------------------------------------
-- Sample hash functions

-- This implementation of hash tables uses the low-order /n/ bits of the hash
-- value for a key, where /n/ varies as the hash table grows.  A good hash
-- function therefore will give an even distribution regardless of /n/.
--
-- If your keyspace is integrals such that the low-order bits between
-- keys are highly variable, then you could get away with using
-- 'fromIntegral' as the hash function.
--
-- We provide some sample hash functions for 'Int' and 'String' below.
-- | A sample hash function for 'Int', implemented as simply @(x `rem` P)@
-- where P is a suitable prime (currently 1500007).  Should give
-- reasonable results for most distributions of 'Int' values, except
-- when the keys are all multiples of the prime!
--
hashInt :: Int -> Int32
hashInt = (`rem` prime) . fromIntegral
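
-- For instance (illustrative values, easy to check by hand):
--
-- > hashInt 7       == 7
-- > hashInt 1500014 == 7     -- 1500014 `rem` 1500007
--
-- Negative keys hash to negative values; this is harmless, because the
-- table only ever uses the low-order bits of the hash (see bucketIndex).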
-- | A sample hash function for 'String's.  The implementation is:
--
-- >    hashString = fromIntegral . foldr f 0
-- >      where f c m = ord c + (m * 128) `rem` 1500007
--
-- which seems to give reasonable results.
--
hashString :: String -> Int32
hashString = fromIntegral . foldr f 0
  where f c m = ord c + (m * 128) `rem` fromIntegral prime
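
-- The fold consumes the string from the right; for example, the shape
-- of the computation (not a claim about the numeric result) is:
--
-- > hashString "ab" = fromIntegral (f 'a' (f 'b' 0))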
-- | A prime larger than the maximum hash table size
prime :: Int32
prime = 1500007
-- -----------------------------------------------------------------------------
-- Parameters

sEGMENT_SIZE  = 1024  :: Int32  -- Size of a single hash table segment
sEGMENT_SHIFT = 10    :: Int    -- derived
sEGMENT_MASK  = 0x3ff :: Int32  -- derived

dIR_SIZE = 1024 :: Int32  -- Size of the segment directory
        -- Maximum hash table size is sEGMENT_SIZE * dIR_SIZE

hLOAD = 4 :: Int32 -- Maximum average load of a single hash bucket
175 -- -----------------------------------------------------------------------------
176 -- Creating a new hash table
-- | Creates a new hash table.  The following property should hold for the @eq@
-- and @hash@ functions passed to 'new':
--
-- >   eq A B  =>  hash A == hash B
--
new
  :: (key -> key -> Bool)    -- ^ @eq@: An equality comparison on keys
  -> (key -> Int32)          -- ^ @hash@: A hash function on keys
  -> IO (HashTable key val)  -- ^ Returns: an empty hash table

new cmp hash_fn = do
  -- make a new hash table with a single, empty, segment
  dir     <- newIOArray (0,dIR_SIZE-1) undefined
  segment <- newIOArray (0,sEGMENT_SIZE-1) []
  myWriteArray dir 0 segment

  let
    split  = 0
    max    = sEGMENT_SIZE
    mask1  = (sEGMENT_SIZE - 1)
    mask2  = (2 * sEGMENT_SIZE - 1)
    kcount = 0
    bcount = sEGMENT_SIZE

    ht = HT { dir=dir, split=split, max_bucket=max, mask1=mask1, mask2=mask2,
              kcount=kcount, bcount=bcount, hash_fn=hash_fn, cmp=cmp
            }

  table <- newIORef ht
  return (HashTable table)
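
-- A minimal usage sketch (hypothetical client code, not part of this
-- module): create a table keyed by 'Int', add a binding, and query it.
--
-- > demo :: IO ()
-- > demo = do
-- >   t  <- new (==) hashInt
-- >   insert t 42 "the answer"
-- >   v  <- lookup t 42          -- Just "the answer"
-- >   delete t 42
-- >   v' <- lookup t 42          -- Nothing
-- >   print (v, v')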
-- -----------------------------------------------------------------------------
-- Inserting a key\/value pair into the hash table

-- | Inserts a key\/value mapping into the hash table.
insert :: HashTable key val -> key -> val -> IO ()

insert (HashTable ref) key val = do
  table@HT{ kcount=k, bcount=b, dir=dir } <- readIORef ref
  let table1 = table{ kcount = k+1 }
  table2 <-
        if k > hLOAD * b
           then expandHashTable table1
           else return table1
  writeIORef ref table2
  -- locate the bucket in the (possibly expanded) table
  (segment_index,segment_offset) <- tableLocation table2 key
  segment <- myReadArray dir segment_index
  bucket <- myReadArray segment segment_offset
  myWriteArray segment segment_offset ((key,val):bucket)
-- Computes the bucket for a key: take the hash modulo the smaller table
-- size, and if that bucket has already been split, use the larger table
-- size instead.
bucketIndex :: HT key val -> key -> IO Int32
bucketIndex HT{ hash_fn=hash_fn,
                split=split,
                mask1=mask1,
                mask2=mask2 } key = do
  let
    h = fromIntegral (hash_fn key)
    small_bucket = h .&. mask1
    large_bucket = h .&. mask2
  if small_bucket < split
        then return large_bucket
        else return small_bucket
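
-- Worked example (illustrative numbers, assuming the initial sizes set
-- up by 'new'): with mask1 = 1023, mask2 = 2047 and split = 2, a key
-- hashing to 1025 gives small_bucket = 1 and large_bucket = 1025.
-- Bucket 1 has already been split (1 < 2), so the key belongs in bucket
-- 1025.  A key hashing to 1027 gives small_bucket = 3, which has not
-- been split yet, so it stays in bucket 3.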
-- Finds the physical location of a bucket in the segment directory:
-- which segment it lives in, and its offset within that segment.
tableLocation :: HT key val -> key -> IO (Int32,Int32)
tableLocation table key = do
  bucket_index <- bucketIndex table key
  let
    segment_index  = bucket_index `shiftR` sEGMENT_SHIFT
    segment_offset = bucket_index .&. sEGMENT_MASK
  return (segment_index,segment_offset)
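
-- For example, with sEGMENT_SIZE = 1024, bucket 1025 lives in segment
-- 1025 `shiftR` 10 = 1, at offset 1025 .&. 0x3ff = 1.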
expandHashTable :: HT key val -> IO (HT key val)
expandHashTable
      table@HT{ dir=dir,
                split=split,
                max_bucket=max,
                bcount=bcount,
                mask2=mask2 } = do
  let
    oldsegment = split `shiftR` sEGMENT_SHIFT
    oldindex   = split .&. sEGMENT_MASK

    newbucket  = max + split
    newsegment = newbucket `shiftR` sEGMENT_SHIFT
    newindex   = newbucket .&. sEGMENT_MASK

  -- allocate a fresh segment the first time we need a bucket in it
  when (newindex == 0) $
        do segment <- newIOArray (0,sEGMENT_SIZE-1) []
           myWriteArray dir newsegment segment

  let
    -- register the newly created bucket, so the load check in 'insert'
    -- stays accurate
    table' =
        if split+1 < max
           then table{ split = split+1, bcount = bcount+1 }
           -- we've expanded all the buckets in this table, so start from
           -- the beginning again.
           else table{ split = 0,
                       max_bucket = max * 2,
                       bcount = bcount+1,
                       mask1 = mask2,
                       mask2 = mask2 `shiftL` 1 .|. 1 }

    split_bucket old new [] = do
        segment <- myReadArray dir oldsegment
        myWriteArray segment oldindex old
        segment <- myReadArray dir newsegment
        myWriteArray segment newindex new
    split_bucket old new ((k,v):xs) = do
        h <- bucketIndex table' k
        if h == newbucket
                then split_bucket old ((k,v):new) xs
                else split_bucket ((k,v):old) new xs

  segment <- myReadArray dir oldsegment
  bucket <- myReadArray segment oldindex
  split_bucket [] [] bucket
  return table'
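
-- One expansion step, concretely (illustrative numbers): with max = 1024
-- and split = 0, the new bucket is 1024; the entries of bucket 0 are
-- redistributed between buckets 0 and 1024 according to bit 10 of each
-- key's hash, and split advances to 1.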
-- -----------------------------------------------------------------------------
-- Deleting a mapping from the hash table

-- | Remove an entry from the hash table.
delete :: HashTable key val -> key -> IO ()

delete (HashTable ref) key = do
  table@HT{ dir=dir, cmp=cmp } <- readIORef ref
  (segment_index,segment_offset) <- tableLocation table key
  segment <- myReadArray dir segment_index
  bucket <- myReadArray segment segment_offset
  myWriteArray segment segment_offset (filter (not.(key `cmp`).fst) bucket)
-- -----------------------------------------------------------------------------
-- Looking up an entry in the hash table

-- | Looks up the value of a key in the hash table.
lookup :: HashTable key val -> key -> IO (Maybe val)

lookup (HashTable ref) key = do
  table@HT{ dir=dir, cmp=cmp } <- readIORef ref
  (segment_index,segment_offset) <- tableLocation table key
  segment <- myReadArray dir segment_index
  bucket <- myReadArray segment segment_offset
  case [ val | (key',val) <- bucket, cmp key key' ] of
        []    -> return Nothing
        (v:_) -> return (Just v)
-- -----------------------------------------------------------------------------
-- Converting to/from lists

-- | Convert a list of key\/value pairs into a hash table.  Equality on keys
-- is taken from the Eq instance for the key type.
--
fromList :: Eq key => (key -> Int32) -> [(key,val)] -> IO (HashTable key val)
fromList hash_fn list = do
  table <- new (==) hash_fn
  sequence_ [ insert table k v | (k,v) <- list ]
  return table
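
-- For example (hypothetical client code):
--
-- > t <- fromList hashString [("one",1), ("two",2)]
-- > lookup t "two"             -- Just 2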
-- | Converts a hash table to a list of key\/value pairs.
--
toList :: HashTable key val -> IO [(key,val)]
toList (HashTable ref) = do
  HT{ dir=dir, max_bucket=max, split=split } <- readIORef ref
  let
    max_segment = (max + split - 1) `quot` sEGMENT_SIZE
  segments <- mapM (segmentContents dir) [0 .. max_segment]
  return (concat segments)
 where
  segmentContents dir seg_index = do
    segment <- myReadArray dir seg_index
    bs <- mapM (myReadArray segment) [0 .. sEGMENT_SIZE-1]
    return (concat bs)
-- -----------------------------------------------------------------------------
-- Diagnostics

-- | This function is useful for determining whether your hash function
-- is working well for your data set.  It returns the longest chain
-- of key\/value pairs in the hash table for which all the keys hash to
-- the same bucket.  If this chain is particularly long (say, longer
-- than 10 elements), then it might be a good idea to try a different
-- hash function.
--
longestChain :: HashTable key val -> IO [(key,val)]
longestChain (HashTable ref) = do
  HT{ dir=dir, max_bucket=max, split=split } <- readIORef ref
  let
    max_segment = (max + split - 1) `quot` sEGMENT_SIZE
  segments <- mapM (segmentMaxChainLength dir) [0 .. max_segment]
  return (maximumBy lengthCmp segments)
 where
  segmentMaxChainLength dir seg_index = do
    segment <- myReadArray dir seg_index
    bs <- mapM (myReadArray segment) [0 .. sEGMENT_SIZE-1]
    return (maximumBy lengthCmp bs)

  lengthCmp x y = length x `compare` length y
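
-- A diagnostic sketch (hypothetical client code): report the worst-case
-- chain length for a given data set under 'hashString'.
--
-- > worstCase :: [(String,Int)] -> IO Int
-- > worstCase xs = do
-- >   t <- fromList hashString xs
-- >   chain <- longestChain t
-- >   return (length chain)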