/* -*- buffer-read-only: t -*- vi: set ro: */
/* DO NOT EDIT! GENERATED AUTOMATICALLY! */
/* hash - hashing table processing.

   Copyright (C) 1998-2004, 2006-2007, 2009-2011 Free Software Foundation, Inc.

   Written by Jim Meyering, 1992.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* A generic hash table package.  */

/* Define USE_OBSTACK to 1 if you want the allocator to use obstacks instead
   of malloc.  If you change USE_OBSTACK, you have to recompile!  */

#include "bitrotate.h"

# ifndef obstack_chunk_alloc
#  define obstack_chunk_alloc malloc
# endif
# ifndef obstack_chunk_free
#  define obstack_chunk_free free
# endif

struct hash_entry
  {
    void *data;
    struct hash_entry *next;
  };

struct hash_table
  {
    /* The array of buckets starts at BUCKET and extends to BUCKET_LIMIT-1,
       for a possibility of N_BUCKETS.  Among those, N_BUCKETS_USED buckets
       are not empty; there are N_ENTRIES active entries in the table.  */
    struct hash_entry *bucket;
    struct hash_entry const *bucket_limit;
    size_t n_buckets;
    size_t n_buckets_used;
    size_t n_entries;

    /* Tuning arguments, kept in a physically separate structure.  */
    const Hash_tuning *tuning;

    /* Three functions are given to `hash_initialize', see the documentation
       block for this function.  In a word, HASHER randomizes a user entry
       into a number from 0 up to some maximum minus 1; COMPARATOR returns
       true if two user entries compare equally; and DATA_FREER is the
       cleanup function for a user entry.  */
    Hash_hasher hasher;
    Hash_comparator comparator;
    Hash_data_freer data_freer;

    /* A linked list of freed struct hash_entry structs.  */
    struct hash_entry *free_entry_list;

    /* Whenever obstacks are used, it is possible to allocate all overflowed
       entries into a single stack, so they can all be freed in a single
       operation.  It is not clear if the speedup is worth the trouble.  */
    struct obstack entry_stack;
  };

/* A hash table contains many internal entries, each holding a pointer to
   some user-provided data (also called a user entry).  The word `entry' may
   refer to either the internal entry or its associated user entry.  A user
   entry's contents may be hashed by a randomization function (the hashing
   function, or just `hasher' for short) into a number (or `slot') between 0
   and the current table size minus 1.  At each slot position in the hash
   table starts a linked chain of entries whose user data all hash to that
   slot.  A bucket is the collection of all entries hashing to the same slot.

   A good `hasher' function will distribute entries rather evenly in buckets.
   In the ideal case, the length of each bucket is roughly the number of
   entries divided by the table size.  Finding the slot for a given datum is
   usually done in constant time by the `hasher', and the subsequent search
   for the precise entry is linear in the size of its bucket.  Consequently,
   a larger hash table size (that is, a larger number of buckets) tends to
   yield shorter chains, *provided* the `hasher' function behaves properly.

   Long buckets slow down the lookup algorithm.  One might use big hash table
   sizes in the hope of reducing the average length of buckets, but this can
   become wasteful, as unused slots in the hash table take up space.  The
   best bet is to make sure you are using a good `hasher' function (beware
   that those are not that easy to write! :-), and to use a table size
   larger than the actual number of entries.  */

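/* For illustration only (not part of this file's interface): a minimal
   string `hasher' in the spirit described above.  The multiply-by-31 scheme
   matches the hash_string routine defined later in this file; the function
   name is hypothetical, and the result is always within 0..TABLE_SIZE-1, as
   the slot contract requires.

     static size_t
     demo_string_hasher (const void *entry, size_t table_size)
     {
       unsigned char const *s = entry;
       size_t value = 0;

       for (; *s; s++)
         value = (value * 31 + *s) % table_size;
       return value;
     }
*/
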
/* If an insertion makes the ratio of nonempty buckets to table size larger
   than the growth threshold (a number between 0.0 and 1.0), then increase
   the table size by multiplying by the growth factor (a number greater than
   1.0).  The growth threshold defaults to 0.8, and the growth factor
   defaults to 1.414, meaning that the table will have doubled its size
   every second time 80% of the buckets get used.  */
#define DEFAULT_GROWTH_THRESHOLD 0.8
#define DEFAULT_GROWTH_FACTOR 1.414

/* If a deletion empties a bucket and causes the ratio of used buckets to
   table size to become smaller than the shrink threshold (a number between
   0.0 and 1.0), then shrink the table by multiplying by the shrink factor (a
   number greater than the shrink threshold but smaller than 1.0).  The shrink
   threshold and factor default to 0.0 and 1.0, meaning that the table never
   shrinks.  */
#define DEFAULT_SHRINK_THRESHOLD 0.0
#define DEFAULT_SHRINK_FACTOR 1.0

/* Use this to initialize or reset a TUNING structure to
   some sensible values.  */
static const Hash_tuning default_tuning =
  {
    DEFAULT_SHRINK_THRESHOLD,
    DEFAULT_SHRINK_FACTOR,
    DEFAULT_GROWTH_THRESHOLD,
    DEFAULT_GROWTH_FACTOR,
    false
  };

/* Information and lookup.  */

/* The following few functions provide information about the overall hash
   table organization: the number of entries, number of buckets and maximum
   length of buckets.  */

/* Return the number of buckets in the hash table.  The table size, the total
   number of buckets (used plus unused), and the maximum number of slots are
   all the same quantity.  */

size_t
hash_get_n_buckets (const Hash_table *table)
{
  return table->n_buckets;
}

/* Return the number of slots in use (non-empty buckets).  */

size_t
hash_get_n_buckets_used (const Hash_table *table)
{
  return table->n_buckets_used;
}

/* Return the number of active entries.  */

size_t
hash_get_n_entries (const Hash_table *table)
{
  return table->n_entries;
}

/* Return the length of the longest chain (bucket).  */

size_t
hash_get_max_bucket_length (const Hash_table *table)
{
  struct hash_entry const *bucket;
  size_t max_bucket_length = 0;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          struct hash_entry const *cursor = bucket;
          size_t bucket_length = 1;

          while (cursor = cursor->next, cursor)
            bucket_length++;

          if (bucket_length > max_bucket_length)
            max_bucket_length = bucket_length;
        }
    }

  return max_bucket_length;
}

/* Do a mild validation of a hash table, by traversing it and checking two
   statistics.  */

bool
hash_table_ok (const Hash_table *table)
{
  struct hash_entry const *bucket;
  size_t n_buckets_used = 0;
  size_t n_entries = 0;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          struct hash_entry const *cursor = bucket;

          /* Count bucket head.  */
          n_buckets_used++;
          n_entries++;

          /* Count bucket overflow.  */
          while (cursor = cursor->next, cursor)
            n_entries++;
        }
    }

  if (n_buckets_used == table->n_buckets_used && n_entries == table->n_entries)
    return true;

  return false;
}

void
hash_print_statistics (const Hash_table *table, FILE *stream)
{
  size_t n_entries = hash_get_n_entries (table);
  size_t n_buckets = hash_get_n_buckets (table);
  size_t n_buckets_used = hash_get_n_buckets_used (table);
  size_t max_bucket_length = hash_get_max_bucket_length (table);

  fprintf (stream, "# entries:         %lu\n", (unsigned long int) n_entries);
  fprintf (stream, "# buckets:         %lu\n", (unsigned long int) n_buckets);
  fprintf (stream, "# buckets used:    %lu (%.2f%%)\n",
           (unsigned long int) n_buckets_used,
           (100.0 * n_buckets_used) / n_buckets);
  fprintf (stream, "max bucket length: %lu\n",
           (unsigned long int) max_bucket_length);
}

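/* A hypothetical debugging sketch combining the functions above; MY_TABLE
   stands for any valid Hash_table pointer.

     if (!hash_table_ok (my_table))
       fprintf (stderr, "hash table counters are inconsistent\n");
     hash_print_statistics (my_table, stderr);
*/
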
/* Hash KEY and return a pointer to the selected bucket.
   If TABLE->hasher misbehaves, abort.  */
static struct hash_entry *
safe_hasher (const Hash_table *table, const void *key)
{
  size_t n = table->hasher (key, table->n_buckets);
  if (! (n < table->n_buckets))
    abort ();
  return table->bucket + n;
}

/* If ENTRY matches an entry already in the hash table, return the
   entry from the table.  Otherwise, return NULL.  */

void *
hash_lookup (const Hash_table *table, const void *entry)
{
  struct hash_entry const *bucket = safe_hasher (table, entry);
  struct hash_entry const *cursor;

  if (bucket->data == NULL)
    return NULL;

  for (cursor = bucket; cursor; cursor = cursor->next)
    if (entry == cursor->data || table->comparator (entry, cursor->data))
      return cursor->data;

  return NULL;
}

/* The functions in this page traverse the hash table and process the
   contained entries.  For the traversal to work properly, the hash table
   should not be resized or modified while any particular entry is being
   processed.  In particular, entries should not be added, and an entry
   may be removed only if there is no shrink threshold and the entry being
   removed has already been passed to hash_get_next.  */

/* Return the first data in the table, or NULL if the table is empty.  */

void *
hash_get_first (const Hash_table *table)
{
  struct hash_entry const *bucket;

  if (table->n_entries == 0)
    return NULL;

  for (bucket = table->bucket; ; bucket++)
    if (! (bucket < table->bucket_limit))
      abort ();
    else if (bucket->data)
      return bucket->data;
}

/* Return the user data for the entry following ENTRY, where ENTRY has been
   returned by a previous call to either `hash_get_first' or `hash_get_next'.
   Return NULL if there are no more entries.  */

void *
hash_get_next (const Hash_table *table, const void *entry)
{
  struct hash_entry const *bucket = safe_hasher (table, entry);
  struct hash_entry const *cursor;

  /* Find next entry in the same bucket.  */
  cursor = bucket;
  do
    {
      if (cursor->data == entry && cursor->next)
        return cursor->next->data;
      cursor = cursor->next;
    }
  while (cursor != NULL);

  /* Find first entry in any subsequent bucket.  */
  while (++bucket < table->bucket_limit)
    if (bucket->data)
      return bucket->data;

  /* None found.  */
  return NULL;
}

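/* A minimal traversal sketch using the two functions above; MY_TABLE and
   PROCESS stand for user code.  Per the rules stated above, the table must
   not be modified during such a walk.

     void *item;
     for (item = hash_get_first (my_table);
          item != NULL;
          item = hash_get_next (my_table, item))
       process (item);
*/
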
/* Fill BUFFER with pointers to active user entries in the hash table, then
   return the number of pointers copied.  Do not copy more than BUFFER_SIZE
   pointers.  */

size_t
hash_get_entries (const Hash_table *table, void **buffer,
                  size_t buffer_size)
{
  size_t counter = 0;
  struct hash_entry const *bucket;
  struct hash_entry const *cursor;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          for (cursor = bucket; cursor; cursor = cursor->next)
            {
              if (counter >= buffer_size)
                return counter;
              buffer[counter++] = cursor->data;
            }
        }
    }

  return counter;
}

/* Call a PROCESSOR function for each entry of a hash table, and return the
   number of entries for which the processor function returned success.  A
   pointer to some PROCESSOR_DATA is made available to each call of the
   processor function.  The PROCESSOR accepts two arguments: the first is
   the user entry being visited, the second is the value of PROCESSOR_DATA
   as received.  The walking continues for as long as the PROCESSOR function
   returns nonzero.  When it returns zero, the walking is interrupted.  */

size_t
hash_do_for_each (const Hash_table *table, Hash_processor processor,
                  void *processor_data)
{
  size_t counter = 0;
  struct hash_entry const *bucket;
  struct hash_entry const *cursor;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          for (cursor = bucket; cursor; cursor = cursor->next)
            {
              if (! processor (cursor->data, processor_data))
                return counter;
              counter++;
            }
        }
    }

  return counter;
}

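/* A hypothetical Hash_processor sketch: print every string entry to the
   stream passed through PROCESSOR_DATA.  Returning true keeps the walk
   going, so hash_do_for_each returns the total number of entries printed.

     static bool
     demo_print_entry (void *entry, void *processor_data)
     {
       FILE *stream = processor_data;
       fprintf (stream, "%s\n", (char const *) entry);
       return true;
     }

   A call would then look like:
   hash_do_for_each (my_table, demo_print_entry, stdout);
*/
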
/* Allocation and clean-up.  */

/* Return a hash index for a NUL-terminated STRING between 0 and N_BUCKETS-1.
   This is a convenience routine for constructing other hashing functions.  */

#if USE_DIFF_HASH

/* About hashing, Paul Eggert writes to me (FP), on 1994-01-01: "Please see
   B. J. McKenzie, R. Harries & T. Bell, Selecting a hashing algorithm,
   Software--practice & experience 20, 2 (Feb 1990), 209-224.  Good hash
   algorithms tend to be domain-specific, so what's good for [diffutils'] io.c
   may not be good for your application."  */

size_t
hash_string (const char *string, size_t n_buckets)
{
# define HASH_ONE_CHAR(Value, Byte) \
  ((Byte) + rotl_sz (Value, 7))

  size_t value = 0;
  unsigned char ch;

  for (; (ch = *string); string++)
    value = HASH_ONE_CHAR (value, ch);
  return value % n_buckets;

# undef HASH_ONE_CHAR
}

#else /* not USE_DIFF_HASH */

/* This one comes from `recode', and performs a bit better than the above as
   per a few experiments.  It is inspired by a hashing routine found in the
   very old Cyber `snoop', itself written in typical Greg Mansfield style.
   (By the way, what happened to this excellent man?  Is he still alive?)  */

size_t
hash_string (const char *string, size_t n_buckets)
{
  size_t value = 0;
  unsigned char ch;

  for (; (ch = *string); string++)
    value = (value * 31 + ch) % n_buckets;
  return value;
}

#endif /* not USE_DIFF_HASH */

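/* hash_string already computes a value in the required 0..N_BUCKETS-1
   range; adapting it to the Hash_hasher signature (whose first argument is
   a void pointer) only takes a trivial wrapper, such as this hypothetical
   one:

     static size_t
     demo_string_hash (const void *entry, size_t n_buckets)
     {
       return hash_string (entry, n_buckets);
     }
*/
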
/* Return true if CANDIDATE is a prime number.  CANDIDATE should be an odd
   number at least equal to 11.  */

static bool
is_prime (size_t candidate)
{
  size_t divisor = 3;
  size_t square = divisor * divisor;

  while (square < candidate && (candidate % divisor))
    {
      divisor++;
      square += 4 * divisor;
      divisor++;
    }

  return (candidate % divisor ? true : false);
}

/* Round a given CANDIDATE number up to the nearest prime, and return that
   prime.  Primes lower than 10 are merely skipped.  */

static size_t
next_prime (size_t candidate)
{
  /* Skip small primes.  */
  if (candidate < 10)
    candidate = 10;

  /* Make it definitely odd.  */
  candidate |= 1;

  while (SIZE_MAX != candidate && !is_prime (candidate))
    candidate += 2;

  return candidate;
}

void
hash_reset_tuning (Hash_tuning *tuning)
{
  *tuning = default_tuning;
}

/* If the user passes a NULL hasher, we hash the raw pointer.  */
static size_t
raw_hasher (const void *data, size_t n)
{
  /* When hashing unique pointers, it is often the case that they were
     generated by malloc and thus have the property that the low-order
     bits are 0.  As this tends to give poorer performance with small
     tables, we rotate the pointer value before performing division,
     in an attempt to improve hash quality.  */
  size_t val = rotr_sz ((size_t) data, 3);
  return val % n;
}

/* If the user passes a NULL comparator, we use pointer comparison.  */
static bool
raw_comparator (const void *a, const void *b)
{
  return a == b;
}

/* For the given hash TABLE, check the user-supplied tuning structure for
   reasonable values, and return true if there is no gross error with it.
   Otherwise, definitively reset the TUNING field to some acceptable default
   in the hash table (that is, the user loses the right to further modify
   tuning arguments), and return false.  */

static bool
check_tuning (Hash_table *table)
{
  const Hash_tuning *tuning = table->tuning;
  float epsilon;
  if (tuning == &default_tuning)
    return true;

  /* Be a bit stricter than mathematics would require, so that
     rounding errors in size calculations do not cause allocations to
     fail to grow or shrink as they should.  The smallest allocation
     is 11 (due to next_prime's algorithm), so an epsilon of 0.1
     should be good enough.  */
  epsilon = 0.1f;

  if (epsilon < tuning->growth_threshold
      && tuning->growth_threshold < 1 - epsilon
      && 1 + epsilon < tuning->growth_factor
      && 0 <= tuning->shrink_threshold
      && tuning->shrink_threshold + epsilon < tuning->shrink_factor
      && tuning->shrink_factor <= 1
      && tuning->shrink_threshold + epsilon < tuning->growth_threshold)
    return true;

  table->tuning = &default_tuning;
  return false;
}

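/* A hypothetical tuning setup: start from the defaults with
   hash_reset_tuning, then let the table shrink once it drops below 20%
   occupancy.  These numbers are illustrative; check_tuning above decides
   whether a table accepts them.

     Hash_tuning tuning;
     hash_reset_tuning (&tuning);
     tuning.shrink_threshold = 0.2;
     tuning.shrink_factor = 0.5;
*/
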
/* Compute the size of the bucket array for the given CANDIDATE and
   TUNING, or return 0 if there is no possible way to allocate that
   many entries.  */

static size_t
compute_bucket_size (size_t candidate, const Hash_tuning *tuning)
{
  if (!tuning->is_n_buckets)
    {
      float new_candidate = candidate / tuning->growth_threshold;
      if (SIZE_MAX <= new_candidate)
        return 0;
      candidate = new_candidate;
    }
  candidate = next_prime (candidate);
  if (xalloc_oversized (candidate, sizeof (struct hash_entry *)))
    return 0;
  return candidate;
}

/* Allocate and return a new hash table, or NULL upon failure.  The initial
   number of buckets is automatically selected so as to _guarantee_ that you
   may insert at least CANDIDATE different user entries before any growth of
   the hash table size occurs.  So, if you have a reasonably tight a-priori
   upper bound on the number of entries you intend to insert in the hash
   table, you may save some table memory and insertion time by specifying it
   here.  If the IS_N_BUCKETS field of the TUNING structure is true, the
   CANDIDATE argument has its meaning changed to the wanted number of buckets.

   TUNING points to a structure of user-supplied values, in case some fine
   tuning is wanted over the default behavior of the hasher.  If TUNING is
   NULL, the default tuning parameters are used instead.  If TUNING is
   provided but the values requested are out of bounds or might cause
   rounding errors, return NULL.

   The user-supplied HASHER function, when not NULL, accepts two
   arguments ENTRY and TABLE_SIZE.  It computes, by hashing ENTRY contents, a
   slot number for that entry which should be in the range 0..TABLE_SIZE-1.
   This slot number is then returned.

   The user-supplied COMPARATOR function, when not NULL, accepts two
   arguments pointing to user data; it returns true for a pair of entries
   that compare equal, and false otherwise.  This function is internally
   called on entries which are already known to hash to the same bucket
   index, but which are distinct pointers.

   The user-supplied DATA_FREER function, when not NULL, may be later called
   with the user data as an argument, just before the entry containing the
   data gets freed.  This happens from within `hash_free' or `hash_clear'.
   You should specify this function only if you want these functions to free
   all of your `data' data.  This is typically the case when your data is
   simply an auxiliary struct that you have malloc'd to aggregate several
   values.  */

Hash_table *
hash_initialize (size_t candidate, const Hash_tuning *tuning,
                 Hash_hasher hasher, Hash_comparator comparator,
                 Hash_data_freer data_freer)
{
  Hash_table *table;

  if (hasher == NULL)
    hasher = raw_hasher;
  if (comparator == NULL)
    comparator = raw_comparator;

  table = malloc (sizeof *table);
  if (table == NULL)
    return NULL;

  if (!tuning)
    tuning = &default_tuning;
  table->tuning = tuning;
  if (!check_tuning (table))
    {
      /* Fail if the tuning options are invalid.  This is the only occasion
         when the user gets some feedback about it.  Once the table is
         created, if the user provides invalid tuning options, we silently
         revert to using the defaults, and ignore further requests to change
         the tuning options.  */
      goto fail;
    }

  table->n_buckets = compute_bucket_size (candidate, tuning);
  if (!table->n_buckets)
    goto fail;

  table->bucket = calloc (table->n_buckets, sizeof *table->bucket);
  if (table->bucket == NULL)
    goto fail;
  table->bucket_limit = table->bucket + table->n_buckets;
  table->n_buckets_used = 0;
  table->n_entries = 0;

  table->hasher = hasher;
  table->comparator = comparator;
  table->data_freer = data_freer;

  table->free_entry_list = NULL;
#if USE_OBSTACK
  obstack_init (&table->entry_stack);
#endif
  return table;

 fail:
  free (table);
  return NULL;
}

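/* A hypothetical end-to-end sketch: a table of malloc'd strings, using the
   demo_string_hash wrapper suggested earlier, a strcmp-based comparator,
   and `free' as DATA_FREER so that hash_free releases the strings too.
   The abort calls merely stand in for real error handling.

     static bool
     demo_string_eq (const void *a, const void *b)
     {
       return strcmp (a, b) == 0;
     }

     Hash_table *t = hash_initialize (100, NULL, demo_string_hash,
                                      demo_string_eq, free);
     char *s = strdup ("alpha");
     if (t == NULL || s == NULL || hash_insert (t, s) == NULL)
       abort ();
     hash_free (t);
*/
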
/* Make all buckets empty, placing any chained entries on the free list.
   Apply the user-specified function data_freer (if any) to the data of any
   affected entries.  */

void
hash_clear (Hash_table *table)
{
  struct hash_entry *bucket;

  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      if (bucket->data)
        {
          struct hash_entry *cursor;
          struct hash_entry *next;

          /* Free the bucket overflow.  */
          for (cursor = bucket->next; cursor; cursor = next)
            {
              if (table->data_freer)
                table->data_freer (cursor->data);
              cursor->data = NULL;

              next = cursor->next;

              /* Relinking is done one entry at a time, as it is to be
                 expected that overflows are either rare or short.  */
              cursor->next = table->free_entry_list;
              table->free_entry_list = cursor;
            }

          /* Free the bucket head.  */
          if (table->data_freer)
            table->data_freer (bucket->data);
          bucket->data = NULL;
          bucket->next = NULL;
        }
    }

  table->n_buckets_used = 0;
  table->n_entries = 0;
}

/* Reclaim all storage associated with a hash table.  If a data_freer
   function has been supplied by the user when the hash table was created,
   this function applies it to the data of each entry before freeing that
   entry.  */

void
hash_free (Hash_table *table)
{
  struct hash_entry *bucket;
  struct hash_entry *cursor;
  struct hash_entry *next;

  /* Call the user data_freer function.  */
  if (table->data_freer && table->n_entries)
    {
      for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
        {
          if (bucket->data)
            {
              for (cursor = bucket; cursor; cursor = cursor->next)
                table->data_freer (cursor->data);
            }
        }
    }

#if USE_OBSTACK

  obstack_free (&table->entry_stack, NULL);

#else

  /* Free all bucket overflowed entries.  */
  for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
    {
      for (cursor = bucket->next; cursor; cursor = next)
        {
          next = cursor->next;
          free (cursor);
        }
    }

  /* Also reclaim the internal list of previously freed entries.  */
  for (cursor = table->free_entry_list; cursor; cursor = next)
    {
      next = cursor->next;
      free (cursor);
    }

#endif

  /* Free the remainder of the hash table structure.  */
  free (table->bucket);
  free (table);
}

/* Insertion and deletion.  */

/* Get a new hash entry for a bucket overflow, possibly by recycling a
   previously freed one.  If this is not possible, allocate a new one.  */

static struct hash_entry *
allocate_entry (Hash_table *table)
{
  struct hash_entry *new;

  if (table->free_entry_list)
    {
      new = table->free_entry_list;
      table->free_entry_list = new->next;
    }
  else
    {
#if USE_OBSTACK
      new = obstack_alloc (&table->entry_stack, sizeof *new);
#else
      new = malloc (sizeof *new);
#endif
    }

  return new;
}

/* Free a hash entry which was part of some bucket overflow,
   saving it for later recycling.  */

static void
free_entry (Hash_table *table, struct hash_entry *entry)
{
  entry->data = NULL;
  entry->next = table->free_entry_list;
  table->free_entry_list = entry;
}

/* This private function is used to help with insertion and deletion.  When
   ENTRY matches an entry in the table, return a pointer to the corresponding
   user data and set *BUCKET_HEAD to the head of the selected bucket.
   Otherwise, return NULL.  When DELETE is true and ENTRY matches an entry in
   the table, unlink the matching entry.  */

static void *
hash_find_entry (Hash_table *table, const void *entry,
                 struct hash_entry **bucket_head, bool delete)
{
  struct hash_entry *bucket = safe_hasher (table, entry);
  struct hash_entry *cursor;

  *bucket_head = bucket;

  /* Test for empty bucket.  */
  if (bucket->data == NULL)
    return NULL;

  /* See if the entry is the first in the bucket.  */
  if (entry == bucket->data || table->comparator (entry, bucket->data))
    {
      void *data = bucket->data;

      if (delete)
        {
          if (bucket->next)
            {
              struct hash_entry *next = bucket->next;

              /* Bump the first overflow entry into the bucket head, then save
                 the previous first overflow entry for later recycling.  */
              *bucket = *next;
              free_entry (table, next);
            }
          else
            {
              bucket->data = NULL;
            }
        }

      return data;
    }

  /* Scan the bucket overflow.  */
  for (cursor = bucket; cursor->next; cursor = cursor->next)
    {
      if (entry == cursor->next->data
          || table->comparator (entry, cursor->next->data))
        {
          void *data = cursor->next->data;

          if (delete)
            {
              struct hash_entry *next = cursor->next;

              /* Unlink the entry to delete, then save the freed entry for
                 later recycling.  */
              cursor->next = next->next;
              free_entry (table, next);
            }

          return data;
        }
    }

  /* No entry found.  */
  return NULL;
}

/* Internal helper, to move entries from SRC to DST.  Both tables must
   share the same free entry list.  If SAFE, only move overflow
   entries, saving bucket heads for later, so that no allocations will
   occur.  Return false if the free entry list is exhausted and an
   allocation fails.  */

static bool
transfer_entries (Hash_table *dst, Hash_table *src, bool safe)
{
  struct hash_entry *bucket;
  struct hash_entry *cursor;
  struct hash_entry *next;
  for (bucket = src->bucket; bucket < src->bucket_limit; bucket++)
    if (bucket->data)
      {
        void *data;
        struct hash_entry *new_bucket;

        /* Within each bucket, transfer overflow entries first and
           then the bucket head, to minimize memory pressure.  After
           all, the only time we might allocate is when moving the
           bucket head, but moving overflow entries first may create
           free entries that can be recycled by the time we finally
           get to the bucket head.  */
        for (cursor = bucket->next; cursor; cursor = next)
          {
            data = cursor->data;
            new_bucket = safe_hasher (dst, data);

            next = cursor->next;

            if (new_bucket->data)
              {
                /* Merely relink an existing entry, when moving from a
                   bucket overflow into a bucket overflow.  */
                cursor->next = new_bucket->next;
                new_bucket->next = cursor;
              }
            else
              {
                /* Free an existing entry, when moving from a bucket
                   overflow into a bucket header.  */
                new_bucket->data = data;
                dst->n_buckets_used++;
                free_entry (dst, cursor);
              }
          }

        /* Now move the bucket head.  Be sure that if we fail due to
           allocation failure that the src table is in a consistent
           state.  */
        data = bucket->data;
        bucket->next = NULL;
        if (safe)
          continue;
        new_bucket = safe_hasher (dst, data);

        if (new_bucket->data)
          {
            /* Allocate or recycle an entry, when moving from a bucket
               header into a bucket overflow.  */
            struct hash_entry *new_entry = allocate_entry (dst);

            if (new_entry == NULL)
              return false;

            new_entry->data = data;
            new_entry->next = new_bucket->next;
            new_bucket->next = new_entry;
          }
        else
          {
            /* Move from one bucket header to another.  */
            new_bucket->data = data;
            dst->n_buckets_used++;
          }
        bucket->data = NULL;
        src->n_buckets_used--;
      }
  return true;
}

/* For an already existing hash table, change the number of buckets through
   specifying CANDIDATE.  The contents of the hash table are preserved.  The
   new number of buckets is automatically selected so as to _guarantee_ that
   the table may receive at least CANDIDATE different user entries, including
   those already in the table, before any other growth of the hash table size
   occurs.  If TUNING->IS_N_BUCKETS is true, then CANDIDATE specifies the
   exact number of buckets desired.  Return true iff the rehash succeeded.  */

bool
hash_rehash (Hash_table *table, size_t candidate)
{
  Hash_table storage;
  Hash_table *new_table;
  size_t new_size = compute_bucket_size (candidate, table->tuning);

  if (!new_size)
    return false;
  if (new_size == table->n_buckets)
    return true;
  new_table = &storage;
  new_table->bucket = calloc (new_size, sizeof *new_table->bucket);
  if (new_table->bucket == NULL)
    return false;
  new_table->n_buckets = new_size;
  new_table->bucket_limit = new_table->bucket + new_size;
  new_table->n_buckets_used = 0;
  new_table->n_entries = 0;
  new_table->tuning = table->tuning;
  new_table->hasher = table->hasher;
  new_table->comparator = table->comparator;
  new_table->data_freer = table->data_freer;

  /* In order for the transfer to successfully complete, we need
     additional overflow entries when distinct buckets in the old
     table collide into a common bucket in the new table.  The worst
     case possible is a hasher that gives a good spread with the old
     size, but returns a constant with the new size; if we were to
     guarantee table->n_buckets_used-1 free entries in advance, then
     the transfer would be guaranteed to not allocate memory.
     However, for large tables, a guarantee of no further allocation
     introduces a lot of extra memory pressure, all for an unlikely
     corner case (most rehashes reduce, rather than increase, the
     number of overflow entries needed).  So, we instead ensure that
     the transfer process can be reversed if we hit a memory
     allocation failure mid-transfer.  */

#if USE_OBSTACK
  /* Merely reuse the extra old space in the new table.  */
  new_table->entry_stack = table->entry_stack;
#endif

  new_table->free_entry_list = table->free_entry_list;

  if (transfer_entries (new_table, table, false))
    {
      /* Entries transferred successfully; tie up the loose ends.  */
      free (table->bucket);
      table->bucket = new_table->bucket;
      table->bucket_limit = new_table->bucket_limit;
      table->n_buckets = new_table->n_buckets;
      table->n_buckets_used = new_table->n_buckets_used;
      table->free_entry_list = new_table->free_entry_list;
      /* table->n_entries and table->entry_stack already hold their value.  */
      return true;
    }

  /* We've allocated new_table->bucket (and possibly some entries),
     exhausted the free list, and moved some but not all entries into
     new_table.  We must undo the partial move before returning
     failure.  The only way to get into this situation is if new_table
     uses fewer buckets than the old table, so we will reclaim some
     free entries as overflows in the new table are put back into
     distinct buckets in the old table.

     There are some pathological cases where a single pass through the
     table requires more intermediate overflow entries than using two
     passes.  Two passes give worse cache performance and take
     longer, but at this point, we're already out of memory, so slow
     and safe is better than failure.  */
  table->free_entry_list = new_table->free_entry_list;
  if (! (transfer_entries (table, new_table, true)
         && transfer_entries (table, new_table, false)))
    abort ();
  /* table->n_entries already holds its value.  */
  free (new_table->bucket);
  return false;
}

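/* A hypothetical use of hash_rehash: grow the table ahead of a large batch
   of insertions so the batch itself triggers no intermediate rehash; on
   failure the table is left unchanged and later insertions simply rehash
   on demand.  MY_TABLE and N_EXPECTED stand for user values.

     if (!hash_rehash (my_table, n_expected))
       fprintf (stderr, "pre-sizing failed, continuing anyway\n");
*/
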
/* Return -1 upon memory allocation failure.
   Return 1 if insertion succeeded.
   Return 0 if there is already a matching entry in the table,
   and in that case, if MATCHED_ENT is non-NULL, set *MATCHED_ENT
   to that entry.

   This interface is easier to use than hash_insert when you must
   distinguish between the latter two cases.  More importantly,
   hash_insert is unusable for some types of ENTRY values.  When using
   hash_insert, the only way to distinguish those cases is to compare
   the return value and ENTRY.  That works only when you can have two
   different ENTRY values that point to data that compares "equal".  Thus,
   when the ENTRY value is a simple scalar, you must use hash_insert0.
   ENTRY must not be NULL.  */

int
hash_insert0 (Hash_table *table, void const *entry, void const **matched_ent)
{
  void *data;
  struct hash_entry *bucket;

  /* The caller cannot insert a NULL entry, since hash_lookup returns NULL
     to indicate "not found", and hash_find_entry uses "bucket->data == NULL"
     to indicate an empty bucket.  */
  if (! entry)
    abort ();

  /* If there's a matching entry already in the table, return that.  */
  if ((data = hash_find_entry (table, entry, &bucket, false)) != NULL)
    {
      if (matched_ent)
        *matched_ent = data;
      return 0;
    }

  /* If the growth threshold of the buckets in use has been reached, increase
     the table size and rehash.  There's no point in checking the number of
     entries:  if the hashing function is ill-conditioned, rehashing is not
     likely to improve it.  */

  if (table->n_buckets_used
      > table->tuning->growth_threshold * table->n_buckets)
    {
      /* Check more fully, before starting real work.  If tuning arguments
         became invalid, the second check will rely on proper defaults.  */
      check_tuning (table);
      if (table->n_buckets_used
          > table->tuning->growth_threshold * table->n_buckets)
        {
          const Hash_tuning *tuning = table->tuning;
          float candidate =
            (tuning->is_n_buckets
             ? (table->n_buckets * tuning->growth_factor)
             : (table->n_buckets * tuning->growth_factor
                * tuning->growth_threshold));

          if (SIZE_MAX <= candidate)
            return -1;

          /* If the rehash fails, arrange to return -1.  */
          if (!hash_rehash (table, candidate))
            return -1;

          /* Update the bucket we are interested in.  */
          if (hash_find_entry (table, entry, &bucket, false) != NULL)
            abort ();
        }
    }

  /* ENTRY is not matched; it should be inserted.  */

  if (bucket->data)
    {
      struct hash_entry *new_entry = allocate_entry (table);

      if (new_entry == NULL)
        return -1;

      /* Add ENTRY in the overflow of the bucket.  */

      new_entry->data = (void *) entry;
      new_entry->next = bucket->next;
      bucket->next = new_entry;
      table->n_entries++;
      return 1;
    }

  /* Add ENTRY right in the bucket head.  */

  bucket->data = (void *) entry;
  table->n_entries++;
  table->n_buckets_used++;

  return 1;
}

/* If ENTRY matches an entry already in the hash table, return the pointer
   to the entry from the table.  Otherwise, insert ENTRY and return ENTRY.
   Return NULL if the storage required for insertion cannot be allocated.
   This implementation does not support duplicate entries or insertion of
   NULL.  */

void *
hash_insert (Hash_table *table, void const *entry)
{
  void const *matched_ent;
  int err = hash_insert0 (table, entry, &matched_ent);
  return (err == -1
          ? NULL
          : (void *) (err == 0 ? matched_ent : entry));
}

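/* A hypothetical sketch of the three-way result of hash_insert0: -1 means
   allocation failure, 0 means MATCHED holds the already-present equal
   entry, and 1 means the new entry went in.  MY_TABLE, MY_ENTRY and
   USE_EXISTING stand for user code.

     void const *matched;
     int r = hash_insert0 (my_table, my_entry, &matched);
     if (r == -1)
       abort ();
     else if (r == 0)
       use_existing (matched);
*/
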
/* If ENTRY is already in the table, remove it and return the just-deleted
   data (the user may want to deallocate its storage).  If ENTRY is not in the
   table, don't modify the table and return NULL.  */

void *
hash_delete (Hash_table *table, const void *entry)
{
  void *data;
  struct hash_entry *bucket;

  data = hash_find_entry (table, entry, &bucket, true);
  if (!data)
    return NULL;

  table->n_entries--;
  if (!bucket->data)
    {
      table->n_buckets_used--;

      /* If the shrink threshold of the buckets in use has been reached,
         rehash into a smaller table.  */

      if (table->n_buckets_used
          < table->tuning->shrink_threshold * table->n_buckets)
        {
          /* Check more fully, before starting real work.  If tuning arguments
             became invalid, the second check will rely on proper defaults.  */
          check_tuning (table);
          if (table->n_buckets_used
              < table->tuning->shrink_threshold * table->n_buckets)
            {
              const Hash_tuning *tuning = table->tuning;
              size_t candidate =
                (tuning->is_n_buckets
                 ? table->n_buckets * tuning->shrink_factor
                 : (table->n_buckets * tuning->shrink_factor
                    * tuning->growth_threshold));

              if (!hash_rehash (table, candidate))
                {
                  /* Failure to allocate memory in an attempt to
                     shrink the table is not fatal.  But since memory
                     is low, we can at least be kind and free any
                     spare entries, rather than keeping them tied up
                     in the free entry list.  */
#if ! USE_OBSTACK
                  struct hash_entry *cursor = table->free_entry_list;
                  struct hash_entry *next;
                  while (cursor)
                    {
                      next = cursor->next;
                      free (cursor);
                      cursor = next;
                    }
                  table->free_entry_list = NULL;
#endif
                }
            }
        }
    }

  return data;
}

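/* A hypothetical deletion sketch.  hash_delete hands the user data back so
   the caller can release it; the table itself only applies DATA_FREER from
   hash_clear and hash_free.  MY_TABLE and MY_ENTRY stand for user values.

     void *old = hash_delete (my_table, my_entry);
     if (old)
       free (old);
*/
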
#if TESTING

void
hash_print (const Hash_table *table)
{
  struct hash_entry *bucket = (struct hash_entry *) table->bucket;

  for ( ; bucket < table->bucket_limit; bucket++)
    {
      struct hash_entry *cursor;

      printf ("%lu:\n", (unsigned long int) (bucket - table->bucket));

      for (cursor = bucket; cursor; cursor = cursor->next)
        {
          char const *s = cursor->data;

          /* FIXME */
          if (s)
            printf ("  %s\n", s);
        }
    }
}

#endif /* TESTING */