Line data Source code
1 : /*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
2 :
3 : /***
4 : This file is part of systemd.
5 :
6 : Copyright 2010 Lennart Poettering
7 : Copyright 2014 Michal Schmidt
8 :
9 : systemd is free software; you can redistribute it and/or modify it
10 : under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation; either version 2.1 of the License, or
12 : (at your option) any later version.
13 :
14 : systemd is distributed in the hope that it will be useful, but
15 : WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 : Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with systemd; If not, see <http://www.gnu.org/licenses/>.
21 : ***/
22 :
23 : #include <stdlib.h>
24 : #include <errno.h>
25 : #include <pthread.h>
26 :
27 : #include "util.h"
28 : #include "hashmap.h"
29 : #include "set.h"
30 : #include "macro.h"
31 : #include "siphash24.h"
32 : #include "strv.h"
33 : #include "mempool.h"
34 : #include "random-util.h"
35 :
36 : #ifdef ENABLE_DEBUG_HASHMAP
37 : #include "list.h"
38 : #endif
39 :
40 : /*
41 : * Implementation of hashmaps.
42 : * Addressing: open
43 : * - uses less RAM compared to closed addressing (chaining), because
44 : * our entries are small (especially in Sets, which tend to contain
45 : * the majority of entries in systemd).
46 : * Collision resolution: Robin Hood
47 : * - tends to equalize displacement of entries from their optimal buckets.
48 : * Probe sequence: linear
49 : * - though theoretically worse than random probing/uniform hashing/double
50 : * hashing, it is good for cache locality.
51 : *
52 : * References:
53 : * Celis, P. 1986. Robin Hood Hashing.
54 : * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada, Canada.
55 : * https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
56 : * - The results are derived for random probing. Suggests deletion with
57 : * tombstones and two mean-centered search methods. None of that works
58 : * well for linear probing.
59 : *
60 : * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
61 : * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
62 : * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
63 : * http://www.math.uu.se/~svante/papers/sj157.pdf
64 : * - Applies to Robin Hood with linear probing. Contains remarks on
65 : * the unsuitability of mean-centered search with linear probing.
66 : *
67 : * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
68 : * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
69 : * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
70 : * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
71 : * in a successful search), and Janson writes about displacement. C = d + 1.
72 : *
73 : * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
74 : * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
75 : * - Explanation of backward shift deletion with pictures.
76 : *
77 : * Khuong, P. 2013. The Other Robin Hood Hashing.
78 : * http://www.pvk.ca/Blog/2013/11/26/the-other-robin-hood-hashing/
79 : * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
80 : */
81 :
82 : /*
83 : * XXX Ideas for improvement:
84 : * For unordered hashmaps, randomize iteration order, similarly to Perl:
85 : * http://blog.booking.com/hardening-perls-hash-function.html
86 : */
87 :
88 : /* INV_KEEP_FREE = 1 / (1 - max_load_factor)
89 : * e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
90 : #define INV_KEEP_FREE 5U
91 :
/* Fields common to entries of all hashmap/set types */
struct hashmap_base_entry {
        const void *key;
};

/* Entry types for specific hashmap/set types
 * hashmap_base_entry must be at the beginning of each entry struct. */

/* Entry of a plain (unordered) hashmap: the key plus a separate value. */
struct plain_hashmap_entry {
        struct hashmap_base_entry b;
        void *value;
};

/* Entry of an ordered hashmap: a plain entry additionally linked into the
 * insertion-order list via bucket indexes of its neighbours. */
struct ordered_hashmap_entry {
        struct plain_hashmap_entry p;
        unsigned iterate_next, iterate_previous;
};

/* Entry of a set: the key doubles as the value. */
struct set_entry {
        struct hashmap_base_entry b;
};

/* In several functions it is advantageous to have the hash table extended
 * virtually by a couple of additional buckets. We reserve special index values
 * for these "swap" buckets. */
#define _IDX_SWAP_BEGIN (UINT_MAX - 3)
#define IDX_PUT (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL UINT_MAX /* special index value meaning "none" or "end" */

/* The swap-index range and the iterator sentinels must not collide;
 * verify that at compile time. */
assert_cc(IDX_FIRST == _IDX_SWAP_END);
assert_cc(IDX_FIRST == _IDX_ITERATOR_FIRST);

/* Storage space for the "swap" buckets.
 * All entry types can fit into a ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};

/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU) /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU) /* entry yet to be rehashed during in-place resize */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU) /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

/* Unbounded DIB value used by the non-raw helpers; stored as DIB_RAW_FREE. */
#define DIB_FREE UINT_MAX

#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
        LIST_FIELDS(struct hashmap_debug_info, debug_list);
        unsigned max_entries;  /* high watermark of n_entries */

        /* who allocated this hashmap */
        int line;
        const char *file;
        const char *func;

        /* fields to detect modification while iterating */
        unsigned put_count;    /* counts puts into the hashmap */
        unsigned rem_count;    /* counts removals from hashmap */
        unsigned last_rem_idx; /* remembers last removal index */
};

/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */
static LIST_HEAD(struct hashmap_debug_info, hashmap_debug_list);
static pthread_mutex_t hashmap_debug_list_mutex = PTHREAD_MUTEX_INITIALIZER;

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;

#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */

/* Discriminates the concrete head structure behind a HashmapBase pointer. */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
175 :
/* Header for out-of-line (malloc'ed) bucket storage, used once the table
 * outgrows the direct in-object storage. Packed so that together with the
 * bitfields of HashmapBase the whole head stays compact. */
struct _packed_ indirect_storage {
        char    *storage;                  /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */

        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */

        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};

struct direct_storage {
        /* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
         * That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
         * or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
        char storage[sizeof(struct indirect_storage)];
};

/* How many buckets of the given entry type fit into direct storage
 * (each bucket costs one entry plus one raw DIB byte). */
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));

/* Hashmaps with directly stored entries all use this shared hash key.
 * It's no big deal if the key is guessed, because there can be only
 * a handful of directly stored entries in a hashmap. When a hashmap
 * outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;

/* Fields that all hashmap/set types must have */
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */

        union _packed_ {
                struct indirect_storage indirect; /* if has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };

        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};

/* Specific hash types
 * HashmapBase must be at the beginning of each hashmap struct. */

struct Hashmap {
        struct HashmapBase b;
};

struct OrderedHashmap {
        struct HashmapBase b;
        unsigned iterate_list_head, iterate_list_tail;
};

struct Set {
        struct HashmapBase b;
};

DEFINE_MEMPOOL(hashmap_pool, Hashmap, 8);
DEFINE_MEMPOOL(ordered_hashmap_pool, OrderedHashmap, 8);
/* No need for a separate Set pool */
assert_cc(sizeof(Hashmap) == sizeof(Set));

/* Per-type constants: sizes of the head and the entries, which mempool to
 * allocate heads from, and how many buckets fit into direct storage. */
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};

static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
                .head_size        = sizeof(Hashmap),
                .entry_size       = sizeof(struct plain_hashmap_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct plain_hashmap_entry),
        },
        [HASHMAP_TYPE_ORDERED] = {
                .head_size        = sizeof(OrderedHashmap),
                .entry_size       = sizeof(struct ordered_hashmap_entry),
                .mempool          = &ordered_hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct ordered_hashmap_entry),
        },
        [HASHMAP_TYPE_SET] = {
                .head_size        = sizeof(Set),
                .entry_size       = sizeof(struct set_entry),
                .mempool          = &hashmap_pool,
                .n_direct_buckets = DIRECT_BUCKETS(struct set_entry),
        },
};
278 :
/* Hash a NUL-terminated string key with siphash24, keyed by hash_key. */
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}

/* Compare two NUL-terminated string keys lexicographically. */
int string_compare_func(const void *a, const void *b) {
        return strcmp(a, b);
}

const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
293 :
/* Hash the pointer value itself (not what it points to) with siphash24.
 * Used for identity-keyed maps via trivial_hash_ops. */
unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}
299 :
/* Order two pointer keys by address.
 *
 * Note: applying the relational operators < and > directly to pointers that
 * do not point into the same object is undefined behavior in ISO C (6.5.8).
 * Converting to uintptr_t first yields a well-defined total order on the
 * integer representation of the addresses, which is identical to the old
 * behavior on all platforms systemd supports (flat address space). */
int trivial_compare_func(const void *a, const void *b) {
        uintptr_t ua = (uintptr_t) a, ub = (uintptr_t) b;

        return ua < ub ? -1 : (ua > ub ? 1 : 0);
}
303 :
const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};

/* Hash a uint64_t key (p points at the value) with siphash24. */
unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}
314 :
/* Total order on uint64_t keys; the arguments point at the values. */
int uint64_compare_func(const void *_a, const void *_b) {
        const uint64_t a = *(const uint64_t*) _a;
        const uint64_t b = *(const uint64_t*) _b;

        if (a == b)
                return 0;

        return a < b ? -1 : 1;
}
321 :
const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};

/* dev_t is 64bit on most platforms; separate ops are only needed where it
 * is not (otherwise callers can reuse uint64_hash_ops directly). */
#if SIZEOF_DEV_T != 8
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
346 :
/* Number of buckets in whichever storage (direct or indirect) is in use. */
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

/* Number of entries currently stored. */
static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}

/* Increment the entry count of the storage currently in use. */
static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

/* Decrement the entry count of the storage currently in use. */
static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}

/* Start of the bucket array; the raw DIB bytes follow the entries. */
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

/* Hash key to use: per-table once indirect, the shared one while direct. */
static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}

/* Map a key to its initial (optimal) bucket index in h. */
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, hash_key(h)) % n_buckets(h));
}
#define bucket_hash(h, p) base_bucket_hash(HASHMAP_BASE(h), p)

static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
404 :
/* Address of the bucket at index idx within the current storage. */
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}

/* Typed convenience wrappers around bucket_at(). */
static struct plain_hashmap_entry *plain_bucket_at(Hashmap *h, unsigned idx) {
        return (struct plain_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct ordered_hashmap_entry *ordered_bucket_at(OrderedHashmap *h, unsigned idx) {
        return (struct ordered_hashmap_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

static struct set_entry *set_bucket_at(Set *h, unsigned idx) {
        return (struct set_entry*) bucket_at(HASHMAP_BASE(h), idx);
}

/* Address of one of the virtual "swap" buckets (IDX_PUT / IDX_TMP). */
static struct ordered_hashmap_entry *bucket_at_swap(struct swap_entries *swap, unsigned idx) {
        return &swap->e[idx - _IDX_SWAP_BEGIN];
}

/* Returns a pointer to the bucket at index idx.
 * Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);

        if (idx < _IDX_SWAP_END)
                return &bucket_at_swap(swap, idx)->p.b;

        assert_not_reached("Invalid index");
}
438 :
/* The raw DIB bytes are stored directly after the bucket entries. */
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}

/* Forward distance from bucket "from" to bucket "idx", wrapping around at
 * the end of the table. */
static unsigned bucket_distance(HashmapBase *h, unsigned idx, unsigned from) {
        return idx >= from ? idx - from
                           : n_buckets(h) + idx - from;
}

/* Expand a raw (possibly saturated) DIB byte into the true distance of the
 * entry at idx from its initial bucket. */
static unsigned bucket_calculate_dib(HashmapBase *h, unsigned idx, dib_raw_t raw_dib) {
        unsigned initial_bucket;

        if (raw_dib == DIB_RAW_FREE)
                return DIB_FREE;

        if (_likely_(raw_dib < DIB_RAW_OVERFLOW))
                return raw_dib;

        /*
         * Having an overflow DIB value is very unlikely. The hash function
         * would have to be bad. For example, in a table of size 2^24 filled
         * to load factor 0.9 the maximum observed DIB is only about 60.
         * In theory (assuming I used Maxima correctly), for an infinite size
         * hash table with load factor 0.8 the probability of a given entry
         * having DIB > 40 is 1.9e-8.
         * This returns the correct DIB value by recomputing the hash value in
         * the unlikely case. XXX Hitting this case could be a hint to rehash.
         */
        initial_bucket = bucket_hash(h, bucket_at(h, idx)->key);
        return bucket_distance(h, idx, initial_bucket);
}
471 :
/* Store the DIB for bucket idx, saturating at DIB_RAW_OVERFLOW. */
static void bucket_set_dib(HashmapBase *h, unsigned idx, unsigned dib) {
        dib_raw_ptr(h)[idx] = dib != DIB_FREE ? MIN(dib, DIB_RAW_OVERFLOW) : DIB_RAW_FREE;
}

/* Index of the first occupied bucket at or after idx, or IDX_NIL if the
 * rest of the table is free. */
static unsigned skip_free_buckets(HashmapBase *h, unsigned idx) {
        dib_raw_t *dibs;

        dibs = dib_raw_ptr(h);

        for ( ; idx < n_buckets(h); idx++)
                if (dibs[idx] != DIB_RAW_FREE)
                        return idx;

        return IDX_NIL;
}

/* Zero the bucket's entry and mark the bucket free in the DIB array. */
static void bucket_mark_free(HashmapBase *h, unsigned idx) {
        memzero(bucket_at(h, idx), hashmap_type_info[h->type].entry_size);
        bucket_set_dib(h, idx, DIB_FREE);
}
492 :
/* Copy the entry in bucket "from" into bucket "to"; either index may refer
 * to a virtual swap bucket. For ordered hashmaps the doubly-linked
 * insertion-order list is repaired so that neighbours and the list
 * head/tail all point at the entry's new location. The DIB byte is NOT
 * updated here; the caller does that. */
static void bucket_move_entry(HashmapBase *h, struct swap_entries *swap,
                              unsigned from, unsigned to) {
        struct hashmap_base_entry *e_from, *e_to;

        assert(from != to);

        e_from = bucket_at_virtual(h, swap, from);
        e_to = bucket_at_virtual(h, swap, to);

        memcpy(e_to, e_from, hashmap_type_info[h->type].entry_size);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le, *le_to;

                le_to = (struct ordered_hashmap_entry*) e_to;

                /* Point the successor's back-link at the new position. */
                if (le_to->iterate_next != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_next);
                        le->iterate_previous = to;
                }

                /* Point the predecessor's forward-link at the new position. */
                if (le_to->iterate_previous != IDX_NIL) {
                        le = (struct ordered_hashmap_entry*)
                             bucket_at_virtual(h, swap, le_to->iterate_previous);
                        le->iterate_next = to;
                }

                /* Fix up the list head/tail if they referenced "from". */
                if (lh->iterate_list_head == from)
                        lh->iterate_list_head = to;
                if (lh->iterate_list_tail == from)
                        lh->iterate_list_tail = to;
        }
}
528 :
529 56334436 : static unsigned next_idx(HashmapBase *h, unsigned idx) {
530 56334436 : return (idx + 1U) % n_buckets(h);
531 : }
532 :
533 4 : static unsigned prev_idx(HashmapBase *h, unsigned idx) {
534 4 : return (n_buckets(h) + idx - 1U) % n_buckets(h);
535 : }
536 :
537 4431304 : static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
538 4431304 : switch (h->type) {
539 :
540 : case HASHMAP_TYPE_PLAIN:
541 : case HASHMAP_TYPE_ORDERED:
542 4416225 : return ((struct plain_hashmap_entry*)e)->value;
543 :
544 : case HASHMAP_TYPE_SET:
545 15079 : return (void*) e->key;
546 :
547 : default:
548 0 : assert_not_reached("Unknown hashmap type");
549 : }
550 : }
551 :
/* Remove the entry at bucket idx using backward shift deletion: every entry
 * of the probe cluster after idx moves one bucket to the left (its DIB
 * decreases by 1), so no tombstones are ever needed. For ordered hashmaps
 * the entry is also unlinked from the insertion-order list first. */
static void base_remove_entry(HashmapBase *h, unsigned idx) {
        unsigned left, right, prev, dib;
        dib_raw_t raw_dib, *dibs;

        dibs = dib_raw_ptr(h);
        assert(dibs[idx] != DIB_RAW_FREE);

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.rem_count++;
        h->debug.last_rem_idx = idx;
#endif

        left = idx;
        /* Find the stop bucket ("right"). It is either free or has DIB == 0. */
        for (right = next_idx(h, left); ; right = next_idx(h, right)) {
                raw_dib = dibs[right];
                if (raw_dib == 0 || raw_dib == DIB_RAW_FREE)
                        break;

                /* The buckets are not supposed to be all occupied and with DIB > 0.
                 * That would mean we could make everyone better off by shifting them
                 * backward. This scenario is impossible. */
                assert(left != right);
        }

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Unlink the entry from the insertion-order list. */
                OrderedHashmap *lh = (OrderedHashmap*) h;
                struct ordered_hashmap_entry *le = ordered_bucket_at(lh, idx);

                if (le->iterate_next != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_next)->iterate_previous = le->iterate_previous;
                else
                        lh->iterate_list_tail = le->iterate_previous;

                if (le->iterate_previous != IDX_NIL)
                        ordered_bucket_at(lh, le->iterate_previous)->iterate_next = le->iterate_next;
                else
                        lh->iterate_list_head = le->iterate_next;
        }

        /* Now shift all buckets in the interval (left, right) one step backwards */
        for (prev = left, left = next_idx(h, left); left != right;
             prev = left, left = next_idx(h, left)) {
                dib = bucket_calculate_dib(h, left, dibs[left]);
                assert(dib != 0);
                bucket_move_entry(h, NULL, left, prev);
                bucket_set_dib(h, prev, dib - 1);
        }

        /* "prev" is now the last bucket of the shifted cluster; free it. */
        bucket_mark_free(h, prev);
        n_entries_dec(h);
}
#define remove_entry(h, idx) base_remove_entry(HASHMAP_BASE(h), idx)
605 :
/* Advance iterator i over an ordered hashmap, visiting entries in insertion
 * order. Returns the bucket index of the current entry, or IDX_NIL once the
 * iteration is exhausted. Removing the current entry while iterating is
 * explicitly supported (see the backward-shift note below). */
static unsigned hashmap_iterate_in_insertion_order(OrderedHashmap *h, Iterator *i) {
        struct ordered_hashmap_entry *e;
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST && h->iterate_list_head == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* Fresh iterator: start at the head of the insertion list. */
                idx = h->iterate_list_head;
                e = ordered_bucket_at(h, idx);
        } else {
                idx = i->idx;
                e = ordered_bucket_at(h, idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->p.b.key != i->next_key) {
                        idx = prev_idx(HASHMAP_BASE(h), idx);
                        e = ordered_bucket_at(h, idx);
                }
                assert(e->p.b.key == i->next_key);
        }

#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember where to continue: next entry in insertion order, tracked
         * both by bucket index and by key pointer (see comment above). */
        if (e->iterate_next != IDX_NIL) {
                struct ordered_hashmap_entry *n;
                i->idx = e->iterate_next;
                n = ordered_bucket_at(h, i->idx);
                i->next_key = n->p.b.key;
        } else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
656 :
/* Advance iterator i over h in internal (bucket) order. Returns the bucket
 * index of the current entry, or IDX_NIL once the iteration is exhausted.
 * Removing the current entry while iterating is supported (see below). */
static unsigned hashmap_iterate_in_internal_order(HashmapBase *h, Iterator *i) {
        unsigned idx;

        assert(h);
        assert(i);

        if (i->idx == IDX_NIL)
                goto at_end;

        if (i->idx == IDX_FIRST) {
                /* fast forward to the first occupied bucket */
                if (h->has_indirect) {
                        i->idx = skip_free_buckets(h, h->indirect.idx_lowest_entry);
                        /* Also cache the result so future scans start there. */
                        h->indirect.idx_lowest_entry = i->idx;
                } else
                        i->idx = skip_free_buckets(h, 0);

                if (i->idx == IDX_NIL)
                        goto at_end;
        } else {
                struct hashmap_base_entry *e;

                assert(i->idx > 0);

                e = bucket_at(h, i->idx);
                /*
                 * We allow removing the current entry while iterating, but removal may cause
                 * a backward shift. The next entry may thus move one bucket to the left.
                 * To detect when it happens, we remember the key pointer of the entry we were
                 * going to iterate next. If it does not match, there was a backward shift.
                 */
                if (e->key != i->next_key)
                        e = bucket_at(h, --i->idx);

                assert(e->key == i->next_key);
        }

        idx = i->idx;
#ifdef ENABLE_DEBUG_HASHMAP
        i->prev_idx = idx;
#endif

        /* Remember the next occupied bucket and its key (see comment above). */
        i->idx = skip_free_buckets(h, i->idx + 1);
        if (i->idx != IDX_NIL)
                i->next_key = bucket_at(h, i->idx)->key;
        else
                i->idx = IDX_NIL;

        return idx;

at_end:
        i->idx = IDX_NIL;
        return IDX_NIL;
}
711 :
/* Advance iterator i over h, dispatching to insertion-order iteration for
 * ordered hashmaps and to internal (bucket) order otherwise. A NULL h is
 * treated as an empty map. Returns the current bucket index or IDX_NIL. */
static unsigned hashmap_iterate_entry(HashmapBase *h, Iterator *i) {
        if (!h) {
                i->idx = IDX_NIL;
                return IDX_NIL;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        if (i->idx == IDX_FIRST) {
                i->put_count = h->debug.put_count;
                i->rem_count = h->debug.rem_count;
        } else {
                /* While iterating, must not add any new entries */
                assert(i->put_count == h->debug.put_count);
                /* ... or remove entries other than the current one */
                assert(i->rem_count == h->debug.rem_count ||
                       (i->rem_count == h->debug.rem_count - 1 &&
                        i->prev_idx == h->debug.last_rem_idx));
                /* Reset our removals counter */
                i->rem_count = h->debug.rem_count;
        }
#endif

        return h->type == HASHMAP_TYPE_ORDERED ? hashmap_iterate_in_insertion_order((OrderedHashmap*) h, i)
                                               : hashmap_iterate_in_internal_order(h, i);
}
737 :
738 131660 : bool internal_hashmap_iterate(HashmapBase *h, Iterator *i, void **value, const void **key) {
739 : struct hashmap_base_entry *e;
740 : void *data;
741 : unsigned idx;
742 :
743 131660 : idx = hashmap_iterate_entry(h, i);
744 131660 : if (idx == IDX_NIL) {
745 64701 : if (value)
746 64701 : *value = NULL;
747 64701 : if (key)
748 450 : *key = NULL;
749 :
750 64701 : return false;
751 : }
752 :
753 66959 : e = bucket_at(h, idx);
754 66959 : data = entry_value(h, e);
755 66959 : if (value)
756 66959 : *value = data;
757 66959 : if (key)
758 985 : *key = e->key;
759 :
760 66959 : return true;
761 : }
762 :
763 60987 : bool set_iterate(Set *s, Iterator *i, void **value) {
764 60987 : return internal_hashmap_iterate(HASHMAP_BASE(s), i, value, NULL);
765 : }
766 :
/* Iterate over all occupied bucket indexes of h using iterator i. */
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx != IDX_NIL); \
             (idx) = hashmap_iterate_entry((h), &(i)))

/* Zero the direct entry area and mark all direct buckets free. */
static void reset_direct_storage(HashmapBase *h) {
        const struct hashmap_type_info *hi = &hashmap_type_info[h->type];
        void *p;

        assert(!h->has_indirect);

        /* mempset() zeroes the entries and returns a pointer just past them,
         * which is where the raw DIB bytes live. */
        p = mempset(h->direct.storage, 0, hi->entry_size * hi->n_direct_buckets);
        memset(p, DIB_RAW_INIT, sizeof(dib_raw_t) * hi->n_direct_buckets);
}
781 :
/* Allocate and initialize a new hashmap/set head of the given type, starting
 * out in direct-storage mode. Heads are taken from the type's mempool when
 * called from the main thread, from plain malloc otherwise. Returns NULL on
 * allocation failure. */
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *h;
        const struct hashmap_type_info *hi = &hashmap_type_info[type];
        bool use_pool;

        /* The mempool is only used from the main thread (no locking). */
        use_pool = is_main_thread();

        h = use_pool ? mempool_alloc0_tile(hi->mempool) : malloc0(hi->head_size);

        if (!h)
                return NULL;

        h->type = type;
        h->from_pool = use_pool;
        /* Default to identity (pointer) hashing when no ops are given. */
        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        if (type == HASHMAP_TYPE_ORDERED) {
                OrderedHashmap *lh = (OrderedHashmap*)h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }

        reset_direct_storage(h);

        /* Lazily initialize the hash key shared by all direct-storage tables. */
        if (!shared_hash_key_initialized) {
                random_bytes(shared_hash_key, sizeof(shared_hash_key));
                shared_hash_key_initialized = true;
        }

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.func = func;
        h->debug.file = file;
        h->debug.line = line;
        assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
        LIST_PREPEND(debug_list, hashmap_debug_list, &h->debug);
        assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
#endif

        return h;
}
/* Public constructors: thin typed wrappers around hashmap_base_new(). */
Hashmap *internal_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Hashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (OrderedHashmap*) hashmap_base_new(hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

Set *internal_set_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return (Set*) hashmap_base_new(hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}

/* Allocate *h if it is still NULL; idempotent helper behind the public
 * *_ensure_allocated() wrappers. Returns 0 on success, -ENOMEM on failure. */
static int hashmap_base_ensure_allocated(HashmapBase **h, const struct hash_ops *hash_ops,
                                         enum HashmapType type HASHMAP_DEBUG_PARAMS) {
        HashmapBase *q;

        assert(h);

        if (*h)
                return 0;

        q = hashmap_base_new(hash_ops, type HASHMAP_DEBUG_PASS_ARGS);
        if (!q)
                return -ENOMEM;

        *h = q;
        return 0;
}

int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}

int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}

int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
862 :
863 7325 : static void hashmap_free_no_clear(HashmapBase *h) {
864 7325 : assert(!h->has_indirect);
865 7325 : assert(!h->n_direct_entries);
866 :
867 : #ifdef ENABLE_DEBUG_HASHMAP
868 : assert_se(pthread_mutex_lock(&hashmap_debug_list_mutex) == 0);
869 : LIST_REMOVE(debug_list, hashmap_debug_list, &h->debug);
870 : assert_se(pthread_mutex_unlock(&hashmap_debug_list_mutex) == 0);
871 : #endif
872 :
873 7325 : if (h->from_pool)
874 7314 : mempool_free_tile(hashmap_type_info[h->type].mempool, h);
875 : else
876 11 : free(h);
877 7325 : }
878 :
879 26023 : HashmapBase *internal_hashmap_free(HashmapBase *h) {
880 :
881 : /* Free the hashmap, but nothing in it */
882 :
883 26023 : if (h) {
884 3601 : internal_hashmap_clear(h);
885 3601 : hashmap_free_no_clear(h);
886 : }
887 :
888 26023 : return NULL;
889 : }
890 :
891 4641 : HashmapBase *internal_hashmap_free_free(HashmapBase *h) {
892 :
893 : /* Free the hashmap and all data objects in it, but not the
894 : * keys */
895 :
896 4641 : if (h) {
897 3285 : internal_hashmap_clear_free(h);
898 3285 : hashmap_free_no_clear(h);
899 : }
900 :
901 4641 : return NULL;
902 : }
903 :
904 1348 : Hashmap *hashmap_free_free_free(Hashmap *h) {
905 :
906 : /* Free the hashmap and all data and key objects in it */
907 :
908 1348 : if (h) {
909 439 : hashmap_clear_free_free(h);
910 439 : hashmap_free_no_clear(HASHMAP_BASE(h));
911 : }
912 :
913 1348 : return NULL;
914 : }
915 :
/* Remove all entries, returning the hashmap to its initial empty state.
 * The keys/values themselves are not freed. */
void internal_hashmap_clear(HashmapBase *h) {
        if (!h)
                return;

        if (h->has_indirect) {
                /* Drop the heap-allocated bucket array; we fall back to the
                 * small embedded direct storage below. */
                free(h->indirect.storage);
                h->has_indirect = false;
        }

        h->n_direct_entries = 0;
        reset_direct_storage(h);

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Ordered hashmaps also track an insertion-order list; reset it. */
                OrderedHashmap *lh = (OrderedHashmap*) h;
                lh->iterate_list_head = lh->iterate_list_tail = IDX_NIL;
        }
}
933 :
/* free() all stored values, then remove all entries. Keys are not freed. */
void internal_hashmap_clear_free(HashmapBase *h) {
        unsigned idx;

        if (!h)
                return;

        /* Visit only the occupied buckets and release each value. */
        for (idx = skip_free_buckets(h, 0); idx != IDX_NIL;
             idx = skip_free_buckets(h, idx + 1))
                free(entry_value(h, bucket_at(h, idx)));

        internal_hashmap_clear(h);
}
946 :
/* free() all stored keys and values, then remove all entries. */
void hashmap_clear_free_free(Hashmap *h) {
        unsigned idx;

        if (!h)
                return;

        /* Visit only the occupied buckets, releasing both key and value. */
        for (idx = skip_free_buckets(HASHMAP_BASE(h), 0); idx != IDX_NIL;
             idx = skip_free_buckets(HASHMAP_BASE(h), idx + 1)) {
                struct plain_hashmap_entry *e = plain_bucket_at(h, idx);
                free((void*)e->b.key);
                free(e->value);
        }

        internal_hashmap_clear(HASHMAP_BASE(h));
}
962 :
963 : static int resize_buckets(HashmapBase *h, unsigned entries_add);
964 :
/*
 * Finds an empty bucket to put an entry into, starting the scan at 'idx'.
 * Performs Robin Hood swaps as it goes. The entry to put must be placed
 * by the caller into swap slot IDX_PUT.
 * If used for in-place resizing, may leave a displaced entry in swap slot
 * IDX_PUT. Caller must rehash it next.
 * Returns: true if it left a displaced entry to rehash next in IDX_PUT,
 * false otherwise.
 */
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
        dib_raw_t raw_dib, *dibs;
        unsigned dib, distance;

#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.put_count++;
#endif

        dibs = dib_raw_ptr(h);

        /* 'distance' is the DIB (distance from its optimal/initial bucket)
         * the entry in IDX_PUT would have if stored at the current 'idx'. */
        for (distance = 0; ; distance++) {
                raw_dib = dibs[idx];
                if (raw_dib == DIB_RAW_FREE || raw_dib == DIB_RAW_REHASH) {
                        /* The bucket is either empty, or (during a resize)
                         * holds an entry that has not been rehashed yet and
                         * may therefore be evicted. */
                        if (raw_dib == DIB_RAW_REHASH)
                                /* Stash the evicted, not-yet-rehashed entry. */
                                bucket_move_entry(h, swap, idx, IDX_TMP);

                        /* Keep the iteration start hint up to date. */
                        if (h->has_indirect && h->indirect.idx_lowest_entry > idx)
                                h->indirect.idx_lowest_entry = idx;

                        bucket_set_dib(h, idx, distance);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        if (raw_dib == DIB_RAW_REHASH) {
                                /* Hand the displaced entry back to the caller
                                 * in IDX_PUT so it can be rehashed next. */
                                bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);
                                return true;
                        }

                        return false;
                }

                dib = bucket_calculate_dib(h, idx, raw_dib);

                if (dib < distance) {
                        /* Found a wealthier entry. Go Robin Hood! */
                        bucket_set_dib(h, idx, distance);

                        /* swap the entries */
                        bucket_move_entry(h, swap, idx, IDX_TMP);
                        bucket_move_entry(h, swap, IDX_PUT, idx);
                        bucket_move_entry(h, swap, IDX_TMP, IDX_PUT);

                        /* Continue scanning for a slot for the poorer entry we
                         * just took out; it already sits 'dib' away from home. */
                        distance = dib;
                }

                idx = next_idx(h, idx);
        }
}
1021 :
/*
 * Puts an entry into a hashmap, boldly - no check whether key already exists.
 * The caller must place the entry (only its key and value, not link indexes)
 * in swap slot IDX_PUT.
 * Caller must ensure: the key does not exist yet in the hashmap.
 *                     that resize is not needed if !may_resize.
 * Returns: 1 if entry was put successfully.
 *          -ENOMEM if may_resize==true and resize failed with -ENOMEM.
 *          Cannot return -ENOMEM if !may_resize.
 */
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
        int r;

        assert(idx < n_buckets(h));

        new_entry = bucket_at_swap(swap, IDX_PUT);

        if (may_resize) {
                r = resize_buckets(h, 1);
                if (r < 0)
                        return r;
                /* A resize changes the table geometry, so recompute the
                 * optimal bucket for the entry we are about to insert. */
                if (r > 0)
                        idx = bucket_hash(h, new_entry->p.b.key);
        }
        assert(n_entries(h) < n_buckets(h));

        if (h->type == HASHMAP_TYPE_ORDERED) {
                /* Append the new entry to the insertion-order list.
                 * NOTE(review): the links are set to the swap-slot index
                 * IDX_PUT here; presumably bucket_move_entry() rewrites them
                 * to the final bucket index once the entry is placed —
                 * confirm against bucket_move_entry(). */
                OrderedHashmap *lh = (OrderedHashmap*) h;

                new_entry->iterate_next = IDX_NIL;
                new_entry->iterate_previous = lh->iterate_list_tail;

                if (lh->iterate_list_tail != IDX_NIL) {
                        struct ordered_hashmap_entry *old_tail;

                        old_tail = ordered_bucket_at(lh, lh->iterate_list_tail);
                        assert(old_tail->iterate_next == IDX_NIL);
                        old_tail->iterate_next = IDX_PUT;
                }

                lh->iterate_list_tail = IDX_PUT;
                if (lh->iterate_list_head == IDX_NIL)
                        lh->iterate_list_head = IDX_PUT;
        }

        /* Since the key is new and capacity is ensured, the Robin Hood scan
         * must terminate in a free bucket without displacing anything. */
        assert_se(hashmap_put_robin_hood(h, idx, swap) == false);

        n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
        h->debug.max_entries = MAX(h->debug.max_entries, n_entries(h));
#endif

        return 1;
}
#define hashmap_put_boldly(h, idx, swap, may_resize) \
        hashmap_base_put_boldly(HASHMAP_BASE(h), idx, swap, may_resize)
1080 :
/*
 * Resizes the hashmap so it can hold at least entries_add more entries,
 * rehashing all entries in place.
 * Returns: 0 if resize is not needed.
 *          1 if successfully resized.
 *          -ENOMEM on allocation failure.
 */
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
        char *new_storage;
        dib_raw_t *old_dibs, *new_dibs;
        const struct hashmap_type_info *hi;
        unsigned idx, optimal_idx;
        unsigned old_n_buckets, new_n_buckets, n_rehashed, new_n_entries;
        uint8_t new_shift;
        bool rehash_next;

        assert(h);

        hi = &hashmap_type_info[h->type];
        new_n_entries = n_entries(h) + entries_add;

        /* overflow? */
        if (_unlikely_(new_n_entries < entries_add))
                return -ENOMEM;

        /* For direct storage we allow 100% load, because it's tiny. */
        if (!h->has_indirect && new_n_entries <= hi->n_direct_buckets)
                return 0;

        /*
         * Load factor = n/m = 1 - (1/INV_KEEP_FREE).
         * From it follows: m = n + n/(INV_KEEP_FREE - 1)
         */
        new_n_buckets = new_n_entries + new_n_entries / (INV_KEEP_FREE - 1);
        /* overflow? */
        if (_unlikely_(new_n_buckets < new_n_entries))
                return -ENOMEM;

        /* Would the byte size of the combined bucket + DIB arrays overflow? */
        if (_unlikely_(new_n_buckets > UINT_MAX / (hi->entry_size + sizeof(dib_raw_t))))
                return -ENOMEM;

        old_n_buckets = n_buckets(h);

        if (_likely_(new_n_buckets <= old_n_buckets))
                return 0;

        /* Round the allocation up to a power of two, never below twice the
         * direct-storage size. */
        new_shift = log2u_round_up(MAX(
                        new_n_buckets * (hi->entry_size + sizeof(dib_raw_t)),
                        2 * sizeof(struct direct_storage)));

        /* Realloc storage (buckets and DIB array). */
        new_storage = realloc(h->has_indirect ? h->indirect.storage : NULL,
                              1U << new_shift);
        if (!new_storage)
                return -ENOMEM;

        /* Must upgrade direct to indirect storage. */
        if (!h->has_indirect) {
                memcpy(new_storage, h->direct.storage,
                       old_n_buckets * (hi->entry_size + sizeof(dib_raw_t)));
                h->indirect.n_entries = h->n_direct_entries;
                h->indirect.idx_lowest_entry = 0;
                h->n_direct_entries = 0;
        }

        /* Get a new hash key. If we've just upgraded to indirect storage,
         * allow reusing a previously generated key. It's still a different key
         * from the shared one that we used for direct storage. */
        get_hash_key(h->indirect.hash_key, !h->has_indirect);

        h->has_indirect = true;
        h->indirect.storage = new_storage;
        h->indirect.n_buckets = (1U << new_shift) /
                                (hi->entry_size + sizeof(dib_raw_t));

        /* The old DIB array still sits right after the old bucket array. */
        old_dibs = (dib_raw_t*)(new_storage + hi->entry_size * old_n_buckets);
        new_dibs = dib_raw_ptr(h);

        /*
         * Move the DIB array to the new place, replacing valid DIB values with
         * DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
         * Note: Overlap is not possible, because we have at least doubled the
         * number of buckets and dib_raw_t is smaller than any entry type.
         */
        for (idx = 0; idx < old_n_buckets; idx++) {
                assert(old_dibs[idx] != DIB_RAW_REHASH);
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;
        }

        /* Zero the area of newly added entries (including the old DIB area) */
        memzero(bucket_at(h, old_n_buckets),
               (n_buckets(h) - old_n_buckets) * hi->entry_size);

        /* The upper half of the new DIB array needs initialization */
        memset(&new_dibs[old_n_buckets], DIB_RAW_INIT,
               (n_buckets(h) - old_n_buckets) * sizeof(dib_raw_t));

        /* Rehash entries that need it */
        n_rehashed = 0;
        for (idx = 0; idx < old_n_buckets; idx++) {
                if (new_dibs[idx] != DIB_RAW_REHASH)
                        continue;

                optimal_idx = bucket_hash(h, bucket_at(h, idx)->key);

                /*
                 * Not much to do if by luck the entry hashes to its current
                 * location. Just set its DIB.
                 */
                if (optimal_idx == idx) {
                        new_dibs[idx] = 0;
                        n_rehashed++;
                        continue;
                }

                new_dibs[idx] = DIB_RAW_FREE;
                bucket_move_entry(h, &swap, idx, IDX_PUT);
                /* bucket_move_entry does not clear the source */
                memzero(bucket_at(h, idx), hi->entry_size);

                do {
                        /*
                         * Find the new bucket for the current entry. This may make
                         * another entry homeless and load it into IDX_PUT.
                         */
                        rehash_next = hashmap_put_robin_hood(h, optimal_idx, &swap);
                        n_rehashed++;

                        /* Did the current entry displace another one? */
                        if (rehash_next)
                                optimal_idx = bucket_hash(h, bucket_at_swap(&swap, IDX_PUT)->p.b.key);
                } while (rehash_next);
        }

        /* Every pre-existing entry must have been rehashed exactly once. */
        assert(n_rehashed == n_entries(h));

        return 1;
}
1219 :
/*
 * Finds an entry with a matching key
 * Returns: index of the found entry, or IDX_NIL if not found.
 */
static unsigned base_bucket_scan(HashmapBase *h, unsigned idx, const void *key) {
        struct hashmap_base_entry *e;
        unsigned dib, distance;
        dib_raw_t *dibs = dib_raw_ptr(h);

        assert(idx < n_buckets(h));

        for (distance = 0; ; distance++) {
                /* A free bucket ends the probe sequence: the key cannot be
                 * stored any further away from its optimal bucket. */
                if (dibs[idx] == DIB_RAW_FREE)
                        return IDX_NIL;

                dib = bucket_calculate_dib(h, idx, dibs[idx]);

                /* Robin Hood invariant: if this resident entry is closer to
                 * its home than we are to ours, our key would have displaced
                 * it on insertion — so the key is not in the table. */
                if (dib < distance)
                        return IDX_NIL;
                if (dib == distance) {
                        /* Same displacement: a candidate — compare keys. */
                        e = bucket_at(h, idx);
                        if (h->hash_ops->compare(e->key, key) == 0)
                                return idx;
                }

                idx = next_idx(h, idx);
        }
}
#define bucket_scan(h, idx, key) base_bucket_scan(HASHMAP_BASE(h), idx, key)
1249 :
1250 2106857 : int hashmap_put(Hashmap *h, const void *key, void *value) {
1251 : struct swap_entries swap;
1252 : struct plain_hashmap_entry *e;
1253 : unsigned hash, idx;
1254 :
1255 2106857 : assert(h);
1256 :
1257 2106857 : hash = bucket_hash(h, key);
1258 2106857 : idx = bucket_scan(h, hash, key);
1259 2106857 : if (idx != IDX_NIL) {
1260 10 : e = plain_bucket_at(h, idx);
1261 10 : if (e->value == value)
1262 4 : return 0;
1263 6 : return -EEXIST;
1264 : }
1265 :
1266 2106847 : e = &bucket_at_swap(&swap, IDX_PUT)->p;
1267 2106847 : e->b.key = key;
1268 2106847 : e->value = value;
1269 2106847 : return hashmap_put_boldly(h, hash, &swap, true);
1270 : }
1271 :
1272 8579 : int set_put(Set *s, const void *key) {
1273 : struct swap_entries swap;
1274 : struct hashmap_base_entry *e;
1275 : unsigned hash, idx;
1276 :
1277 8579 : assert(s);
1278 :
1279 8579 : hash = bucket_hash(s, key);
1280 8579 : idx = bucket_scan(s, hash, key);
1281 8579 : if (idx != IDX_NIL)
1282 1555 : return 0;
1283 :
1284 7024 : e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
1285 7024 : e->key = key;
1286 7024 : return hashmap_put_boldly(s, hash, &swap, true);
1287 : }
1288 :
/* Insert value under key, replacing both the stored key pointer and the value
 * if the key already exists. Returns 0 when an existing entry was updated,
 * 1 when a new entry was inserted, -ENOMEM on allocation failure. */
int hashmap_replace(Hashmap *h, const void *key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned hash, idx;

        assert(h);

        hash = bucket_hash(h, key);
        idx = bucket_scan(h, hash, key);
        if (idx != IDX_NIL) {
                e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
                /* Although the key is equal, the key pointer may have changed,
                 * and this would break our assumption for iterating. So count
                 * this operation as incompatible with iteration. */
                if (e->b.key != key) {
                        h->b.debug.put_count++;
                        h->b.debug.rem_count++;
                        h->b.debug.last_rem_idx = idx;
                }
#endif
                /* Overwrite in place; no rehashing needed since equal keys
                 * hash identically. */
                e->b.key = key;
                e->value = value;
                return 0;
        }

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = key;
        e->value = value;
        return hashmap_put_boldly(h, hash, &swap, true);
}
1320 :
1321 4 : int hashmap_update(Hashmap *h, const void *key, void *value) {
1322 : struct plain_hashmap_entry *e;
1323 : unsigned hash, idx;
1324 :
1325 4 : assert(h);
1326 :
1327 4 : hash = bucket_hash(h, key);
1328 4 : idx = bucket_scan(h, hash, key);
1329 4 : if (idx == IDX_NIL)
1330 2 : return -ENOENT;
1331 :
1332 2 : e = plain_bucket_at(h, idx);
1333 2 : e->value = value;
1334 2 : return 0;
1335 : }
1336 :
1337 2265725 : void *internal_hashmap_get(HashmapBase *h, const void *key) {
1338 : struct hashmap_base_entry *e;
1339 : unsigned hash, idx;
1340 :
1341 2265725 : if (!h)
1342 908 : return NULL;
1343 :
1344 2264817 : hash = bucket_hash(h, key);
1345 2264817 : idx = bucket_scan(h, hash, key);
1346 2264817 : if (idx == IDX_NIL)
1347 12931 : return NULL;
1348 :
1349 2251886 : e = bucket_at(h, idx);
1350 2251886 : return entry_value(h, e);
1351 : }
1352 :
1353 5962 : void *hashmap_get2(Hashmap *h, const void *key, void **key2) {
1354 : struct plain_hashmap_entry *e;
1355 : unsigned hash, idx;
1356 :
1357 5962 : if (!h)
1358 2 : return NULL;
1359 :
1360 5960 : hash = bucket_hash(h, key);
1361 5960 : idx = bucket_scan(h, hash, key);
1362 5960 : if (idx == IDX_NIL)
1363 5254 : return NULL;
1364 :
1365 706 : e = plain_bucket_at(h, idx);
1366 706 : if (key2)
1367 706 : *key2 = (void*) e->b.key;
1368 :
1369 706 : return e->value;
1370 : }
1371 :
1372 6306364 : bool internal_hashmap_contains(HashmapBase *h, const void *key) {
1373 : unsigned hash;
1374 :
1375 6306364 : if (!h)
1376 2 : return false;
1377 :
1378 6306362 : hash = bucket_hash(h, key);
1379 6306362 : return bucket_scan(h, hash, key) != IDX_NIL;
1380 : }
1381 :
1382 2138506 : void *internal_hashmap_remove(HashmapBase *h, const void *key) {
1383 : struct hashmap_base_entry *e;
1384 : unsigned hash, idx;
1385 : void *data;
1386 :
1387 2138506 : if (!h)
1388 24886 : return NULL;
1389 :
1390 2113620 : hash = bucket_hash(h, key);
1391 2113620 : idx = bucket_scan(h, hash, key);
1392 2113620 : if (idx == IDX_NIL)
1393 7462 : return NULL;
1394 :
1395 2106158 : e = bucket_at(h, idx);
1396 2106158 : data = entry_value(h, e);
1397 2106158 : remove_entry(h, idx);
1398 :
1399 2106158 : return data;
1400 : }
1401 :
1402 26 : void *hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
1403 : struct plain_hashmap_entry *e;
1404 : unsigned hash, idx;
1405 : void *data;
1406 :
1407 26 : if (!h) {
1408 2 : if (rkey)
1409 2 : *rkey = NULL;
1410 2 : return NULL;
1411 : }
1412 :
1413 24 : hash = bucket_hash(h, key);
1414 24 : idx = bucket_scan(h, hash, key);
1415 24 : if (idx == IDX_NIL) {
1416 22 : if (rkey)
1417 22 : *rkey = NULL;
1418 22 : return NULL;
1419 : }
1420 :
1421 2 : e = plain_bucket_at(h, idx);
1422 2 : data = e->value;
1423 2 : if (rkey)
1424 2 : *rkey = (void*) e->b.key;
1425 :
1426 2 : remove_entry(h, idx);
1427 :
1428 2 : return data;
1429 : }
1430 :
/* Atomically remove the entry stored under old_key and insert value under
 * new_key. Returns -ENOENT if old_key is absent, -EEXIST if new_key is
 * already mapped. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx = bucket_scan(h, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (bucket_scan(h, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(h, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* The removal above freed one slot, so this put can never require a
         * resize; may_resize=false makes it infallible. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1457 :
/* Atomically remove old_key from the set and add new_key. Returns -ENOENT if
 * old_key is absent, -EEXIST if new_key is already present. */
int set_remove_and_put(Set *s, const void *old_key, const void *new_key) {
        struct swap_entries swap;
        struct hashmap_base_entry *e;
        unsigned old_hash, new_hash, idx;

        if (!s)
                return -ENOENT;

        old_hash = bucket_hash(s, old_key);
        idx = bucket_scan(s, old_hash, old_key);
        if (idx == IDX_NIL)
                return -ENOENT;

        new_hash = bucket_hash(s, new_key);
        if (bucket_scan(s, new_hash, new_key) != IDX_NIL)
                return -EEXIST;

        remove_entry(s, idx);

        e = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        e->key = new_key;
        /* The removal above freed one slot, so this put can never require a
         * resize; may_resize=false makes it infallible. */
        assert_se(hashmap_put_boldly(s, new_hash, &swap, false) == 1);

        return 0;
}
1483 :
/* Remove the entry stored under old_key and insert value under new_key,
 * overwriting any pre-existing entry for new_key.
 * Returns -ENOENT if old_key is absent, 0 on success. */
int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct swap_entries swap;
        struct plain_hashmap_entry *e;
        unsigned old_hash, new_hash, idx_old, idx_new;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        idx_old = bucket_scan(h, old_hash, old_key);
        if (idx_old == IDX_NIL)
                return -ENOENT;

        /* Switch to the stored key pointer so we can re-identify the old
         * entry below even after buckets shift around. */
        old_key = bucket_at(HASHMAP_BASE(h), idx_old)->key;

        new_hash = bucket_hash(h, new_key);
        idx_new = bucket_scan(h, new_hash, new_key);
        if (idx_new != IDX_NIL)
                if (idx_old != idx_new) {
                        /* Removing new_key's entry may backward-shift the old
                         * entry by one bucket; detect and track that. */
                        remove_entry(h, idx_new);
                        /* Compensate for a possible backward shift. */
                        if (old_key != bucket_at(HASHMAP_BASE(h), idx_old)->key)
                                idx_old = prev_idx(HASHMAP_BASE(h), idx_old);
                        assert(old_key == bucket_at(HASHMAP_BASE(h), idx_old)->key);
                }

        remove_entry(h, idx_old);

        e = &bucket_at_swap(&swap, IDX_PUT)->p;
        e->b.key = new_key;
        e->value = value;
        /* At least one slot was freed above, so the put cannot need a resize. */
        assert_se(hashmap_put_boldly(h, new_hash, &swap, false) == 1);

        return 0;
}
1519 :
1520 969 : void *hashmap_remove_value(Hashmap *h, const void *key, void *value) {
1521 : struct plain_hashmap_entry *e;
1522 : unsigned hash, idx;
1523 :
1524 969 : if (!h)
1525 8 : return NULL;
1526 :
1527 961 : hash = bucket_hash(h, key);
1528 961 : idx = bucket_scan(h, hash, key);
1529 961 : if (idx == IDX_NIL)
1530 91 : return NULL;
1531 :
1532 870 : e = plain_bucket_at(h, idx);
1533 870 : if (e->value != value)
1534 2 : return NULL;
1535 :
1536 868 : remove_entry(h, idx);
1537 :
1538 868 : return value;
1539 : }
1540 :
1541 2106415 : static unsigned find_first_entry(HashmapBase *h) {
1542 2106415 : Iterator i = ITERATOR_FIRST;
1543 :
1544 2106415 : if (!h || !n_entries(h))
1545 1348 : return IDX_NIL;
1546 :
1547 2105067 : return hashmap_iterate_entry(h, &i);
1548 : }
1549 :
1550 2101 : void *internal_hashmap_first(HashmapBase *h) {
1551 : unsigned idx;
1552 :
1553 2101 : idx = find_first_entry(h);
1554 2101 : if (idx == IDX_NIL)
1555 673 : return NULL;
1556 :
1557 1428 : return entry_value(h, bucket_at(h, idx));
1558 : }
1559 :
1560 2101254 : void *internal_hashmap_first_key(HashmapBase *h) {
1561 : struct hashmap_base_entry *e;
1562 : unsigned idx;
1563 :
1564 2101254 : idx = find_first_entry(h);
1565 2101254 : if (idx == IDX_NIL)
1566 2 : return NULL;
1567 :
1568 2101252 : e = bucket_at(h, idx);
1569 2101252 : return (void*) e->key;
1570 : }
1571 :
1572 3056 : void *internal_hashmap_steal_first(HashmapBase *h) {
1573 : struct hashmap_base_entry *e;
1574 : void *data;
1575 : unsigned idx;
1576 :
1577 3056 : idx = find_first_entry(h);
1578 3056 : if (idx == IDX_NIL)
1579 671 : return NULL;
1580 :
1581 2385 : e = bucket_at(h, idx);
1582 2385 : data = entry_value(h, e);
1583 2385 : remove_entry(h, idx);
1584 :
1585 2385 : return data;
1586 : }
1587 :
1588 4 : void *internal_hashmap_steal_first_key(HashmapBase *h) {
1589 : struct hashmap_base_entry *e;
1590 : void *key;
1591 : unsigned idx;
1592 :
1593 4 : idx = find_first_entry(h);
1594 4 : if (idx == IDX_NIL)
1595 2 : return NULL;
1596 :
1597 2 : e = bucket_at(h, idx);
1598 2 : key = (void*) e->key;
1599 2 : remove_entry(h, idx);
1600 :
1601 2 : return key;
1602 : }
1603 :
1604 2127021 : unsigned internal_hashmap_size(HashmapBase *h) {
1605 :
1606 2127021 : if (!h)
1607 19745 : return 0;
1608 :
1609 2107276 : return n_entries(h);
1610 : }
1611 :
1612 20 : unsigned internal_hashmap_buckets(HashmapBase *h) {
1613 :
1614 20 : if (!h)
1615 2 : return 0;
1616 :
1617 18 : return n_buckets(h);
1618 : }
1619 :
1620 4 : int internal_hashmap_merge(Hashmap *h, Hashmap *other) {
1621 : Iterator i;
1622 : unsigned idx;
1623 :
1624 4 : assert(h);
1625 :
1626 16 : HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1627 12 : struct plain_hashmap_entry *pe = plain_bucket_at(other, idx);
1628 : int r;
1629 :
1630 12 : r = hashmap_put(h, pe->b.key, pe->value);
1631 12 : if (r < 0 && r != -EEXIST)
1632 0 : return r;
1633 : }
1634 :
1635 4 : return 0;
1636 : }
1637 :
1638 0 : int set_merge(Set *s, Set *other) {
1639 : Iterator i;
1640 : unsigned idx;
1641 :
1642 0 : assert(s);
1643 :
1644 0 : HASHMAP_FOREACH_IDX(idx, HASHMAP_BASE(other), i) {
1645 0 : struct set_entry *se = set_bucket_at(other, idx);
1646 : int r;
1647 :
1648 0 : r = set_put(s, se->b.key);
1649 0 : if (r < 0)
1650 0 : return r;
1651 : }
1652 :
1653 0 : return 0;
1654 : }
1655 :
1656 8 : int internal_hashmap_reserve(HashmapBase *h, unsigned entries_add) {
1657 : int r;
1658 :
1659 8 : assert(h);
1660 :
1661 8 : r = resize_buckets(h, entries_add);
1662 8 : if (r < 0)
1663 4 : return r;
1664 :
1665 4 : return 0;
1666 : }
1667 :
/*
 * The same as hashmap_merge(), but every new item from other is moved to h.
 * Keys already in h are skipped and stay in other.
 * Returns: 0 on success.
 *          -ENOMEM on alloc failure, in which case no move has been done.
 */
int internal_hashmap_move(HashmapBase *h, HashmapBase *other) {
        struct swap_entries swap;
        struct hashmap_base_entry *e, *n;
        Iterator i;
        unsigned idx;
        int r;

        assert(h);

        if (!other)
                return 0;

        assert(other->type == h->type);

        /*
         * This reserves buckets for the worst case, where none of other's
         * entries are yet present in h. This is preferable to risking
         * an allocation failure in the middle of the moving and having to
         * rollback or return a partial result.
         */
        r = resize_buckets(h, n_entries(other));
        if (r < 0)
                return r;

        HASHMAP_FOREACH_IDX(idx, other, i) {
                unsigned h_hash;

                e = bucket_at(other, idx);
                h_hash = bucket_hash(h, e->key);
                /* Keys already present in h are skipped and stay in other. */
                if (bucket_scan(h, h_hash, e->key) != IDX_NIL)
                        continue;

                n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
                n->key = e->key;
                /* Sets store only keys; all other types carry a value too. */
                if (h->type != HASHMAP_TYPE_SET)
                        ((struct plain_hashmap_entry*) n)->value =
                                ((struct plain_hashmap_entry*) e)->value;
                /* Capacity was reserved above, so may_resize=false cannot fail. */
                assert_se(hashmap_put_boldly(h, h_hash, &swap, false) == 1);

                remove_entry(other, idx);
        }

        return 0;
}
1718 :
/* Move the single entry stored under key from 'other' into h.
 * Returns 0 on success, -EEXIST if h already contains the key, -ENOENT if
 * other is NULL or does not contain it, -ENOMEM on allocation failure. */
int internal_hashmap_move_one(HashmapBase *h, HashmapBase *other, const void *key) {
        struct swap_entries swap;
        unsigned h_hash, other_hash, idx;
        struct hashmap_base_entry *e, *n;
        int r;

        assert(h);

        h_hash = bucket_hash(h, key);
        if (bucket_scan(h, h_hash, key) != IDX_NIL)
                return -EEXIST;

        if (!other)
                return -ENOENT;

        assert(other->type == h->type);

        other_hash = bucket_hash(other, key);
        idx = bucket_scan(other, other_hash, key);
        if (idx == IDX_NIL)
                return -ENOENT;

        e = bucket_at(other, idx);

        n = &bucket_at_swap(&swap, IDX_PUT)->p.b;
        n->key = e->key;
        /* Sets store only keys; all other types carry a value too. */
        if (h->type != HASHMAP_TYPE_SET)
                ((struct plain_hashmap_entry*) n)->value =
                        ((struct plain_hashmap_entry*) e)->value;
        r = hashmap_put_boldly(h, h_hash, &swap, true);
        if (r < 0)
                return r;

        /* Only drop the source entry once the insertion has succeeded. */
        remove_entry(other, idx);
        return 0;
}
1755 :
1756 2 : HashmapBase *internal_hashmap_copy(HashmapBase *h) {
1757 : HashmapBase *copy;
1758 : int r;
1759 :
1760 2 : assert(h);
1761 :
1762 2 : copy = hashmap_base_new(h->hash_ops, h->type HASHMAP_DEBUG_SRC_ARGS);
1763 2 : if (!copy)
1764 0 : return NULL;
1765 :
1766 2 : switch (h->type) {
1767 : case HASHMAP_TYPE_PLAIN:
1768 : case HASHMAP_TYPE_ORDERED:
1769 2 : r = hashmap_merge((Hashmap*)copy, (Hashmap*)h);
1770 2 : break;
1771 : case HASHMAP_TYPE_SET:
1772 0 : r = set_merge((Set*)copy, (Set*)h);
1773 0 : break;
1774 : default:
1775 0 : assert_not_reached("Unknown hashmap type");
1776 : }
1777 :
1778 2 : if (r < 0) {
1779 0 : internal_hashmap_free(copy);
1780 0 : return NULL;
1781 : }
1782 :
1783 2 : return copy;
1784 : }
1785 :
1786 13 : char **internal_hashmap_get_strv(HashmapBase *h) {
1787 : char **sv;
1788 : Iterator i;
1789 : unsigned idx, n;
1790 :
1791 13 : sv = new(char*, n_entries(h)+1);
1792 13 : if (!sv)
1793 0 : return NULL;
1794 :
1795 13 : n = 0;
1796 39 : HASHMAP_FOREACH_IDX(idx, h, i)
1797 26 : sv[n++] = entry_value(h, bucket_at(h, idx));
1798 13 : sv[n] = NULL;
1799 :
1800 13 : return sv;
1801 : }
1802 :
1803 10 : void *ordered_hashmap_next(OrderedHashmap *h, const void *key) {
1804 : struct ordered_hashmap_entry *e;
1805 : unsigned hash, idx;
1806 :
1807 10 : if (!h)
1808 1 : return NULL;
1809 :
1810 9 : hash = bucket_hash(h, key);
1811 9 : idx = bucket_scan(h, hash, key);
1812 9 : if (idx == IDX_NIL)
1813 1 : return NULL;
1814 :
1815 8 : e = ordered_bucket_at(h, idx);
1816 8 : if (e->iterate_next == IDX_NIL)
1817 2 : return NULL;
1818 6 : return ordered_bucket_at(h, e->iterate_next)->p.value;
1819 : }
1820 :
1821 2506 : int set_consume(Set *s, void *value) {
1822 : int r;
1823 :
1824 2506 : r = set_put(s, value);
1825 2506 : if (r <= 0)
1826 3 : free(value);
1827 :
1828 2506 : return r;
1829 : }
1830 :
1831 499 : int set_put_strdup(Set *s, const char *p) {
1832 : char *c;
1833 : int r;
1834 :
1835 499 : assert(s);
1836 499 : assert(p);
1837 :
1838 499 : c = strdup(p);
1839 499 : if (!c)
1840 0 : return -ENOMEM;
1841 :
1842 499 : r = set_consume(s, c);
1843 499 : if (r == -EEXIST)
1844 0 : return 0;
1845 :
1846 499 : return r;
1847 : }
1848 :
1849 0 : int set_put_strdupv(Set *s, char **l) {
1850 0 : int n = 0, r;
1851 : char **i;
1852 :
1853 0 : STRV_FOREACH(i, l) {
1854 0 : r = set_put_strdup(s, *i);
1855 0 : if (r < 0)
1856 0 : return r;
1857 :
1858 0 : n += r;
1859 : }
1860 :
1861 0 : return n;
1862 : }
|