Line data Source code
1 : /**
2 : * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 : * SPDX-License-Identifier: Apache-2.0.
4 : */
5 :
6 : /* For more information on how the RH hash works and in particular how we do
7 : * deletions, see:
8 : * http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/
9 : */
10 :
11 : #include <aws/common/hash_table.h>
12 : #include <aws/common/math.h>
13 : #include <aws/common/private/hash_table_impl.h>
14 : #include <aws/common/string.h>
15 :
16 : #include <limits.h>
17 : #include <stdio.h>
18 : #include <stdlib.h>
19 :
20 : /* Include lookup3.c so we can (potentially) inline it and make use of the mix()
21 : * macro. */
22 : #include <aws/common/private/lookup3.inl>
23 :
24 0 : static void s_suppress_unused_lookup3_func_warnings(void) {
25 0 : /* We avoid making changes to lookup3 if we can avoid it, but since it has functions
26 0 : * we're not using, reference them somewhere to suppress the unused function warning.
27 0 : */
28 0 : (void)hashword;
29 0 : (void)hashword2;
30 0 : (void)hashlittle;
31 0 : (void)hashbig;
32 0 : }
33 :
34 : /**
35 : * Calculate the hash for the given key.
36 : * Ensures a reasonable semantics for null keys.
37 : * Ensures that no object ever hashes to 0, which is the sentinal value for an empty hash element.
38 : */
39 0 : static uint64_t s_hash_for(struct hash_table_state *state, const void *key) {
40 0 : AWS_PRECONDITION(hash_table_state_is_valid(state));
41 0 : s_suppress_unused_lookup3_func_warnings();
42 0 :
43 0 : if (key == NULL) {
44 0 : /* The best answer */
45 0 : return 42;
46 0 : }
47 0 :
48 0 : uint64_t hash_code = state->hash_fn(key);
49 0 : if (!hash_code) {
50 0 : hash_code = 1;
51 0 : }
52 0 : AWS_RETURN_WITH_POSTCONDITION(hash_code, hash_code != 0);
53 0 : }
54 :
55 : /**
56 : * Check equality of two objects, with a reasonable semantics for null.
57 : */
58 0 : static bool s_safe_eq_check(aws_hash_callback_eq_fn *equals_fn, const void *a, const void *b) {
59 0 : /* Short circuit if the pointers are the same */
60 0 : if (a == b) {
61 0 : return true;
62 0 : }
63 0 : /* If one but not both are null, the objects are not equal */
64 0 : if (a == NULL || b == NULL) {
65 0 : return false;
66 0 : }
67 0 : /* If both are non-null, call the underlying equals fn */
68 0 : return equals_fn(a, b);
69 0 : }
70 :
71 : /**
72 : * Check equality of two hash keys, with a reasonable semantics for null keys.
73 : */
74 0 : static bool s_hash_keys_eq(struct hash_table_state *state, const void *a, const void *b) {
75 0 : AWS_PRECONDITION(hash_table_state_is_valid(state));
76 0 : bool rval = s_safe_eq_check(state->equals_fn, a, b);
77 0 : AWS_RETURN_WITH_POSTCONDITION(rval, hash_table_state_is_valid(state));
78 0 : }
79 :
80 0 : static size_t s_index_for(struct hash_table_state *map, struct hash_table_entry *entry) {
81 0 : AWS_PRECONDITION(hash_table_state_is_valid(map));
82 0 : size_t index = entry - map->slots;
83 0 : AWS_RETURN_WITH_POSTCONDITION(index, index < map->size && hash_table_state_is_valid(map));
84 0 : }
85 :
#if 0
/* Useful debugging code for anyone working on this in the future */
/* NOTE(review): this region is compiled out (#if 0); it is kept only as a
 * starting point for ad-hoc debugging and is not part of the build. */
static uint64_t s_distance(struct hash_table_state *state, int index) {
    /* Probe distance: how far slot [index]'s occupant sits from its home slot. */
    return (index - state->slots[index].hash_code) & state->mask;
}

void hash_dump(struct aws_hash_table *tbl) {
    struct hash_table_state *state = tbl->p_impl;

    printf("Dumping hash table contents:\n");

    for (int i = 0; i < state->size; i++) {
        printf("%7d: ", i);
        struct hash_table_entry *e = &state->slots[i];
        /* hash_code == 0 is the empty-slot sentinel used throughout this file. */
        if (!e->hash_code) {
            printf("EMPTY\n");
        } else {
            /* NOTE(review): %lld with a uint64_t is not portable; PRIu64 from
             * <inttypes.h> would be correct if this is ever re-enabled. */
            printf("k: %p v: %p hash_code: %lld displacement: %lld\n",
                e->element.key, e->element.value, e->hash_code,
                (i - e->hash_code) & state->mask);
        }
    }
}
#endif
110 :
#if 0
/* Not currently exposed as an API. Should we have something like this? Useful for benchmarks */
/* NOTE(review): this region is compiled out (#if 0). If it is ever enabled:
 *  - it calls `distance(...)`, but the helper in the other disabled region is
 *    named `s_distance` — this would not compile as written; confirm intent.
 *  - the calloc result is not checked before use.
 *  - %llu / %lld with uint64_t are not portable; PRIu64 would be correct. */
AWS_COMMON_API
void aws_hash_table_print_stats(struct aws_hash_table *table) {
    struct hash_table_state *state = table->p_impl;
    uint64_t total_disp = 0;
    uint64_t max_disp = 0;

    printf("\n=== Hash table statistics ===\n");
    printf("Table size: %zu/%zu (max load %zu, remaining %zu)\n", state->entry_count, state->size, state->max_load, state->max_load - state->entry_count);
    printf("Load factor: %02.2lf%% (max %02.2lf%%)\n",
        100.0 * ((double)state->entry_count / (double)state->size),
        state->max_load_factor);

    /* First pass: total and maximum probe displacement over occupied slots. */
    for (size_t i = 0; i < state->size; i++) {
        if (state->slots[i].hash_code) {
            int displacement = distance(state, i);
            total_disp += displacement;
            if (displacement > max_disp) {
                max_disp = displacement;
            }
        }
    }

    size_t *disp_counts = calloc(sizeof(*disp_counts), max_disp + 1);

    /* Second pass: histogram of displacements. */
    for (size_t i = 0; i < state->size; i++) {
        if (state->slots[i].hash_code) {
            disp_counts[distance(state, i)]++;
        }
    }

    /* Walk the histogram until half the total displacement mass is passed. */
    uint64_t median = 0;
    uint64_t passed = 0;
    for (uint64_t i = 0; i <= max_disp && passed < total_disp / 2; i++) {
        median = i;
        passed += disp_counts[i];
    }

    printf("Displacement statistics: Avg %02.2lf max %llu median %llu\n", (double)total_disp / (double)state->entry_count, max_disp, median);
    for (uint64_t i = 0; i <= max_disp; i++) {
        printf("Displacement %2lld: %zu entries\n", i, disp_counts[i]);
    }
    free(disp_counts);
    printf("\n");
}
#endif
158 :
159 0 : size_t aws_hash_table_get_entry_count(const struct aws_hash_table *map) {
160 0 : struct hash_table_state *state = map->p_impl;
161 0 : return state->entry_count;
162 0 : }
163 :
164 : /* Given a header template, allocates space for a hash table of the appropriate
165 : * size, and copies the state header into this allocated memory, which is
166 : * returned.
167 : */
168 148439 : static struct hash_table_state *s_alloc_state(const struct hash_table_state *template) {
169 148439 : size_t required_bytes;
170 148439 : if (hash_table_state_required_bytes(template->size, &required_bytes)) {
171 0 : return NULL;
172 0 : }
173 148439 :
174 148439 : /* An empty slot has hashcode 0. So this marks all slots as empty */
175 148439 : struct hash_table_state *state = aws_mem_calloc(template->alloc, 1, required_bytes);
176 148439 :
177 148439 : if (state == NULL) {
178 0 : return state;
179 0 : }
180 148439 :
181 148439 : *state = *template;
182 148439 : return state;
183 148439 : }
184 :
185 : /* Computes the correct size and max_load based on a requested size. */
186 148439 : static int s_update_template_size(struct hash_table_state *template, size_t expected_elements) {
187 148439 : size_t min_size = expected_elements;
188 148439 :
189 148439 : if (min_size < 2) {
190 68126 : min_size = 2;
191 68126 : }
192 148439 :
193 148439 : /* size is always a power of 2 */
194 148439 : size_t size;
195 148439 : if (aws_round_up_to_power_of_two(min_size, &size)) {
196 0 : return AWS_OP_ERR;
197 0 : }
198 148439 :
199 148439 : /* Update the template once we've calculated everything successfully */
200 148439 : template->size = size;
201 148439 : template->max_load = (size_t)(template->max_load_factor * (double)template->size);
202 148439 : /* Ensure that there is always at least one empty slot in the hash table */
203 148439 : if (template->max_load >= size) {
204 0 : template->max_load = size - 1;
205 0 : }
206 148439 :
207 148439 : /* Since size is a power of 2: (index & (size - 1)) == (index % size) */
208 148439 : template->mask = size - 1;
209 148439 :
210 148439 : return AWS_OP_SUCCESS;
211 148439 : }
212 :
213 : int aws_hash_table_init(
214 : struct aws_hash_table *map,
215 : struct aws_allocator *alloc,
216 : size_t size,
217 : aws_hash_fn *hash_fn,
218 : aws_hash_callback_eq_fn *equals_fn,
219 : aws_hash_callback_destroy_fn *destroy_key_fn,
220 148439 : aws_hash_callback_destroy_fn *destroy_value_fn) {
221 148439 : AWS_PRECONDITION(map != NULL);
222 148439 : AWS_PRECONDITION(alloc != NULL);
223 148439 : AWS_PRECONDITION(hash_fn != NULL);
224 148439 : AWS_PRECONDITION(equals_fn != NULL);
225 148439 :
226 148439 : struct hash_table_state template;
227 148439 : template.hash_fn = hash_fn;
228 148439 : template.equals_fn = equals_fn;
229 148439 : template.destroy_key_fn = destroy_key_fn;
230 148439 : template.destroy_value_fn = destroy_value_fn;
231 148439 : template.alloc = alloc;
232 148439 :
233 148439 : template.entry_count = 0;
234 148439 : template.max_load_factor = 0.95; /* TODO - make configurable? */
235 148439 :
236 148439 : if (s_update_template_size(&template, size)) {
237 0 : return AWS_OP_ERR;
238 0 : }
239 148439 : map->p_impl = s_alloc_state(&template);
240 148439 :
241 148439 : if (!map->p_impl) {
242 0 : return AWS_OP_ERR;
243 0 : }
244 148439 :
245 148439 : AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
246 148439 : }
247 :
248 0 : void aws_hash_table_clean_up(struct aws_hash_table *map) {
249 0 : AWS_PRECONDITION(map != NULL);
250 0 : AWS_PRECONDITION(
251 0 : map->p_impl == NULL || aws_hash_table_is_valid(map),
252 0 : "Input aws_hash_table [map] must be valid or hash_table_state pointer [map->p_impl] must be NULL, in case "
253 0 : "aws_hash_table_clean_up was called twice.");
254 0 : struct hash_table_state *state = map->p_impl;
255 0 :
256 0 : /* Ensure that we're idempotent */
257 0 : if (!state) {
258 0 : return;
259 0 : }
260 0 :
261 0 : aws_hash_table_clear(map);
262 0 : aws_mem_release(map->p_impl->alloc, map->p_impl);
263 0 :
264 0 : map->p_impl = NULL;
265 0 : AWS_POSTCONDITION(map->p_impl == NULL);
266 0 : }
267 :
268 0 : void aws_hash_table_swap(struct aws_hash_table *AWS_RESTRICT a, struct aws_hash_table *AWS_RESTRICT b) {
269 0 : AWS_PRECONDITION(a != b);
270 0 : struct aws_hash_table tmp = *a;
271 0 : *a = *b;
272 0 : *b = tmp;
273 0 : }
274 :
/* Transfer ownership of [from]'s implementation to [to], then zero [from]
 * so the source no longer references the moved state. */
void aws_hash_table_move(struct aws_hash_table *AWS_RESTRICT to, struct aws_hash_table *AWS_RESTRICT from) {
    AWS_PRECONDITION(to != NULL);
    AWS_PRECONDITION(from != NULL);
    AWS_PRECONDITION(to != from);
    AWS_PRECONDITION(aws_hash_table_is_valid(from));

    *to = *from;
    AWS_ZERO_STRUCT(*from);
    AWS_POSTCONDITION(aws_hash_table_is_valid(to));
}
285 :
286 : /* Tries to find where the requested key is or where it should go if put.
287 : * Returns AWS_ERROR_SUCCESS if the item existed (leaving it in *entry),
288 : * or AWS_ERROR_HASHTBL_ITEM_NOT_FOUND if it did not (putting its destination
289 : * in *entry). Note that this does not take care of displacing whatever was in
290 : * that entry before.
291 : *
292 : * probe_idx is set to the probe index of the entry found.
293 : */
294 :
295 : static int s_find_entry1(
296 : struct hash_table_state *state,
297 : uint64_t hash_code,
298 : const void *key,
299 : struct hash_table_entry **p_entry,
300 : size_t *p_probe_idx);
301 :
302 : /* Inlined fast path: Check the first slot, only. */
303 : /* TODO: Force inlining? */
304 : static int inline s_find_entry(
305 : struct hash_table_state *state,
306 : uint64_t hash_code,
307 : const void *key,
308 : struct hash_table_entry **p_entry,
309 0 : size_t *p_probe_idx) {
310 0 : struct hash_table_entry *entry = &state->slots[hash_code & state->mask];
311 0 :
312 0 : if (entry->hash_code == 0) {
313 0 : if (p_probe_idx) {
314 0 : *p_probe_idx = 0;
315 0 : }
316 0 : *p_entry = entry;
317 0 : return AWS_ERROR_HASHTBL_ITEM_NOT_FOUND;
318 0 : }
319 0 :
320 0 : if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
321 0 : if (p_probe_idx) {
322 0 : *p_probe_idx = 0;
323 0 : }
324 0 : *p_entry = entry;
325 0 : return AWS_OP_SUCCESS;
326 0 : }
327 0 :
328 0 : return s_find_entry1(state, hash_code, key, p_entry, p_probe_idx);
329 0 : }
330 :
/* Slow path of s_find_entry: probes linearly from the home slot (probe index 1
 * onward) until it finds the key, an empty slot, or an entry whose own probe
 * distance is shorter than ours — by the Robin Hood invariant, the key cannot
 * exist past that point. */
static int s_find_entry1(
    struct hash_table_state *state,
    uint64_t hash_code,
    const void *key,
    struct hash_table_entry **p_entry,
    size_t *p_probe_idx) {
    size_t probe_idx = 1;
    /* If we find a deleted entry, we record that index and return it as our probe index (i.e. we'll keep searching to
     * see if it already exists, but if not we'll overwrite the deleted entry).
     */

    int rv;
    struct hash_table_entry *entry;
    /* This loop is guaranteed to terminate because entry_probe is bounded above by state->mask (i.e. state->size - 1).
     * Since probe_idx increments every loop iteration, it will become larger than entry_probe after at most state->size
     * transitions and the loop will exit (if it hasn't already)
     */
    while (1) {
#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
        /* Wrapping addition is intended here; masking keeps the index in range. */
        uint64_t index = (hash_code + probe_idx) & state->mask;
#ifdef CBMC
#    pragma CPROVER check pop
#endif
        entry = &state->slots[index];
        /* Empty slot: the key is not in the table. */
        if (!entry->hash_code) {
            rv = AWS_ERROR_HASHTBL_ITEM_NOT_FOUND;
            break;
        }

        /* Hash codes are compared first as a cheap filter before the
         * (potentially expensive) key equality callback. */
        if (entry->hash_code == hash_code && s_hash_keys_eq(state, key, entry->element.key)) {
            rv = AWS_ERROR_SUCCESS;
            break;
        }

#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
        /* Probe distance of the slot's current occupant from its own home slot. */
        uint64_t entry_probe = (index - entry->hash_code) & state->mask;
#ifdef CBMC
#    pragma CPROVER check pop
#endif

        if (entry_probe < probe_idx) {
            /* We now know that our target entry cannot exist; if it did exist,
             * it would be at the current location as it has a higher probe
             * length than the entry we are examining and thus would have
             * preempted that item
             */
            rv = AWS_ERROR_HASHTBL_ITEM_NOT_FOUND;
            break;
        }

        probe_idx++;
    }

    /* On NOT_FOUND, *p_entry is where the key would be inserted. */
    *p_entry = entry;
    if (p_probe_idx) {
        *p_probe_idx = probe_idx;
    }

    return rv;
}
397 :
398 0 : int aws_hash_table_find(const struct aws_hash_table *map, const void *key, struct aws_hash_element **p_elem) {
399 0 : AWS_PRECONDITION(aws_hash_table_is_valid(map));
400 0 : AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(p_elem), "Input aws_hash_element pointer [p_elem] must be writable.");
401 0 :
402 0 : struct hash_table_state *state = map->p_impl;
403 0 : uint64_t hash_code = s_hash_for(state, key);
404 0 : struct hash_table_entry *entry;
405 0 :
406 0 : int rv = s_find_entry(state, hash_code, key, &entry, NULL);
407 0 :
408 0 : if (rv == AWS_ERROR_SUCCESS) {
409 0 : *p_elem = &entry->element;
410 0 : } else {
411 0 : *p_elem = NULL;
412 0 : }
413 0 : AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
414 0 : }
415 :
416 : /**
417 : * Attempts to find a home for the given entry.
418 : * If the entry was empty (i.e. hash-code of 0), then the function does nothing and returns NULL
419 : * Otherwise, it emplaces the item, and returns a pointer to the newly emplaced entry.
420 : * This function is only called after the hash-table has been expanded to fit the new element,
421 : * so it should never fail.
422 : */
static struct hash_table_entry *s_emplace_item(
    struct hash_table_state *state,
    struct hash_table_entry entry,
    size_t probe_idx) {
    AWS_PRECONDITION(hash_table_state_is_valid(state));

    /* hash_code == 0 is the empty-entry sentinel: nothing to place. */
    if (entry.hash_code == 0) {
        AWS_RETURN_WITH_POSTCONDITION(NULL, hash_table_state_is_valid(state));
    }

    struct hash_table_entry *rval = NULL;

    /* Since a valid hash_table has at least one empty element, this loop will always terminate in at most linear time
     */
    while (entry.hash_code != 0) {
#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
        /* Candidate slot for the entry at its current probe distance. */
        size_t index = (size_t)(entry.hash_code + probe_idx) & state->mask;
#ifdef CBMC
#    pragma CPROVER check pop
#endif
        struct hash_table_entry *victim = &state->slots[index];

#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
        /* Probe distance of the slot's current occupant from its own home slot. */
        size_t victim_probe_idx = (size_t)(index - victim->hash_code) & state->mask;
#ifdef CBMC
#    pragma CPROVER check pop
#endif

        /* Claim the slot if it is empty, or if its occupant is closer to its
         * home than we are (the Robin Hood displacement rule). */
        if (!victim->hash_code || victim_probe_idx < probe_idx) {
            /* The first thing we emplace is the entry itself. A pointer to its location becomes the rval */
            if (!rval) {
                rval = victim;
            }

            /* Swap: the displaced occupant becomes the entry we now re-home. */
            struct hash_table_entry tmp = *victim;
            *victim = entry;
            entry = tmp;

            /* The displaced entry resumes probing one past its old distance. */
            probe_idx = victim_probe_idx + 1;
        } else {
            probe_idx++;
        }
    }

    AWS_RETURN_WITH_POSTCONDITION(
        rval,
        hash_table_state_is_valid(state) && rval >= &state->slots[0] && rval < &state->slots[state->size],
        "Output hash_table_entry pointer [rval] must point in the slots of [state].");
}
478 :
479 0 : static int s_expand_table(struct aws_hash_table *map) {
480 0 : struct hash_table_state *old_state = map->p_impl;
481 0 : struct hash_table_state template = *old_state;
482 0 :
483 0 : size_t new_size;
484 0 : if (aws_mul_size_checked(template.size, 2, &new_size)) {
485 0 : return AWS_OP_ERR;
486 0 : }
487 0 :
488 0 : if (s_update_template_size(&template, new_size)) {
489 0 : return AWS_OP_ERR;
490 0 : }
491 0 :
492 0 : struct hash_table_state *new_state = s_alloc_state(&template);
493 0 : if (!new_state) {
494 0 : return AWS_OP_ERR;
495 0 : }
496 0 :
497 0 : for (size_t i = 0; i < old_state->size; i++) {
498 0 : struct hash_table_entry entry = old_state->slots[i];
499 0 : if (entry.hash_code) {
500 0 : /* We can directly emplace since we know we won't put the same item twice */
501 0 : s_emplace_item(new_state, entry, 0);
502 0 : }
503 0 : }
504 0 :
505 0 : map->p_impl = new_state;
506 0 : aws_mem_release(new_state->alloc, old_state);
507 0 :
508 0 : return AWS_OP_SUCCESS;
509 0 : }
510 :
/**
 * Find [key] or insert a fresh entry for it (value initialized to NULL).
 * On return, *p_elem (if non-NULL) points at the element and *was_created
 * (if non-NULL) records whether a new entry was made. May grow the table,
 * which replaces map->p_impl.
 */
int aws_hash_table_create(
    struct aws_hash_table *map,
    const void *key,
    struct aws_hash_element **p_elem,
    int *was_created) {

    struct hash_table_state *state = map->p_impl;
    uint64_t hash_code = s_hash_for(state, key);
    struct hash_table_entry *entry;
    size_t probe_idx;
    /* Redirect a NULL was_created to a local so the code below can assign
     * unconditionally. */
    int ignored;
    if (!was_created) {
        was_created = &ignored;
    }

    int rv = s_find_entry(state, hash_code, key, &entry, &probe_idx);

    /* Key already present: report it without modifying the table. */
    if (rv == AWS_ERROR_SUCCESS) {
        if (p_elem) {
            *p_elem = &entry->element;
        }
        *was_created = 0;
        return AWS_OP_SUCCESS;
    }

    /* Okay, we need to add an entry. Check the load factor first. */
    size_t incr_entry_count;
    if (aws_add_size_checked(state->entry_count, 1, &incr_entry_count)) {
        return AWS_OP_ERR;
    }
    if (incr_entry_count > state->max_load) {
        rv = s_expand_table(map);
        if (rv != AWS_OP_SUCCESS) {
            /* Any error was already raised in expand_table */
            return rv;
        }
        /* The expand replaced p_impl; re-read it before inserting. */
        state = map->p_impl;
        /* If we expanded the table, we need to discard the probe index returned from find_entry,
         * as it's likely that we can find a more desirable slot. If we don't, then later gets will
         * terminate before reaching our probe index.

         * n.b. currently we ignore this probe_idx subsequently, but leaving
         this here so we don't
         * forget when we optimize later. */
        probe_idx = 0;
    }

    state->entry_count++;
    /* New entries start with a NULL value; callers fill it in afterwards. */
    struct hash_table_entry new_entry;
    new_entry.element.key = key;
    new_entry.element.value = NULL;
    new_entry.hash_code = hash_code;

    entry = s_emplace_item(state, new_entry, probe_idx);

    if (p_elem) {
        *p_elem = &entry->element;
    }

    *was_created = 1;

    return AWS_OP_SUCCESS;
}
574 :
575 : AWS_COMMON_API
576 0 : int aws_hash_table_put(struct aws_hash_table *map, const void *key, void *value, int *was_created) {
577 0 : struct aws_hash_element *p_elem;
578 0 : int was_created_fallback;
579 0 :
580 0 : if (!was_created) {
581 0 : was_created = &was_created_fallback;
582 0 : }
583 0 :
584 0 : if (aws_hash_table_create(map, key, &p_elem, was_created)) {
585 0 : return AWS_OP_ERR;
586 0 : }
587 0 :
588 0 : /*
589 0 : * aws_hash_table_create might resize the table, which results in map->p_impl changing.
590 0 : * It is therefore important to wait to read p_impl until after we return.
591 0 : */
592 0 : struct hash_table_state *state = map->p_impl;
593 0 :
594 0 : if (!*was_created) {
595 0 : if (p_elem->key != key && state->destroy_key_fn) {
596 0 : state->destroy_key_fn((void *)p_elem->key);
597 0 : }
598 0 :
599 0 : if (state->destroy_value_fn) {
600 0 : state->destroy_value_fn((void *)p_elem->value);
601 0 : }
602 0 : }
603 0 :
604 0 : p_elem->key = key;
605 0 : p_elem->value = value;
606 0 :
607 0 : return AWS_OP_SUCCESS;
608 0 : }
609 :
610 : /* Clears an entry. Does _not_ invoke destructor callbacks.
611 : * Returns the last slot touched (note that if we wrap, we'll report an index
612 : * lower than the original entry's index)
613 : */
/* Backward-shift deletion (see the article referenced at the top of this
 * file): after removal, following entries are shifted back one slot until a
 * natural probe-sequence boundary is reached. */
static size_t s_remove_entry(struct hash_table_state *state, struct hash_table_entry *entry) {
    AWS_PRECONDITION(hash_table_state_is_valid(state));
    AWS_PRECONDITION(state->entry_count > 0);
    AWS_PRECONDITION(
        entry >= &state->slots[0] && entry < &state->slots[state->size],
        "Input hash_table_entry [entry] pointer must point in the available slots.");
    state->entry_count--;

    /* Shift subsequent entries back until we find an entry that belongs at its
     * current position. This is important to ensure that subsequent searches
     * don't terminate at the removed element.
     */
    size_t index = s_index_for(state, entry);
    /* There is always at least one empty slot in the hash table, so this loop always terminates */
    while (1) {
        size_t next_index = (index + 1) & state->mask;

        /* If we hit an empty slot, stop */
        if (!state->slots[next_index].hash_code) {
            break;
        }
        /* If the next slot is at the start of the probe sequence, stop.
         * We know that nothing with an earlier home slot is after this;
         * otherwise this index-zero entry would have been evicted from its
         * home.
         */
        if ((state->slots[next_index].hash_code & state->mask) == next_index) {
            break;
        }

        /* Okay, shift this one back */
        state->slots[index] = state->slots[next_index];
        index = next_index;
    }

    /* Clear the entry we shifted out of */
    AWS_ZERO_STRUCT(state->slots[index]);
    /* [index] is the last slot touched; callers (iterator delete) use it to
     * detect whether an already-visited element was shifted. */
    AWS_RETURN_WITH_POSTCONDITION(index, hash_table_state_is_valid(state) && index <= state->size);
}
653 :
/**
 * Remove [key] if present. If [p_value] is non-NULL the removed element is
 * copied out and ownership passes to the caller (no destructors run);
 * otherwise the destroy callbacks are applied. *was_present (if non-NULL)
 * reports whether anything was removed. Always returns success.
 */
int aws_hash_table_remove(
    struct aws_hash_table *map,
    const void *key,
    struct aws_hash_element *p_value,
    int *was_present) {
    AWS_PRECONDITION(aws_hash_table_is_valid(map));
    AWS_PRECONDITION(
        p_value == NULL || AWS_OBJECT_PTR_IS_WRITABLE(p_value), "Input pointer [p_value] must be NULL or writable.");
    AWS_PRECONDITION(
        was_present == NULL || AWS_OBJECT_PTR_IS_WRITABLE(was_present),
        "Input pointer [was_present] must be NULL or writable.");

    struct hash_table_state *state = map->p_impl;
    uint64_t hash_code = s_hash_for(state, key);
    struct hash_table_entry *entry;
    /* Redirect a NULL was_present to a local so the code below can assign
     * unconditionally. */
    int ignored;

    if (!was_present) {
        was_present = &ignored;
    }

    int rv = s_find_entry(state, hash_code, key, &entry, NULL);

    if (rv != AWS_ERROR_SUCCESS) {
        *was_present = 0;
        /* Note: this macro returns from the function — removing an absent key
         * is still reported as success. */
        AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
    }

    *was_present = 1;

    if (p_value) {
        /* Caller takes ownership of the removed element; no destructors run. */
        *p_value = entry->element;
    } else {
        if (state->destroy_key_fn) {
            state->destroy_key_fn((void *)entry->element.key);
        }
        if (state->destroy_value_fn) {
            state->destroy_value_fn(entry->element.value);
        }
    }
    s_remove_entry(state, entry);

    AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
}
698 :
699 0 : int aws_hash_table_remove_element(struct aws_hash_table *map, struct aws_hash_element *p_value) {
700 0 : AWS_PRECONDITION(aws_hash_table_is_valid(map));
701 0 : AWS_PRECONDITION(p_value != NULL);
702 0 :
703 0 : struct hash_table_state *state = map->p_impl;
704 0 : struct hash_table_entry *entry = AWS_CONTAINER_OF(p_value, struct hash_table_entry, element);
705 0 :
706 0 : s_remove_entry(state, entry);
707 0 :
708 0 : AWS_SUCCEED_WITH_POSTCONDITION(aws_hash_table_is_valid(map));
709 0 : }
710 :
711 : int aws_hash_table_foreach(
712 : struct aws_hash_table *map,
713 : int (*callback)(void *context, struct aws_hash_element *pElement),
714 0 : void *context) {
715 0 :
716 0 : for (struct aws_hash_iter iter = aws_hash_iter_begin(map); !aws_hash_iter_done(&iter); aws_hash_iter_next(&iter)) {
717 0 : int rv = callback(context, &iter.element);
718 0 :
719 0 : if (rv & AWS_COMMON_HASH_TABLE_ITER_DELETE) {
720 0 : aws_hash_iter_delete(&iter, false);
721 0 : }
722 0 :
723 0 : if (!(rv & AWS_COMMON_HASH_TABLE_ITER_CONTINUE)) {
724 0 : break;
725 0 : }
726 0 : }
727 0 :
728 0 : return AWS_OP_SUCCESS;
729 0 : }
730 :
731 : bool aws_hash_table_eq(
732 : const struct aws_hash_table *a,
733 : const struct aws_hash_table *b,
734 0 : aws_hash_callback_eq_fn *value_eq) {
735 0 : AWS_PRECONDITION(aws_hash_table_is_valid(a));
736 0 : AWS_PRECONDITION(aws_hash_table_is_valid(b));
737 0 : AWS_PRECONDITION(value_eq != NULL);
738 0 :
739 0 : if (aws_hash_table_get_entry_count(a) != aws_hash_table_get_entry_count(b)) {
740 0 : AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
741 0 : }
742 0 :
743 0 : /*
744 0 : * Now that we have established that the two tables have the same number of
745 0 : * entries, we can simply iterate one and compare against the same key in
746 0 : * the other.
747 0 : */
748 0 : for (size_t i = 0; i < a->p_impl->size; ++i) {
749 0 : const struct hash_table_entry *const a_entry = &a->p_impl->slots[i];
750 0 : if (a_entry->hash_code == 0) {
751 0 : continue;
752 0 : }
753 0 :
754 0 : struct aws_hash_element *b_element = NULL;
755 0 :
756 0 : aws_hash_table_find(b, a_entry->element.key, &b_element);
757 0 :
758 0 : if (!b_element) {
759 0 : /* Key is present in A only */
760 0 : AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
761 0 : }
762 0 :
763 0 : if (!s_safe_eq_check(value_eq, a_entry->element.value, b_element->value)) {
764 0 : AWS_RETURN_WITH_POSTCONDITION(false, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
765 0 : }
766 0 : }
767 0 : AWS_RETURN_WITH_POSTCONDITION(true, aws_hash_table_is_valid(a) && aws_hash_table_is_valid(b));
768 0 : }
769 :
770 : /**
771 : * Given an iterator, and a start slot, find the next available filled slot if it exists
772 : * Otherwise, return an iter that will return true for aws_hash_iter_done().
773 : * Note that aws_hash_iter_is_valid() need not hold on entry to the function, since
774 : * it can be called on a partially constructed iter from aws_hash_iter_begin().
775 : *
776 : * Note that calling this on an iterator which is "done" is idempotent: it will return another
777 : * iterator which is "done".
778 : */
779 0 : static inline void s_get_next_element(struct aws_hash_iter *iter, size_t start_slot) {
780 0 : AWS_PRECONDITION(iter != NULL);
781 0 : AWS_PRECONDITION(aws_hash_table_is_valid(iter->map));
782 0 : struct hash_table_state *state = iter->map->p_impl;
783 0 : size_t limit = iter->limit;
784 0 :
785 0 : for (size_t i = start_slot; i < limit; i++) {
786 0 : struct hash_table_entry *entry = &state->slots[i];
787 0 :
788 0 : if (entry->hash_code) {
789 0 : iter->element = entry->element;
790 0 : iter->slot = i;
791 0 : iter->status = AWS_HASH_ITER_STATUS_READY_FOR_USE;
792 0 : return;
793 0 : }
794 0 : }
795 0 : iter->element.key = NULL;
796 0 : iter->element.value = NULL;
797 0 : iter->slot = iter->limit;
798 0 : iter->status = AWS_HASH_ITER_STATUS_DONE;
799 0 : AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
800 0 : }
801 :
802 0 : struct aws_hash_iter aws_hash_iter_begin(const struct aws_hash_table *map) {
803 0 : AWS_PRECONDITION(aws_hash_table_is_valid(map));
804 0 : struct hash_table_state *state = map->p_impl;
805 0 : struct aws_hash_iter iter;
806 0 : AWS_ZERO_STRUCT(iter);
807 0 : iter.map = map;
808 0 : iter.limit = state->size;
809 0 : s_get_next_element(&iter, 0);
810 0 : AWS_RETURN_WITH_POSTCONDITION(
811 0 : iter,
812 0 : aws_hash_iter_is_valid(&iter) &&
813 0 : (iter.status == AWS_HASH_ITER_STATUS_DONE || iter.status == AWS_HASH_ITER_STATUS_READY_FOR_USE),
814 0 : "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
815 0 : }
816 :
817 0 : bool aws_hash_iter_done(const struct aws_hash_iter *iter) {
818 0 : AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
819 0 : AWS_PRECONDITION(
820 0 : iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
821 0 : "Input aws_hash_iter [iter] must either be done, or ready to use.");
822 0 : /*
823 0 : * SIZE_MAX is a valid (non-terminal) value for iter->slot in the event that
824 0 : * we delete slot 0. See comments in aws_hash_iter_delete.
825 0 : *
826 0 : * As such we must use == rather than >= here.
827 0 : */
828 0 : bool rval = (iter->slot == iter->limit);
829 0 : AWS_POSTCONDITION(
830 0 : iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
831 0 : "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
832 0 : AWS_POSTCONDITION(
833 0 : rval == (iter->status == AWS_HASH_ITER_STATUS_DONE),
834 0 : "Output bool [rval] must be true if and only if the status of [iter] is DONE.");
835 0 : AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
836 0 : return rval;
837 0 : }
838 :
/* Advance the iterator to the next occupied slot (or DONE). */
void aws_hash_iter_next(struct aws_hash_iter *iter) {
    AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
    /* iter->slot may be SIZE_MAX after aws_hash_iter_delete removed slot 0;
     * the +1 then wraps to 0 intentionally (hence the disabled overflow check). */
    s_get_next_element(iter, iter->slot + 1);
#ifdef CBMC
#    pragma CPROVER check pop
#endif
    AWS_POSTCONDITION(
        iter->status == AWS_HASH_ITER_STATUS_DONE || iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE,
        "The status of output aws_hash_iter [iter] must either be DONE or READY_FOR_USE.");
    AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
}
854 :
/**
 * Delete the element the iterator currently points at. When [destroy_contents]
 * is true the table's destroy callbacks are applied first. After this call the
 * caller is expected to use aws_hash_iter_done/aws_hash_iter_next as usual.
 */
void aws_hash_iter_delete(struct aws_hash_iter *iter, bool destroy_contents) {
    AWS_PRECONDITION(
        iter->status == AWS_HASH_ITER_STATUS_READY_FOR_USE, "Input aws_hash_iter [iter] must be ready for use.");
    AWS_PRECONDITION(aws_hash_iter_is_valid(iter));
    AWS_PRECONDITION(
        iter->map->p_impl->entry_count > 0,
        "The hash_table_state pointed by input [iter] must contain at least one entry.");

    struct hash_table_state *state = iter->map->p_impl;
    if (destroy_contents) {
        if (state->destroy_key_fn) {
            state->destroy_key_fn((void *)iter->element.key);
        }
        if (state->destroy_value_fn) {
            state->destroy_value_fn(iter->element.value);
        }
    }

    /* Backward-shift removal; last_index is the final slot the shift touched. */
    size_t last_index = s_remove_entry(state, &state->slots[iter->slot]);

    /* If we shifted elements that are not part of the window we intend to iterate
     * over, it means we shifted an element that we already visited into the
     * iter->limit - 1 position. To avoid double iteration, we'll now reduce the
     * limit to compensate.
     *
     * Note that last_index cannot equal iter->slot, because slots[iter->slot]
     * is empty before we start walking the table.
     */
    if (last_index < iter->slot || last_index >= iter->limit) {
        iter->limit--;
    }

    /*
     * After removing this entry, the next entry might be in the same slot, or
     * in some later slot, or we might have no further entries.
     *
     * We also expect that the caller will call aws_hash_iter_done and aws_hash_iter_next
     * after this delete call. This gets a bit tricky if we just deleted the value
     * in slot 0, and a new value has shifted in.
     *
     * To deal with this, we'll just step back one slot, and let _next start iteration
     * at our current slot. Note that if we just deleted slot 0, this will result in
     * underflowing to SIZE_MAX; we have to take care in aws_hash_iter_done to avoid
     * treating this as an end-of-iteration condition.
     */
#ifdef CBMC
#    pragma CPROVER check push
#    pragma CPROVER check disable "unsigned-overflow"
#endif
    iter->slot--;
#ifdef CBMC
#    pragma CPROVER check pop
#endif
    iter->status = AWS_HASH_ITER_STATUS_DELETE_CALLED;
    AWS_POSTCONDITION(
        iter->status == AWS_HASH_ITER_STATUS_DELETE_CALLED,
        "The status of output aws_hash_iter [iter] must be DELETE_CALLED.");
    AWS_POSTCONDITION(aws_hash_iter_is_valid(iter));
}
914 :
915 0 : void aws_hash_table_clear(struct aws_hash_table *map) {
916 0 : AWS_PRECONDITION(aws_hash_table_is_valid(map));
917 0 : struct hash_table_state *state = map->p_impl;
918 0 :
919 0 : /* Check that we have at least one destructor before iterating over the table */
920 0 : if (state->destroy_key_fn || state->destroy_value_fn) {
921 0 : for (size_t i = 0; i < state->size; ++i) {
922 0 : struct hash_table_entry *entry = &state->slots[i];
923 0 : if (!entry->hash_code) {
924 0 : continue;
925 0 : }
926 0 : if (state->destroy_key_fn) {
927 0 : state->destroy_key_fn((void *)entry->element.key);
928 0 : }
929 0 : if (state->destroy_value_fn) {
930 0 : state->destroy_value_fn(entry->element.value);
931 0 : }
932 0 : }
933 0 : }
934 0 : /* Since hash code 0 represents an empty slot we can just zero out the
935 0 : * entire table. */
936 0 : memset(state->slots, 0, sizeof(*state->slots) * state->size);
937 0 :
938 0 : state->entry_count = 0;
939 0 : AWS_POSTCONDITION(aws_hash_table_is_valid(map));
940 0 : }
941 :
/**
 * Hashes a NUL-terminated C string with Jenkins lookup3 (hashlittle2),
 * folding the two 32-bit halves into a single 64-bit value.
 */
uint64_t aws_hash_c_string(const void *item) {
    AWS_PRECONDITION(aws_c_string_is_valid(item));
    const char *str = item;

    /* Seed values: the first digits of pi in hex. */
    uint32_t seed_hi = 0x3243F6A8;
    uint32_t seed_lo = 0x885A308D;
    hashlittle2(str, strlen(str), &seed_lo, &seed_hi);

    uint64_t result = (uint64_t)seed_hi << 32;
    return result | seed_lo;
}
952 :
953 134963 : uint64_t aws_hash_string(const void *item) {
954 134963 : AWS_PRECONDITION(aws_string_is_valid(item));
955 134963 : const struct aws_string *str = item;
956 134963 :
957 134963 : /* first digits of pi in hex */
958 134963 : uint32_t b = 0x3243F6A8, c = 0x885A308D;
959 134963 : hashlittle2(aws_string_bytes(str), str->len, &c, &b);
960 134963 : AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_string_is_valid(str));
961 134963 : }
962 :
963 46743 : uint64_t aws_hash_byte_cursor_ptr(const void *item) {
964 46743 : AWS_PRECONDITION(aws_byte_cursor_is_valid(item));
965 46743 : const struct aws_byte_cursor *cur = item;
966 46743 :
967 46743 : /* first digits of pi in hex */
968 46743 : uint32_t b = 0x3243F6A8, c = 0x885A308D;
969 46743 : hashlittle2(cur->ptr, cur->len, &c, &b);
970 46743 : AWS_RETURN_WITH_POSTCONDITION(((uint64_t)b << 32) | c, aws_byte_cursor_is_valid(cur));
971 46743 : }
972 :
/**
 * Hashes the pointer value itself, not the memory it references; NULL (0)
 * is therefore a perfectly acceptable input.
 */
uint64_t aws_hash_ptr(const void *item) {
    /* Seed values: the first digits of e in hex (2.b7e 1516 28ae d2a6). */
    uint32_t seed_hi = 0x2b7e1516;
    uint32_t seed_lo = 0x28aed2a6;

    hashlittle2(&item, sizeof(item), &seed_lo, &seed_hi);

    uint64_t result = (uint64_t)seed_hi << 32;
    return result | seed_lo;
}
983 :
/**
 * Combines two 64-bit hash values: [item2] is split into two 32-bit lookup3
 * seeds, [item1] is mixed into them, and the seeds are folded back into one
 * 64-bit result.
 */
uint64_t aws_hash_combine(uint64_t item1, uint64_t item2) {
    uint32_t seed_lsb = item2 & 0xFFFFFFFF; /* low 32 bits of item2  */
    uint32_t seed_msb = item2 >> 32;        /* high 32 bits of item2 */

    hashlittle2(&item1, sizeof(item1), &seed_msb, &seed_lsb);
    return ((uint64_t)seed_lsb << 32) | seed_msb;
}
991 :
/**
 * Equality callback for NUL-terminated C-string keys: true iff the two
 * strings have identical contents.
 */
bool aws_hash_callback_c_str_eq(const void *a, const void *b) {
    AWS_PRECONDITION(aws_c_string_is_valid(a));
    AWS_PRECONDITION(aws_c_string_is_valid(b));
    bool equal = (strcmp(a, b) == 0);
    AWS_RETURN_WITH_POSTCONDITION(equal, aws_c_string_is_valid(a) && aws_c_string_is_valid(b));
}
998 :
/**
 * Equality callback for aws_string keys: true iff the two strings have
 * identical contents (delegates to aws_string_eq).
 */
bool aws_hash_callback_string_eq(const void *a, const void *b) {
    AWS_PRECONDITION(aws_string_is_valid(a));
    AWS_PRECONDITION(aws_string_is_valid(b));
    bool rval = aws_string_eq(a, b);
    /* Fix: the postcondition previously validated [a] and [b] with
     * aws_c_string_is_valid even though the inputs are aws_string objects
     * (see the preconditions above); keep pre- and post-conditions
     * consistent by using aws_string_is_valid. */
    AWS_RETURN_WITH_POSTCONDITION(rval, aws_string_is_valid(a) && aws_string_is_valid(b));
}
1005 :
/**
 * Destructor callback for aws_string keys/values: frees the string via
 * aws_string_destroy.
 */
void aws_hash_callback_string_destroy(void *a) {
    AWS_PRECONDITION(aws_string_is_valid(a));
    aws_string_destroy(a);
}
1010 :
/**
 * Pointer-identity equality callback: compares the pointer values
 * themselves, not the memory they reference.
 */
bool aws_ptr_eq(const void *a, const void *b) {
    bool same_address = (a == b);
    return same_address;
}
1014 :
1015 : /**
1016 : * Best-effort check of hash_table_state data-structure invariants
1017 : * Some invariants, such as that the number of entries is actually the
1018 : * same as the entry_count field, would require a loop to check
1019 : */
1020 2473707 : bool aws_hash_table_is_valid(const struct aws_hash_table *map) {
1021 2473707 : return map && map->p_impl && hash_table_state_is_valid(map->p_impl);
1022 2473707 : }
1023 :
1024 : /**
1025 : * Best-effort check of hash_table_state data-structure invariants
1026 : * Some invariants, such as that the number of entries is actually the
1027 : * same as the entry_count field, would require a loop to check
1028 : */
1029 2473707 : bool hash_table_state_is_valid(const struct hash_table_state *map) {
1030 2473707 : if (!map) {
1031 0 : return false;
1032 0 : }
1033 2473707 : bool hash_fn_nonnull = (map->hash_fn != NULL);
1034 2473707 : bool equals_fn_nonnull = (map->equals_fn != NULL);
1035 2473707 : /*destroy_key_fn and destroy_value_fn are both allowed to be NULL*/
1036 2473707 : bool alloc_nonnull = (map->alloc != NULL);
1037 2473707 : bool size_at_least_two = (map->size >= 2);
1038 2473707 : bool size_is_power_of_two = aws_is_power_of_two(map->size);
1039 2473707 : bool entry_count = (map->entry_count <= map->max_load);
1040 2473707 : bool max_load = (map->max_load < map->size);
1041 2473707 : bool mask_is_correct = (map->mask == (map->size - 1));
1042 2473707 : bool max_load_factor_bounded = map->max_load_factor == 0.95; //(map->max_load_factor < 1.0);
1043 2473707 : bool slots_allocated = AWS_MEM_IS_WRITABLE(&map->slots[0], sizeof(map->slots[0]) * map->size);
1044 2473707 :
1045 2473707 : return hash_fn_nonnull && equals_fn_nonnull && alloc_nonnull && size_at_least_two && size_is_power_of_two &&
1046 2473707 : entry_count && max_load && mask_is_correct && max_load_factor_bounded && slots_allocated;
1047 2473707 : }
1048 :
1049 : /**
1050 : * Given a pointer to a hash_iter, checks that it is well-formed, with all data-structure invariants.
1051 : */
1052 0 : bool aws_hash_iter_is_valid(const struct aws_hash_iter *iter) {
1053 0 : if (!iter) {
1054 0 : return false;
1055 0 : }
1056 0 : if (!iter->map) {
1057 0 : return false;
1058 0 : }
1059 0 : if (!aws_hash_table_is_valid(iter->map)) {
1060 0 : return false;
1061 0 : }
1062 0 : if (iter->limit > iter->map->p_impl->size) {
1063 0 : return false;
1064 0 : }
1065 0 :
1066 0 : switch (iter->status) {
1067 0 : case AWS_HASH_ITER_STATUS_DONE:
1068 0 : /* Done iff slot == limit */
1069 0 : return iter->slot == iter->limit;
1070 0 : case AWS_HASH_ITER_STATUS_DELETE_CALLED:
1071 0 : /* iter->slot can underflow to SIZE_MAX after a delete
1072 0 : * see the comments for aws_hash_iter_delete() */
1073 0 : return iter->slot <= iter->limit || iter->slot == SIZE_MAX;
1074 0 : case AWS_HASH_ITER_STATUS_READY_FOR_USE:
1075 0 : /* A slot must point to a valid location (i.e. hash_code != 0) */
1076 0 : return iter->slot < iter->limit && iter->map->p_impl->slots[iter->slot].hash_code != 0;
1077 0 : }
1078 0 : /* Invalid status code */
1079 0 : return false;
1080 0 : }
1081 :
1082 : /**
1083 : * Determine the total number of bytes needed for a hash-table with
1084 : * "size" slots. If the result would overflow a size_t, return
1085 : * AWS_OP_ERR; otherwise, return AWS_OP_SUCCESS with the result in
1086 : * "required_bytes".
1087 : */
1088 2332051 : int hash_table_state_required_bytes(size_t size, size_t *required_bytes) {
1089 2332051 :
1090 2332051 : size_t elemsize;
1091 2332051 : if (aws_mul_size_checked(size, sizeof(struct hash_table_entry), &elemsize)) {
1092 0 : return AWS_OP_ERR;
1093 0 : }
1094 2332051 :
1095 2332051 : if (aws_add_size_checked(elemsize, sizeof(struct hash_table_state), required_bytes)) {
1096 0 : return AWS_OP_ERR;
1097 0 : }
1098 2332051 :
1099 2332051 : return AWS_OP_SUCCESS;
1100 2332051 : }
|