/*
 * uthash.h — hash-table macros (uthash v2.0.2, bundled with tinydtls 0.8.6)
 *
 * Copyright (c) 2003-2017, Troy D. Hanson  http://troydhanson.github.com/uthash/
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #define UTHASH_VERSION 2.0.2
28 
29 #include <string.h> /* memcmp, memset, strlen */
30 #include <stddef.h> /* ptrdiff_t */
31 #include <stdlib.h> /* exit */
32 
/* These macros use decltype or the earlier __typeof GNU extension.
 * decltype is only available in newer compilers (VS2010 or gcc 4.3+ when
 * compiling C++ source), so pick whichever mechanism is available; for
 * VS2008 and friends, where neither exists, fall back to a casting
 * workaround (NO_DECLTYPE). */
#if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
#if defined(_MSC_VER)                          /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)   /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                                          /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#endif
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
#define NO_DECLTYPE
#else                                          /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
#endif

#ifdef NO_DECLTYPE
#define DECLTYPE(x)
/* Without decltype, assign through a char** alias so any pointer type can be
 * stored without the compiler needing to know the destination's type. */
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while (0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while (0)
#endif
64 
/* Several of the hash functions below use uint32_t, which pre-VS2010
 * Microsoft compilers (and some embedded toolchains) do not provide;
 * supply a typedef fallback on those platforms. */
#if defined(_WIN32)
#if defined(_MSC_VER) && _MSC_VER >= 1600
#include <stdint.h>
#elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
#include <stdint.h>
#else
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
#elif defined(__GNUC__) && !defined(__VXWORKS__)
#include <stdint.h>
#else
typedef unsigned int uint32_t;
typedef unsigned char uint8_t;
#endif
81 
/* User-overridable hooks: define any of these before including uthash.h
 * to plug in custom allocators, comparators, or logging. */
#ifndef uthash_malloc
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn */
#endif
#ifndef uthash_free
#define uthash_free(ptr,sz) free(ptr)     /* free fcn */
#endif
#ifndef uthash_bzero
#define uthash_bzero(a,n) memset(a,'\0',n)
#endif
#ifndef uthash_memcmp
#define uthash_memcmp(a,b,n) memcmp(a,b,n)
#endif
#ifndef uthash_strlen
#define uthash_strlen(s) strlen(s)
#endif

#ifndef uthash_noexpand_fyi
#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand */
#endif
#ifndef uthash_expand_fyi
#define uthash_expand_fyi(tbl)            /* can be defined to log expands */
#endif

/* When nonzero, allocation failures are reported to the caller instead of
 * being treated as fatal. */
#ifndef HASH_NONFATAL_OOM
#define HASH_NONFATAL_OOM 0
#endif
108 
#if HASH_NONFATAL_OOM
/* malloc failures can be recovered from */

#ifndef uthash_nonfatal_oom
#define uthash_nonfatal_oom(obj) do {} while (0)   /* non-fatal OOM error */
#endif

/* Record the failure in the caller-provided flag; IF_HASH_NONFATAL_OOM
 * expands its argument only in this (recoverable) configuration. */
#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0)
#define IF_HASH_NONFATAL_OOM(x) x

#else
/* malloc failures result in lost memory, hash tables are unusable */

#ifndef uthash_fatal
#define uthash_fatal(msg) exit(-1)                 /* fatal OOM error */
#endif

#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory")
#define IF_HASH_NONFATAL_OOM(x)

#endif
130 
/* Table sizing parameters. */
#define HASH_INITIAL_NUM_BUCKETS 32U       /* initial number of buckets        */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U   /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10U       /* expand when bucket count reaches */

/* Convert between a user struct pointer and its embedded UT_hash_handle,
 * using the handle offset (hho) stored in the table. */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
140 
/* Undo a bucket-level deletion performed while backing out of a failed add:
 * restore the bucket count and clear the item's intra-bucket links. */
#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_item = (itemptrhh); \
  unsigned _hd_bkt; \
  HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
  (head)->hh.tbl->buckets[_hd_bkt].count++; \
  _hd_hh_item->hh_next = NULL; \
  _hd_hh_item->hh_prev = NULL; \
} while (0)

/* Compute the hash of a key into hashv using the selected hash function. */
#define HASH_VALUE(keyptr,keylen,hashv) \
do { \
  HASH_FCN(keyptr, keylen, hashv); \
} while (0)
155 
/* Look up a key whose hash value has already been computed. The Bloom
 * filter (when enabled) lets us skip the bucket scan for definite misses. */
#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
do { \
  (out) = NULL; \
  if (head) { \
    unsigned _hf_bkt; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
    if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
      HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
    } \
  } \
} while (0)

/* Hash the key, then delegate to the by-hash-value lookup. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_hashv; \
  HASH_VALUE(keyptr, keylen, _hf_hashv); \
  HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
} while (0)
174 
/* Optional Bloom filter: define HASH_BLOOM as the log2 of the filter size
 * in bits to enable. Misses can then often be rejected without scanning a
 * bucket chain. */
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
/* FIX: the whole expansion is now parenthesized. The previous form,
 * (BITLEN/8UL) + (cond ? 1UL : 0UL), misparsed when the macro appeared
 * next to operators of higher precedence (e.g. 2*HASH_BLOOM_BYTELEN). */
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL))
#define HASH_BLOOM_MAKE(tbl,oomed) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!(tbl)->bloom_bv) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
    (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
  } \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))

#else
/* Bloom filter disabled: make/free/add are no-ops and every test "hits". */
#define HASH_BLOOM_MAKE(tbl,oomed)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#define HASH_BLOOM_BYTELEN 0U
#endif
211 
/* Allocate and initialize the shared UT_hash_table for a new hash whose
 * first element is (head). On OOM, all partial allocations are released
 * (in the non-fatal configuration) and oomed is set. */
#define HASH_MAKE_TABLE(hh,head,oomed) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \
  if (!(head)->hh.tbl) { \
    HASH_RECORD_OOM(oomed); \
  } else { \
    uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)->hh.tbl->tail = &((head)->hh); \
    (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
    (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
    (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
    (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
        HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
    (head)->hh.tbl->signature = HASH_SIGNATURE; \
    if (!(head)->hh.tbl->buckets) { \
      HASH_RECORD_OOM(oomed); \
      uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    } else { \
      uthash_bzero((head)->hh.tbl->buckets, \
          HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \
      HASH_BLOOM_MAKE((head)->hh.tbl, oomed); \
      IF_HASH_NONFATAL_OOM( \
        if (oomed) { \
          uthash_free((head)->hh.tbl->buckets, \
              HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
          uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        } \
      ) \
    } \
  } \
} while (0)
243 
/* Replace semantics: delete any existing item with the same key (returning
 * it via `replaced`, or NULL), then add the new item. */
#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
} while (0)

#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
do { \
  (replaced) = NULL; \
  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
  if (replaced) { \
    HASH_DELETE(hh, head, replaced); \
  } \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
} while (0)

/* Hash-computing front ends for the two replace variants above. */
#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
} while (0)

#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
do { \
  unsigned _hr_hashv; \
  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
} while (0)
277 
/* Append (add) to the tail of the application-order doubly-linked list. */
#define HASH_APPEND_LIST(hh, head, add) \
do { \
  (add)->hh.next = NULL; \
  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
  (head)->hh.tbl->tail->next = (add); \
  (head)->hh.tbl->tail = &((add)->hh); \
} while (0)

/* Advance _hs_iter until cmpfcn says the new item sorts before it.
 * _hs_iter is declared by the caller (HASH_ADD_KEYPTR_BYHASHVALUE_INORDER). */
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  do { \
    if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \
      break; \
    } \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)

#ifdef NO_DECLTYPE
#undef HASH_AKBI_INNER_LOOP
/* Without decltype we cannot cast _hs_iter for cmpfcn, so temporarily
 * repoint (head) at the iterate, call cmpfcn, then restore (head). */
#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
do { \
  char *_hs_saved_head = (char*)(head); \
  do { \
    DECLTYPE_ASSIGN(head, _hs_iter); \
    if (cmpfcn(head, add) > 0) { \
      DECLTYPE_ASSIGN(head, _hs_saved_head); \
      break; \
    } \
    DECLTYPE_ASSIGN(head, _hs_saved_head); \
  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
} while (0)
#endif
310 
#if HASH_NONFATAL_OOM

/* Insert an already-linked item into its bucket, with full rollback of the
 * bucket, list, and table state if bucket expansion runs out of memory. */
#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  if (!(oomed)) { \
    unsigned _ha_bkt; \
    (head)->hh.tbl->num_items++; \
    HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
    HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
    if (oomed) { \
      HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \
      HASH_DELETE_HH(hh, head, &(add)->hh); \
      (add)->hh.tbl = NULL; \
      uthash_nonfatal_oom(add); \
    } else { \
      HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
      HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
    } \
  } else { \
    (add)->hh.tbl = NULL; \
    uthash_nonfatal_oom(add); \
  } \
} while (0)

#else

/* Fatal-OOM configuration: allocation failure never returns, so no
 * rollback paths are needed here. */
#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \
do { \
  unsigned _ha_bkt; \
  (head)->hh.tbl->num_items++; \
  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \
  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
} while (0)

#endif
348 
349 
/* Add an item keeping the application-order list sorted per cmpfcn.
 * The key is referenced (not copied); hashval must already be computed. */
#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (char*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
      (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    void *_hs_iter = (head); \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
    if (_hs_iter) { \
      (add)->hh.next = _hs_iter; \
      if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
        HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
      } else { \
        (head) = (add); \
      } \
      HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
    } else { \
      HASH_APPEND_LIST(hh, head, add); \
    } \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \
} while (0)

/* Hash-computing front end for the sorted keyptr add. */
#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
do { \
  unsigned _hs_hashv; \
  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
} while (0)

#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)

#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
395 
/* Unsorted add: new items are appended to the application-order list.
 * The key is referenced (not copied); hashval must already be computed. */
#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
do { \
  IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \
  (add)->hh.hashv = (hashval); \
  (add)->hh.key = (char*) (keyptr); \
  (add)->hh.keylen = (unsigned) (keylen_in); \
  if (!(head)) { \
    (add)->hh.next = NULL; \
    (add)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh, add, _ha_oomed); \
    IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \
      (head) = (add); \
    IF_HASH_NONFATAL_OOM( } ) \
  } else { \
    (add)->hh.tbl = (head)->hh.tbl; \
    HASH_APPEND_LIST(hh, head, add); \
  } \
  HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \
  HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \
} while (0)

#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
  unsigned _ha_hashv; \
  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
} while (0)

#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)

/* Most common entry point: key is a member field of the added struct. */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
429 
/* Map a hash value to a bucket index. num_bkts is always a power of two,
 * so masking with (num_bkts - 1) is equivalent to the modulo.
 * FIX: the assignment target `bkt` is now parenthesized like the other
 * macro arguments, so expansion stays well-formed for any lvalue argument. */
#define HASH_TO_BKT(hashv,num_bkts,bkt) \
do { \
  (bkt) = ((hashv) & ((num_bkts) - 1U)); \
} while (0)
434 
/* Delete "delptr" from the hash table.
 * "The usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation: earlier code
 * referenced (delptr) directly, which broke when the caller used the same
 * symbol for head and deletee, as in HASH_DELETE(hh,users,users). We want
 * that to work, but repointing the head forfeits further access to the
 * deletee through that symbol. Solution: copy the deletee's handle pointer
 * into scratch space first and do all later references through it. */
#define HASH_DELETE(hh,head,delptr) \
  HASH_DELETE_HH(hh, head, &(delptr)->hh)

#define HASH_DELETE_HH(hh,head,delptrhh) \
do { \
  struct UT_hash_handle *_hd_hh_del = (delptrhh); \
  if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
    /* last item: tear the whole table down */ \
    HASH_BLOOM_FREE((head)->hh.tbl); \
    uthash_free((head)->hh.tbl->buckets, \
        (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head) = NULL; \
  } else { \
    unsigned _hd_bkt; \
    if (_hd_hh_del == (head)->hh.tbl->tail) { \
      (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \
    } \
    if (_hd_hh_del->prev != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \
    } else { \
      DECLTYPE_ASSIGN(head, _hd_hh_del->next); \
    } \
    if (_hd_hh_del->next != NULL) { \
      HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \
    } \
    HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
    HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
    (head)->hh.tbl->num_items--; \
  } \
  HASH_FSCK(hh, head, "HASH_DELETE_HH"); \
} while (0)
478 
/* Convenience forms of HASH_FIND/HASH_ADD/HASH_DEL for common key types
 * (NUL-terminated strings, ints, and pointers) using the default `hh`. */
#define HASH_FIND_STR(head,findstr,out) \
  HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
  HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
#define HASH_REPLACE_STR(head,strfield,add,replaced) \
  HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
#define HASH_FIND_INT(head,findint,out) \
  HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
  HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_REPLACE_INT(head,intfield,add,replaced) \
  HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
#define HASH_FIND_PTR(head,findptr,out) \
  HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
  HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
#define HASH_DEL(head,delptr) \
  HASH_DELETE(hh,head,delptr)
500 
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is
 * defined. This is for uthash developers only; it compiles away otherwise. */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head,where) \
do { \
  struct UT_hash_handle *_thh; \
  if (head) { \
    unsigned _bkt_i; \
    unsigned _count = 0; \
    char *_prev; \
    /* pass 1: walk every bucket chain, checking back-links and counts */ \
    for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \
      unsigned _bkt_count = 0; \
      _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
      _prev = NULL; \
      while (_thh) { \
        if (_prev != (char*)(_thh->hh_prev)) { \
          HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \
              (where), (void*)_thh->hh_prev, (void*)_prev); \
        } \
        _bkt_count++; \
        _prev = (char*)(_thh); \
        _thh = _thh->hh_next; \
      } \
      _count += _bkt_count; \
      if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
        HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \
            (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
      } \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
    /* pass 2: walk the app-order list and re-count */ \
    _count = 0; \
    _prev = NULL; \
    _thh = &(head)->hh; \
    while (_thh) { \
      _count++; \
      if (_prev != (char*)_thh->prev) { \
        HASH_OOPS("%s: invalid prev %p, actual %p\n", \
            (where), (void*)_thh->prev, (void*)_prev); \
      } \
      _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
      _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \
    } \
    if (_count != (head)->hh.tbl->num_items) { \
      HASH_OOPS("%s: invalid app item count %u, actual %u\n", \
          (where), (head)->hh.tbl->num_items, _count); \
    } \
  } \
} while (0)
#else
#define HASH_FSCK(hh,head,where)
#endif
557 
/* When compiled with -DHASH_EMIT_KEYS=<fd>, length-prefixed keys are written
 * to that descriptor, which is useful for tuning the hash function offline.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
  unsigned _klen = fieldlen; \
  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
571 
/* Default to Jenkins' hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX. */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
578 
/* The Bernstein hash function, used in Perl prior to v5.6.
 * Note (x<<5)+x == x*33. */
#define HASH_BER(key,keylen,hashv) \
do { \
  unsigned _hb_keylen = (unsigned)keylen; \
  const unsigned char *_hb_key = (const unsigned char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen-- != 0U) { \
    (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
  } \
} while (0)
589 
590 
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
 * FIX: `hashv` and `keylen` are now parenthesized at every expansion site
 * (macro-argument hygiene); previously an expression argument such as a
 * conditional could re-associate with the surrounding operators. */
#define HASH_SAX(key,keylen,hashv) \
do { \
  unsigned _sx_i; \
  const unsigned char *_hs_key = (const unsigned char*)(key); \
  (hashv) = 0; \
  for (_sx_i=0; _sx_i < (keylen); _sx_i++) { \
    (hashv) ^= ((hashv) << 5) + ((hashv) >> 2) + _hs_key[_sx_i]; \
  } \
} while (0)
/* FNV-1a variation: offset basis 2166136261, prime 16777619 (32-bit).
 * FIX: `hashv` and `keylen` are now parenthesized at every expansion site
 * (macro-argument hygiene), matching HASH_BER; the computed sequence of
 * values is unchanged. */
#define HASH_FNV(key,keylen,hashv) \
do { \
  unsigned _fn_i; \
  const unsigned char *_hf_key = (const unsigned char*)(key); \
  (hashv) = 2166136261U; \
  for (_fn_i=0; _fn_i < (keylen); _fn_i++) { \
    (hashv) = (hashv) ^ _hf_key[_fn_i]; \
    (hashv) = (hashv) * 16777619U; \
  } \
} while (0)
613 
/* Jenkins' one-at-a-time hash: per-byte mix, then a final avalanche. */
#define HASH_OAT(key,keylen,hashv) \
do { \
  unsigned _ho_i; \
  const unsigned char *_ho_key = (const unsigned char*)(key); \
  hashv = 0; \
  for (_ho_i = 0; _ho_i < keylen; _ho_i++) { \
    hashv += _ho_key[_ho_i]; \
    hashv += (hashv << 10); \
    hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
} while (0)
628 
/* Bob Jenkins' lookup2 mixing step: reversibly scrambles three words. */
#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 );  \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 );  \
  a -= b; a -= c; a ^= ( c >> 3 );  \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)

/* Jenkins' lookup2 hash: consume 12 bytes per round (little-endian word
 * assembly), then fold in the length and the 0-11 byte tail. */
#define HASH_JEN(key,keylen,hashv) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  unsigned const char *_hj_key=(unsigned const char*)(key); \
  hashv = 0xfeedbeefu; \
  _hj_i = _hj_j = 0x9e3779b9u; /* golden ratio constant */ \
  _hj_k = (unsigned)(keylen); \
  while (_hj_k >= 12U) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    _hj_key += 12; \
    _hj_k -= 12U; \
  } \
  hashv += (unsigned)(keylen); \
  switch ( _hj_k ) { \
    case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
    case 10: hashv += ( (unsigned)_hj_key[9] << 16 );  /* FALLTHROUGH */ \
    case 9:  hashv += ( (unsigned)_hj_key[8] << 8 );   /* FALLTHROUGH */ \
    case 8:  _hj_j += ( (unsigned)_hj_key[7] << 24 );  /* FALLTHROUGH */ \
    case 7:  _hj_j += ( (unsigned)_hj_key[6] << 16 );  /* FALLTHROUGH */ \
    case 6:  _hj_j += ( (unsigned)_hj_key[5] << 8 );   /* FALLTHROUGH */ \
    case 5:  _hj_j += _hj_key[4];                      /* FALLTHROUGH */ \
    case 4:  _hj_i += ( (unsigned)_hj_key[3] << 24 );  /* FALLTHROUGH */ \
    case 3:  _hj_i += ( (unsigned)_hj_key[2] << 16 );  /* FALLTHROUGH */ \
    case 2:  _hj_i += ( (unsigned)_hj_key[1] << 8 );   /* FALLTHROUGH */ \
    case 1:  _hj_i += _hj_key[0];                      /* FALLTHROUGH */ \
    default: /* make gcc -Wswitch-default happy */ \
      ; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
} while (0)
683 
/* The Paul Hsieh "SuperFastHash" function. get16bits reads two bytes as a
 * little-endian 16-bit value; on platforms known to tolerate unaligned
 * loads it is a direct uint16_t read, otherwise a byte-wise assembly. */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
    || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                      +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,hashv) \
do { \
  unsigned const char *_sfh_key=(unsigned const char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
  \
  unsigned _sfh_rem = _sfh_len & 3U; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabeu; \
  \
  /* Main loop: two 16-bit chunks per iteration */ \
  for (;_sfh_len > 0U; _sfh_len--) { \
    hashv    += get16bits (_sfh_key); \
    _sfh_tmp  = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
    hashv     = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2U*sizeof (uint16_t); \
    hashv    += hashv >> 11; \
  } \
  \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
            hashv += hashv >> 11; \
            break; \
    case 2: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
    case 1: hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
  } \
  \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
} while (0)
737 
#ifdef HASH_USING_NO_STRICT_ALIASING
/* MurmurHash exploits some CPUs' (x86, x86_64) tolerance for unaligned
 * reads. On other CPUs (e.g. Sparc) an unaligned read causes a bus error,
 * so the fast direct-load path is used only where it is known to be safe.
 *
 * The preprocessor built-in defines can be listed with:
 *
 *   gcc -m64 -dM -E - < /dev/null            (gcc)
 *   cc -## a.c   (a.c being any test file)   (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
#define MUR_GETBLOCK(p,i) p[i]
#else /* non intel */
/* Classify the pointer's misalignment within its 4-byte word... */
#define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
#define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
#define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
#define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
/* ...and round it down to the aligned word it lives in. */
#define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
#if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
#define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
#define MUR_TWO_TWO(p)   ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >>  8))
#else /* assume little endian non-intel */
#define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
#define MUR_TWO_TWO(p)   ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
#define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) <<  8))
#endif
/* Reassemble a logical 32-bit block from two aligned reads. */
#define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) :           \
                            (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
                             (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) :  \
                                                     MUR_ONE_THREE(p))))
#endif
#define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
/* Final avalanche of MurmurHash3 (fmix32). */
#define MUR_FMIX(_h) \
do { \
  _h ^= _h >> 16; \
  _h *= 0x85ebca6bu; \
  _h ^= _h >> 13; \
  _h *= 0xc2b2ae35u; \
  _h ^= _h >> 16; \
} while (0)

#define HASH_MUR(key,keylen,hashv) \
do { \
  const uint8_t *_mur_data = (const uint8_t*)(key); \
  const int _mur_nblocks = (int)(keylen) / 4; \
  uint32_t _mur_h1 = 0xf88D5353u; \
  uint32_t _mur_c1 = 0xcc9e2d51u; \
  uint32_t _mur_c2 = 0x1b873593u; \
  uint32_t _mur_k1 = 0; \
  const uint8_t *_mur_tail; \
  const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
  int _mur_i; \
  for (_mur_i = -_mur_nblocks; _mur_i != 0; _mur_i++) { \
    _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
    _mur_k1 *= _mur_c1; \
    _mur_k1 = MUR_ROTL32(_mur_k1,15); \
    _mur_k1 *= _mur_c2; \
    \
    _mur_h1 ^= _mur_k1; \
    _mur_h1 = MUR_ROTL32(_mur_h1,13); \
    _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
  } \
  _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
  _mur_k1=0; \
  switch ((keylen) & 3U) { \
    case 0: break; \
    case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
    case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8;  /* FALLTHROUGH */ \
    case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
            _mur_k1 *= _mur_c1; \
            _mur_k1 = MUR_ROTL32(_mur_k1,15); \
            _mur_k1 *= _mur_c2; \
            _mur_h1 ^= _mur_k1; \
  } \
  _mur_h1 ^= (uint32_t)(keylen); \
  MUR_FMIX(_mur_h1); \
  hashv = _mur_h1; \
} while (0)
#endif  /* HASH_USING_NO_STRICT_ALIASING */
818 
/* Iterate over the items in a known bucket to find the desired item.
 * The hash value and key length are compared first (cheap) before the
 * full key memcmp. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
do { \
  if ((head).hh_head != NULL) { \
    DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
  } else { \
    (out) = NULL; \
  } \
  while ((out) != NULL) { \
    if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
      if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
        break; \
      } \
    } \
    if ((out)->hh.hh_next != NULL) { \
      DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
    } else { \
      (out) = NULL; \
    } \
  } \
} while (0)
840 
/* Add an item at the front of a bucket's chain; trigger bucket-array
 * expansion when the chain grows past its (per-bucket scaled) threshold. */
#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \
do { \
  UT_hash_bucket *_ha_head = &(head); \
  _ha_head->count++; \
  (addhh)->hh_next = _ha_head->hh_head; \
  (addhh)->hh_prev = NULL; \
  if (_ha_head->hh_head != NULL) { \
    _ha_head->hh_head->hh_prev = (addhh); \
  } \
  _ha_head->hh_head = (addhh); \
  if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \
      && !(addhh)->tbl->noexpand) { \
    HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \
    IF_HASH_NONFATAL_OOM( \
      if (oomed) { \
        HASH_DEL_IN_BKT(head,addhh); \
      } \
    ) \
  } \
} while (0)
862 
/* Remove an item from a given bucket: unlink delhh from the bucket's
 * doubly-linked chain and decrement the bucket's item count.
 * delhh must actually be chained in this bucket; no search is performed. */
#define HASH_DEL_IN_BKT(head,delhh)                                              \
do {                                                                             \
  UT_hash_bucket *_hd_head = &(head);                                            \
  _hd_head->count--;                                                             \
  /* if delhh is the chain head, advance the head past it */                     \
  if (_hd_head->hh_head == (delhh)) {                                            \
    _hd_head->hh_head = (delhh)->hh_next;                                        \
  }                                                                              \
  /* bridge the neighbors around delhh */                                        \
  if ((delhh)->hh_prev) {                                                        \
    (delhh)->hh_prev->hh_next = (delhh)->hh_next;                                \
  }                                                                              \
  if ((delhh)->hh_next) {                                                        \
    (delhh)->hh_next->hh_prev = (delhh)->hh_prev;                                \
  }                                                                              \
} while (0)
878 
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 * On allocation failure, HASH_RECORD_OOM is invoked (fatal exit by default;
 * sets oomed in the non-fatal-OOM configuration) and the table is unchanged. */
#define HASH_EXPAND_BUCKETS(hh,tbl,oomed)                                        \
do {                                                                             \
  unsigned _he_bkt;                                                              \
  unsigned _he_bkt_i;                                                            \
  struct UT_hash_handle *_he_thh, *_he_hh_nxt;                                   \
  UT_hash_bucket *_he_new_buckets, *_he_newbkt;                                  \
  _he_new_buckets = (UT_hash_bucket*)uthash_malloc(                              \
           2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket));            \
  if (!_he_new_buckets) {                                                        \
    HASH_RECORD_OOM(oomed);                                                      \
  } else {                                                                       \
    uthash_bzero(_he_new_buckets,                                                \
        2UL * (tbl)->num_buckets * sizeof(struct UT_hash_bucket));               \
    /* ceil(num_items / (2*num_buckets)) via shift/mask; see comment above */    \
    (tbl)->ideal_chain_maxlen =                                                  \
       ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) +                      \
       ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U);    \
    (tbl)->nonideal_items = 0;                                                   \
    /* rehash every chain from the old buckets into the doubled bucket array */  \
    for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) {           \
      _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head;                             \
      while (_he_thh != NULL) {                                                  \
        _he_hh_nxt = _he_thh->hh_next;                                           \
        HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt);           \
        _he_newbkt = &(_he_new_buckets[_he_bkt]);                                \
        if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) {                 \
          (tbl)->nonideal_items++;                                               \
          /* delay this bucket's next expansion in proportion to its overuse */  \
          _he_newbkt->expand_mult = _he_newbkt->count / (tbl)->ideal_chain_maxlen; \
        }                                                                        \
        /* push the handle onto the front of its new bucket chain */             \
        _he_thh->hh_prev = NULL;                                                 \
        _he_thh->hh_next = _he_newbkt->hh_head;                                  \
        if (_he_newbkt->hh_head != NULL) {                                       \
          _he_newbkt->hh_head->hh_prev = _he_thh;                                \
        }                                                                        \
        _he_newbkt->hh_head = _he_thh;                                           \
        _he_thh = _he_hh_nxt;                                                    \
      }                                                                          \
    }                                                                            \
    uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \
    (tbl)->num_buckets *= 2U;                                                    \
    (tbl)->log2_num_buckets++;                                                   \
    (tbl)->buckets = _he_new_buckets;                                            \
    /* two consecutive "ineffective" expansions (more than half the items on    \
     * nonideal chains) inhibit all further expansion for this table */          \
    (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ?   \
        ((tbl)->ineff_expands+1U) : 0U;                                          \
    if ((tbl)->ineff_expands > 1U) {                                             \
      (tbl)->noexpand = 1;                                                       \
      uthash_noexpand_fyi(tbl);                                                  \
    }                                                                            \
    uthash_expand_fyi(tbl);                                                      \
  }                                                                              \
} while (0)
957 
958 
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
/* Sorts the items of the hash into a new application order using cmpfcn(a,b),
 * which must return <0 / 0 / >0 like strcmp. Only the app-order prev/next
 * links (plus tbl->tail and head) are rewritten; the bucket chains used for
 * lookup are untouched. Bottom-up mergesort: each pass merges adjacent runs
 * of _hs_insize handles, doubling the run size until one pass performs at
 * most a single merge. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn)                                                 \
do {                                                                             \
  unsigned _hs_i;                                                                \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize;               \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail;            \
  if (head != NULL) {                                                            \
    _hs_insize = 1;                                                              \
    _hs_looping = 1;                                                             \
    _hs_list = &((head)->hh);                                                    \
    while (_hs_looping != 0U) {                                                  \
      _hs_p = _hs_list;                                                          \
      _hs_list = NULL;                                                           \
      _hs_tail = NULL;                                                           \
      _hs_nmerges = 0;                                                           \
      while (_hs_p != NULL) {                                                    \
        _hs_nmerges++;                                                           \
        /* advance q up to _hs_insize handles past p; count p-run length */      \
        _hs_q = _hs_p;                                                           \
        _hs_psize = 0;                                                           \
        for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) {                           \
          _hs_psize++;                                                           \
          _hs_q = ((_hs_q->next != NULL) ?                                       \
            HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                   \
          if (_hs_q == NULL) {                                                   \
            break;                                                               \
          }                                                                      \
        }                                                                        \
        _hs_qsize = _hs_insize;                                                  \
        /* merge the p-run and q-run, appending each winner to the output */     \
        while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) {    \
          if (_hs_psize == 0U) {                                                 \
            /* p-run exhausted: take from q */                                   \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) {                     \
            /* q-run exhausted: take from p */                                   \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else if ((cmpfcn(                                                    \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)),             \
                DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q))              \
                )) <= 0) {                                                       \
            /* p's element sorts first or ties (<= keeps the sort stable) */     \
            _hs_e = _hs_p;                                                       \
            if (_hs_p != NULL) {                                                 \
              _hs_p = ((_hs_p->next != NULL) ?                                   \
                HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL);               \
            }                                                                    \
            _hs_psize--;                                                         \
          } else {                                                               \
            _hs_e = _hs_q;                                                       \
            _hs_q = ((_hs_q->next != NULL) ?                                     \
              HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL);                 \
            _hs_qsize--;                                                         \
          }                                                                      \
          /* append the chosen handle _hs_e to the merged output list */         \
          if ( _hs_tail != NULL ) {                                              \
            _hs_tail->next = ((_hs_e != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL);                       \
          } else {                                                               \
            _hs_list = _hs_e;                                                    \
          }                                                                      \
          if (_hs_e != NULL) {                                                   \
            _hs_e->prev = ((_hs_tail != NULL) ?                                  \
              ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL);                    \
          }                                                                      \
          _hs_tail = _hs_e;                                                      \
        }                                                                        \
        _hs_p = _hs_q;                                                           \
      }                                                                          \
      if (_hs_tail != NULL) {                                                    \
        _hs_tail->next = NULL;                                                   \
      }                                                                          \
      if (_hs_nmerges <= 1U) {                                                   \
        /* a single (or no) merge this pass: list is fully sorted */             \
        _hs_looping = 0;                                                         \
        (head)->hh.tbl->tail = _hs_tail;                                         \
        DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list));           \
      }                                                                          \
      _hs_insize *= 2U;                                                          \
    }                                                                            \
    HASH_FSCK(hh, head, "HASH_SRT");                                             \
  }                                                                              \
} while (0)
1046 
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure.
 * hh_dst/dst: handle field name and head pointer of the destination hash
 * hh_src/src: handle field name and head pointer of the source hash
 * cond:       predicate invoked as cond(elt) on a void*; nonzero selects */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond)                              \
do {                                                                             \
  unsigned _src_bkt, _dst_bkt;                                                   \
  void *_last_elt = NULL, *_elt;                                                 \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL;                         \
  /* byte offset of the destination hash handle within the element struct */     \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst));                 \
  if ((src) != NULL) {                                                           \
    /* walk every bucket chain of the source hash */                             \
    for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) {    \
      for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head;               \
           _src_hh != NULL;                                                      \
           _src_hh = _src_hh->hh_next) {                                         \
        _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh);                         \
        if (cond(_elt)) {                                                        \
          IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; )                             \
          /* locate the secondary (destination) handle inside the element */     \
          _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho);                 \
          _dst_hh->key = _src_hh->key;                                           \
          _dst_hh->keylen = _src_hh->keylen;                                     \
          _dst_hh->hashv = _src_hh->hashv;                                       \
          _dst_hh->prev = _last_elt;                                             \
          _dst_hh->next = NULL;                                                  \
          if (_last_elt_hh != NULL) {                                            \
            _last_elt_hh->next = _elt;                                           \
          }                                                                      \
          if ((dst) == NULL) {                                                   \
            /* first selected item: it becomes the destination head and a        \
             * fresh table is created for the destination hash */                \
            DECLTYPE_ASSIGN(dst, _elt);                                          \
            HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed);                             \
            IF_HASH_NONFATAL_OOM(                                                \
              if (_hs_oomed) {                                                   \
                uthash_nonfatal_oom(_elt);                                       \
                (dst) = NULL;                                                    \
                continue;                                                        \
              }                                                                  \
            )                                                                    \
          } else {                                                               \
            _dst_hh->tbl = (dst)->hh_dst.tbl;                                    \
          }                                                                      \
          HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt);      \
          HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \
          (dst)->hh_dst.tbl->num_items++;                                        \
          IF_HASH_NONFATAL_OOM(                                                  \
            if (_hs_oomed) {                                                     \
              /* bucket add failed under non-fatal OOM: roll back and skip */    \
              HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh);                           \
              HASH_DELETE_HH(hh_dst, dst, _dst_hh);                              \
              _dst_hh->tbl = NULL;                                               \
              uthash_nonfatal_oom(_elt);                                         \
              continue;                                                          \
            }                                                                    \
          )                                                                      \
          HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv);                          \
          _last_elt = _elt;                                                      \
          _last_elt_hh = _dst_hh;                                                \
        }                                                                        \
      }                                                                          \
    }                                                                            \
  }                                                                              \
  HASH_FSCK(hh_dst, dst, "HASH_SELECT");                                         \
} while (0)
1109 
/* Dispose of the hash table's own storage (bloom filter, bucket array, and
 * the UT_hash_table itself) and reset head to NULL. The elements themselves
 * are NOT freed; that remains the caller's responsibility. */
#define HASH_CLEAR(hh,head)                                                      \
do {                                                                             \
  if ((head) == NULL) {                                                          \
    break; /* nothing allocated for an empty hash */                             \
  }                                                                              \
  HASH_BLOOM_FREE((head)->hh.tbl);                                               \
  uthash_free((head)->hh.tbl->buckets,                                           \
              (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket));        \
  uthash_free((head)->hh.tbl, sizeof(UT_hash_table));                            \
  (head) = NULL;                                                                 \
} while (0)
1120 
/* Total bookkeeping overhead of the hash in bytes: the table struct, the
 * bucket array, one hash handle per item, and the bloom filter (if enabled).
 * Evaluates to 0 for an empty (NULL-headed) hash. */
#define HASH_OVERHEAD(hh,head)                                                   \
 (((head) == NULL) ? 0U : (                                                      \
 (size_t)(sizeof(UT_hash_table) +                                                \
          ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) +               \
          ((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) +                 \
          (HASH_BLOOM_BYTELEN))))
1127 
#ifdef NO_DECLTYPE
/* Deletion-safe iteration: tmp always holds the successor of el, so the
 * current element may be freed or HASH_DELETE'd inside the loop body.
 * In the NO_DECLTYPE variant, tmp is written through a char** alias because
 * its declared type cannot be recovered without decltype/__typeof.
 * All macro arguments are parenthesized at use (CERT PRE01-C). */
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)(((head) != NULL)?(head)->hh.next:NULL)); \
  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)(((tmp) != NULL)?(tmp)->hh.next:NULL)))
#else
/* Deletion-safe iteration: tmp always holds the successor of el, so the
 * current element may be freed or HASH_DELETE'd inside the loop body.
 * All macro arguments are parenthesized at use (CERT PRE01-C). */
#define HASH_ITER(hh,head,el,tmp)                                                \
for(((el)=(head)), ((tmp)=DECLTYPE(el)(((head) != NULL)?(head)->hh.next:NULL));  \
  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)(((tmp) != NULL)?(tmp)->hh.next:NULL)))
#endif
1137 
/* Obtain a count of items in the hash (O(1): read from the table).
 * Evaluates to 0U for a NULL head. The head argument is fully parenthesized
 * so that arbitrary expressions may be passed safely (CERT PRE01-C). */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) (((head) != NULL) ? ((head)->hh.tbl->num_items) : 0U)
1141 
/* One bucket of the hash table: the head of a doubly-linked chain of hash
 * handles plus chain-length bookkeeping used to drive bucket expansion. */
typedef struct UT_hash_bucket {
   /* head of this bucket's chain of hash handles (restored: this member is
    * dereferenced as .hh_head by HASH_FIND_IN_BKT/HASH_ADD_TO_BKT/etc. but
    * had been lost from this rendering of the header) */
   struct UT_hash_handle *hh_head;
   /* number of items currently chained in this bucket */
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;
1161 
/* random signature used only to find hash tables in external analysis;
 * stamped into UT_hash_table.signature (and bloom_sig for the bloom filter)
 * so debuggers/core-dump tools can locate live tables by pattern */
#define HASH_SIGNATURE 0xa0111fe1u
#define HASH_BLOOM_SIGNATURE 0xb12220f2u
1165 
/* Per-hash bookkeeping, shared by every item in one hash via hh.tbl. */
typedef struct UT_hash_table {
   /* array of num_buckets buckets (restored: this member is read and written
    * by HASH_EXPAND_BUCKETS and friends but had been lost from this rendering
    * of the header; struct tag form keeps this declaration self-contained) */
   struct UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets; /* bucket count is kept a power of 2 */
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length.
    * (restored member; recomputed on every expansion by HASH_EXPAND_BUCKETS) */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   uint8_t bloom_nbits;
#endif

} UT_hash_table;
1198 
/* The hash handle embedded in every hashable element. It links the element
 * into both the application-order list (prev/next) and its bucket chain
 * (hh_prev/hh_next), and caches the key location and hash value. */
typedef struct UT_hash_handle {
   /* owning table, shared by all items in one hash (restored: this member is
    * dereferenced as ->tbl throughout the macros above but had been lost from
    * this rendering of the header) */
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order */
   void *next;                       /* next element in app order */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order */
   void *key;                        /* ptr to enclosing struct's key */
   unsigned keylen;                  /* enclosing struct's key len */
   unsigned hashv;                   /* result of hash-fcn(key) */
} UT_hash_handle;
1209 
1210 #endif /* UTHASH_H */
struct UT_hash_handle * tail
Definition: uthash.h:1170
UT_hash_bucket * buckets
Definition: uthash.h:1167
unsigned expand_mult
Definition: uthash.h:1158
struct UT_hash_handle * hh_head
Definition: uthash.h:1143
struct UT_hash_handle * hh_next
Definition: uthash.h:1204
unsigned count
Definition: uthash.h:1144
unsigned noexpand
Definition: uthash.h:1188
unsigned keylen
Definition: uthash.h:1206
unsigned nonideal_items
Definition: uthash.h:1180
ptrdiff_t hho
Definition: uthash.h:1171
unsigned num_buckets
Definition: uthash.h:1168
struct UT_hash_bucket UT_hash_bucket
unsigned hashv
Definition: uthash.h:1207
void * key
Definition: uthash.h:1205
struct UT_hash_table * tbl
Definition: uthash.h:1200
unsigned int uint32_t
Definition: uthash.h:78
uint32_t signature
Definition: uthash.h:1190
struct UT_hash_table UT_hash_table
unsigned ideal_chain_maxlen
Definition: uthash.h:1175
struct UT_hash_handle UT_hash_handle
struct UT_hash_handle * hh_prev
Definition: uthash.h:1203
unsigned char uint8_t
Definition: uthash.h:79
unsigned num_items
Definition: uthash.h:1169
void * prev
Definition: uthash.h:1201
void * next
Definition: uthash.h:1202