Bug Summary

File: home/bhubbard/working/src/ceph/src/spdk/dpdk/lib/librte_hash/rte_cuckoo_hash.c
Warning: line 631, column 27
Access to field 'len' results in a dereference of a null pointer (loaded from variable 'cached_free_slots')
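
For orientation, the reported path boils down to the pattern sketched below. This is a minimal illustration only: 'struct hash' and 'add_key' are simplified stand-ins for the real 'struct rte_hash' and '__rte_hash_add_key_with_hash', and the bodies keep just the statements that matter to the diagnostic. 'cached_free_slots' starts out NULL, is assigned only when 'h->use_local_cache' is observed as non-zero, and is later passed to 'enqueue_slot_back', which dereferences it whenever it observes the same flag as non-zero.

    #include <stddef.h>

    struct lcore_cache { unsigned int len; void *objs[64]; };
    struct hash { int use_local_cache; struct lcore_cache *local_free_slots; };

    /* Same shape as enqueue_slot_back(): dereferences the cache pointer
     * whenever the flag is set, regardless of what the caller passed. */
    static void enqueue_slot_back(const struct hash *h,
                                  struct lcore_cache *cached_free_slots,
                                  void *slot_id)
    {
        if (h->use_local_cache) {
            cached_free_slots->objs[cached_free_slots->len] = slot_id;
            cached_free_slots->len++;
        }
    }

    /* Same shape as the flagged caller: the pointer stays NULL when the
     * flag is seen as false at this first check. */
    static void add_key(const struct hash *h, void *slot_id)
    {
        struct lcore_cache *cached_free_slots = NULL;
        if (h->use_local_cache)
            cached_free_slots = &h->local_free_slots[0];
        /* ... the insert path reports a duplicate, so the slot is returned ... */
        enqueue_slot_back(h, cached_free_slots, slot_id);
    }

Because the flag is read independently at the two checks, the analyzer is free to assume it false at the first read and true at the second, which is exactly the combination (steps 5-6 and 14-15 in the annotated source) that reaches the dereference at line 631 with a null 'cached_free_slots'.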

Annotated Source Code


1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
4 */
5
6#include <string.h>
7#include <stdint.h>
8#include <errno.h>
9#include <stdio.h>
10#include <stdarg.h>
11#include <sys/queue.h>
12
13#include <rte_common.h>
14#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
15#include <rte_log.h>
16#include <rte_prefetch.h>
17#include <rte_branch_prediction.h>
18#include <rte_malloc.h>
19#include <rte_eal.h>
20#include <rte_eal_memconfig.h>
21#include <rte_per_lcore.h>
22#include <rte_errno.h>
23#include <rte_string_fns.h>
24#include <rte_cpuflags.h>
25#include <rte_rwlock.h>
26#include <rte_spinlock.h>
27#include <rte_ring.h>
28#include <rte_compat.h>
29#include <rte_vect.h>
30
31#include "rte_hash.h"
32#include "rte_cuckoo_hash.h"
33
34#define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
35 for (CURRENT_BKT = START_BUCKET; \
36 CURRENT_BKT != NULL; \
37 CURRENT_BKT = CURRENT_BKT->next)
38
39TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
40
41static struct rte_tailq_elem rte_hash_tailq = {
42 .name = "RTE_HASH",
43};
44EAL_REGISTER_TAILQ(rte_hash_tailq)
45
46struct rte_hash *
47rte_hash_find_existing(const char *name)
48{
49 struct rte_hash *h = NULL;
50 struct rte_tailq_entry *te;
51 struct rte_hash_list *hash_list;
52
53 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
54
55 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
56 TAILQ_FOREACH(te, hash_list, next) {
57 h = (struct rte_hash *) te->data;
58 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
59 break;
60 }
61 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
62
63 if (te == NULL) {
64 rte_errno = ENOENT;
65 return NULL;
66 }
67 return h;
68}
69
70static inline struct rte_hash_bucket *
71rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
72{
73 while (lst_bkt->next != NULL)
74 lst_bkt = lst_bkt->next;
75 return lst_bkt;
76}
77
78void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
79{
80 h->cmp_jump_table_idx = KEY_CUSTOM;
81 h->rte_hash_custom_cmp_eq = func;
82}
83
84static inline int
85rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
86{
87 if (h->cmp_jump_table_idx == KEY_CUSTOM)
88 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
89 else
90 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
91}
92
93/*
94 * We use higher 16 bits of hash as the signature value stored in table.
95 * We use the lower bits for the primary bucket
96 * location. Then we XOR primary bucket location and the signature
97 * to get the secondary bucket location. This is same as
98 * proposed in Bin Fan, et al's paper
99 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
100 * Smarter Hashing". The benefit to use
101 * XOR is that one could derive the alternative bucket location
102 * by only using the current bucket location and the signature.
103 */
104static inline uint16_t
105get_short_sig(const hash_sig_t hash)
106{
107 return hash >> 16;
108}
109
110static inline uint32_t
111get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
112{
113 return hash & h->bucket_bitmask;
114}
115
116static inline uint32_t
117get_alt_bucket_index(const struct rte_hash *h,
118 uint32_t cur_bkt_idx, uint16_t sig)
119{
120 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
121}
122
123struct rte_hash *
124rte_hash_create(const struct rte_hash_parameters *params)
125{
126 struct rte_hash *h = NULL;
127 struct rte_tailq_entry *te = NULL;
128 struct rte_hash_list *hash_list;
129 struct rte_ring *r = NULL;
130 struct rte_ring *r_ext = NULL;
131 char hash_name[RTE_HASH_NAMESIZE];
132 void *k = NULL;
133 void *buckets = NULL;
134 void *buckets_ext = NULL;
135 char ring_name[RTE_RING_NAMESIZE];
136 char ext_ring_name[RTE_RING_NAMESIZE];
137 unsigned num_key_slots;
138 unsigned i;
139 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
140 unsigned int ext_table_support = 0;
141 unsigned int readwrite_concur_support = 0;
142 unsigned int writer_takes_lock = 0;
143 unsigned int no_free_on_del = 0;
144 uint32_t *ext_bkt_to_free = NULL;
145 uint32_t *tbl_chng_cnt = NULL;
146 unsigned int readwrite_concur_lf_support = 0;
147
148 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
149
150 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
151
152 if (params == NULL) {
153 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
154 return NULL;
155 }
156
157 /* Check for valid parameters */
158 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
159 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
160 (params->key_len == 0)) {
161 rte_errno = EINVAL;
162 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
163 return NULL;
164 }
165
166 /* Validate correct usage of extra options */
167 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
168 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
169 rte_errno = EINVAL;
170 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
171 "rw concurrency lock free\n");
172 return NULL;
173 }
174
175 /* Check extra flags field to check extra options. */
176 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
177 hw_trans_mem_support = 1;
178
179 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
180 use_local_cache = 1;
181 writer_takes_lock = 1;
182 }
183
184 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
185 readwrite_concur_support = 1;
186 writer_takes_lock = 1;
187 }
188
189 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
190 ext_table_support = 1;
191
192 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
193 no_free_on_del = 1;
194
195 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
196 readwrite_concur_lf_support = 1;
197 /* Enable not freeing internal memory/index on delete */
198 no_free_on_del = 1;
199 }
200
201 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
202 if (use_local_cache)
203 /*
204 * Increase number of slots by total number of indices
205 * that can be stored in the lcore caches
206 * except for the first cache
207 */
208 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
209 (LCORE_CACHE_SIZE - 1) + 1;
210 else
211 num_key_slots = params->entries + 1;
212
213 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
214 /* Create ring (Dummy slot index is not enqueued) */
215 r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
216 params->socket_id, 0);
217 if (r == NULL) {
218 RTE_LOG(ERR, HASH, "memory allocation failed\n");
219 goto err;
220 }
221
222 const uint32_t num_buckets = rte_align32pow2(params->entries) /
223 RTE_HASH_BUCKET_ENTRIES;
224
225 /* Create ring for extendable buckets. */
226 if (ext_table_support) {
227 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
228 params->name);
229 r_ext = rte_ring_create(ext_ring_name,
230 rte_align32pow2(num_buckets + 1),
231 params->socket_id, 0);
232
233 if (r_ext == NULL) {
234 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
235 "failed\n");
236 goto err;
237 }
238 }
239
240 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
241
242 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
243
244 /* guarantee there's no existing: this is normally already checked
245 * by ring creation above */
246 TAILQ_FOREACH(te, hash_list, next) {
247 h = (struct rte_hash *) te->data;
248 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
249 break;
250 }
251 h = NULL;
252 if (te != NULL) {
253 rte_errno = EEXIST;
254 te = NULL;
255 goto err_unlock;
256 }
257
258 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
259 if (te == NULL) {
260 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
261 goto err_unlock;
262 }
263
264 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
265 RTE_CACHE_LINE_SIZE, params->socket_id);
266
267 if (h == NULL) {
268 RTE_LOG(ERR, HASH, "memory allocation failed\n");
269 goto err_unlock;
270 }
271
272 buckets = rte_zmalloc_socket(NULL,
273 num_buckets * sizeof(struct rte_hash_bucket),
274 RTE_CACHE_LINE_SIZE, params->socket_id);
275
276 if (buckets == NULL) {
277 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
278 goto err_unlock;
279 }
280
281 /* Allocate same number of extendable buckets */
282 if (ext_table_support) {
283 buckets_ext = rte_zmalloc_socket(NULL,
284 num_buckets * sizeof(struct rte_hash_bucket),
285 RTE_CACHE_LINE_SIZE, params->socket_id);
286 if (buckets_ext == NULL) {
287 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
288 "failed\n");
289 goto err_unlock;
290 }
291 /* Populate ext bkt ring. We reserve 0 similar to the
292 * key-data slot, just in case in future we want to
293 * use bucket index for the linked list and 0 means NULL
294 * for next bucket
295 */
296 for (i = 1; i <= num_buckets; i++)
297 rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
298
299 if (readwrite_concur_lf_support) {
300 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
301 num_key_slots, 0);
302 if (ext_bkt_to_free == NULL) {
303 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
304 "failed\n");
305 goto err_unlock;
306 }
307 }
308 }
309
310 const uint32_t key_entry_size =
311 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
312 KEY_ALIGNMENT);
313 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
314
315 k = rte_zmalloc_socket(NULL, key_tbl_size,
316 RTE_CACHE_LINE_SIZE, params->socket_id);
317
318 if (k == NULL) {
319 RTE_LOG(ERR, HASH, "memory allocation failed\n");
320 goto err_unlock;
321 }
322
323 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
324 RTE_CACHE_LINE_SIZE, params->socket_id);
325
326 if (tbl_chng_cnt == NULL) {
327 RTE_LOG(ERR, HASH, "memory allocation failed\n");
328 goto err_unlock;
329 }
330
331/*
332 * If x86 architecture is used, select appropriate compare function,
333 * which may use x86 intrinsics, otherwise use memcmp
334 */
335#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
336 /* Select function to compare keys */
337 switch (params->key_len) {
338 case 16:
339 h->cmp_jump_table_idx = KEY_16_BYTES;
340 break;
341 case 32:
342 h->cmp_jump_table_idx = KEY_32_BYTES;
343 break;
344 case 48:
345 h->cmp_jump_table_idx = KEY_48_BYTES;
346 break;
347 case 64:
348 h->cmp_jump_table_idx = KEY_64_BYTES;
349 break;
350 case 80:
351 h->cmp_jump_table_idx = KEY_80_BYTES;
352 break;
353 case 96:
354 h->cmp_jump_table_idx = KEY_96_BYTES;
355 break;
356 case 112:
357 h->cmp_jump_table_idx = KEY_112_BYTES;
358 break;
359 case 128:
360 h->cmp_jump_table_idx = KEY_128_BYTES;
361 break;
362 default:
363 /* If key is not multiple of 16, use generic memcmp */
364 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
365 }
366#else
367 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
368#endif
369
370 if (use_local_cache) {
371 h->local_free_slots = rte_zmalloc_socket(NULL,
372 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
373 RTE_CACHE_LINE_SIZE, params->socket_id);
374 }
375
376 /* Default hash function */
377#if defined(RTE_ARCH_X86)
378 default_hash_func = (rte_hash_function)rte_hash_crc;
379#elif defined(RTE_ARCH_ARM64)
380 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
381 default_hash_func = (rte_hash_function)rte_hash_crc;
382#endif
383 /* Setup hash context */
384 strlcpy(h->name, params->name, sizeof(h->name));
385 h->entries = params->entries;
386 h->key_len = params->key_len;
387 h->key_entry_size = key_entry_size;
388 h->hash_func_init_val = params->hash_func_init_val;
389
390 h->num_buckets = num_buckets;
391 h->bucket_bitmask = h->num_buckets - 1;
392 h->buckets = buckets;
393 h->buckets_ext = buckets_ext;
394 h->free_ext_bkts = r_ext;
395 h->hash_func = (params->hash_func == NULL) ?
396 default_hash_func : params->hash_func;
397 h->key_store = k;
398 h->free_slots = r;
399 h->ext_bkt_to_free = ext_bkt_to_free;
400 h->tbl_chng_cnt = tbl_chng_cnt;
401 *h->tbl_chng_cnt = 0;
402 h->hw_trans_mem_support = hw_trans_mem_support;
403 h->use_local_cache = use_local_cache;
404 h->readwrite_concur_support = readwrite_concur_support;
405 h->ext_table_support = ext_table_support;
406 h->writer_takes_lock = writer_takes_lock;
407 h->no_free_on_del = no_free_on_del;
408 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
409
410#if defined(RTE_ARCH_X86)
411 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
412 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
413 else
414#elif defined(RTE_ARCH_ARM64)
415 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
416 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
417 else
418#endif
419 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
420
421 /* Writer threads need to take the lock when:
422 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
423 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
424 */
425 if (h->writer_takes_lock) {
426 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
427 RTE_CACHE_LINE_SIZE);
428 if (h->readwrite_lock == NULL)
429 goto err_unlock;
430
431 rte_rwlock_init(h->readwrite_lock);
432 }
433
434 /* Populate free slots ring. Entry zero is reserved for key misses. */
435 for (i = 1; i < num_key_slots; i++)
436 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
437
438 te->data = (void *) h;
439 TAILQ_INSERT_TAIL(hash_list, te, next);
440 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
441
442 return h;
443err_unlock:
444 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
445err:
446 rte_ring_free(r);
447 rte_ring_free(r_ext);
448 rte_free(te);
449 rte_free(h);
450 rte_free(buckets);
451 rte_free(buckets_ext);
452 rte_free(k);
453 rte_free(tbl_chng_cnt);
454 rte_free(ext_bkt_to_free);
455 return NULL;
456}
457
458void
459rte_hash_free(struct rte_hash *h)
460{
461 struct rte_tailq_entry *te;
462 struct rte_hash_list *hash_list;
463
464 if (h == NULL)
465 return;
466
467 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
468
469 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
470
471 /* find out tailq entry */
472 TAILQ_FOREACH(te, hash_list, next) {
473 if (te->data == (void *) h)
474 break;
475 }
476
477 if (te == NULL) {
478 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
479 return;
480 }
481
482 TAILQ_REMOVE(hash_list, te, next);
483
484 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
485
486 if (h->use_local_cache)
487 rte_free(h->local_free_slots);
488 if (h->writer_takes_lock)
489 rte_free(h->readwrite_lock);
490 rte_ring_free(h->free_slots);
491 rte_ring_free(h->free_ext_bkts);
492 rte_free(h->key_store);
493 rte_free(h->buckets);
494 rte_free(h->buckets_ext);
495 rte_free(h->tbl_chng_cnt);
496 rte_free(h->ext_bkt_to_free);
497 rte_free(h);
498 rte_free(te);
499}
500
501hash_sig_t
502rte_hash_hash(const struct rte_hash *h, const void *key)
503{
504 /* calc hash result by key */
505 return h->hash_func(key, h->key_len, h->hash_func_init_val);
506}
507
508int32_t
509rte_hash_count(const struct rte_hash *h)
510{
511 uint32_t tot_ring_cnt, cached_cnt = 0;
512 uint32_t i, ret;
513
514 if (h == NULL)
515 return -EINVAL;
516
517 if (h->use_local_cache) {
518 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
519 (LCORE_CACHE_SIZE - 1);
520 for (i = 0; i < RTE_MAX_LCORE; i++)
521 cached_cnt += h->local_free_slots[i].len;
522
523 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
524 cached_cnt;
525 } else {
526 tot_ring_cnt = h->entries;
527 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
528 }
529 return ret;
530}
531
532/* Read write locks implemented using rte_rwlock */
533static inline void
534__hash_rw_writer_lock(const struct rte_hash *h)
535{
536 if (h->writer_takes_lock && h->hw_trans_mem_support)
537 rte_rwlock_write_lock_tm(h->readwrite_lock);
538 else if (h->writer_takes_lock)
539 rte_rwlock_write_lock(h->readwrite_lock);
540}
541
542static inline void
543__hash_rw_reader_lock(const struct rte_hash *h)
544{
545 if (h->readwrite_concur_support && h->hw_trans_mem_support)
546 rte_rwlock_read_lock_tm(h->readwrite_lock);
547 else if (h->readwrite_concur_support)
548 rte_rwlock_read_lock(h->readwrite_lock);
549}
550
551static inline void
552__hash_rw_writer_unlock(const struct rte_hash *h)
553{
554 if (h->writer_takes_lock && h->hw_trans_mem_support)
555 rte_rwlock_write_unlock_tm(h->readwrite_lock);
556 else if (h->writer_takes_lock)
557 rte_rwlock_write_unlock(h->readwrite_lock);
558}
559
560static inline void
561__hash_rw_reader_unlock(const struct rte_hash *h)
562{
563 if (h->readwrite_concur_support && h->hw_trans_mem_support)
564 rte_rwlock_read_unlock_tm(h->readwrite_lock);
565 else if (h->readwrite_concur_support)
566 rte_rwlock_read_unlock(h->readwrite_lock);
567}
568
569void
570rte_hash_reset(struct rte_hash *h)
571{
572 void *ptr;
573 uint32_t tot_ring_cnt, i;
574
575 if (h == NULL)
576 return;
577
578 __hash_rw_writer_lock(h);
579 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
580 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
581 *h->tbl_chng_cnt = 0;
582
583 /* clear the free ring */
584 while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
585 continue;
586
587 /* clear free extendable bucket ring and memory */
588 if (h->ext_table_support) {
589 memset(h->buckets_ext, 0, h->num_buckets *
590 sizeof(struct rte_hash_bucket));
591 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
592 continue;
593 }
594
595 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
596 if (h->use_local_cache)
597 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
598 (LCORE_CACHE_SIZE - 1);
599 else
600 tot_ring_cnt = h->entries;
601
602 for (i = 1; i < tot_ring_cnt + 1; i++)
603 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
604
605 /* Repopulate the free ext bkt ring. */
606 if (h->ext_table_support) {
607 for (i = 1; i <= h->num_buckets; i++)
608 rte_ring_sp_enqueue(h->free_ext_bkts,
609 (void *)((uintptr_t) i));
610 }
611
612 if (h->use_local_cache) {
613 /* Reset local caches per lcore */
614 for (i = 0; i < RTE_MAX_LCORE; i++)
615 h->local_free_slots[i].len = 0;
616 }
617 __hash_rw_writer_unlock(h);
618}
619
620/*
621 * Function called to enqueue back an index in the cache/ring,
622 * as slot has not being used and it can be used in the
623 * next addition attempt.
624 */
625static inline void
626enqueue_slot_back(const struct rte_hash *h,
627 struct lcore_cache *cached_free_slots,
628 void *slot_id)
629{
630 if (h->use_local_cache) {
Step 14: Assuming the condition is true
Step 15: Taking true branch
631 cached_free_slots->objs[cached_free_slots->len] = slot_id;
Step 16: Access to field 'len' results in a dereference of a null pointer (loaded from variable 'cached_free_slots')
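/* Analyzer path summary: 'cached_free_slots' kept its NULL initial value in
 * __rte_hash_add_key_with_hash() because the h->use_local_cache branch there
 * was assumed false (steps 5-6), while the same flag is assumed true here
 * (steps 14-15), so the store above dereferences a null pointer.
 */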
632 cached_free_slots->len++;
633 } else
634 rte_ring_sp_enqueue(h->free_slots, slot_id);
635}
636
637/* Search a key from bucket and update its data.
638 * Writer holds the lock before calling this.
639 */
640static inline int32_t
641search_and_update(const struct rte_hash *h, void *data, const void *key,
642 struct rte_hash_bucket *bkt, uint16_t sig)
643{
644 int i;
645 struct rte_hash_key *k, *keys = h->key_store;
646
647 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
648 if (bkt->sig_current[i] == sig) {
649 k = (struct rte_hash_key *) ((char *)keys +
650 bkt->key_idx[i] * h->key_entry_size);
651 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
652 /* 'pdata' acts as the synchronization point
653 * when an existing hash entry is updated.
654 * Key is not updated in this case.
655 */
656 __atomic_store_n(&k->pdata,
657 data,
658 __ATOMIC_RELEASE);
659 /*
660 * Return index where key is stored,
661 * subtracting the first dummy index
662 */
663 return bkt->key_idx[i] - 1;
664 }
665 }
666 }
667 return -1;
668}
669
670/* Only tries to insert at one bucket (@prim_bkt) without trying to push
671 * buckets around.
672 * return 1 if matching existing key, return 0 if succeeds, return -1 for no
673 * empty entry.
674 */
675static inline int32_t
676rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
677 struct rte_hash_bucket *prim_bkt,
678 struct rte_hash_bucket *sec_bkt,
679 const struct rte_hash_key *key, void *data,
680 uint16_t sig, uint32_t new_idx,
681 int32_t *ret_val)
682{
683 unsigned int i;
684 struct rte_hash_bucket *cur_bkt;
685 int32_t ret;
686
687 __hash_rw_writer_lock(h);
688 /* Check if key was inserted after last check but before this
689 * protected region in case of inserting duplicated keys.
690 */
691 ret = search_and_update(h, data, key, prim_bkt, sig);
692 if (ret != -1) {
693 __hash_rw_writer_unlock(h);
694 *ret_val = ret;
695 return 1;
696 }
697
698 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
699 ret = search_and_update(h, data, key, cur_bkt, sig);
700 if (ret != -1) {
701 __hash_rw_writer_unlock(h);
702 *ret_val = ret;
703 return 1;
704 }
705 }
706
707 /* Insert new entry if there is room in the primary
708 * bucket.
709 */
710 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
711 /* Check if slot is available */
712 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
713 prim_bkt->sig_current[i] = sig;
714 /* Key can be of arbitrary length, so it is
715 * not possible to store it atomically.
716 * Hence the new key element's memory stores
717 * (key as well as data) should be complete
718 * before it is referenced.
719 */
720 __atomic_store_n(&prim_bkt->key_idx[i],
721 new_idx,
722 __ATOMIC_RELEASE);
723 break;
724 }
725 }
726 __hash_rw_writer_unlock(h);
727
728 if (i != RTE_HASH_BUCKET_ENTRIES)
729 return 0;
730
731 /* no empty entry */
732 return -1;
733}
734
735/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
736 * the path head with new entry (sig, alt_hash, new_idx)
737 * return 1 if matched key found, return -1 if cuckoo path invalided and fail,
738 * return 0 if succeeds.
739 */
740static inline int
741rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
742 struct rte_hash_bucket *bkt,
743 struct rte_hash_bucket *alt_bkt,
744 const struct rte_hash_key *key, void *data,
745 struct queue_node *leaf, uint32_t leaf_slot,
746 uint16_t sig, uint32_t new_idx,
747 int32_t *ret_val)
748{
749 uint32_t prev_alt_bkt_idx;
750 struct rte_hash_bucket *cur_bkt;
751 struct queue_node *prev_node, *curr_node = leaf;
752 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
753 uint32_t prev_slot, curr_slot = leaf_slot;
754 int32_t ret;
755
756 __hash_rw_writer_lock(h);
757
758 /* In case empty slot was gone before entering protected region */
759 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
760 __hash_rw_writer_unlock(h);
761 return -1;
762 }
763
764 /* Check if key was inserted after last check but before this
765 * protected region.
766 */
767 ret = search_and_update(h, data, key, bkt, sig);
768 if (ret != -1) {
769 __hash_rw_writer_unlock(h);
770 *ret_val = ret;
771 return 1;
772 }
773
774 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
775 ret = search_and_update(h, data, key, cur_bkt, sig);
776 if (ret != -1) {
777 __hash_rw_writer_unlock(h);
778 *ret_val = ret;
779 return 1;
780 }
781 }
782
783 while (likely(curr_node->prev != NULL)) {
784 prev_node = curr_node->prev;
785 prev_bkt = prev_node->bkt;
786 prev_slot = curr_node->prev_slot;
787
788 prev_alt_bkt_idx = get_alt_bucket_index(h,
789 prev_node->cur_bkt_idx,
790 prev_bkt->sig_current[prev_slot]);
791
792 if (unlikely(&h->buckets[prev_alt_bkt_idx]
793 != curr_bkt)) {
794 /* revert it to empty, otherwise duplicated keys */
795 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
796 EMPTY_SLOT,
797 __ATOMIC_RELEASE);
798 __hash_rw_writer_unlock(h);
799 return -1;
800 }
801
802 if (h->readwrite_concur_lf_support) {
803 /* Inform the previous move. The current move need
804 * not be informed now as the current bucket entry
805 * is present in both primary and secondary.
806 * Since there is one writer, load acquires on
807 * tbl_chng_cnt are not required.
808 */
809 __atomic_store_n(h->tbl_chng_cnt,
810 *h->tbl_chng_cnt + 1,
811 __ATOMIC_RELEASE);
812 /* The store to sig_current should not
813 * move above the store to tbl_chng_cnt.
814 */
815 __atomic_thread_fence(__ATOMIC_RELEASE);
816 }
817
818 /* Need to swap current/alt sig to allow later
819 * Cuckoo insert to move elements back to its
820 * primary bucket if available
821 */
822 curr_bkt->sig_current[curr_slot] =
823 prev_bkt->sig_current[prev_slot];
824 /* Release the updated bucket entry */
825 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
826 prev_bkt->key_idx[prev_slot],
827 __ATOMIC_RELEASE);
828
829 curr_slot = prev_slot;
830 curr_node = prev_node;
831 curr_bkt = curr_node->bkt;
832 }
833
834 if (h->readwrite_concur_lf_support) {
835 /* Inform the previous move. The current move need
836 * not be informed now as the current bucket entry
837 * is present in both primary and secondary.
838 * Since there is one writer, load acquires on
839 * tbl_chng_cnt are not required.
840 */
841 __atomic_store_n(h->tbl_chng_cnt,
842 *h->tbl_chng_cnt + 1,
843 __ATOMIC_RELEASE);
844 /* The store to sig_current should not
845 * move above the store to tbl_chng_cnt.
846 */
847 __atomic_thread_fence(__ATOMIC_RELEASE);
848 }
849
850 curr_bkt->sig_current[curr_slot] = sig;
851 /* Release the new bucket entry */
852 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
853 new_idx,
854 __ATOMIC_RELEASE);
855
856 __hash_rw_writer_unlock(h);
857
858 return 0;
859
860}
861
862/*
863 * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe
864 * Cuckoo
865 */
866static inline int
867rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
868 struct rte_hash_bucket *bkt,
869 struct rte_hash_bucket *sec_bkt,
870 const struct rte_hash_key *key, void *data,
871 uint16_t sig, uint32_t bucket_idx,
872 uint32_t new_idx, int32_t *ret_val)
873{
874 unsigned int i;
875 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
876 struct queue_node *tail, *head;
877 struct rte_hash_bucket *curr_bkt, *alt_bkt;
878 uint32_t cur_idx, alt_idx;
879
880 tail = queue;
881 head = queue + 1;
882 tail->bkt = bkt;
883 tail->prev = NULL;
884 tail->prev_slot = -1;
885 tail->cur_bkt_idx = bucket_idx;
886
887 /* Cuckoo bfs Search */
888 while (likely(tail != head && head <
889 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
890 RTE_HASH_BUCKET_ENTRIES)) {
891 curr_bkt = tail->bkt;
892 cur_idx = tail->cur_bkt_idx;
893 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
894 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
895 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
896 bkt, sec_bkt, key, data,
897 tail, i, sig,
898 new_idx, ret_val);
899 if (likely(ret != -1))
900 return ret;
901 }
902
903 /* Enqueue new node and keep prev node info */
904 alt_idx = get_alt_bucket_index(h, cur_idx,
905 curr_bkt->sig_current[i]);
906 alt_bkt = &(h->buckets[alt_idx]);
907 head->bkt = alt_bkt;
908 head->cur_bkt_idx = alt_idx;
909 head->prev = tail;
910 head->prev_slot = i;
911 head++;
912 }
913 tail++;
914 }
915
916 return -ENOSPC;
917}
918
919static inline int32_t
920__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
921 hash_sig_t sig, void *data)
922{
923 uint16_t short_sig;
924 uint32_t prim_bucket_idx, sec_bucket_idx;
925 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
926 struct rte_hash_key *new_k, *keys = h->key_store;
927 void *slot_id = NULL;
928 void *ext_bkt_id = NULL;
929 uint32_t new_idx, bkt_id;
930 int ret;
931 unsigned n_slots;
932 unsigned lcore_id;
933 unsigned int i;
934 struct lcore_cache *cached_free_slots = NULL;
Step 2: 'cached_free_slots' initialized to a null pointer value
935 int32_t ret_val;
936 struct rte_hash_bucket *last;
937
938 short_sig = get_short_sig(sig);
939 prim_bucket_idx = get_prim_bucket_index(h, sig);
940 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
941 prim_bkt = &h->buckets[prim_bucket_idx];
942 sec_bkt = &h->buckets[sec_bucket_idx];
943 rte_prefetch0(prim_bkt);
944 rte_prefetch0(sec_bkt);
945
946 /* Check if key is already inserted in primary location */
947 __hash_rw_writer_lock(h);
948 ret = search_and_update(h, data, key, prim_bkt, short_sig);
949 if (ret != -1) {
Step 3: Assuming the condition is false
Step 4: Taking false branch
950 __hash_rw_writer_unlock(h);
951 return ret;
952 }
953
954 /* Check if key is already inserted in secondary location */
955 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
956 ret = search_and_update(h, data, key, cur_bkt, short_sig);
957 if (ret != -1) {
958 __hash_rw_writer_unlock(h);
959 return ret;
960 }
961 }
962
963 __hash_rw_writer_unlock(h);
964
965 /* Did not find a match, so get a new slot for storing the new key */
966 if (h->use_local_cache) {
Step 5: Assuming the condition is false
Step 6: Taking false branch
967 lcore_id = rte_lcore_id();
968 cached_free_slots = &h->local_free_slots[lcore_id];
969 /* Try to get a free slot from the local cache */
970 if (cached_free_slots->len == 0) {
971 /* Need to get another burst of free slots from global ring */
972 n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
973 cached_free_slots->objs,
974 LCORE_CACHE_SIZE, NULL);
975 if (n_slots == 0) {
976 return -ENOSPC;
977 }
978
979 cached_free_slots->len += n_slots;
980 }
981
982 /* Get a free slot from the local cache */
983 cached_free_slots->len--;
984 slot_id = cached_free_slots->objs[cached_free_slots->len];
985 } else {
986 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
Step 7: Taking false branch
987 return -ENOSPC;
988 }
989 }
990
991 new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
992 new_idx = (uint32_t)((uintptr_t) slot_id);
993 /* Copy key */
994 memcpy(new_k->key, key, h->key_len);
995 /* Key can be of arbitrary length, so it is not possible to store
996 * it atomically. Hence the new key element's memory stores
997 * (key as well as data) should be complete before it is referenced.
998 * 'pdata' acts as the synchronization point when an existing hash
999 * entry is updated.
1000 */
1001 __atomic_store_n(&new_k->pdata,
1002 data,
1003 __ATOMIC_RELEASE);
1004
1005 /* Find an empty slot and insert */
1006 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1007 short_sig, new_idx, &ret_val);
1008 if (ret == 0)
Step 8: Assuming 'ret' is not equal to 0
Step 9: Taking false branch
1009 return new_idx - 1;
1010 else if (ret == 1) {
Step 10: Assuming 'ret' is equal to 1
Step 11: Taking true branch
1011 enqueue_slot_back(h, cached_free_slots, slot_id);
Step 12: Passing null pointer value via 2nd parameter 'cached_free_slots'
Step 13: Calling 'enqueue_slot_back'
1012 return ret_val;
1013 }
1014
1015 /* Primary bucket full, need to make space for new entry */
1016 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1017 short_sig, prim_bucket_idx, new_idx, &ret_val);
1018 if (ret == 0)
1019 return new_idx - 1;
1020 else if (ret == 1) {
1021 enqueue_slot_back(h, cached_free_slots, slot_id);
1022 return ret_val;
1023 }
1024
1025 /* Also search secondary bucket to get better occupancy */
1026 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1027 short_sig, sec_bucket_idx, new_idx, &ret_val);
1028
1029 if (ret == 0)
1030 return new_idx - 1;
1031 else if (ret == 1) {
1032 enqueue_slot_back(h, cached_free_slots, slot_id);
1033 return ret_val;
1034 }
1035
1036 /* if ext table not enabled, we failed the insertion */
1037 if (!h->ext_table_support) {
1038 enqueue_slot_back(h, cached_free_slots, slot_id);
1039 return ret;
1040 }
1041
1042 /* Now we need to go through the extendable bucket. Protection is needed
1043 * to protect all extendable bucket processes.
1044 */
1045 __hash_rw_writer_lock(h);
1046 /* We check for duplicates again since could be inserted before the lock */
1047 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1048 if (ret != -1) {
1049 enqueue_slot_back(h, cached_free_slots, slot_id);
1050 goto failure;
1051 }
1052
1053 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1054 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1055 if (ret != -1) {
1056 enqueue_slot_back(h, cached_free_slots, slot_id);
1057 goto failure;
1058 }
1059 }
1060
1061 /* Search sec and ext buckets to find an empty entry to insert. */
1062 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1063 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1064 /* Check if slot is available */
1065 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1066 cur_bkt->sig_current[i] = short_sig;
1067 /* Store to signature should not leak after
1068 * the store to key_idx
1069 */
1070 __atomic_store_n(&cur_bkt->key_idx[i],
1071 new_idx,
1072 __ATOMIC_RELEASE);
1073 __hash_rw_writer_unlock(h);
1074 return new_idx - 1;
1075 }
1076 }
1077 }
1078
1079 /* Failed to get an empty entry from extendable buckets. Link a new
1080 * extendable bucket. We first get a free bucket from ring.
1081 */
1082 if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1083 ret = -ENOSPC;
1084 goto failure;
1085 }
1086
1087 bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1088 /* Use the first location of the new bucket */
1089 (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1090 /* Store to signature should not leak after
1091 * the store to key_idx
1092 */
1093 __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
1094 new_idx,
1095 __ATOMIC_RELEASE);
1096 /* Link the new bucket to sec bucket linked list */
1097 last = rte_hash_get_last_bkt(sec_bkt);
1098 last->next = &h->buckets_ext[bkt_id];
1099 __hash_rw_writer_unlock(h);
1100 return new_idx - 1;
1101
1102failure:
1103 __hash_rw_writer_unlock(h);
1104 return ret;
1105
1106}
1107
1108int32_t
1109rte_hash_add_key_with_hash(const struct rte_hash *h,
1110 const void *key, hash_sig_t sig)
1111{
1112 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1113 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1114}
1115
1116int32_t
1117rte_hash_add_key(const struct rte_hash *h, const void *key)
1118{
1119 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1120 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1121}
1122
1123int
1124rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1125 const void *key, hash_sig_t sig, void *data)
1126{
1127 int ret;
1128
1129 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1130 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1131 if (ret >= 0)
1132 return 0;
1133 else
1134 return ret;
1135}
1136
1137int
1138rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1139{
1140 int ret;
1141
1142 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1143
1144 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
Step 1: Calling '__rte_hash_add_key_with_hash'
1145 if (ret >= 0)
1146 return 0;
1147 else
1148 return ret;
1149}
1150
1151/* Search one bucket to find the match key - uses rw lock */
1152static inline int32_t
1153search_one_bucket_l(const struct rte_hash *h, const void *key,
1154 uint16_t sig, void **data,
1155 const struct rte_hash_bucket *bkt)
1156{
1157 int i;
1158 struct rte_hash_key *k, *keys = h->key_store;
1159
1160 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1161 if (bkt->sig_current[i] == sig &&
1162 bkt->key_idx[i] != EMPTY_SLOT) {
1163 k = (struct rte_hash_key *) ((char *)keys +
1164 bkt->key_idx[i] * h->key_entry_size);
1165
1166 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1167 if (data != NULL)
1168 *data = k->pdata;
1169 /*
1170 * Return index where key is stored,
1171 * subtracting the first dummy index
1172 */
1173 return bkt->key_idx[i] - 1;
1174 }
1175 }
1176 }
1177 return -1;
1178}
1179
1180/* Search one bucket to find the match key */
1181static inline int32_t
1182search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1183 void **data, const struct rte_hash_bucket *bkt)
1184{
1185 int i;
1186 uint32_t key_idx;
1187 void *pdata;
1188 struct rte_hash_key *k, *keys = h->key_store;
1189
1190 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1191 key_idx = __atomic_load_n(&bkt->key_idx[i],
1192 __ATOMIC_ACQUIRE);
1193 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1194 k = (struct rte_hash_key *) ((char *)keys +
1195 key_idx * h->key_entry_size);
1196 pdata = __atomic_load_n(&k->pdata,
1197 __ATOMIC_ACQUIRE);
1198
1199 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1200 if (data != NULL)
1201 *data = pdata;
1202 /*
1203 * Return index where key is stored,
1204 * subtracting the first dummy index
1205 */
1206 return key_idx - 1;
1207 }
1208 }
1209 }
1210 return -1;
1211}
1212
1213static inline int32_t
1214__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1215 hash_sig_t sig, void **data)
1216{
1217 uint32_t prim_bucket_idx, sec_bucket_idx;
1218 struct rte_hash_bucket *bkt, *cur_bkt;
1219 int ret;
1220 uint16_t short_sig;
1221
1222 short_sig = get_short_sig(sig);
1223 prim_bucket_idx = get_prim_bucket_index(h, sig);
1224 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1225
1226 bkt = &h->buckets[prim_bucket_idx];
1227
1228 __hash_rw_reader_lock(h);
1229
1230 /* Check if key is in primary location */
1231 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1232 if (ret != -1) {
1233 __hash_rw_reader_unlock(h);
1234 return ret;
1235 }
1236 /* Calculate secondary hash */
1237 bkt = &h->buckets[sec_bucket_idx];
1238
1239 /* Check if key is in secondary location */
1240 FOR_EACH_BUCKET(cur_bkt, bkt) {
1241 ret = search_one_bucket_l(h, key, short_sig,
1242 data, cur_bkt);
1243 if (ret != -1) {
1244 __hash_rw_reader_unlock(h);
1245 return ret;
1246 }
1247 }
1248
1249 __hash_rw_reader_unlock(h);
1250
1251 return -ENOENT;
1252}
1253
1254static inline int32_t
1255__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1256 hash_sig_t sig, void **data)
1257{
1258 uint32_t prim_bucket_idx, sec_bucket_idx;
1259 struct rte_hash_bucket *bkt, *cur_bkt;
1260 uint32_t cnt_b, cnt_a;
1261 int ret;
1262 uint16_t short_sig;
1263
1264 short_sig = get_short_sig(sig);
1265 prim_bucket_idx = get_prim_bucket_index(h, sig);
1266 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1267
1268 do {
1269 /* Load the table change counter before the lookup
1270 * starts. Acquire semantics will make sure that
1271 * loads in search_one_bucket are not hoisted.
1272 */
1273 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1274 __ATOMIC_ACQUIRE);
1275
1276 /* Check if key is in primary location */
1277 bkt = &h->buckets[prim_bucket_idx];
1278 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1279 if (ret != -1) {
1280 __hash_rw_reader_unlock(h);
1281 return ret;
1282 }
1283 /* Calculate secondary hash */
1284 bkt = &h->buckets[sec_bucket_idx];
1285
1286 /* Check if key is in secondary location */
1287 FOR_EACH_BUCKET(cur_bkt, bkt) {
1288 ret = search_one_bucket_lf(h, key, short_sig,
1289 data, cur_bkt);
1290 if (ret != -1) {
1291 __hash_rw_reader_unlock(h);
1292 return ret;
1293 }
1294 }
1295
1296 /* The loads of sig_current in search_one_bucket
1297 * should not move below the load from tbl_chng_cnt.
1298 */
1299 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1300 /* Re-read the table change counter to check if the
1301 * table has changed during search. If yes, re-do
1302 * the search.
1303 * This load should not get hoisted. The load
1304 * acquires on cnt_b, key index in primary bucket
1305 * and key index in secondary bucket will make sure
1306 * that it does not get hoisted.
1307 */
1308 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1309 __ATOMIC_ACQUIRE);
1310 } while (cnt_b != cnt_a);
1311
1312 return -ENOENT;
1313}
1314
1315static inline int32_t
1316__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1317 hash_sig_t sig, void **data)
1318{
1319 if (h->readwrite_concur_lf_support)
1320 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1321 else
1322 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1323}
1324
1325int32_t
1326rte_hash_lookup_with_hash(const struct rte_hash *h,
1327 const void *key, hash_sig_t sig)
1328{
1329 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1330 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1331}
1332
1333int32_t
1334rte_hash_lookup(const struct rte_hash *h, const void *key)
1335{
1336 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1337 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1338}
1339
1340int
1341rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1342 const void *key, hash_sig_t sig, void **data)
1343{
1344 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1345 return __rte_hash_lookup_with_hash(h, key, sig, data);
1346}
1347
1348int
1349rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1350{
1351 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1352 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1353}
1354
1355static inline void
1356remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1357{
1358 unsigned lcore_id, n_slots;
1359 struct lcore_cache *cached_free_slots;
1360
1361 if (h->use_local_cache) {
1362 lcore_id = rte_lcore_id();
1363 cached_free_slots = &h->local_free_slots[lcore_id];
1364 /* Cache full, need to free it. */
1365 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1366 /* Need to enqueue the free slots in global ring. */
1367 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1368 cached_free_slots->objs,
1369 LCORE_CACHE_SIZE, NULL);
1370 ERR_IF_TRUE((n_slots == 0),
1371 "%s: could not enqueue free slots in global ring\n",
1372 __func__);
1373 cached_free_slots->len -= n_slots;
1374 }
1375 /* Put index of new free slot in cache. */
1376 cached_free_slots->objs[cached_free_slots->len] =
1377 (void *)((uintptr_t)bkt->key_idx[i]);
1378 cached_free_slots->len++;
1379 } else {
1380 rte_ring_sp_enqueue(h->free_slots,
1381 (void *)((uintptr_t)bkt->key_idx[i]));
1382 }
1383}
1384
1385/* Compact the linked list by moving key from last entry in linked list to the
1386 * empty slot.
1387 */
1388static inline void
1389__rte_hash_compact_ll(const struct rte_hash *h,
1390 struct rte_hash_bucket *cur_bkt, int pos) {
1391 int i;
1392 struct rte_hash_bucket *last_bkt;
1393
1394 if (!cur_bkt->next)
1395 return;
1396
1397 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1398
1399 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1400 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1401 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1402 __atomic_store_n(&cur_bkt->key_idx[pos],
1403 last_bkt->key_idx[i],
1404 __ATOMIC_RELEASE);
1405 if (h->readwrite_concur_lf_support) {
1406 /* Inform the readers that the table has changed
1407 * Since there is one writer, load acquire on
1408 * tbl_chng_cnt is not required.
1409 */
1410 __atomic_store_n(h->tbl_chng_cnt,
1411 *h->tbl_chng_cnt + 1,
1412 __ATOMIC_RELEASE);
1413 /* The store to sig_current should
1414 * not move above the store to tbl_chng_cnt.
1415 */
1416 __atomic_thread_fence(__ATOMIC_RELEASE);
1417 }
1418 last_bkt->sig_current[i] = NULL_SIGNATURE;
1419 __atomic_store_n(&last_bkt->key_idx[i],
1420 EMPTY_SLOT,
1421 __ATOMIC_RELEASE);
1422 return;
1423 }
1424 }
1425}
1426
1427/* Search one bucket and remove the matched key.
1428 * Writer is expected to hold the lock while calling this
1429 * function.
1430 */
1431static inline int32_t
1432search_and_remove(const struct rte_hash *h, const void *key,
1433 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1434{
1435 struct rte_hash_key *k, *keys = h->key_store;
1436 unsigned int i;
1437 uint32_t key_idx;
1438
1439 /* Check if key is in bucket */
1440 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1441 key_idx = __atomic_load_n(&bkt->key_idx[i],
1442 __ATOMIC_ACQUIRE);
1443 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1444 k = (struct rte_hash_key *) ((char *)keys +
1445 key_idx * h->key_entry_size);
1446 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1447 bkt->sig_current[i] = NULL_SIGNATURE;
1448 /* Free the key store index if
1449 * no_free_on_del is disabled.
1450 */
1451 if (!h->no_free_on_del)
1452 remove_entry(h, bkt, i);
1453
1454 __atomic_store_n(&bkt->key_idx[i],
1455 EMPTY_SLOT,
1456 __ATOMIC_RELEASE);
1457
1458 *pos = i;
1459 /*
1460 * Return index where key is stored,
1461 * subtracting the first dummy index
1462 */
1463 return key_idx - 1;
1464 }
1465 }
1466 }
1467 return -1;
1468}
1469
1470static inline int32_t
1471__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1472 hash_sig_t sig)
1473{
1474 uint32_t prim_bucket_idx, sec_bucket_idx;
1475 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1476 struct rte_hash_bucket *cur_bkt;
1477 int pos;
1478 int32_t ret, i;
1479 uint16_t short_sig;
1480
1481 short_sig = get_short_sig(sig);
1482 prim_bucket_idx = get_prim_bucket_index(h, sig);
1483 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1484 prim_bkt = &h->buckets[prim_bucket_idx];
1485
1486 __hash_rw_writer_lock(h);
1487 /* look for key in primary bucket */
1488 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1489 if (ret != -1) {
1490 __rte_hash_compact_ll(h, prim_bkt, pos);
1491 last_bkt = prim_bkt->next;
1492 prev_bkt = prim_bkt;
1493 goto return_bkt;
1494 }
1495
1496 /* Calculate secondary hash */
1497 sec_bkt = &h->buckets[sec_bucket_idx];
1498
1499 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1500 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1501 if (ret != -1) {
1502 __rte_hash_compact_ll(h, cur_bkt, pos);
1503 last_bkt = sec_bkt->next;
1504 prev_bkt = sec_bkt;
1505 goto return_bkt;
1506 }
1507 }
1508
1509 __hash_rw_writer_unlock(h);
1510 return -ENOENT;
1511
1512/* Search last bucket to see if empty to be recycled */
1513return_bkt:
1514 if (!last_bkt) {
1515 __hash_rw_writer_unlock(h);
1516 return ret;
1517 }
1518 while (last_bkt->next) {
1519 prev_bkt = last_bkt;
1520 last_bkt = last_bkt->next;
1521 }
1522
1523 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1524 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1525 break;
1526 }
1527 /* found empty bucket and recycle */
1528 if (i == RTE_HASH_BUCKET_ENTRIES) {
1529 prev_bkt->next = NULL;
1530 uint32_t index = last_bkt - h->buckets_ext + 1;
1531 /* Recycle the empty bkt if
1532 * no_free_on_del is disabled.
1533 */
1534 if (h->no_free_on_del)
1535 /* Store index of an empty ext bkt to be recycled
1536 * on calling rte_hash_del_xxx APIs.
1537 * When lock free read-write concurrency is enabled,
1538 * an empty ext bkt cannot be put into free list
1539 * immediately (as readers might be using it still).
1540 * Hence freeing of the ext bkt is piggy-backed to
1541 * freeing of the key index.
1542 */
1543 h->ext_bkt_to_free[ret] = index;
1544 else
1545 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1546 }
1547 __hash_rw_writer_unlock(h);
1548 return ret;
1549}
1550
1551int32_t
1552rte_hash_del_key_with_hash(const struct rte_hash *h,
1553 const void *key, hash_sig_t sig)
1554{
1555 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1556 return __rte_hash_del_key_with_hash(h, key, sig);
1557}
1558
1559int32_t
1560rte_hash_del_key(const struct rte_hash *h, const void *key)
1561{
1562 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1563 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1564}
1565
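A minimal usage sketch for the two delete entry points above; it is illustrative only and assumes a table created elsewhere with rte_hash_create() and a key of h->key_len bytes:

    #include <rte_hash.h>

    static int32_t
    delete_key(const struct rte_hash *h, const void *key)
    {
            /* Let the library hash the key ... */
            int32_t pos = rte_hash_del_key(h, key);

            /* ... or reuse a precomputed signature to avoid rehashing:
             * pos = rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
             */

            if (pos < 0)
                    return pos;     /* -ENOENT: not found, -EINVAL: bad args */

            /* pos is the key-store position (dummy slot already subtracted);
             * with no_free_on_del set it must later be handed to
             * rte_hash_free_key_with_position().
             */
            return pos;
    }
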
1566int
1567rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1568 void **key)
1569{
1570 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1571
1572 struct rte_hash_key *k, *keys = h->key_store;
1573 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1574 h->key_entry_size);
1575 *key = k->key;
1576
1577 if (position !=
1578 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1579 NULL((void*)0))) {
1580 return -ENOENT2;
1581 }
1582
1583 return 0;
1584}
1585
1586 int __rte_experimental
1587rte_hash_free_key_with_position(const struct rte_hash *h,
1588 const int32_t position)
1589{
1590 /* Key index where key is stored, adding the first dummy index */
1591 uint32_t key_idx = position + 1;
1592
1593 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1594
1595 unsigned int lcore_id, n_slots;
1596 struct lcore_cache *cached_free_slots;
1597 const uint32_t total_entries = h->use_local_cache ?
1598 h->entries + (RTE_MAX_LCORE128 - 1) * (LCORE_CACHE_SIZE64 - 1) + 1
1599 : h->entries + 1;
1600
1601 /* Out of bounds */
1602 if (key_idx >= total_entries)
1603 return -EINVAL22;
1604 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1605 uint32_t index = h->ext_bkt_to_free[position];
1606 if (index) {
1607 /* Recycle empty ext bkt to free list. */
1608 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1609 h->ext_bkt_to_free[position] = 0;
1610 }
1611 }
1612
1613 if (h->use_local_cache) {
1614 lcore_id = rte_lcore_id();
1615 cached_free_slots = &h->local_free_slots[lcore_id];
1616 /* Cache full, need to free it. */
1617 if (cached_free_slots->len == LCORE_CACHE_SIZE64) {
1618 /* Need to enqueue the free slots in global ring. */
1619 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1620 cached_free_slots->objs,
1621 LCORE_CACHE_SIZE64, NULL((void*)0));
1622 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1623 cached_free_slots->len -= n_slots;
1624 }
1625 /* Put index of new free slot in cache. */
1626 cached_free_slots->objs[cached_free_slots->len] =
1627 (void *)((uintptr_t)key_idx);
1628 cached_free_slots->len++;
1629 } else {
1630 rte_ring_sp_enqueue(h->free_slots,
1631 (void *)((uintptr_t)key_idx));
1632 }
1633
1634 return 0;
1635}
1636
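The function above is the second half of the deferred-free scheme: with lock-free read/write concurrency (which implies no_free_on_del), deletion only unlinks the entry, and the application recycles the key-store slot, plus any empty extendable bucket stashed in ext_bkt_to_free[], once readers can no longer reference it. A hedged sketch of that flow; wait_for_readers() is a hypothetical stand-in for whatever RCU or quiescent-state mechanism the application uses:

    #include <rte_hash.h>

    extern void wait_for_readers(void);     /* hypothetical grace-period wait */

    static void
    delete_and_reclaim(const struct rte_hash *h, const void *key)
    {
            int32_t pos = rte_hash_del_key(h, key);

            if (pos < 0)
                    return;

            /* Grace period: no reader may still hold the deleted entry. */
            wait_for_readers();

            /* Recycles the key-store slot and, if one was recorded for this
             * position, the empty extendable bucket as well.
             */
            rte_hash_free_key_with_position(h, pos);
    }
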
1637static inline void
1638compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1639 const struct rte_hash_bucket *prim_bkt,
1640 const struct rte_hash_bucket *sec_bkt,
1641 uint16_t sig,
1642 enum rte_hash_sig_compare_function sig_cmp_fn)
1643{
1644 unsigned int i;
1645
1646 /* For match mask the first bit of every two bits indicates the match */
1647 switch (sig_cmp_fn) {
1648#if defined(RTE_MACHINE_CPUFLAG_SSE21)
1649 case RTE_HASH_COMPARE_SSE:
1650 /* Compare all signatures in the bucket */
1651 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1652 _mm_load_si128(
1653 (__m128i const *)prim_bkt->sig_current),
1654 _mm_set1_epi16(sig)));
1655 /* Compare all signatures in the bucket */
1656 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1657 _mm_load_si128(
1658 (__m128i const *)sec_bkt->sig_current),
1659 _mm_set1_epi16(sig)));
1660 break;
1661#elif defined(RTE_MACHINE_CPUFLAG_NEON)
1662 case RTE_HASH_COMPARE_NEON: {
1663 uint16x8_t vmat, vsig, x;
1664 uint64x2_t x64;
1665 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1666
1667 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1668 /* Compare all signatures in the primary bucket */
1669 vmat = vceqq_u16(vsig,
1670 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1671 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1672 x64 = vpaddlq_u32(vpaddlq_u16(x));
1673 *prim_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
1674 vgetq_lane_u64(x64, 1));
1675 /* Compare all signatures in the secondary bucket */
1676 vmat = vceqq_u16(vsig,
1677 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1678 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1679 x64 = vpaddlq_u32(vpaddlq_u16(x));
1680 *sec_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
1681 vgetq_lane_u64(x64, 1)); }
1682 break;
1683#endif
1684 default:
1685 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES8; i++) {
1686 *prim_hash_matches |=
1687 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1688 *sec_hash_matches |=
1689 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1690 }
1691 }
1692}
1693
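In the hit masks produced above, each bucket entry owns two adjacent bits: the SSE path derives them from a byte-granular movemask over 16-bit signature lanes (a match sets both bits of the pair), while the scalar fallback sets only the low bit of each pair. The lookup paths below therefore recover the entry index with __builtin_ctzl(mask) >> 1 and clear the pair with mask &= ~(3ULL << (index << 1)). A compact sketch of that walk, for illustration only:

    #include <stdint.h>

    static inline void
    walk_hitmask(uint32_t hitmask)
    {
            while (hitmask) {
                    /* Two bits per entry: lowest set bit / 2 = entry index. */
                    uint32_t entry = __builtin_ctzl(hitmask) >> 1;

                    /* ... fetch key_idx[entry] and compare the full key ... */
                    (void)entry;

                    /* Clear both bits of this entry before continuing. */
                    hitmask &= ~(3ULL << (entry << 1));
            }
    }
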
1694#define PREFETCH_OFFSET4 4
1695static inline void
1696__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1697 int32_t num_keys, int32_t *positions,
1698 uint64_t *hit_mask, void *data[])
1699{
1700 uint64_t hits = 0;
1701 int32_t i;
1702 int32_t ret;
1703 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX64];
1704 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX64];
1705 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX64];
1706 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX64];
1707 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX64];
1708 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX64];
1709 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX64] = {0};
1710 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX64] = {0};
1711 struct rte_hash_bucket *cur_bkt, *next_bkt;
1712
1713 /* Prefetch first keys */
1714 for (i = 0; i < PREFETCH_OFFSET4 && i < num_keys; i++)
1715 rte_prefetch0(keys[i]);
1716
1717 /*
1718 * Prefetch rest of the keys, calculate primary and
1719 * secondary bucket and prefetch them
1720 */
1721 for (i = 0; i < (num_keys - PREFETCH_OFFSET4); i++) {
1722 rte_prefetch0(keys[i + PREFETCH_OFFSET4]);
1723
1724 prim_hash[i] = rte_hash_hash(h, keys[i]);
1725
1726 sig[i] = get_short_sig(prim_hash[i]);
1727 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1728 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1729
1730 primary_bkt[i] = &h->buckets[prim_index[i]];
1731 secondary_bkt[i] = &h->buckets[sec_index[i]];
1732
1733 rte_prefetch0(primary_bkt[i]);
1734 rte_prefetch0(secondary_bkt[i]);
1735 }
1736
1737 /* Calculate and prefetch rest of the buckets */
1738 for (; i < num_keys; i++) {
1739 prim_hash[i] = rte_hash_hash(h, keys[i]);
1740
1741 sig[i] = get_short_sig(prim_hash[i]);
1742 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1743 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1744
1745 primary_bkt[i] = &h->buckets[prim_index[i]];
1746 secondary_bkt[i] = &h->buckets[sec_index[i]];
1747
1748 rte_prefetch0(primary_bkt[i]);
1749 rte_prefetch0(secondary_bkt[i]);
1750 }
1751
1752 __hash_rw_reader_lock(h);
1753
1754 /* Compare signatures and prefetch key slot of first hit */
1755 for (i = 0; i < num_keys; i++) {
1756 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1757 primary_bkt[i], secondary_bkt[i],
1758 sig[i], h->sig_cmp_fn);
1759
1760 if (prim_hitmask[i]) {
1761 uint32_t first_hit =
1762 __builtin_ctzl(prim_hitmask[i])
1763 >> 1;
1764 uint32_t key_idx =
1765 primary_bkt[i]->key_idx[first_hit];
1766 const struct rte_hash_key *key_slot =
1767 (const struct rte_hash_key *)(
1768 (const char *)h->key_store +
1769 key_idx * h->key_entry_size);
1770 rte_prefetch0(key_slot);
1771 continue;
1772 }
1773
1774 if (sec_hitmask[i]) {
1775 uint32_t first_hit =
1776 __builtin_ctzl(sec_hitmask[i])
1777 >> 1;
1778 uint32_t key_idx =
1779 secondary_bkt[i]->key_idx[first_hit];
1780 const struct rte_hash_key *key_slot =
1781 (const struct rte_hash_key *)(
1782 (const char *)h->key_store +
1783 key_idx * h->key_entry_size);
1784 rte_prefetch0(key_slot);
1785 }
1786 }
1787
1788 /* Compare keys, first hits in primary first */
1789 for (i = 0; i < num_keys; i++) {
1790 positions[i] = -ENOENT2;
1791 while (prim_hitmask[i]) {
1792 uint32_t hit_index =
1793 __builtin_ctzl(prim_hitmask[i])
1794 >> 1;
1795 uint32_t key_idx =
1796 primary_bkt[i]->key_idx[hit_index];
1797 const struct rte_hash_key *key_slot =
1798 (const struct rte_hash_key *)(
1799 (const char *)h->key_store +
1800 key_idx * h->key_entry_size);
1801
1802 /*
1803 * If key index is 0, do not compare key,
1804 * as it is checking the dummy slot
1805 */
1806 if (!!key_idx &
1807 !rte_hash_cmp_eq(
1808 key_slot->key, keys[i], h)) {
1809 if (data != NULL((void*)0))
1810 data[i] = key_slot->pdata;
1811
1812 hits |= 1ULL << i;
1813 positions[i] = key_idx - 1;
1814 goto next_key;
1815 }
1816 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1817 }
1818
1819 while (sec_hitmask[i]) {
1820 uint32_t hit_index =
1821 __builtin_ctzl(sec_hitmask[i])
1822 >> 1;
1823 uint32_t key_idx =
1824 secondary_bkt[i]->key_idx[hit_index];
1825 const struct rte_hash_key *key_slot =
1826 (const struct rte_hash_key *)(
1827 (const char *)h->key_store +
1828 key_idx * h->key_entry_size);
1829
1830 /*
1831 * If key index is 0, do not compare key,
1832 * as it is checking the dummy slot
1833 */
1834
1835 if (!!key_idx &
1836 !rte_hash_cmp_eq(
1837 key_slot->key, keys[i], h)) {
1838 if (data != NULL((void*)0))
1839 data[i] = key_slot->pdata;
1840
1841 hits |= 1ULL << i;
1842 positions[i] = key_idx - 1;
1843 goto next_key;
1844 }
1845 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1846 }
1847next_key:
1848 continue;
1849 }
1850
1851 /* all found, do not need to go through ext bkt */
1852 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1853 if (hit_mask != NULL((void*)0))
1854 *hit_mask = hits;
1855 __hash_rw_reader_unlock(h);
1856 return;
1857 }
1858
1859 /* need to check ext buckets for match */
1860 for (i = 0; i < num_keys; i++) {
1861 if ((hits & (1ULL << i)) != 0)
1862 continue;
1863 next_bkt = secondary_bkt[i]->next;
1864 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1865 if (data != NULL((void*)0))
1866 ret = search_one_bucket_l(h, keys[i],
1867 sig[i], &data[i], cur_bkt);
1868 else
1869 ret = search_one_bucket_l(h, keys[i],
1870 sig[i], NULL((void*)0), cur_bkt);
1871 if (ret != -1) {
1872 positions[i] = ret;
1873 hits |= 1ULL << i;
1874 break;
1875 }
1876 }
1877 }
1878
1879 __hash_rw_reader_unlock(h);
1880
1881 if (hit_mask != NULL((void*)0))
1882 *hit_mask = hits;
1883}
1884
1885static inline void
1886__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1887 int32_t num_keys, int32_t *positions,
1888 uint64_t *hit_mask, void *data[])
1889{
1890 uint64_t hits = 0;
1891 int32_t i;
1892 int32_t ret;
1893 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX64];
1894 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX64];
1895 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX64];
1896 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX64];
1897 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX64];
1898 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX64];
1899 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX64] = {0};
1900 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX64] = {0};
1901 struct rte_hash_bucket *cur_bkt, *next_bkt;
1902 void *pdata[RTE_HASH_LOOKUP_BULK_MAX64];
1903 uint32_t cnt_b, cnt_a;
1904
1905 /* Prefetch first keys */
1906 for (i = 0; i < PREFETCH_OFFSET4 && i < num_keys; i++)
1907 rte_prefetch0(keys[i]);
1908
1909 /*
1910 * Prefetch rest of the keys, calculate primary and
1911 * secondary bucket and prefetch them
1912 */
1913 for (i = 0; i < (num_keys - PREFETCH_OFFSET4); i++) {
1914 rte_prefetch0(keys[i + PREFETCH_OFFSET4]);
1915
1916 prim_hash[i] = rte_hash_hash(h, keys[i]);
1917
1918 sig[i] = get_short_sig(prim_hash[i]);
1919 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1920 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1921
1922 primary_bkt[i] = &h->buckets[prim_index[i]];
1923 secondary_bkt[i] = &h->buckets[sec_index[i]];
1924
1925 rte_prefetch0(primary_bkt[i]);
1926 rte_prefetch0(secondary_bkt[i]);
1927 }
1928
1929 /* Calculate and prefetch rest of the buckets */
1930 for (; i < num_keys; i++) {
1931 prim_hash[i] = rte_hash_hash(h, keys[i]);
1932
1933 sig[i] = get_short_sig(prim_hash[i]);
1934 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1935 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1936
1937 primary_bkt[i] = &h->buckets[prim_index[i]];
1938 secondary_bkt[i] = &h->buckets[sec_index[i]];
1939
1940 rte_prefetch0(primary_bkt[i]);
1941 rte_prefetch0(secondary_bkt[i]);
1942 }
1943
1944 for (i = 0; i < num_keys; i++)
1945 positions[i] = -ENOENT2;
1946
1947 do {
1948 /* Load the table change counter before the lookup
1949 * starts. Acquire semantics will make sure that
1950 * loads in compare_signatures are not hoisted.
1951 */
1952 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1953 __ATOMIC_ACQUIRE2);
1954
1955 /* Compare signatures and prefetch key slot of first hit */
1956 for (i = 0; i < num_keys; i++) {
1957 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1958 primary_bkt[i], secondary_bkt[i],
1959 sig[i], h->sig_cmp_fn);
1960
1961 if (prim_hitmask[i]) {
1962 uint32_t first_hit =
1963 __builtin_ctzl(prim_hitmask[i])
1964 >> 1;
1965 uint32_t key_idx =
1966 primary_bkt[i]->key_idx[first_hit];
1967 const struct rte_hash_key *key_slot =
1968 (const struct rte_hash_key *)(
1969 (const char *)h->key_store +
1970 key_idx * h->key_entry_size);
1971 rte_prefetch0(key_slot);
1972 continue;
1973 }
1974
1975 if (sec_hitmask[i]) {
1976 uint32_t first_hit =
1977 __builtin_ctzl(sec_hitmask[i])
1978 >> 1;
1979 uint32_t key_idx =
1980 secondary_bkt[i]->key_idx[first_hit];
1981 const struct rte_hash_key *key_slot =
1982 (const struct rte_hash_key *)(
1983 (const char *)h->key_store +
1984 key_idx * h->key_entry_size);
1985 rte_prefetch0(key_slot);
1986 }
1987 }
1988
1989 /* Compare keys, first hits in primary first */
1990 for (i = 0; i < num_keys; i++) {
1991 while (prim_hitmask[i]) {
1992 uint32_t hit_index =
1993 __builtin_ctzl(prim_hitmask[i])
1994 >> 1;
1995 uint32_t key_idx =
1996 __atomic_load_n(
1997 &primary_bkt[i]->key_idx[hit_index],
1998 __ATOMIC_ACQUIRE2);
1999 const struct rte_hash_key *key_slot =
2000 (const struct rte_hash_key *)(
2001 (const char *)h->key_store +
2002 key_idx * h->key_entry_size);
2003
2004 if (key_idx != EMPTY_SLOT0)
2005 pdata[i] = __atomic_load_n(
2006 &key_slot->pdata,
2007 __ATOMIC_ACQUIRE2);
2008 /*
2009 * If key index is 0, do not compare key,
2010 * as it is checking the dummy slot
2011 */
2012 if (!!key_idx &
2013 !rte_hash_cmp_eq(
2014 key_slot->key, keys[i], h)) {
2015 if (data != NULL((void*)0))
2016 data[i] = pdata[i];
2017
2018 hits |= 1ULL << i;
2019 positions[i] = key_idx - 1;
2020 goto next_key;
2021 }
2022 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2023 }
2024
2025 while (sec_hitmask[i]) {
2026 uint32_t hit_index =
2027 __builtin_ctzl(sec_hitmask[i])
2028 >> 1;
2029 uint32_t key_idx =
2030 __atomic_load_n(
2031 &secondary_bkt[i]->key_idx[hit_index],
2032 __ATOMIC_ACQUIRE2);
2033 const struct rte_hash_key *key_slot =
2034 (const struct rte_hash_key *)(
2035 (const char *)h->key_store +
2036 key_idx * h->key_entry_size);
2037
2038 if (key_idx != EMPTY_SLOT0)
2039 pdata[i] = __atomic_load_n(
2040 &key_slot->pdata,
2041 __ATOMIC_ACQUIRE2);
2042 /*
2043 * If key index is 0, do not compare key,
2044 * as it is checking the dummy slot
2045 */
2046
2047 if (!!key_idx &
2048 !rte_hash_cmp_eq(
2049 key_slot->key, keys[i], h)) {
2050 if (data != NULL((void*)0))
2051 data[i] = pdata[i];
2052
2053 hits |= 1ULL << i;
2054 positions[i] = key_idx - 1;
2055 goto next_key;
2056 }
2057 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2058 }
2059next_key:
2060 continue;
2061 }
2062
2063 /* all found, do not need to go through ext bkt */
2064 if (hits == ((1ULL << num_keys) - 1)) {
2065 if (hit_mask != NULL((void*)0))
2066 *hit_mask = hits;
2067 return;
2068 }
2069 /* need to check ext buckets for match */
2070 if (h->ext_table_support) {
2071 for (i = 0; i < num_keys; i++) {
2072 if ((hits & (1ULL << i)) != 0)
2073 continue;
2074 next_bkt = secondary_bkt[i]->next;
2075 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2076 if (data != NULL((void*)0))
2077 ret = search_one_bucket_lf(h,
2078 keys[i], sig[i],
2079 &data[i], cur_bkt);
2080 else
2081 ret = search_one_bucket_lf(h,
2082 keys[i], sig[i],
2083 NULL((void*)0), cur_bkt);
2084 if (ret != -1) {
2085 positions[i] = ret;
2086 hits |= 1ULL << i;
2087 break;
2088 }
2089 }
2090 }
2091 }
2092 /* The loads of sig_current in compare_signatures
2093 * should not move below the load from tbl_chng_cnt.
2094 */
2095 __atomic_thread_fence(__ATOMIC_ACQUIRE2);
2096 /* Re-read the table change counter to check if the
2097 * table has changed during search. If yes, re-do
2098 * the search.
2099 * This load should not get hoisted. The load
2100 * acquires on cnt_b, primary key index and secondary
2101 * key index will make sure that it does not get
2102 * hoisted.
2103 */
2104 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2105 __ATOMIC_ACQUIRE2);
2106 } while (cnt_b != cnt_a);
2107
2108 if (hit_mask != NULL((void*)0))
2109 *hit_mask = hits;
2110}
2111
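The do/while loop above is effectively the read side of a seqlock: the table change counter is sampled with acquire semantics before the signature comparison and re-checked after the key comparison, while a writer increments it with release ordering around cuckoo displacements, so a reader that raced with a key move retries instead of reporting a false miss. A generic sketch of that pattern, with illustrative names only:

    #include <stdint.h>

    static int
    lookup_with_retry(const uint32_t *tbl_chng_cnt,
                    int (*do_lookup)(void *ctx), void *ctx)
    {
            uint32_t cnt_before, cnt_after;
            int ret;

            do {
                    /* Acquire: lookup loads cannot be hoisted above this. */
                    cnt_before = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);
                    ret = do_lookup(ctx);
                    /* Keep the lookup loads from sinking below the re-read. */
                    __atomic_thread_fence(__ATOMIC_ACQUIRE);
                    cnt_after = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);
            } while (cnt_before != cnt_after);      /* table changed: retry */

            return ret;
    }
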
2112static inline void
2113__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2114 int32_t num_keys, int32_t *positions,
2115 uint64_t *hit_mask, void *data[])
2116{
2117 if (h->readwrite_concur_lf_support)
2118 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2119 hit_mask, data);
2120 else
2121 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2122 hit_mask, data);
2123}
2124
2125int
2126rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2127 uint32_t num_keys, int32_t *positions)
2128{
2129 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2130 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2131 (positions == NULL)), -EINVAL);
2132
2133 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL((void*)0), NULL((void*)0));
2134 return 0;
2135}
2136
2137int
2138rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2139 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2140{
2141 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2142 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2143 (hit_mask == NULL)), -EINVAL);
2144
2145 int32_t positions[num_keys];
2146
2147 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2148
2149 /* Return number of hits */
2150 return __builtin_popcountl(*hit_mask);
2151}
2152
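A minimal usage sketch for the bulk data lookup above; illustrative only, with the burst size bounded by RTE_HASH_LOOKUP_BULK_MAX and a mandatory hit_mask argument:

    #include <rte_hash.h>

    static void
    lookup_burst(const struct rte_hash *h, const void **keys, uint32_t n)
    {
            uint64_t hit_mask = 0;
            void *data[RTE_HASH_LOOKUP_BULK_MAX];
            uint32_t i;

            /* Returns the number of hits, or -EINVAL on bad parameters. */
            if (rte_hash_lookup_bulk_data(h, keys, n, &hit_mask, data) < 0)
                    return;

            for (i = 0; i < n; i++) {
                    if (hit_mask & (1ULL << i)) {
                            /* data[i] is the pdata stored with keys[i]. */
                    }
            }
    }
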
2153int32_t
2154rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2155{
2156 uint32_t bucket_idx, idx, position;
2157 struct rte_hash_key *next_key;
2158
2159 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2160
2161 const uint32_t total_entries_main = h->num_buckets *
2162 RTE_HASH_BUCKET_ENTRIES8;
2163 const uint32_t total_entries = total_entries_main << 1;
2164
2165 /* Out of bounds of all buckets (both main table and ext table) */
2166 if (*next >= total_entries_main)
2167 goto extend_table;
2168
2169 /* Calculate bucket and index of current iterator */
2170 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES8;
2171 idx = *next % RTE_HASH_BUCKET_ENTRIES8;
2172
2173 /* If current position is empty, go to the next one */
2174 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2175 __ATOMIC_ACQUIRE2)) == EMPTY_SLOT0) {
2176 (*next)++;
2177 /* End of table */
2178 if (*next == total_entries_main)
2179 goto extend_table;
2180 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES8;
2181 idx = *next % RTE_HASH_BUCKET_ENTRIES8;
2182 }
2183
2184 __hash_rw_reader_lock(h);
2185 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2186 position * h->key_entry_size);
2187 /* Return key and data */
2188 *key = next_key->key;
2189 *data = next_key->pdata;
2190
2191 __hash_rw_reader_unlock(h);
2192
2193 /* Increment iterator */
2194 (*next)++;
2195
2196 return position - 1;
2197
2198/* Begin to iterate extendable buckets */
2199extend_table:
2200 /* Out of total bound or if ext bucket feature is not enabled */
2201 if (*next >= total_entries || !h->ext_table_support)
2202 return -ENOENT2;
2203
2204 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES8;
2205 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES8;
2206
2207 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT0) {
2208 (*next)++;
2209 if (*next == total_entries)
2210 return -ENOENT2;
2211 bucket_idx = (*next - total_entries_main) /
2212 RTE_HASH_BUCKET_ENTRIES8;
2213 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES8;
2214 }
2215 __hash_rw_reader_lock(h);
2216 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2217 position * h->key_entry_size);
2218 /* Return key and data */
2219 *key = next_key->key;
2220 *data = next_key->pdata;
2221
2222 __hash_rw_reader_unlock(h);
2223
2224 /* Increment iterator */
2225 (*next)++;
2226 return position - 1;
2227}
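A minimal usage sketch for the iterator above; it walks every occupied slot of the main table and then the extendable buckets in a single pass (illustrative only):

    #include <rte_hash.h>

    static void
    dump_all_entries(const struct rte_hash *h)
    {
            const void *key;
            void *data;
            uint32_t iter = 0;      /* must start from 0 */

            /* Returns the key position (>= 0) per entry, -ENOENT at the end. */
            while (rte_hash_iterate(h, &key, &data, &iter) >= 0) {
                    /* use key / data here */
            }
    }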