File: home/bhubbard/working/src/ceph/src/spdk/dpdk/lib/librte_mempool/rte_mempool.c
Warning: line 441, column 2: Value stored to 'external' is never read
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2010-2014 Intel Corporation. |
3 | * Copyright(c) 2016 6WIND S.A. |
4 | */ |
5 | |
6 | #include <stdbool.h> |
7 | #include <stdio.h> |
8 | #include <string.h> |
9 | #include <stdint.h> |
10 | #include <stdarg.h> |
11 | #include <unistd.h> |
12 | #include <inttypes.h> |
13 | #include <errno.h> |
14 | #include <sys/queue.h> |
15 | #include <sys/mman.h> |
16 | |
17 | #include <rte_common.h> |
18 | #include <rte_log.h> |
19 | #include <rte_debug.h> |
20 | #include <rte_memory.h> |
21 | #include <rte_memzone.h> |
22 | #include <rte_malloc.h> |
23 | #include <rte_atomic.h> |
24 | #include <rte_launch.h> |
25 | #include <rte_eal.h> |
26 | #include <rte_eal_memconfig.h> |
27 | #include <rte_per_lcore.h> |
28 | #include <rte_lcore.h> |
29 | #include <rte_branch_prediction.h> |
30 | #include <rte_errno.h> |
31 | #include <rte_string_fns.h> |
32 | #include <rte_spinlock.h> |
33 | |
34 | #include "rte_mempool.h" |
35 | |
36 | TAILQ_HEAD(rte_mempool_list, rte_tailq_entry); |
37 | |
38 | static struct rte_tailq_elem rte_mempool_tailq = { |
39 | .name = "RTE_MEMPOOL", |
40 | }; |
41 | EAL_REGISTER_TAILQ(rte_mempool_tailq) |
42 | |
43 | #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5 |
44 | #define CALC_CACHE_FLUSHTHRESH(c) \ |
45 | ((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER)) |
46 | |
47 | /* |
48 | * return the greatest common divisor between a and b (fast algorithm) |
49 | * |
50 | */ |
51 | static unsigned get_gcd(unsigned a, unsigned b) |
52 | { |
53 | unsigned c; |
54 | |
55 | if (0 == a) |
56 | return b; |
57 | if (0 == b) |
58 | return a; |
59 | |
60 | if (a < b) { |
61 | c = a; |
62 | a = b; |
63 | b = c; |
64 | } |
65 | |
66 | while (b != 0) { |
67 | c = a % b; |
68 | a = b; |
69 | b = c; |
70 | } |
71 | |
72 | return a; |
73 | } |
74 | |
75 | /* |
76 | * Depending on memory configuration, object addresses are spread |
77 | * between channels and ranks in RAM: the pool allocator will add |
78 | * padding between objects. This function returns the new size of the |
79 | * object. |
80 | */ |
81 | static unsigned optimize_object_size(unsigned obj_size) |
82 | { |
83 | unsigned nrank, nchan; |
84 | unsigned new_obj_size; |
85 | |
86 | /* get number of channels */ |
87 | nchan = rte_memory_get_nchannel(); |
88 | if (nchan == 0) |
89 | nchan = 4; |
90 | |
91 | nrank = rte_memory_get_nrank(); |
92 | if (nrank == 0) |
93 | nrank = 1; |
94 | |
95 | /* process new object size */ |
96 | new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN; |
97 | while (get_gcd(new_obj_size, nrank * nchan) != 1) |
98 | new_obj_size++; |
99 | return new_obj_size * RTE_MEMPOOL_ALIGN; |
100 | } |
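
To make the spreading rule concrete: the function rounds the object size up to whole cache lines, then grows it line by line until the line count is coprime with nchan * nrank, so consecutive objects start on different memory channels. A minimal standalone sketch of the same arithmetic, assuming 4 channels, 1 rank and 64-byte alignment (demo_gcd() simply mirrors get_gcd() above; this demo is not part of the library):

    #include <stdio.h>

    static unsigned demo_gcd(unsigned a, unsigned b)
    {
        while (b != 0) {
            unsigned c = a % b;
            a = b;
            b = c;
        }
        return a;
    }

    int main(void)
    {
        unsigned nchan = 4, nrank = 1, align = 64;
        unsigned sz = (2048 + align - 1) / align;       /* 32 cache lines */

        while (demo_gcd(sz, nchan * nrank) != 1)
            sz++;                                       /* 32 -> 33 */
        printf("padded size: %u bytes\n", sz * align);  /* 2112 */
        return 0;
    }
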
101 | |
102 | struct pagesz_walk_arg { |
103 | int socket_id; |
104 | size_t min; |
105 | }; |
106 | |
107 | static int |
108 | find_min_pagesz(const struct rte_memseg_list *msl, void *arg) |
109 | { |
110 | struct pagesz_walk_arg *wa = arg; |
111 | bool valid; |
112 | |
113 | /* |
114 | * we need to only look at page sizes available for a particular socket |
115 | * ID. so, we either need an exact match on socket ID (can match both |
116 | * native and external memory), or, if SOCKET_ID_ANY was specified as a |
117 | * socket ID argument, we must only look at native memory and ignore any |
118 | * page sizes associated with external memory. |
119 | */ |
120 | valid = msl->socket_id == wa->socket_id; |
121 | valid |= wa->socket_id == SOCKET_ID_ANY && msl->external == 0; |
122 | |
123 | if (valid && msl->page_sz < wa->min) |
124 | wa->min = msl->page_sz; |
125 | |
126 | return 0; |
127 | } |
128 | |
129 | static size_t |
130 | get_min_page_size(int socket_id) |
131 | { |
132 | struct pagesz_walk_arg wa; |
133 | |
134 | wa.min = SIZE_MAX; |
135 | wa.socket_id = socket_id; |
136 | |
137 | rte_memseg_list_walk(find_min_pagesz, &wa); |
138 | |
139 | return wa.min == SIZE_MAX ? (size_t) getpagesize() : wa.min; |
140 | } |
141 | |
142 | |
143 | static void |
144 | mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque, |
145 | void *obj, rte_iova_t iova) |
146 | { |
147 | struct rte_mempool_objhdr *hdr; |
148 | struct rte_mempool_objtlr *tlr __rte_unused; |
149 | |
150 | /* set mempool ptr in header */ |
151 | hdr = RTE_PTR_SUB(obj, sizeof(*hdr)); |
152 | hdr->mp = mp; |
153 | hdr->iova = iova; |
154 | STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next); |
155 | mp->populated_size++; |
156 | |
157 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
158 | hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; |
159 | tlr = __mempool_get_trailer(obj); |
160 | tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE; |
161 | #endif |
162 | } |
163 | |
164 | /* call obj_cb() for each mempool element */ |
165 | uint32_t |
166 | rte_mempool_obj_iter(struct rte_mempool *mp, |
167 | rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg) |
168 | { |
169 | struct rte_mempool_objhdr *hdr; |
170 | void *obj; |
171 | unsigned n = 0; |
172 | |
173 | STAILQ_FOREACH(hdr, &mp->elt_list, next) { |
174 | obj = (char *)hdr + sizeof(*hdr); |
175 | obj_cb(mp, obj_cb_arg, obj, n); |
176 | n++; |
177 | } |
178 | |
179 | return n; |
180 | } |
181 | |
182 | /* call mem_cb() for each mempool memory chunk */ |
183 | uint32_t |
184 | rte_mempool_mem_iter(struct rte_mempool *mp, |
185 | rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg) |
186 | { |
187 | struct rte_mempool_memhdr *hdr; |
188 | unsigned n = 0; |
189 | |
190 | STAILQ_FOREACH(hdr, &mp->mem_list, next) { |
191 | mem_cb(mp, mem_cb_arg, hdr, n); |
192 | n++; |
193 | } |
194 | |
195 | return n; |
196 | } |
197 | |
198 | /* get the header, trailer and total size of a mempool element. */ |
199 | uint32_t |
200 | rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags, |
201 | struct rte_mempool_objsz *sz) |
202 | { |
203 | struct rte_mempool_objsz lsz; |
204 | |
205 | sz = (sz != NULL) ? sz : &lsz; |
206 | |
207 | sz->header_size = sizeof(struct rte_mempool_objhdr); |
208 | if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) |
209 | sz->header_size = RTE_ALIGN_CEIL(sz->header_size, |
210 | RTE_MEMPOOL_ALIGN); |
211 | |
212 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
213 | sz->trailer_size = sizeof(struct rte_mempool_objtlr); |
214 | #else |
215 | sz->trailer_size = 0; |
216 | #endif |
217 | |
218 | /* element size is at least 8-byte aligned */ |
219 | sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t)); |
220 | |
221 | /* expand trailer to next cache line */ |
222 | if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) { |
223 | sz->total_size = sz->header_size + sz->elt_size + |
224 | sz->trailer_size; |
225 | sz->trailer_size += ((RTE_MEMPOOL_ALIGN - |
226 | (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) & |
227 | RTE_MEMPOOL_ALIGN_MASK); |
228 | } |
229 | |
230 | /* |
231 | * increase trailer to add padding between objects in order to |
232 | * spread them across memory channels/ranks |
233 | */ |
234 | if ((flags & MEMPOOL_F_NO_SPREAD) == 0) { |
235 | unsigned new_size; |
236 | new_size = optimize_object_size(sz->header_size + sz->elt_size + |
237 | sz->trailer_size); |
238 | sz->trailer_size = new_size - sz->header_size - sz->elt_size; |
239 | } |
240 | |
241 | /* this is the size of an object, including header and trailer */ |
242 | sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size; |
243 | |
244 | return sz->total_size; |
245 | } |
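
As a sketch of what a caller gets back, assuming a 64-byte cache line, a non-debug build (empty trailer), and the 4-channel/1-rank fallback used by optimize_object_size(); the exact numbers depend on the build and platform:

    struct rte_mempool_objsz sz;

    rte_mempool_calc_obj_size(100, 0, &sz);
    /* sz.header_size  == 64  (objhdr rounded up to a cache line)
     * sz.elt_size     == 104 (100 rounded up to 8 bytes)
     * sz.trailer_size == 24  (pads 168 up to 192; 3 cache lines is
     *                         already coprime with 4 channels, so no
     *                         extra spread padding is added)
     * sz.total_size   == 192 */
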
246 | |
247 | /* free a memchunk allocated with rte_memzone_reserve() */ |
248 | static void |
249 | rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr, |
250 | void *opaque) |
251 | { |
252 | const struct rte_memzone *mz = opaque; |
253 | rte_memzone_free(mz); |
254 | } |
255 | |
256 | /* Free memory chunks used by a mempool. Objects must be in pool */ |
257 | static void |
258 | rte_mempool_free_memchunks(struct rte_mempool *mp) |
259 | { |
260 | struct rte_mempool_memhdr *memhdr; |
261 | void *elt; |
262 | |
263 | while (!STAILQ_EMPTY(&mp->elt_list)) { |
264 | rte_mempool_ops_dequeue_bulk(mp, &elt, 1); |
265 | (void)elt; |
266 | STAILQ_REMOVE_HEAD(&mp->elt_list, next); |
267 | mp->populated_size--; |
268 | } |
269 | |
270 | while (!STAILQ_EMPTY(&mp->mem_list)) { |
271 | memhdr = STAILQ_FIRST(&mp->mem_list); |
272 | STAILQ_REMOVE_HEAD(&mp->mem_list, next); |
273 | if (memhdr->free_cb != NULL) |
274 | memhdr->free_cb(memhdr, memhdr->opaque); |
275 | rte_free(memhdr); |
276 | mp->nb_mem_chunks--; |
277 | } |
278 | } |
279 | |
280 | static int |
281 | mempool_ops_alloc_once(struct rte_mempool *mp) |
282 | { |
283 | int ret; |
284 | |
285 | /* create the internal ring if not already done */ |
286 | if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) { |
287 | ret = rte_mempool_ops_alloc(mp); |
288 | if (ret != 0) |
289 | return ret; |
290 | mp->flags |= MEMPOOL_F_POOL_CREATED; |
291 | } |
292 | return 0; |
293 | } |
294 | |
295 | /* Add objects to the pool, using a physically contiguous memory |
296 | * zone. Return the number of objects added, or a negative value |
297 | * on error. |
298 | */ |
299 | int |
300 | rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr, |
301 | rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb, |
302 | void *opaque) |
303 | { |
304 | unsigned i = 0; |
305 | size_t off; |
306 | struct rte_mempool_memhdr *memhdr; |
307 | int ret; |
308 | |
309 | ret = mempool_ops_alloc_once(mp); |
310 | if (ret != 0) |
311 | return ret; |
312 | |
313 | /* mempool is already populated */ |
314 | if (mp->populated_size >= mp->size) |
315 | return -ENOSPC; |
316 | |
317 | memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0); |
318 | if (memhdr == NULL) |
319 | return -ENOMEM; |
320 | |
321 | memhdr->mp = mp; |
322 | memhdr->addr = vaddr; |
323 | memhdr->iova = iova; |
324 | memhdr->len = len; |
325 | memhdr->free_cb = free_cb; |
326 | memhdr->opaque = opaque; |
327 | |
328 | if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN) |
329 | off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr; |
330 | else |
331 | off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr; |
332 | |
333 | if (off > len) { |
334 | ret = -EINVAL; |
335 | goto fail; |
336 | } |
337 | |
338 | i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size, |
339 | (char *)vaddr + off, |
340 | (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off), |
341 | len - off, mempool_add_elem, NULL); |
342 | |
343 | /* not enough room to store one object */ |
344 | if (i == 0) { |
345 | ret = -EINVAL; |
346 | goto fail; |
347 | } |
348 | |
349 | STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); |
350 | mp->nb_mem_chunks++; |
351 | return i; |
352 | |
353 | fail: |
354 | rte_free(memhdr); |
355 | return ret; |
356 | } |
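
For illustration, a caller that reserves its own memory can pair rte_mempool_create_empty() with this function. A minimal sketch, assuming the memzone reservation succeeds and is IOVA-contiguous (error handling elided; rte_mempool_memchunk_mz_free() above is static, so an external caller supplies its own free callback or NULL):

    struct rte_mempool *mp = rte_mempool_create_empty("demo", 512, 2048,
            0, 0, SOCKET_ID_ANY, 0);
    const struct rte_memzone *mzn = rte_memzone_reserve_aligned("demo_mz",
            2 * 1024 * 1024, SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
            RTE_CACHE_LINE_SIZE);

    /* pick an ops backend before populating, then hand over the chunk */
    rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
    rte_mempool_populate_iova(mp, mzn->addr, mzn->iova, mzn->len,
            NULL, NULL);
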
357 | |
358 | /* Populate the mempool with a virtual area. Return the number of |
359 | * objects added, or a negative value on error. |
360 | */ |
361 | int |
362 | rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, |
363 | size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb, |
364 | void *opaque) |
365 | { |
366 | rte_iova_t iova; |
367 | size_t off, phys_len; |
368 | int ret, cnt = 0; |
369 | |
370 | /* address and len must be page-aligned */ |
371 | if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr) |
372 | return -EINVAL; |
373 | if (RTE_ALIGN_CEIL(len, pg_sz) != len) |
374 | return -EINVAL; |
375 | |
376 | if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) |
377 | return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA, |
378 | len, free_cb, opaque); |
379 | |
380 | for (off = 0; off + pg_sz <= len && |
381 | mp->populated_size < mp->size; off += phys_len) { |
382 | |
383 | iova = rte_mem_virt2iova(addr + off); |
384 | |
385 | if (iova == RTE_BAD_IOVA && rte_eal_has_hugepages()) { |
386 | ret = -EINVAL; |
387 | goto fail; |
388 | } |
389 | |
390 | /* populate with the largest group of contiguous pages */ |
391 | for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) { |
392 | rte_iova_t iova_tmp; |
393 | |
394 | iova_tmp = rte_mem_virt2iova(addr + off + phys_len); |
395 | |
396 | if (iova_tmp != iova + phys_len) |
397 | break; |
398 | } |
399 | |
400 | ret = rte_mempool_populate_iova(mp, addr + off, iova, |
401 | phys_len, free_cb, opaque); |
402 | if (ret < 0) |
403 | goto fail; |
404 | /* no need to call the free callback for next chunks */ |
405 | free_cb = NULL; |
406 | cnt += ret; |
407 | } |
408 | |
409 | return cnt; |
410 | |
411 | fail: |
412 | rte_mempool_free_memchunks(mp); |
413 | return ret; |
414 | } |
415 | |
416 | /* Default function to populate the mempool: allocate memory in memzones, |
417 | * and populate them. Return the number of objects added, or a negative |
418 | * value on error. |
419 | */ |
420 | int |
421 | rte_mempool_populate_default(struct rte_mempool *mp) |
422 | { |
423 | unsigned int mz_flags = RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY; |
424 | char mz_name[RTE_MEMZONE_NAMESIZE]; |
425 | const struct rte_memzone *mz; |
426 | ssize_t mem_size; |
427 | size_t align, pg_sz, pg_shift; |
428 | rte_iova_t iova; |
429 | unsigned mz_id, n; |
430 | int ret; |
431 | bool no_contig, try_contig, no_pageshift, external; |
432 | |
433 | ret = mempool_ops_alloc_once(mp); |
434 | if (ret != 0) |
435 | return ret; |
436 | |
437 | /* check if we can retrieve a valid socket ID */ |
438 | ret = rte_malloc_heap_socket_is_external(mp->socket_id); |
439 | if (ret < 0) |
440 | return -EINVAL; |
441 | external = ret; |
Value stored to 'external' is never read | |
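
This is the defect the report flags: the store at line 441 is dead because 'external' is unconditionally recomputed at line 498 before its first use. The call itself is still needed for its error check. A minimal cleanup sketch (one obvious shape, not an upstream patch):

    /* keep the error check, drop the dead store */
    ret = rte_malloc_heap_socket_is_external(mp->socket_id);
    if (ret < 0)
        return -EINVAL;
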
442 | |
443 | /* mempool must not be populated */ |
444 | if (mp->nb_mem_chunks != 0) |
445 | return -EEXIST; |
446 | |
447 | no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG; |
448 | |
449 | /* |
450 | * the following section calculates page shift and page size values. |
451 | * |
452 | * these values impact the result of calc_mem_size operation, which |
453 | * returns the amount of memory that should be allocated to store the |
454 | * desired number of objects. when not zero, it allocates more memory |
455 | * for the padding between objects, to ensure that an object does not |
456 | * cross a page boundary. in other words, page size/shift are to be set |
457 | * to zero if mempool elements won't care about page boundaries. |
458 | * there are several considerations for page size and page shift here. |
459 | * |
460 | * if we don't need our mempools to have physically contiguous objects, |
461 | * then just set page shift and page size to 0, because the user has |
462 | * indicated that there's no need to care about anything. |
463 | * |
464 | * if we do need contiguous objects, there is also an option to reserve |
465 | * the entire mempool memory as one contiguous block of memory, in |
466 | * which case the page shift and alignment wouldn't matter as well. |
467 | * |
468 | * if we require contiguous objects, but not necessarily the entire |
469 | * mempool reserved space to be contiguous, then there are two options. |
470 | * |
471 | * if our IO addresses are virtual, not actual physical (IOVA as VA |
472 | * case), then no page shift needed - our memory allocation will give us |
473 | * contiguous IO memory as far as the hardware is concerned, so |
474 | * act as if we're getting contiguous memory. |
475 | * |
476 | * if our IO addresses are physical, we may get memory from bigger |
477 | * pages, or we might get memory from smaller pages, and how much of it |
478 | * we require depends on whether we want bigger or smaller pages. |
479 | * However, requesting each and every memory size is too much work, so |
480 | * what we'll do instead is walk through the page sizes available, pick |
481 | * the smallest one and set up page shift to match that one. We will be |
482 | * wasting some space this way, but it's much nicer than looping around |
483 | * trying to reserve each and every page size. |
484 | * |
485 | * However, since size calculation will produce page-aligned sizes, it |
486 | * makes sense to first try and see if we can reserve the entire memzone |
487 | * in one contiguous chunk as well (otherwise we might end up wasting a |
488 | * 1G page on a 10MB memzone). If we fail to get enough contiguous |
489 | * memory, then we'll go and reserve space page-by-page. |
490 | * |
491 | * We also have to take into account the fact that memory that we're |
492 | * going to allocate from can belong to an externally allocated memory |
493 | * area, in which case the assumption of IOVA as VA mode being |
494 | * synonymous with IOVA contiguousness will not hold. We should also try |
495 | * to go for contiguous memory even if we're in no-huge mode, because |
496 | * external memory may in fact be IOVA-contiguous. |
497 | */ |
498 | external = rte_malloc_heap_socket_is_external(mp->socket_id) == 1; |
499 | no_pageshift = no_contig || |
500 | (!external && rte_eal_iova_mode() == RTE_IOVA_VA); |
501 | try_contig = !no_contig && !no_pageshift && |
502 | (rte_eal_has_hugepages() || external); |
503 | |
504 | if (no_pageshift) { |
505 | pg_sz = 0; |
506 | pg_shift = 0; |
507 | } else if (try_contig) { |
508 | pg_sz = get_min_page_size(mp->socket_id); |
509 | pg_shift = rte_bsf32(pg_sz); |
510 | } else { |
511 | pg_sz = getpagesize(); |
512 | pg_shift = rte_bsf32(pg_sz); |
513 | } |
514 | |
515 | for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) { |
516 | size_t min_chunk_size; |
517 | unsigned int flags; |
518 | |
519 | if (try_contig || no_pageshift) |
520 | mem_size = rte_mempool_ops_calc_mem_size(mp, n, |
521 | 0, &min_chunk_size, &align); |
522 | else |
523 | mem_size = rte_mempool_ops_calc_mem_size(mp, n, |
524 | pg_shift, &min_chunk_size, &align); |
525 | |
526 | if (mem_size < 0) { |
527 | ret = mem_size; |
528 | goto fail; |
529 | } |
530 | |
531 | ret = snprintf(mz_name, sizeof(mz_name), |
532 | RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id); |
533 | if (ret < 0 || ret >= (int)sizeof(mz_name)) { |
534 | ret = -ENAMETOOLONG; |
535 | goto fail; |
536 | } |
537 | |
538 | flags = mz_flags; |
539 | |
540 | /* if we're trying to reserve contiguous memory, add appropriate |
541 | * memzone flag. |
542 | */ |
543 | if (try_contig) |
544 | flags |= RTE_MEMZONE_IOVA_CONTIG; |
545 | |
546 | mz = rte_memzone_reserve_aligned(mz_name, mem_size, |
547 | mp->socket_id, flags, align); |
548 | |
549 | /* if we were trying to allocate contiguous memory, failed and |
550 | * minimum required contiguous chunk fits minimum page, adjust |
551 | * memzone size to the page size, and try again. |
552 | */ |
553 | if (mz == NULL && try_contig && min_chunk_size <= pg_sz) { |
554 | try_contig = false; |
555 | flags &= ~RTE_MEMZONE_IOVA_CONTIG; |
556 | |
557 | mem_size = rte_mempool_ops_calc_mem_size(mp, n, |
558 | pg_shift, &min_chunk_size, &align); |
559 | if (mem_size < 0) { |
560 | ret = mem_size; |
561 | goto fail; |
562 | } |
563 | |
564 | mz = rte_memzone_reserve_aligned(mz_name, mem_size, |
565 | mp->socket_id, flags, align); |
566 | } |
567 | /* don't try reserving with 0 size if we were asked to reserve |
568 | * IOVA-contiguous memory. |
569 | */ |
570 | if (min_chunk_size < (size_t)mem_size && mz == NULL) { |
571 | /* not enough memory, retry with the biggest zone we |
572 | * have |
573 | */ |
574 | mz = rte_memzone_reserve_aligned(mz_name, 0, |
575 | mp->socket_id, flags, |
576 | RTE_MAX(pg_sz, align)); |
577 | } |
578 | if (mz == NULL) { |
579 | ret = -rte_errno; |
580 | goto fail; |
581 | } |
582 | |
583 | if (mz->len < min_chunk_size) { |
584 | rte_memzone_free(mz); |
585 | ret = -ENOMEM; |
586 | goto fail; |
587 | } |
588 | |
589 | if (no_contig) |
590 | iova = RTE_BAD_IOVA; |
591 | else |
592 | iova = mz->iova; |
593 | |
594 | if (no_pageshift || try_contig) |
595 | ret = rte_mempool_populate_iova(mp, mz->addr, |
596 | iova, mz->len, |
597 | rte_mempool_memchunk_mz_free, |
598 | (void *)(uintptr_t)mz); |
599 | else |
600 | ret = rte_mempool_populate_virt(mp, mz->addr, |
601 | RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz, |
602 | rte_mempool_memchunk_mz_free, |
603 | (void *)(uintptr_t)mz); |
604 | if (ret < 0) { |
605 | rte_memzone_free(mz); |
606 | goto fail; |
607 | } |
608 | } |
609 | |
610 | return mp->size; |
611 | |
612 | fail: |
613 | rte_mempool_free_memchunks(mp); |
614 | return ret; |
615 | } |
616 | |
617 | /* return the memory size required for mempool objects in anonymous mem */ |
618 | static ssize_t |
619 | get_anon_size(const struct rte_mempool *mp) |
620 | { |
621 | ssize_t size; |
622 | size_t pg_sz, pg_shift; |
623 | size_t min_chunk_size; |
624 | size_t align; |
625 | |
626 | pg_sz = getpagesize(); |
627 | pg_shift = rte_bsf32(pg_sz); |
628 | size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift, |
629 | &min_chunk_size, &align); |
630 | |
631 | return size; |
632 | } |
633 | |
634 | /* unmap a memory zone mapped by rte_mempool_populate_anon() */ |
635 | static void |
636 | rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr, |
637 | void *opaque) |
638 | { |
639 | ssize_t size; |
640 | |
641 | /* |
642 | * Calculate size since memhdr->len has contiguous chunk length |
643 | * which may be smaller if anon map is split into many contiguous |
644 | * chunks. Result must be the same as we calculated on populate. |
645 | */ |
646 | size = get_anon_size(memhdr->mp); |
647 | if (size < 0) |
648 | return; |
649 | |
650 | munmap(opaque, size); |
651 | } |
652 | |
653 | /* populate the mempool with an anonymous mapping */ |
654 | int |
655 | rte_mempool_populate_anon(struct rte_mempool *mp) |
656 | { |
657 | ssize_t size; |
658 | int ret; |
659 | char *addr; |
660 | |
661 | /* mempool is already populated, error */ |
662 | if (!STAILQ_EMPTY(&mp->mem_list) || mp->nb_mem_chunks != 0) { |
663 | rte_errno = EINVAL; |
664 | return 0; |
665 | } |
666 | |
667 | ret = mempool_ops_alloc_once(mp); |
668 | if (ret != 0) |
669 | return ret; |
670 | |
671 | size = get_anon_size(mp); |
672 | if (size < 0) { |
673 | rte_errno = -size; |
674 | return 0; |
675 | } |
676 | |
677 | /* get chunk of virtually contiguous memory */ |
678 | addr = mmap(NULL, size, PROT_READ | PROT_WRITE, |
679 | MAP_SHARED | MAP_ANONYMOUS, -1, 0); |
680 | if (addr == MAP_FAILED) { |
681 | rte_errno = errno; |
682 | return 0; |
683 | } |
684 | /* can't use MMAP_LOCKED, it does not exist on BSD */ |
685 | if (mlock(addr, size) < 0) { |
686 | rte_errno = errno; |
687 | munmap(addr, size); |
688 | return 0; |
689 | } |
690 | |
691 | ret = rte_mempool_populate_virt(mp, addr, size, getpagesize(), |
692 | rte_mempool_memchunk_anon_free, addr); |
693 | if (ret == 0) |
694 | goto fail; |
695 | |
696 | return mp->populated_size; |
697 | |
698 | fail: |
699 | rte_mempool_free_memchunks(mp); |
700 | return 0; |
701 | } |
702 | |
703 | /* free a mempool */ |
704 | void |
705 | rte_mempool_free(struct rte_mempool *mp) |
706 | { |
707 | struct rte_mempool_list *mempool_list = NULL; |
708 | struct rte_tailq_entry *te; |
709 | |
710 | if (mp == NULL) |
711 | return; |
712 | |
713 | mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); |
714 | rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); |
715 | /* find out tailq entry */ |
716 | TAILQ_FOREACH(te, mempool_list, next) { |
717 | if (te->data == (void *)mp) |
718 | break; |
719 | } |
720 | |
721 | if (te != NULL) { |
722 | TAILQ_REMOVE(mempool_list, te, next); |
723 | rte_free(te); |
724 | } |
725 | rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); |
726 | |
727 | rte_mempool_free_memchunks(mp); |
728 | rte_mempool_ops_free(mp); |
729 | rte_memzone_free(mp->mz); |
730 | } |
731 | |
732 | static void |
733 | mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size) |
734 | { |
735 | cache->size = size; |
736 | cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size); |
737 | cache->len = 0; |
738 | } |
739 | |
740 | /* |
741 | * Create and initialize a cache for objects that are retrieved from and |
742 | * returned to an underlying mempool. This structure is identical to the |
743 | * local_cache[lcore_id] pointed to by the mempool structure. |
744 | */ |
745 | struct rte_mempool_cache * |
746 | rte_mempool_cache_create(uint32_t size, int socket_id) |
747 | { |
748 | struct rte_mempool_cache *cache; |
749 | |
750 | if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) { |
751 | rte_errno = EINVAL; |
752 | return NULL; |
753 | } |
754 | |
755 | cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache), |
756 | RTE_CACHE_LINE_SIZE, socket_id); |
757 | if (cache == NULL) { |
758 | RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n"); |
759 | rte_errno = ENOMEM; |
760 | return NULL; |
761 | } |
762 | |
763 | mempool_cache_init(cache, size); |
764 | |
765 | return cache; |
766 | } |
767 | |
768 | /* |
769 | * Free a cache. It's the responsibility of the user to make sure that any |
770 | * remaining objects in the cache are flushed to the corresponding |
771 | * mempool. |
772 | */ |
773 | void |
774 | rte_mempool_cache_free(struct rte_mempool_cache *cache) |
775 | { |
776 | rte_free(cache); |
777 | } |
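
For illustration, a user-owned cache is created once per thread and passed explicitly to the generic put/get calls; leftovers must be flushed before the cache is freed. A minimal sketch, assuming mp is an existing pool (error handling elided):

    struct rte_mempool_cache *c = rte_mempool_cache_create(32, SOCKET_ID_ANY);
    void *objs[8];

    if (rte_mempool_generic_get(mp, objs, 8, c) == 0) {
        /* ... use the 8 objects ... */
        rte_mempool_generic_put(mp, objs, 8, c);
    }
    rte_mempool_cache_flush(c, mp);  /* return cached objects to mp */
    rte_mempool_cache_free(c);
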
778 | |
779 | /* create an empty mempool */ |
780 | struct rte_mempool * |
781 | rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, |
782 | unsigned cache_size, unsigned private_data_size, |
783 | int socket_id, unsigned flags) |
784 | { |
785 | char mz_name[RTE_MEMZONE_NAMESIZE]; |
786 | struct rte_mempool_list *mempool_list; |
787 | struct rte_mempool *mp = NULL; |
788 | struct rte_tailq_entry *te = NULL; |
789 | const struct rte_memzone *mz = NULL; |
790 | size_t mempool_size; |
791 | unsigned int mz_flags = RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY; |
792 | struct rte_mempool_objsz objsz; |
793 | unsigned lcore_id; |
794 | int ret; |
795 | |
796 | /* compilation-time checks */ |
797 | RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) & |
798 | RTE_CACHE_LINE_MASK) != 0); |
799 | RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) & |
800 | RTE_CACHE_LINE_MASK) != 0); |
801 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
802 | RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) & |
803 | RTE_CACHE_LINE_MASK) != 0); |
804 | RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) & |
805 | RTE_CACHE_LINE_MASK) != 0); |
806 | #endif |
807 | |
808 | mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); |
809 | |
810 | /* asked for zero items */ |
811 | if (n == 0) { |
812 | rte_errno = EINVAL; |
813 | return NULL; |
814 | } |
815 | |
816 | /* requested cache size is too big */ |
817 | if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE || |
818 | CALC_CACHE_FLUSHTHRESH(cache_size) > n) { |
819 | rte_errno = EINVAL; |
820 | return NULL; |
821 | } |
822 | |
823 | /* "no cache align" implies "no spread" */ |
824 | if (flags & MEMPOOL_F_NO_CACHE_ALIGN) |
825 | flags |= MEMPOOL_F_NO_SPREAD; |
826 | |
827 | /* calculate mempool object sizes. */ |
828 | if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) { |
829 | rte_errno = EINVAL; |
830 | return NULL; |
831 | } |
832 | |
833 | rte_rwlock_write_lock(RTE_EAL_MEMPOOL_RWLOCK); |
834 | |
835 | /* |
836 | * reserve a memory zone for this mempool: private data is |
837 | * cache-aligned |
838 | */ |
839 | private_data_size = (private_data_size + |
840 | RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK); |
841 | |
842 | |
843 | /* try to allocate tailq entry */ |
844 | te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0); |
845 | if (te == NULL) { |
846 | RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n"); |
847 | goto exit_unlock; |
848 | } |
849 | |
850 | mempool_size = MEMPOOL_HEADER_SIZE(mp, cache_size); |
851 | mempool_size += private_data_size; |
852 | mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN); |
853 | |
854 | ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name); |
855 | if (ret < 0 || ret >= (int)sizeof(mz_name)) { |
856 | rte_errno = ENAMETOOLONG; |
857 | goto exit_unlock; |
858 | } |
859 | |
860 | mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags); |
861 | if (mz == NULL) |
862 | goto exit_unlock; |
863 | |
864 | /* init the mempool structure */ |
865 | mp = mz->addr; |
866 | memset(mp, 0, MEMPOOL_HEADER_SIZE(mp, cache_size)); |
867 | ret = strlcpy(mp->name, name, sizeof(mp->name)); |
868 | if (ret < 0 || ret >= (int)sizeof(mp->name)) { |
869 | rte_errno = ENAMETOOLONG; |
870 | goto exit_unlock; |
871 | } |
872 | mp->mz = mz; |
873 | mp->size = n; |
874 | mp->flags = flags; |
875 | mp->socket_id = socket_id; |
876 | mp->elt_size = objsz.elt_size; |
877 | mp->header_size = objsz.header_size; |
878 | mp->trailer_size = objsz.trailer_size; |
879 | /* Size of default caches, zero means disabled. */ |
880 | mp->cache_size = cache_size; |
881 | mp->private_data_size = private_data_size; |
882 | STAILQ_INIT(&mp->elt_list); |
883 | STAILQ_INIT(&mp->mem_list); |
884 | |
885 | /* |
886 | * local_cache pointer is set even if cache_size is zero. |
887 | * The local_cache points to just past the elt_pa[] array. |
888 | */ |
889 | mp->local_cache = (struct rte_mempool_cache *) |
890 | RTE_PTR_ADD(mp, MEMPOOL_HEADER_SIZE(mp, 0)); |
891 | |
892 | /* Init all default caches. */ |
893 | if (cache_size != 0) { |
894 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) |
895 | mempool_cache_init(&mp->local_cache[lcore_id], |
896 | cache_size); |
897 | } |
898 | |
899 | te->data = mp; |
900 | |
901 | rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); |
902 | TAILQ_INSERT_TAIL(mempool_list, te, next); |
903 | rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); |
904 | rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); |
905 | |
906 | return mp; |
907 | |
908 | exit_unlock: |
909 | rte_rwlock_write_unlock(RTE_EAL_MEMPOOL_RWLOCK); |
910 | rte_free(te); |
911 | rte_mempool_free(mp); |
912 | return NULL; |
913 | } |
914 | |
915 | /* create the mempool */ |
916 | struct rte_mempool * |
917 | rte_mempool_create(const char *name, unsigned n, unsigned elt_size, |
918 | unsigned cache_size, unsigned private_data_size, |
919 | rte_mempool_ctor_t *mp_init, void *mp_init_arg, |
920 | rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, |
921 | int socket_id, unsigned flags) |
922 | { |
923 | int ret; |
924 | struct rte_mempool *mp; |
925 | |
926 | mp = rte_mempool_create_empty(name, n, elt_size, cache_size, |
927 | private_data_size, socket_id, flags); |
928 | if (mp == NULL) |
929 | return NULL; |
930 | |
931 | /* |
932 | * Since we have 4 combinations of SP/SC/MP/MC, examine the flags to |
933 | * set the correct index into the table of ops structs. |
934 | */ |
935 | if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET)) |
936 | ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL); |
937 | else if (flags & MEMPOOL_F_SP_PUT) |
938 | ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL); |
939 | else if (flags & MEMPOOL_F_SC_GET) |
940 | ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL); |
941 | else |
942 | ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL); |
943 | |
944 | if (ret) |
945 | goto fail; |
946 | |
947 | /* call the mempool priv initializer */ |
948 | if (mp_init) |
949 | mp_init(mp, mp_init_arg); |
950 | |
951 | if (rte_mempool_populate_default(mp) < 0) |
952 | goto fail; |
953 | |
954 | /* call the object initializers */ |
955 | if (obj_init) |
956 | rte_mempool_obj_iter(mp, obj_init, obj_init_arg); |
957 | |
958 | return mp; |
959 | |
960 | fail: |
961 | rte_mempool_free(mp); |
962 | return NULL; |
963 | } |
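
Putting it together, a typical caller creates a pool and draws buffers from it roughly like this. A minimal sketch (names and sizes are arbitrary; error handling elided):

    /* 4096 objects of 2048 bytes, 256-object per-lcore cache */
    struct rte_mempool *pool = rte_mempool_create("demo_pool", 4096, 2048,
            256, 0,
            NULL, NULL,   /* no pool constructor */
            NULL, NULL,   /* no per-object init */
            SOCKET_ID_ANY, 0);
    void *bufs[32];

    if (pool != NULL && rte_mempool_get_bulk(pool, bufs, 32) == 0) {
        /* ... use the buffers ... */
        rte_mempool_put_bulk(pool, bufs, 32);
    }
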
964 | |
965 | /* Return the number of entries in the mempool */ |
966 | unsigned int |
967 | rte_mempool_avail_count(const struct rte_mempool *mp) |
968 | { |
969 | unsigned count; |
970 | unsigned lcore_id; |
971 | |
972 | count = rte_mempool_ops_get_count(mp); |
973 | |
974 | if (mp->cache_size == 0) |
975 | return count; |
976 | |
977 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) |
978 | count += mp->local_cache[lcore_id].len; |
979 | |
980 | /* |
981 | * due to race condition (access to len is not locked), the |
982 | * total can be greater than size... so fix the result |
983 | */ |
984 | if (count > mp->size) |
985 | return mp->size; |
986 | return count; |
987 | } |
988 | |
989 | /* return the number of entries allocated from the mempool */ |
990 | unsigned int |
991 | rte_mempool_in_use_count(const struct rte_mempool *mp) |
992 | { |
993 | return mp->size - rte_mempool_avail_count(mp); |
994 | } |
995 | |
996 | /* dump the cache status */ |
997 | static unsigned |
998 | rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp) |
999 | { |
1000 | unsigned lcore_id; |
1001 | unsigned count = 0; |
1002 | unsigned cache_count; |
1003 | |
1004 | fprintf(f, " internal cache infos:\n"); |
1005 | fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size); |
1006 | |
1007 | if (mp->cache_size == 0) |
1008 | return count; |
1009 | |
1010 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { |
1011 | cache_count = mp->local_cache[lcore_id].len; |
1012 | fprintf(f, " cache_count[%u]=%"PRIu32"\n", |
1013 | lcore_id, cache_count); |
1014 | count += cache_count; |
1015 | } |
1016 | fprintf(f, " total_cache_count=%u\n", count); |
1017 | return count; |
1018 | } |
1019 | |
1020 | #ifndef __INTEL_COMPILER |
1021 | #pragma GCC diagnostic ignored "-Wcast-qual" |
1022 | #endif |
1023 | |
1024 | /* check and update cookies or panic (internal) */ |
1025 | void rte_mempool_check_cookies(const struct rte_mempool *mp, |
1026 | void * const *obj_table_const, unsigned n, int free) |
1027 | { |
1028 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
1029 | struct rte_mempool_objhdr *hdr; |
1030 | struct rte_mempool_objtlr *tlr; |
1031 | uint64_t cookie; |
1032 | void *tmp; |
1033 | void *obj; |
1034 | void **obj_table; |
1035 | |
1036 | /* Force to drop the "const" attribute. This is done only when |
1037 | * DEBUG is enabled */ |
1038 | tmp = (void *) obj_table_const; |
1039 | obj_table = tmp; |
1040 | |
1041 | while (n--) { |
1042 | obj = obj_table[n]; |
1043 | |
1044 | if (rte_mempool_from_obj(obj) != mp) |
1045 | rte_panic("MEMPOOL: object is owned by another " |
1046 | "mempool\n"); |
1047 | |
1048 | hdr = __mempool_get_header(obj); |
1049 | cookie = hdr->cookie; |
1050 | |
1051 | if (free == 0) { |
1052 | if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) { |
1053 | RTE_LOG(CRIT, MEMPOOL, |
1054 | "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", |
1055 | obj, (const void *) mp, cookie); |
1056 | rte_panic("MEMPOOL: bad header cookie (put)\n"); |
1057 | } |
1058 | hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2; |
1059 | } else if (free == 1) { |
1060 | if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) { |
1061 | RTE_LOG(CRIT, MEMPOOL, |
1062 | "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", |
1063 | obj, (const void *) mp, cookie); |
1064 | rte_panic("MEMPOOL: bad header cookie (get)\n"); |
1065 | } |
1066 | hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1; |
1067 | } else if (free == 2) { |
1068 | if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 && |
1069 | cookie != RTE_MEMPOOL_HEADER_COOKIE2) { |
1070 | RTE_LOG(CRIT, MEMPOOL, |
1071 | "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", |
1072 | obj, (const void *) mp, cookie); |
1073 | rte_panic("MEMPOOL: bad header cookie (audit)\n"); |
1074 | } |
1075 | } |
1076 | tlr = __mempool_get_trailer(obj); |
1077 | cookie = tlr->cookie; |
1078 | if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { |
1079 | RTE_LOG(CRIT, MEMPOOL, |
1080 | "obj=%p, mempool=%p, cookie=%" PRIx64 "\n", |
1081 | obj, (const void *) mp, cookie); |
1082 | rte_panic("MEMPOOL: bad trailer cookie\n"); |
1083 | } |
1084 | } |
1085 | #else |
1086 | RTE_SET_USED(mp); |
1087 | RTE_SET_USED(obj_table_const); |
1088 | RTE_SET_USED(n); |
1089 | RTE_SET_USED(free); |
1090 | #endif |
1091 | } |
1092 | |
1093 | void |
1094 | rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, |
1095 | void * const *first_obj_table_const, unsigned int n, int free) |
1096 | { |
1097 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
1098 | struct rte_mempool_info info; |
1099 | const size_t total_elt_sz = |
1100 | mp->header_size + mp->elt_size + mp->trailer_size; |
1101 | unsigned int i, j; |
1102 | |
1103 | rte_mempool_ops_get_info(mp, &info); |
1104 | |
1105 | for (i = 0; i < n; ++i) { |
1106 | void *first_obj = first_obj_table_const[i]; |
1107 | |
1108 | for (j = 0; j < info.contig_block_size; ++j) { |
1109 | void *obj; |
1110 | |
1111 | obj = (void *)((uintptr_t)first_obj + j * total_elt_sz); |
1112 | rte_mempool_check_cookies(mp, &obj, 1, free); |
1113 | } |
1114 | } |
1115 | #else |
1116 | RTE_SET_USED(mp); |
1117 | RTE_SET_USED(first_obj_table_const); |
1118 | RTE_SET_USED(n); |
1119 | RTE_SET_USED(free); |
1120 | #endif |
1121 | } |
1122 | |
1123 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
1124 | static void |
1125 | mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque, |
1126 | void *obj, __rte_unused unsigned idx) |
1127 | { |
1128 | __mempool_check_cookies(mp, &obj, 1, 2); |
1129 | } |
1130 | |
1131 | static void |
1132 | mempool_audit_cookies(struct rte_mempool *mp) |
1133 | { |
1134 | unsigned num; |
1135 | |
1136 | num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL); |
1137 | if (num != mp->size) { |
1138 | rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) " |
1139 | "iterated only over %u elements\n", |
1140 | mp, mp->size, num); |
1141 | } |
1142 | } |
1143 | #else |
1144 | #define mempool_audit_cookies(mp) do {} while(0) |
1145 | #endif |
1146 | |
1147 | #ifndef __INTEL_COMPILER |
1148 | #pragma GCC diagnostic error "-Wcast-qual" |
1149 | #endif |
1150 | |
1151 | /* check the consistency of the per-lcore caches */ |
1152 | static void |
1153 | mempool_audit_cache(const struct rte_mempool *mp) |
1154 | { |
1155 | /* check cache size consistency */ |
1156 | unsigned lcore_id; |
1157 | |
1158 | if (mp->cache_size == 0) |
1159 | return; |
1160 | |
1161 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { |
1162 | const struct rte_mempool_cache *cache; |
1163 | cache = &mp->local_cache[lcore_id]; |
1164 | if (cache->len > cache->flushthresh) { |
1165 | RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", |
1166 | lcore_id); |
1167 | rte_panic("MEMPOOL: invalid cache len\n"); |
1168 | } |
1169 | } |
1170 | } |
1171 | |
1172 | /* check the consistency of mempool (size, cookies, ...) */ |
1173 | void |
1174 | rte_mempool_audit(struct rte_mempool *mp) |
1175 | { |
1176 | mempool_audit_cache(mp); |
1177 | mempool_audit_cookies(mp); |
1178 | |
1179 | /* For case where mempool DEBUG is not set, and cache size is 0 */ |
1180 | RTE_SET_USED(mp); |
1181 | } |
1182 | |
1183 | /* dump the status of the mempool on the console */ |
1184 | void |
1185 | rte_mempool_dump(FILE *f, struct rte_mempool *mp) |
1186 | { |
1187 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
1188 | struct rte_mempool_info info; |
1189 | struct rte_mempool_debug_stats sum; |
1190 | unsigned lcore_id; |
1191 | #endif |
1192 | struct rte_mempool_memhdr *memhdr; |
1193 | unsigned common_count; |
1194 | unsigned cache_count; |
1195 | size_t mem_len = 0; |
1196 | |
1197 | RTE_ASSERT(f != NULL); |
1198 | RTE_ASSERT(mp != NULL); |
1199 | |
1200 | fprintf(f, "mempool <%s>@%p\n", mp->name, mp); |
1201 | fprintf(f, " flags=%x\n", mp->flags); |
1202 | fprintf(f, " pool=%p\n", mp->pool_data); |
1203 | fprintf(f, " iova=0x%" PRIx64 "\n", mp->mz->iova); |
1204 | fprintf(f, " nb_mem_chunks=%u\n", mp->nb_mem_chunks); |
1205 | fprintf(f, " size=%"PRIu32"\n", mp->size); |
1206 | fprintf(f, " populated_size=%"PRIu32"\n", mp->populated_size); |
1207 | fprintf(f, " header_size=%"PRIu32"\n", mp->header_size); |
1208 | fprintf(f, " elt_size=%"PRIu32"\n", mp->elt_size); |
1209 | fprintf(f, " trailer_size=%"PRIu32"\n", mp->trailer_size); |
1210 | fprintf(f, " total_obj_size=%"PRIu32"\n", |
1211 | mp->header_size + mp->elt_size + mp->trailer_size); |
1212 | |
1213 | fprintf(f, " private_data_size=%"PRIu32"\n", mp->private_data_size); |
1214 | |
1215 | STAILQ_FOREACH(memhdr, &mp->mem_list, next) |
1216 | mem_len += memhdr->len; |
1217 | if (mem_len != 0) { |
1218 | fprintf(f, " avg bytes/object=%#Lf\n", |
1219 | (long double)mem_len / mp->size); |
1220 | } |
1221 | |
1222 | cache_count = rte_mempool_dump_cache(f, mp); |
1223 | common_count = rte_mempool_ops_get_count(mp); |
1224 | if ((cache_count + common_count) > mp->size) |
1225 | common_count = mp->size - cache_count; |
1226 | fprintf(f, " common_pool_count=%u\n", common_count); |
1227 | |
1228 | /* sum and dump statistics */ |
1229 | #ifdef RTE_LIBRTE_MEMPOOL_DEBUG |
1230 | rte_mempool_ops_get_info(mp, &info); |
1231 | memset(&sum, 0, sizeof(sum)); |
1232 | for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { |
1233 | sum.put_bulk += mp->stats[lcore_id].put_bulk; |
1234 | sum.put_objs += mp->stats[lcore_id].put_objs; |
1235 | sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk; |
1236 | sum.get_success_objs += mp->stats[lcore_id].get_success_objs; |
1237 | sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; |
1238 | sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs; |
1239 | sum.get_success_blks += mp->stats[lcore_id].get_success_blks; |
1240 | sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks; |
1241 | } |
1242 | fprintf(f, " stats:\n"); |
1243 | fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk); |
1244 | fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs); |
1245 | fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk); |
1246 | fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs); |
1247 | fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); |
1248 | fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs); |
1249 | if (info.contig_block_size > 0) { |
1250 | fprintf(f, " get_success_blks=%"PRIu64"\n", |
1251 | sum.get_success_blks); |
1252 | fprintf(f, " get_fail_blks=%"PRIu64"\n", sum.get_fail_blks); |
1253 | } |
1254 | #else |
1255 | fprintf(f, " no statistics available\n"); |
1256 | #endif |
1257 | |
1258 | rte_mempool_audit(mp); |
1259 | } |
1260 | |
1261 | /* dump the status of all mempools on the console */ |
1262 | void |
1263 | rte_mempool_list_dump(FILE *f) |
1264 | { |
1265 | struct rte_mempool *mp = NULL; |
1266 | struct rte_tailq_entry *te; |
1267 | struct rte_mempool_list *mempool_list; |
1268 | |
1269 | mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); |
1270 | |
1271 | rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); |
1272 | |
1273 | TAILQ_FOREACH(te, mempool_list, next) { |
1274 | mp = (struct rte_mempool *) te->data; |
1275 | rte_mempool_dump(f, mp); |
1276 | } |
1277 | |
1278 | rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); |
1279 | } |
1280 | |
1281 | /* search a mempool from its name */ |
1282 | struct rte_mempool * |
1283 | rte_mempool_lookup(const char *name) |
1284 | { |
1285 | struct rte_mempool *mp = NULL; |
1286 | struct rte_tailq_entry *te; |
1287 | struct rte_mempool_list *mempool_list; |
1288 | |
1289 | mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); |
1290 | |
1291 | rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); |
1292 | |
1293 | TAILQ_FOREACH(te, mempool_list, next) { |
1294 | mp = (struct rte_mempool *) te->data; |
1295 | if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0) |
1296 | break; |
1297 | } |
1298 | |
1299 | rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); |
1300 | |
1301 | if (te == NULL) { |
1302 | rte_errno = ENOENT; |
1303 | return NULL; |
1304 | } |
1305 | |
1306 | return mp; |
1307 | } |
1308 | |
1309 | void rte_mempool_walk(void (*func)(struct rte_mempool *, void *), |
1310 | void *arg) |
1311 | { |
1312 | struct rte_tailq_entry *te = NULL; |
1313 | struct rte_mempool_list *mempool_list; |
1314 | void *tmp_te; |
1315 | |
1316 | mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list); |
1317 | |
1318 | rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK); |
1319 | |
1320 | TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) { |
1321 | (*func)((struct rte_mempool *) te->data, arg); |
1322 | } |
1323 | |
1324 | rte_rwlock_read_unlock(RTE_EAL_MEMPOOL_RWLOCK); |
1325 | } |