Bug Summary

File: home/bhubbard/working/src/ceph/src/spdk/dpdk/lib/librte_eal/common/rte_service.c
Warning: line 785, column 3
Value stored to 'all_cycles' is never read
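
The diagnostic is a dead store: rte_service_dump_one() takes 'all_cycles' by value, clamps it to 1 to guard against a divide by zero, and then never reads it again, so the clamped value is lost when the function returns. A reduced sketch of the pattern (hypothetical names, not the DPDK code itself):

#include <stdio.h>
#include <inttypes.h>

static void
dump_stats(uint64_t all_cycles, uint64_t calls, uint64_t cycles_spent)
{
	if (all_cycles == 0)
		all_cycles = 1;	/* stored here, never read below: dead store */

	if (calls == 0)
		calls = 1;	/* stored AND read below: fine */

	/* only 'calls' and 'cycles_spent' are consumed */
	printf("avg: %" PRIu64 "\n", cycles_spent / calls);
}

The usual fixes are to delete the unused guard (and, if possible, the parameter) or to actually consume 'all_cycles' in the output; see the note after rte_service_dump_one() below.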

Annotated Source Code


1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <limits.h>
9 #include <string.h>
10
11 #include <rte_compat.h>
12 #include <rte_service.h>
13 #include "include/rte_service_component.h"
14
15 #include <rte_eal.h>
16 #include <rte_lcore.h>
17 #include <rte_common.h>
18 #include <rte_debug.h>
19 #include <rte_cycles.h>
20 #include <rte_atomic.h>
21 #include <rte_memory.h>
22 #include <rte_malloc.h>
23
24 #define RTE_SERVICE_NUM_MAX 64
25
26 #define SERVICE_F_REGISTERED    (1 << 0)
27 #define SERVICE_F_STATS_ENABLED (1 << 1)
28 #define SERVICE_F_START_CHECK   (1 << 2)
29
30 /* runstates for services and lcores, denoting if they are active or not */
31 #define RUNSTATE_STOPPED 0
32 #define RUNSTATE_RUNNING 1
33
34 /* internal representation of a service */
35 struct rte_service_spec_impl {
36 /* public part of the struct */
37 struct rte_service_spec spec;
38
39 /* atomic lock that when set indicates a service core is currently
40 * running this service callback. When not set, a core may take the
41 * lock and then run the service callback.
42 */
43 rte_atomic32_t execute_lock;
44
45 /* API set/get-able variables */
46 int8_t app_runstate;
47 int8_t comp_runstate;
48 uint8_t internal_flags;
49
50 /* per service statistics */
51 rte_atomic32_t num_mapped_cores;
52 uint64_t calls;
53 uint64_t cycles_spent;
54 uint8_t active_on_lcore[RTE_MAX_LCORE];
55 } __rte_cache_aligned;
56
57 /* the internal values of a service core */
58 struct core_state {
59 /* map of services IDs are run on this core */
60 uint64_t service_mask;
61 uint8_t runstate; /* running or stopped */
62 uint8_t is_service_core; /* set if core is currently a service core */
63
64 uint64_t loops;
65 uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
66 } __rte_cache_aligned;
67
68 static uint32_t rte_service_count;
69 static struct rte_service_spec_impl *rte_services;
70 static struct core_state *lcore_states;
71 static uint32_t rte_service_library_initialized;
72
73 int32_t rte_service_init(void)
74 {
75 if (rte_service_library_initialized) {
76 printf("service library init() called, init flag %d\n",
77 rte_service_library_initialized);
78 return -EALREADY;
79 }
80
81 rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
82 sizeof(struct rte_service_spec_impl),
83 RTE_CACHE_LINE_SIZE);
84 if (!rte_services) {
85 printf("error allocating rte services array\n");
86 goto fail_mem;
87 }
88
89 lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
90 sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
91 if (!lcore_states) {
92 printf("error allocating core states array\n");
93 goto fail_mem;
94 }
95
96 int i;
97 int count = 0;
98 struct rte_config *cfg = rte_eal_get_configuration();
99 for (i = 0; i < RTE_MAX_LCORE; i++) {
100 if (lcore_config[i].core_role == ROLE_SERVICE) {
101 if ((unsigned int)i == cfg->master_lcore)
102 continue;
103 rte_service_lcore_add(i);
104 count++;
105 }
106 }
107
108 rte_service_library_initialized = 1;
109 return 0;
110 fail_mem:
111 if (rte_services)
112 rte_free(rte_services);
113 if (lcore_states)
114 rte_free(lcore_states);
115 return -ENOMEM;
116 }
117
118 void
119 rte_service_finalize(void)
120 {
121 if (!rte_service_library_initialized)
122 return;
123
124 if (rte_services)
125 rte_free(rte_services);
126
127 if (lcore_states)
128 rte_free(lcore_states);
129
130 rte_service_library_initialized = 0;
131 }
132
133 /* returns 1 if service is registered and has not been unregistered
134 * Returns 0 if service never registered, or has been unregistered
135 */
136 static inline int
137 service_valid(uint32_t id)
138 {
139 return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
140 }
141
142 /* validate ID and retrieve service pointer, or return error value */
143 #define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do { \
144 if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id)) \
145 return retval; \
146 service = &rte_services[id]; \
147 } while (0)
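
For reference, a call such as SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL) expands (with RTE_SERVICE_NUM_MAX = 64 and -EINVAL = -22 substituted, as the analyzer's expansions show) to:

do {
	if (id >= 64 || !service_valid(id))
		return -22;
	s = &rte_services[id];
} while (0);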
148
149 /* returns 1 if statistics should be collected for service
150  * Returns 0 if statistics should not be collected for service
151  */
152 static inline int
153 service_stats_enabled(struct rte_service_spec_impl *impl)
154 {
155 return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
156 }
157
158 static inline int
159 service_mt_safe(struct rte_service_spec_impl *s)
160 {
161 return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
162 }
163
164 int32_t
165 rte_service_set_stats_enable(uint32_t id, int32_t enabled)
166 {
167 struct rte_service_spec_impl *s;
168 SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
169
170 if (enabled)
171 s->internal_flags |= SERVICE_F_STATS_ENABLED;
172 else
173 s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
174
175 return 0;
176 }
177
178 int32_t
179 rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
180 {
181 struct rte_service_spec_impl *s;
182 SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
183
184 if (enabled)
185 s->internal_flags |= SERVICE_F_START_CHECK;
186 else
187 s->internal_flags &= ~(SERVICE_F_START_CHECK);
188
189 return 0;
190 }
191
192 uint32_t
193 rte_service_get_count(void)
194 {
195 return rte_service_count;
196 }
197
198 int32_t
199 rte_service_get_by_name(const char *name, uint32_t *service_id)
200 {
201 if (!service_id)
202 return -EINVAL;
203
204 int i;
205 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
206 if (service_valid(i) &&
207 strcmp(name, rte_services[i].spec.name) == 0) {
208 *service_id = i;
209 return 0;
210 }
211 }
212
213 return -ENODEV;
214 }
215
216 const char *
217 rte_service_get_name(uint32_t id)
218 {
219 struct rte_service_spec_impl *s;
220 SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
221 return s->spec.name;
222 }
223
224 int32_t
225 rte_service_probe_capability(uint32_t id, uint32_t capability)
226 {
227 struct rte_service_spec_impl *s;
228 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
229 return !!(s->spec.capabilities & capability);
230 }
231
232 int32_t
233 rte_service_component_register(const struct rte_service_spec *spec,
234 uint32_t *id_ptr)
235 {
236 uint32_t i;
237 int32_t free_slot = -1;
238
239 if (spec->callback == NULL || strlen(spec->name) == 0)
240 return -EINVAL;
241
242 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
243 if (!service_valid(i)) {
244 free_slot = i;
245 break;
246 }
247 }
248
249 if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
250 return -ENOSPC;
251
252 struct rte_service_spec_impl *s = &rte_services[free_slot];
253 s->spec = *spec;
254 s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
255
256 rte_smp_wmb();
257 rte_service_count++;
258
259 if (id_ptr)
260 *id_ptr = free_slot;
261
262 return 0;
263 }
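
For context on how this slot machinery is driven, a component would typically fill in a struct rte_service_spec and register it along these lines (a minimal sketch; the callback and helper are hypothetical, and the spec field names are assumed from rte_service_component.h):

#include <string.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

static int32_t
my_poll_cb(void *userdata)	/* hypothetical service callback */
{
	(void)userdata;
	/* poll a device, drain a ring, etc.; return 0 on success */
	return 0;
}

static int
register_my_service(uint32_t *out_id)
{
	struct rte_service_spec spec;

	memset(&spec, 0, sizeof(spec));
	strcpy(spec.name, "my_service");	/* empty name is rejected above */
	spec.callback = my_poll_cb;		/* NULL callback is rejected above */
	spec.callback_userdata = NULL;

	/* writes the allocated slot index to *out_id on success,
	 * or returns -EINVAL / -ENOSPC as in the code above */
	return rte_service_component_register(&spec, out_id);
}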
264
265 int32_t
266 rte_service_component_unregister(uint32_t id)
267 {
268 uint32_t i;
269 struct rte_service_spec_impl *s;
270 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
271
272 rte_service_count--;
273 rte_smp_wmb();
274
275 s->internal_flags &= ~(SERVICE_F_REGISTERED);
276
277 /* clear the run-bit in all cores */
278 for (i = 0; i < RTE_MAX_LCORE; i++)
279 lcore_states[i].service_mask &= ~(UINT64_C(1) << id);
280
281 memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
282
283 return 0;
284 }
285
286 int32_t
287 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
288 {
289 struct rte_service_spec_impl *s;
290 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
291
292 if (runstate)
293 s->comp_runstate = RUNSTATE_RUNNING;
294 else
295 s->comp_runstate = RUNSTATE_STOPPED;
296
297 rte_smp_wmb();
298 return 0;
299 }
300
301 int32_t
302 rte_service_runstate_set(uint32_t id, uint32_t runstate)
303 {
304 struct rte_service_spec_impl *s;
305 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
306
307 if (runstate)
308 s->app_runstate = RUNSTATE_RUNNING;
309 else
310 s->app_runstate = RUNSTATE_STOPPED;
311
312 rte_smp_wmb();
313 return 0;
314 }
315
316 int32_t
317 rte_service_runstate_get(uint32_t id)
318 {
319 struct rte_service_spec_impl *s;
320 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
321 rte_smp_rmb();
322
323 int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
324 int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
325
326 return (s->app_runstate == RUNSTATE_RUNNING) &&
327 (s->comp_runstate == RUNSTATE_RUNNING) &&
328 (check_disabled | lcore_mapped);
329 }
330
331 static inline void
332 rte_service_runner_do_callback(struct rte_service_spec_impl *s,
333 struct core_state *cs, uint32_t service_idx)
334 {
335 void *userdata = s->spec.callback_userdata;
336
337 if (service_stats_enabled(s)) {
338 uint64_t start = rte_rdtsc();
339 s->spec.callback(userdata);
340 uint64_t end = rte_rdtsc();
341 s->cycles_spent += end - start;
342 cs->calls_per_service[service_idx]++;
343 s->calls++;
344 } else
345 s->spec.callback(userdata);
346 }
347
348
349 static inline int32_t
350 service_run(uint32_t i, int lcore, struct core_state *cs, uint64_t service_mask)
351 {
352 if (!service_valid(i))
353 return -EINVAL;
354 struct rte_service_spec_impl *s = &rte_services[i];
355 if (s->comp_runstate != RUNSTATE_RUNNING ||
356 s->app_runstate != RUNSTATE_RUNNING ||
357 !(service_mask & (UINT64_C(1) << i))) {
358 s->active_on_lcore[lcore] = 0;
359 return -ENOEXEC;
360 }
361
362 s->active_on_lcore[lcore] = 1;
363
364 /* check do we need cmpset, if MT safe or <= 1 core
365  * mapped, atomic ops are not required.
366  */
367 const int use_atomics = (service_mt_safe(s) == 0) &&
368 (rte_atomic32_read(&s->num_mapped_cores) > 1);
369 if (use_atomics) {
370 if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
371 return -EBUSY;
372
373 rte_service_runner_do_callback(s, cs, i);
374 rte_atomic32_clear(&s->execute_lock);
375 } else
376 rte_service_runner_do_callback(s, cs, i);
377
378 return 0;
379 }
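
The execute_lock path above is a try-lock built from compare-and-set: a core may run a multi-thread-unsafe service only if it atomically flips the lock from 0 to 1, and it backs off with -EBUSY otherwise. A self-contained sketch of the same gate using C11 atomics instead of the DPDK primitives (names hypothetical):

#include <stdatomic.h>
#include <errno.h>

static atomic_uint execute_lock;	/* 0 = free, 1 = held */

static int
try_run(void (*cb)(void *), void *arg)
{
	unsigned int expected = 0;

	/* equivalent of rte_atomic32_cmpset(&lock, 0, 1) */
	if (!atomic_compare_exchange_strong(&execute_lock, &expected, 1))
		return -EBUSY;	/* another core holds the service */

	cb(arg);
	atomic_store(&execute_lock, 0);	/* rte_atomic32_clear() analogue */
	return 0;
}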
380
381 int32_t __rte_experimental
382 rte_service_may_be_active(uint32_t id)
383 {
384 uint32_t ids[RTE_MAX_LCORE] = {0};
385 struct rte_service_spec_impl *s = &rte_services[id];
386 int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
387 int i;
388
389 if (!service_valid(id))
390 return -EINVAL;
391
392 for (i = 0; i < lcore_count; i++) {
393 if (s->active_on_lcore[ids[i]])
394 return 1;
395 }
396
397 return 0;
398 }
399
400 int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
401 uint32_t serialize_mt_unsafe)
402 {
403 /* run service on calling core, using all-ones as the service mask */
404 if (!service_valid(id))
405 return -EINVAL;
406
407 struct core_state *cs = &lcore_states[rte_lcore_id()];
408 struct rte_service_spec_impl *s = &rte_services[id];
409
410 /* Atomically add this core to the mapped cores first, then examine if
411  * we can run the service. This avoids a race condition between
412  * checking the value, and atomically adding to the mapped count.
413  */
414 if (serialize_mt_unsafe)
415 rte_atomic32_inc(&s->num_mapped_cores);
416
417 if (service_mt_safe(s) == 0 &&
418 rte_atomic32_read(&s->num_mapped_cores) > 1) {
419 if (serialize_mt_unsafe)
420 rte_atomic32_dec(&s->num_mapped_cores);
421 return -EBUSY;
422 }
423
424 int ret = service_run(id, rte_lcore_id(), cs, UINT64_MAX);
425
426 if (serialize_mt_unsafe)
427 rte_atomic32_dec(&s->num_mapped_cores);
428
429 return ret;
430 }
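
This entry point lets an application lcore drive one iteration of a service itself rather than dedicating a service core. A minimal usage sketch built only from the APIs in this file (the service name and error handling are illustrative):

#include <rte_service.h>

static int
poll_service_once(const char *name)
{
	uint32_t id;

	if (rte_service_get_by_name(name, &id) != 0)
		return -1;	/* no registered service by that name */

	/* serialize_mt_unsafe = 1: bump num_mapped_cores around the call so
	 * an MT-unsafe service never runs on two cores at once; the call
	 * returns -EBUSY if another core currently has the service */
	return rte_service_run_iter_on_app_lcore(id, 1);
}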
431
432 static int32_t
433 rte_service_runner_func(void *arg)
434 {
435 RTE_SET_USED(arg);
436 uint32_t i;
437 const int lcore = rte_lcore_id();
438 struct core_state *cs = &lcore_states[lcore];
439
440 while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
441 const uint64_t service_mask = cs->service_mask;
442
443 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
444 /* return value ignored as no change to code flow */
445 service_run(i, lcore, cs, service_mask);
446 }
447
448 cs->loops++;
449
450 rte_smp_rmb();
451 }
452
453 lcore_config[lcore].state = WAIT;
454
455 return 0;
456 }
457
458 int32_t
459 rte_service_lcore_count(void)
460 {
461 int32_t count = 0;
462 uint32_t i;
463 for (i = 0; i < RTE_MAX_LCORE; i++)
464 count += lcore_states[i].is_service_core;
465 return count;
466 }
467
468 int32_t
469 rte_service_lcore_list(uint32_t array[], uint32_t n)
470 {
471 uint32_t count = rte_service_lcore_count();
472 if (count > n)
473 return -ENOMEM;
474
475 if (!array)
476 return -EINVAL;
477
478 uint32_t i;
479 uint32_t idx = 0;
480 for (i = 0; i < RTE_MAX_LCORE; i++) {
481 struct core_state *cs = &lcore_states[i];
482 if (cs->is_service_core) {
483 array[idx] = i;
484 idx++;
485 }
486 }
487
488 return count;
489 }
490
491 int32_t
492 rte_service_lcore_count_services(uint32_t lcore)
493 {
494 if (lcore >= RTE_MAX_LCORE)
495 return -EINVAL;
496
497 struct core_state *cs = &lcore_states[lcore];
498 if (!cs->is_service_core)
499 return -ENOTSUP;
500
501 return __builtin_popcountll(cs->service_mask);
502 }
503
504 int32_t
505 rte_service_start_with_defaults(void)
506 {
507 /* create a default mapping from cores to services, then start the
508  * services to make them transparent to unaware applications.
509  */
510 uint32_t i;
511 int ret;
512 uint32_t count = rte_service_get_count();
513
514 int32_t lcore_iter = 0;
515 uint32_t ids[RTE_MAX_LCORE] = {0};
516 int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
517
518 if (lcore_count == 0)
519 return -ENOTSUP;
520
521 for (i = 0; (int)i < lcore_count; i++)
522 rte_service_lcore_start(ids[i]);
523
524 for (i = 0; i < count; i++) {
525 /* do 1:1 core mapping here, with each service getting
526  * assigned a single core by default. Adding multiple services
527  * should multiplex to a single core, or 1:1 if there are the
528  * same amount of services as service-cores
529  */
530 ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
531 if (ret)
532 return -ENODEV;
533
534 lcore_iter++;
535 if (lcore_iter >= lcore_count)
536 lcore_iter = 0;
537
538 ret = rte_service_runstate_set(i, 1);
539 if (ret)
540 return -ENOEXEC;
541 }
542
543 return 0;
544 }
545
546 static int32_t
547 service_update(struct rte_service_spec *service, uint32_t lcore,
548 uint32_t *set, uint32_t *enabled)
549 {
550 uint32_t i;
551 int32_t sid = -1;
552
553 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
554 if ((struct rte_service_spec *)&rte_services[i] == service &&
555 service_valid(i)) {
556 sid = i;
557 break;
558 }
559 }
560
561 if (sid == -1 || lcore >= RTE_MAX_LCORE)
562 return -EINVAL;
563
564 if (!lcore_states[lcore].is_service_core)
565 return -EINVAL;
566
567 uint64_t sid_mask = UINT64_C(1) << sid;
568 if (set) {
569 uint64_t lcore_mapped = lcore_states[lcore].service_mask &
570 sid_mask;
571
572 if (*set && !lcore_mapped) {
573 lcore_states[lcore].service_mask |= sid_mask;
574 rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
575 }
576 if (!*set && lcore_mapped) {
577 lcore_states[lcore].service_mask &= ~(sid_mask);
578 rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
579 }
580 }
581
582 if (enabled)
583 *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
584
585 rte_smp_wmb();
586
587 return 0;
588 }
589
590 int32_t
591 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
592 {
593 struct rte_service_spec_impl *s;
594 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
595 uint32_t on = enabled > 0;
596 return service_update(&s->spec, lcore, &on, 0);
597 }
598
599 int32_t
600 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
601 {
602 struct rte_service_spec_impl *s;
603 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
604 uint32_t enabled;
605 int ret = service_update(&s->spec, lcore, 0, &enabled);
606 if (ret == 0)
607 return enabled;
608 return ret;
609 }
610
611 static void
612 set_lcore_state(uint32_t lcore, int32_t state)
613 {
614 /* mark core state in hugepage backed config */
615 struct rte_config *cfg = rte_eal_get_configuration();
616 cfg->lcore_role[lcore] = state;
617
618 /* mark state in process local lcore_config */
619 lcore_config[lcore].core_role = state;
620
621 /* update per-lcore optimized state tracking */
622 lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
623 }
624
625 int32_t
626 rte_service_lcore_reset_all(void)
627 {
628 /* loop over cores, reset all to mask 0 */
629 uint32_t i;
630 for (i = 0; i < RTE_MAX_LCORE; i++) {
631 if (lcore_states[i].is_service_core) {
632 lcore_states[i].service_mask = 0;
633 set_lcore_state(i, ROLE_RTE);
634 lcore_states[i].runstate = RUNSTATE_STOPPED;
635 }
636 }
637 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
638 rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
639
640 rte_smp_wmb();
641
642 return 0;
643 }
644
645 int32_t
646 rte_service_lcore_add(uint32_t lcore)
647 {
648 if (lcore >= RTE_MAX_LCORE)
649 return -EINVAL;
650 if (lcore_states[lcore].is_service_core)
651 return -EALREADY;
652
653 set_lcore_state(lcore, ROLE_SERVICE);
654
655 /* ensure that after adding a core the mask and state are defaults */
656 lcore_states[lcore].service_mask = 0;
657 lcore_states[lcore].runstate = RUNSTATE_STOPPED;
658
659 rte_smp_wmb();
660
661 return rte_eal_wait_lcore(lcore);
662 }
663
664 int32_t
665 rte_service_lcore_del(uint32_t lcore)
666 {
667 if (lcore >= RTE_MAX_LCORE)
668 return -EINVAL;
669
670 struct core_state *cs = &lcore_states[lcore];
671 if (!cs->is_service_core)
672 return -EINVAL;
673
674 if (cs->runstate != RUNSTATE_STOPPED)
675 return -EBUSY;
676
677 set_lcore_state(lcore, ROLE_RTE);
678
679 rte_smp_wmb();
680 return 0;
681 }
682
683 int32_t
684 rte_service_lcore_start(uint32_t lcore)
685 {
686 if (lcore >= RTE_MAX_LCORE)
687 return -EINVAL;
688
689 struct core_state *cs = &lcore_states[lcore];
690 if (!cs->is_service_core)
691 return -EINVAL;
692
693 if (cs->runstate == RUNSTATE_RUNNING)
694 return -EALREADY;
695
696 /* set core to run state first, and then launch otherwise it will
697  * return immediately as runstate keeps it in the service poll loop
698  */
699 lcore_states[lcore].runstate = RUNSTATE_RUNNING;
700
701 int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
702 /* returns -EBUSY if the core is already launched, 0 on success */
703 return ret;
704 }
705
706 int32_t
707 rte_service_lcore_stop(uint32_t lcore)
708 {
709 if (lcore >= RTE_MAX_LCORE)
710 return -EINVAL;
711
712 if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
713 return -EALREADY;
714
715 uint32_t i;
716 uint64_t service_mask = lcore_states[lcore].service_mask;
717 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
718 int32_t enabled = service_mask & (UINT64_C(1) << i);
719 int32_t service_running = rte_service_runstate_get(i);
720 int32_t only_core = (1 ==
721 rte_atomic32_read(&rte_services[i].num_mapped_cores));
722
723 /* if the core is mapped, and the service is running, and this
724  * is the only core that is mapped, the service would cease to
725  * run if this core stopped, so fail instead.
726  */
727 if (enabled && service_running && only_core)
728 return -EBUSY;
729 }
730
731 lcore_states[lcore].runstate = RUNSTATE_STOPPED;
732
733 return 0;
734 }
735
736 int32_t
737 rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
738 {
739 struct rte_service_spec_impl *s;
740 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
741
742 if (!attr_value)
743 return -EINVAL;
744
745 switch (attr_id) {
746 case RTE_SERVICE_ATTR_CYCLES:
747 *attr_value = s->cycles_spent;
748 return 0;
749 case RTE_SERVICE_ATTR_CALL_COUNT:
750 *attr_value = s->calls;
751 return 0;
752 default:
753 return -EINVAL;
754 }
755 }
756
757 int32_t __rte_experimental
758 rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
759 uint64_t *attr_value)
760 {
761 struct core_state *cs;
762
763 if (lcore >= RTE_MAX_LCORE || !attr_value)
764 return -EINVAL;
765
766 cs = &lcore_states[lcore];
767 if (!cs->is_service_core)
768 return -ENOTSUP;
769
770 switch (attr_id) {
771 case RTE_SERVICE_LCORE_ATTR_LOOPS:
772 *attr_value = cs->loops;
773 return 0;
774 default:
775 return -EINVAL;
776 }
777 }
778
779 static void
780 rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
781 uint64_t all_cycles, uint32_t reset)
782 {
783 /* avoid divide by zero */
784 if (all_cycles == 0)
785 all_cycles = 1;
    ^ Value stored to 'all_cycles' is never read
786
787 int calls = 1;
788 if (s->calls != 0)
789 calls = s->calls;
790
791 if (reset) {
792 s->cycles_spent = 0;
793 s->calls = 0;
794 return;
795 }
796
797 if (f == NULL)
798 return;
799
800 fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
801 PRIu64"\tavg: %"PRIu64"\n",
802 s->spec.name, service_stats_enabled(s), s->calls,
803 s->cycles_spent, s->cycles_spent / calls);
804 }
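
This is the flagged store: 'all_cycles' is a by-value parameter, line 785 is its last write, and nothing afterwards reads it; the average printed at line 803 divides by 'calls', not 'all_cycles' (a total is still computed and passed in by rte_service_dump(), below). One repair is to drop the dead guard and the unused parameter; the other is to consume the value, e.g. to report each service's share of the total. A sketch of the latter (the %total column is hypothetical, not upstream behaviour):

#include <stdio.h>
#include <inttypes.h>

static void
dump_one_fixed(FILE *f, const char *name, uint64_t calls,
		uint64_t cycles_spent, uint64_t all_cycles)
{
	if (all_cycles == 0)
		all_cycles = 1;	/* guard now meaningful: read below */
	if (calls == 0)
		calls = 1;

	fprintf(f, " %s: calls %"PRIu64"\tcycles %"PRIu64"\tavg: %"PRIu64
			"\t%%total: %"PRIu64"\n",
			name, calls, cycles_spent, cycles_spent / calls,
			cycles_spent * 100 / all_cycles);
}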
805
806 int32_t
807 rte_service_attr_reset_all(uint32_t id)
808 {
809 struct rte_service_spec_impl *s;
810 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
811
812 int reset = 1;
813 rte_service_dump_one(NULL, s, 0, reset);
814 return 0;
815 }
816
817 int32_t __rte_experimental
818 rte_service_lcore_attr_reset_all(uint32_t lcore)
819 {
820 struct core_state *cs;
821
822 if (lcore >= RTE_MAX_LCORE)
823 return -EINVAL;
824
825 cs = &lcore_states[lcore];
826 if (!cs->is_service_core)
827 return -ENOTSUP;
828
829 cs->loops = 0;
830
831 return 0;
832 }
833
834 static void
835 service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
836 {
837 uint32_t i;
838 struct core_state *cs = &lcore_states[lcore];
839
840 fprintf(f, "%02d\t", lcore);
841 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
842 if (!service_valid(i))
843 continue;
844 fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
845 if (reset)
846 cs->calls_per_service[i] = 0;
847 }
848 fprintf(f, "\n");
849 }
850
851 int32_t
852 rte_service_dump(FILE *f, uint32_t id)
853 {
854 uint32_t i;
855 int print_one = (id != UINT32_MAX);
856
857 uint64_t total_cycles = 0;
858
859 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
860 if (!service_valid(i))
861 continue;
862 total_cycles += rte_services[i].cycles_spent;
863 }
864
865 /* print only the specified service */
866 if (print_one) {
867 struct rte_service_spec_impl *s;
868 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
869 fprintf(f, "Service %s Summary\n", s->spec.name);
870 uint32_t reset = 0;
871 rte_service_dump_one(f, s, total_cycles, reset);
872 return 0;
873 }
874
875 /* print all services, as UINT32_MAX was passed as id */
876 fprintf(f, "Services Summary\n");
877 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
878 if (!service_valid(i))
879 continue;
880 uint32_t reset = 0;
881 rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
882 }
883
884 fprintf(f, "Service Cores Summary\n");
885 for (i = 0; i < RTE_MAX_LCORE; i++) {
886 if (lcore_config[i].core_role != ROLE_SERVICE)
887 continue;
888
889 uint32_t reset = 0;
890 service_dump_calls_per_lcore(f, i, reset);
891 }
892
893 return 0;
894 }