/* Test application that can be used to stress CPU, memory, and HW accelerators. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <getopt.h>
#include <odp/helper/odph_api.h>
#define MODE_MEMCPY 0x1
#define MODE_COPY_U32 0x2
#define MODE_SQRT_U32 0x4
#define MODE_SQRT_F32 0x8
#define TMODE_SHARED_REL 0
#define TMODE_PRIVATE_REL 1
/* Command line options, filled in by parse_options() */
typedef struct test_options_t {
	uint32_t num_cpu;   /* Number of worker threads (-c); 0 = all available CPUs */
	uint64_t period_ns; /* Timeout period in nanoseconds (-p) */
	uint64_t rounds;    /* Number of timeout rounds (-r) */
	uint64_t mem_size;  /* Work memory size per worker in bytes (-s) */
	int mode;           /* Bitmask of MODE_* stress work flags (-m); 0 = no work */
	int group_mode;     /* 0: GROUP_ALL, 1: schedule group per CPU (-g) */
	int timer_mode;     /* TMODE_SHARED_REL or TMODE_PRIVATE_REL (-t) */
} test_options_t;
/* Per-thread statistics, written by worker_thread() at exit */
typedef struct test_stat_t {
	uint64_t rounds;    /* Timeout rounds completed by this thread */
	uint64_t tot_nsec;  /* Total run time in nanoseconds */
	uint64_t work_nsec; /* Time spent in stress work in nanoseconds */
	uint64_t dummy_sum; /* Sqrt result accumulator (keeps the work from being optimized out) */
} test_stat_t;
/* Sums of per-thread statistics over all workers, filled by sum_stat() */
typedef struct test_stat_sum_t {
	uint64_t rounds;    /* Sum of rounds over all worker threads */
	uint64_t tot_nsec;  /* Sum of total run times */
	uint64_t work_nsec; /* Sum of work times */
} test_stat_sum_t;
/* Per-worker argument passed to worker_thread() */
typedef struct thread_arg_t {
	void *global;    /* Pointer to the shared test_global_t */
	int worker_idx;  /* Worker index in range [0, num_cpu) */
} thread_arg_t;
/* Test state shared by all threads; lives in the "Stress global" SHM block */
typedef struct test_global_t {
	test_options_t test_options; /* Parsed command line options */
	uint64_t period_ticks;       /* Timeout period converted to timer ticks */
	void *worker_mem;            /* Work memory: num_cpu * mem_size bytes */
	test_stat_sum_t stat_sum;    /* Statistic sums, filled by sum_stat() */
	/* NOTE(review): other functions in this file reference fields that are
	 * not declared here (cpumask, timer_pool, tmo_pool, timer_pool_param,
	 * timer[], tmo_queue[], group[], thread_tbl, thread_arg[], stat[]).
	 * This struct definition appears truncated — verify against the
	 * original source. */
} test_global_t;
/* Global pointer to the shared test state, used by sig_handler() */
test_global_t *test_global;
/* Pseudo random data (all values < 20000) used in main() to initialize the
 * per-worker work memory for the copy and sqrt stress modes */
static const uint32_t pseudo_rand[] = {
	14917, 9914, 5313, 4092, 16041, 7757, 17247, 14804, 3255, 7675,
	13149, 7288, 5665, 7095, 9594, 1296, 2058, 6013, 17779, 11788,
	14855, 760, 16891, 2483, 10937, 16385, 13593, 10674, 4080, 2392,
	12218, 11475, 6009, 5798, 7582, 8358, 4520, 14655, 10555, 6598,
	10598, 16097, 16634, 17102, 16296, 17142, 5748, 11079, 14569, 10961,
	16693, 17775, 19155, 14102, 16132, 19561, 8746, 4521, 8280, 355,
	10655, 14539, 5641, 2343, 19213, 9187, 570, 15096, 780, 1711,
	8007, 8128, 17416, 14123, 4713, 13774, 11450, 9031, 1194, 16531,
	9349, 3496, 19130, 19458, 12412, 9168, 9508, 10607, 5952, 19375,
	14934, 18276, 12116, 510, 14272, 10362, 4095, 6789, 1600, 18509,
	9274, 2815, 3175, 1122, 6495, 7991, 18831, 17550, 7056, 16185,
	18594, 19178, 10028, 1182, 13410, 16173, 3548, 8013, 6099, 2619,
	7359, 6889, 15227, 4910, 12341, 18904, 671, 5851, 9836, 18105,
	13624, 8138, 5751, 15590, 17415, 15330, 697, 11439, 7008, 10676,
	9863, 17163, 10885, 5581, 8078, 4689, 9870, 18370, 19323, 8831,
	11444, 3602, 10125, 6244, 13171, 19335, 15635, 19684, 17581, 9513,
	8444, 13724, 5243, 9987, 19886, 5087, 17292, 16294, 19627, 14985,
	1999, 9889, 1311, 5589, 10084, 911, 301, 2260, 15305, 8265,
	409, 1732, 1463, 17680, 15038, 2440, 4239, 9554, 14045, 924,
	13997, 3472, 18304, 4848, 10601, 18604, 6459, 19394, 2962, 11218,
	5405, 9869, 133, 2512, 13440, 4350, 625, 6580, 5082, 12908,
	11517, 8919, 354, 14216, 3190, 15515, 1277, 1028, 507, 9525,
	10115, 811, 1268, 17587, 5192, 7240, 17371, 4902, 19908, 1027,
	3475, 8658, 11782, 13701, 13034, 154, 4940, 12679, 14067, 2707,
	10180, 4669, 17756, 6602, 6727, 818, 8644, 580, 16988, 19127
};
/* Print the command line help text to stdout */
static void print_usage(void)
{
	static const char *usage_str =
		"\n"
		"Stress test options:\n"
		"\n"
		" -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1\n"
		" -p, --period_ns Timeout period in nsec. Default: 100 ms\n"
		" -r, --rounds Number of timeout rounds. Default: 2\n"
		" -m, --mode Test mode flags, multiple may be selected. Default: 0x1\n"
		" 0: No stress, just wait for timeouts\n"
		" 0x1: memcpy()\n"
		" 0x2: Memory copy loop\n"
		" 0x4: Integer square root\n"
		" 0x8: Floating point square root\n"
		" -s, --mem_size Memory size per worker in bytes. Default: 2048\n"
		" -g, --group_mode Select schedule group mode: Default: 1\n"
		" 0: Use GROUP_ALL group. Scheduler load balances timeout events.\n"
		" 1: Create a group per CPU. Dedicated timeout event per CPU.\n"
		" -t, --timer_mode Select timer mode: Default: 0\n"
		" 0: Shared timer pool with relative timers\n"
		" 1: Private (per worker) timer pools with relative timers. Requires\n"
		" private schedule group mode (-g 1).\n"
		" -h, --help This help\n"
		"\n";

	printf("%s", usage_str);
}
/* Parse command line options into test_options.
 * Returns 0 on success, -1 on invalid options (or after printing help). */
static int parse_options(int argc, char *argv[], test_options_t *test_options)
{
	int opt;
	int ret = 0;
	static const struct option longopts[] = {
		{"num_cpu", required_argument, NULL, 'c'},
		{"period_ns", required_argument, NULL, 'p'},
		{"rounds", required_argument, NULL, 'r'},
		{"mode", required_argument, NULL, 'm'},
		{"mem_size", required_argument, NULL, 's'},
		{"group_mode", required_argument, NULL, 'g'},
		{"timer_mode", required_argument, NULL, 't'},
		{"help", no_argument, NULL, 'h'},
		{NULL, 0, NULL, 0}
	};
	static const char *shortopts = "+c:p:r:m:s:t:g:h";

	/* Defaults, matching the help text in print_usage() */
	test_options->num_cpu = 1;
	/* Fix: the 100 ms default promised by the usage text was never set */
	test_options->period_ns = 100 * ODP_TIME_MSEC_IN_NS;
	test_options->rounds = 2;
	test_options->mode = MODE_MEMCPY;
	test_options->mem_size = 2048;
	test_options->group_mode = 1;
	test_options->timer_mode = TMODE_SHARED_REL;

	while (1) {
		opt = getopt_long(argc, argv, shortopts, longopts, NULL);
		if (opt == -1)
			break;

		switch (opt) {
		case 'c':
			test_options->num_cpu = atoi(optarg);
			break;
		case 'p':
			test_options->period_ns = atoll(optarg);
			break;
		case 'r':
			test_options->rounds = atoll(optarg);
			break;
		case 'm':
			/* Base 0: accepts decimal, octal and 0x-prefixed hex */
			test_options->mode = strtoul(optarg, NULL, 0);
			break;
		case 's':
			test_options->mem_size = atoll(optarg);
			break;
		case 'g':
			test_options->group_mode = atoi(optarg);
			break;
		case 't':
			test_options->timer_mode = atoi(optarg);
			break;
		case 'h':
		default:
			print_usage();
			ret = -1;
			break;
		}
	}

	/* Stress modes need at least one 32-bit word of work memory */
	if (test_options->mode) {
		if (test_options->mem_size < sizeof(uint32_t)) {
			ODPH_ERR("Too small memory size. Minimum is %zu bytes.\n",
				 sizeof(uint32_t));
			return -1;
		}
	}

	/* Private timer pools deliver events to per-worker queues, which in turn
	 * requires the per-CPU schedule group mode */
	if (test_options->timer_mode == TMODE_PRIVATE_REL && test_options->group_mode == 0) {
		ODPH_ERR("Private timer mode requires private schedule group mode\n");
		return -1;
	}

	return ret;
}
/* Resolve the number of worker CPUs and fill in the worker CPU mask.
 * With num_cpu == 0 all available worker CPUs are used.
 * Returns 0 on success, -1 on invalid or unsupported CPU count.
 * Fix: the original was missing the range check and the
 * odp_cpumask_default_worker() call, leaving 'ret' uninitialized. */
static int set_num_cpu(test_global_t *global)
{
	int ret;
	test_options_t *test_options = &global->test_options;
	int num_cpu = test_options->num_cpu;

	/* One CPU is reserved for the main (control) thread */
	if (num_cpu < 0 || num_cpu > ODP_THREAD_COUNT_MAX - 1) {
		ODPH_ERR("Bad number of workers %i\n", num_cpu);
		return -1;
	}

	ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);

	if (num_cpu && ret != num_cpu) {
		ODPH_ERR("Too many workers. Max supported %i\n.", ret);
		return -1;
	}

	/* Zero: use all available worker CPUs */
	if (num_cpu == 0) {
		num_cpu = ret;
		test_options->num_cpu = num_cpu;
	}

	return 0;
}
/* Join the calling thread (id 'thr') into the schedule group created for
 * worker 'worker_idx'. Returns 0 on success, -1 on failure.
 * Fix: the original was missing the thread mask setup and the
 * odp_schedule_group_join() call. */
static int join_group(test_global_t *global, int worker_idx, int thr)
{
	odp_thrmask_t thrmask;
	odp_schedule_group_t group;

	odp_thrmask_zero(&thrmask);
	odp_thrmask_set(&thrmask, thr);
	group = global->group[worker_idx];

	if (odp_schedule_group_join(group, &thrmask)) {
		ODPH_ERR("Thread %i failed to join group %i\n", thr, worker_idx);
		return -1;
	}

	return 0;
}
/* Create and start a timer pool using the parameters prepared in
 * create_timeout_pool(). On success the handle is returned via timer_pool.
 * Returns 0 on success, -1 on failure.
 * Fix: the original was missing the create and start calls.
 * NOTE(review): assumes the prepared parameters live in
 * global->timer_pool_param (written through a pointer in
 * create_timeout_pool()) — confirm against the full test_global_t. */
static int create_timer_pool(test_global_t *global,
			     odp_timer_pool_t *timer_pool)
{
	odp_timer_pool_t tp;

	tp = odp_timer_pool_create("Stress timers", &global->timer_pool_param);
	if (tp == ODP_TIMER_POOL_INVALID) {
		ODPH_ERR("Timer pool create failed\n");
		return -1;
	}

	if (odp_timer_pool_start_multi(&tp, 1) != 1) {
		ODPH_ERR("Timer pool start failed\n");
		return -1;
	}

	*timer_pool = tp;

	return 0;
}
{
ODPH_ERR("Timeout alloc failed (%u)\n", worker_idx);
return -1;
}
ODPH_ERR("Timer alloc failed (%u)\n", worker_idx);
return -1;
}
*timer_out = timer;
ODPH_ERR("Timer start failed (%i)\n", worker_idx);
return -1;
}
return 0;
}
/* Worker thread: waits for timer timeouts and runs the selected stress work
 * (memcpy / word copy / integer or float sqrt) on every round.
 * NOTE(review): this function appears truncated — 'thr' is never assigned,
 * 'exit_test', 'tot_nsec' and 'start_param'/'priv_timer_pool'/'priv_timer'
 * are used without initialization or declaration, the event
 * schedule/receive code of the main loop is missing, and braces do not
 * balance around the round-handling code. Comments describe visible logic. */
static int worker_thread(void *arg)
{
	int thr, timer_ret;
	uint32_t exit_test;
	uint64_t tot_nsec, work_sum, max_nsec, i;
	/* Source and destination halves of this worker's memory slice */
	uint8_t *src = NULL, *dst = NULL;
	uint32_t *src_u32 = NULL, *dst_u32 = NULL;
	thread_arg_t *thread_arg = arg;
	int worker_idx = thread_arg->worker_idx;
	test_global_t *global = thread_arg->global;
	test_options_t *test_options = &global->test_options;
	const int group_mode = test_options->group_mode;
	const int mode = test_options->mode;
	/* Modes that read every word of the work memory */
	const int data_mode = mode & (MODE_SQRT_U32 | MODE_SQRT_F32);
	const uint64_t mem_size = test_options->mem_size;
	const uint64_t copy_size = mem_size / 2;
	const uint64_t num_words = mem_size / sizeof(uint32_t);
	const uint64_t copy_words = num_words / 2;
	uint64_t rounds = 0;
	uint64_t dummy_sum = 0; /* keeps sqrt work from being optimized away */
	uint32_t done = 0;
	/* In GROUP_ALL mode all workers share a single round budget */
	uint64_t tot_rounds = test_options->rounds * test_options->num_cpu;

	/* Upper bound for the test duration: 2x the expected run time */
	max_nsec = 2 * test_options->rounds * test_options->period_ns;

	printf("Thread %i starting on CPU %i\n", thr,
	       odp_cpu_id()); /* NOTE(review): 'thr' read uninitialized here */

	if (group_mode == 0) {
		/* NOTE(review): empty branch — GROUP_ALL setup code missing */
	} else {
		/* Private group mode: join this worker's dedicated group */
		if (join_group(global, worker_idx, thr)) {
			done = 1;
		}
	}

	if (mode) {
		/* Each worker owns a mem_size slice; dst is its upper half */
		src = (uint8_t *)global->worker_mem + worker_idx * mem_size;
		dst = src + copy_size;
		src_u32 = (uint32_t *)(uintptr_t)src;
		dst_u32 = (uint32_t *)(uintptr_t)dst;
	}

	start_param.tick = global->period_ticks;

	if (test_options->timer_mode == TMODE_PRIVATE_REL) {
		int ret;

		/* Private mode: this worker creates and starts its own timer pool */
		ret = create_timer_pool(global, &priv_timer_pool);
		if (ret == 0) {
			test_options->period_ns); /* NOTE(review): orphan fragment of a call */
			ret = start_timer(global, priv_timer_pool, &start_param, worker_idx,
					  &priv_timer);
		}
		if (ret) {
			done = 1;
		}
	}

	work_sum = 0;

	/* Main loop: one iteration per received timeout event.
	 * NOTE(review): the schedule/receive call is missing here */
	while (1) {
		exit_test += done;
		exit_test += 1;
		if (exit_test) {
			break;
		}
		continue;
	}

	rounds++;
	if (group_mode) {
		/* Private group: stop after this worker's own round count */
		if (rounds >= test_options->rounds)
			done = 1;
	} else {
		done = 1;
	}
	if (done == 0) {
		ODPH_ERR("Timer start failed (%" PRIu64 ")\n", rounds);
		done = 1;
	}
	}

	if (mode) {
		/* The actual stress work for this round */
		if (mode & MODE_MEMCPY)
			memcpy(dst, src, copy_size);

		if (mode & MODE_COPY_U32)
			for (i = 0; i < copy_words; i++)
				dst_u32[i] = src_u32[i];

		if (data_mode) {
			for (i = 0; i < num_words; i++) {
				if (mode & MODE_SQRT_U32)
					dummy_sum += odph_stress_sqrt_u32(src_u32[i]);

				if (mode & MODE_SQRT_F32)
					dummy_sum += odph_stress_sqrt_f32(src_u32[i]);
			}
		}
	}

	if (done) {
	}
	}

	/* Publish per-thread statistics for sum_stat()/print_stat() */
	global->stat[thr].rounds = rounds;
	global->stat[thr].tot_nsec = tot_nsec;
	global->stat[thr].work_nsec = work_sum;
	global->stat[thr].dummy_sum = dummy_sum;

	return 0;
}
/* Launch num_cpu worker threads pinned to the CPU mask resolved in
 * set_num_cpu(). Returns 0 on success, -1 if thread creation failed.
 * Fix: the thread type was left unset; mark the threads explicitly as
 * ODP worker threads. */
static int start_workers(test_global_t *global,
			 odp_instance_t instance)
{
	odph_thread_common_param_t thr_common;
	int i, ret;
	test_options_t *test_options = &global->test_options;
	int num_cpu = test_options->num_cpu;
	odph_thread_param_t thr_param[num_cpu];

	memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
	odph_thread_common_param_init(&thr_common);

	thr_common.instance = instance;
	thr_common.cpumask = &global->cpumask;

	for (i = 0; i < num_cpu; i++) {
		odph_thread_param_init(&thr_param[i]);
		thr_param[i].start = worker_thread;
		thr_param[i].arg = &global->thread_arg[i];
		thr_param[i].thr_type = ODP_THREAD_WORKER;
	}

	ret = odph_thread_create(global->thread_tbl, &thr_common, thr_param, num_cpu);
	if (ret != num_cpu) {
		ODPH_ERR("Thread create failed %i\n", ret);
		return -1;
	}

	return 0;
}
/* Create the timeout event pool and prepare timer pool parameters for
 * create_timer_pool().
 * NOTE(review): this function appears truncated — 'duration', 'pool',
 * 'pool_param', 'timer_pool_param', 'timer_capa' and 'timer_res_capa' are
 * never declared/assigned, and the odp_timer_capability(),
 * odp_timer_res_capability(), odp_pool_param_init()/odp_pool_create() and
 * odp_timer_pool_param_init() calls are missing. Several error branches
 * below are orphaned fragments. Comments describe visible logic only. */
static int create_timeout_pool(test_global_t *global)
{
	double duration; /* test length in seconds; NOTE(review): never assigned here */
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;
	uint64_t period_ns = test_options->period_ns;
	/* Request a timer resolution of 1/1000 of the period */
	uint64_t res_ns = period_ns / 1000;
	uint32_t num_tp = 1; /* one shared timer pool, or one per CPU */

	if (test_options->timer_mode == TMODE_PRIVATE_REL)
		num_tp = num_cpu;

	/* NOTE(review): orphan error branch — capability query call missing */
	ODPH_ERR("Timer capability failed\n");
	return -1;
	}

	/* NOTE(review): orphan error branch */
	ODPH_ERR("Timer does not support sched queues\n");
	return -1;
	}

	/* NOTE(review): unterminated call — an argument and condition are missing */
	ODPH_ERR("Too many timer pools requested %u (max %u)\n", num_tp,
	return -1;
	}

	timer_res_capa.max_tmo = 2 * period_ns;

	/* NOTE(review): orphan error branch — resolution capability call missing */
	ODPH_ERR("Timer resolution capability failed. Too long period.\n");
	return -1;
	}

	/* Use the best resolution the implementation supports */
	if (res_ns < timer_res_capa.res_ns)
		res_ns = timer_res_capa.res_ns;

	printf(" num timers %u\n", num_cpu);
	printf(" resolution %" PRIu64 " nsec\n", res_ns);
	printf(" period %" PRIu64 " nsec\n", period_ns);
	printf(" test duration %.2f sec\n", duration);
	if (test_options->group_mode == 0)
		printf(" force stop after %.2f sec\n", 2 * duration);
	printf("\n");

	/* Four timeout events per worker */
	pool_param.tmo.num = 4 * num_cpu;
	global->tmo_pool = pool;
	/* NOTE(review): orphan error branch — odp_pool_create() call missing */
	ODPH_ERR("Pool create failed\n");
	return -1;
	}

	/* Timer pool parameters; the pool itself is created later in
	 * create_timer_pool() */
	timer_pool_param->res_ns = res_ns;
	timer_pool_param->min_tmo = period_ns / 2;
	timer_pool_param->max_tmo = 2 * period_ns;

	if (test_options->timer_mode == TMODE_PRIVATE_REL)
		timer_pool_param->priv = 1;

	return 0;
}
/* Create schedule groups (in per-CPU group mode) and the per-worker
 * timeout destination queues.
 * NOTE(review): this function appears truncated — the schedule capability
 * query, odp_schedule_group_create() and odp_queue_create() calls plus
 * their local variables are missing; several error branches below are
 * orphaned fragments. */
static int create_queues(test_global_t *global)
{
	uint32_t i;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;

	/* NOTE(review): orphan error branch — capability query call missing */
	ODPH_ERR("Schedule capability failed\n");
	return -1;
	}

	if (test_options->group_mode) {
		/* NOTE(review): the max_groups comparison is missing */
		ODPH_ERR("Too many workers. Not enough schedule groups.\n");
		return -1;
	}

	/* One schedule group per worker CPU */
	for (i = 0; i < num_cpu; i++) {
		ODPH_ERR("Schedule group create failed (%u)\n", i);
		return -1;
	}
	}
	}

	/* One timeout destination queue per worker CPU */
	for (i = 0; i < num_cpu; i++) {
		if (test_options->group_mode)
			ODPH_ERR("Timeout dest queue create failed (%u)\n", i);
		return -1;
	}
	}

	return 0;
}
/* Allocate timeout events and timers for all workers and start the timers
 * from the shared timer pool.
 * NOTE(review): this function appears truncated — the 'timer' array and
 * 'start_param' are not declared, the timeout allocations, invalid-handle
 * checks and odp_timer_start() calls are missing, and several error
 * branches are orphaned fragments. */
static int start_shared_timers(test_global_t *global)
{
	uint32_t i;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;

	/* Allocate one timeout event per worker */
	for (i = 0; i < num_cpu; i++) {
		ODPH_ERR("Timeout alloc failed (%u)\n", i);
		return -1;
	}
	}

	/* Allocate one timer per worker, targeting its timeout queue */
	for (i = 0; i < num_cpu; i++) {
		timer[i] =
			odp_timer_alloc(global->timer_pool, global->tmo_queue[i], NULL);
		ODPH_ERR("Timer alloc failed (%u)\n", i);
		return -1;
	}
	global->timer[i] = timer[i];
	}

	/* Relative expiration after one period */
	start_param.tick = global->period_ticks;

	for (i = 0; i < num_cpu; i++) {
		ODPH_ERR("Timer start failed (%u)\n", i);
		return -1;
	}
	}

	return 0;
}
/* Free all per-worker timers allocated from the shared timer pool.
 * Skips slots that were never allocated; errors are logged only.
 * Fix: the original had a dangling 'continue;' and was missing the
 * invalid-handle check and the odp_timer_free() call. */
static void destroy_timers(test_global_t *global)
{
	uint32_t i;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;

	for (i = 0; i < num_cpu; i++) {
		odp_timer_t timer = global->timer[i];

		if (timer == ODP_TIMER_INVALID)
			continue;

		if (odp_timer_free(timer))
			ODPH_ERR("Timer free failed (%u)\n", i);
	}
}
/* Destroy the per-worker timeout queues and, in per-CPU group mode, the
 * per-worker schedule groups. Skips handles that were never created;
 * errors are logged only.
 * Fix: the original had dangling 'continue;' statements and was missing
 * the handle checks and the destroy calls. */
static void destroy_queues(test_global_t *global)
{
	uint32_t i;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;

	for (i = 0; i < num_cpu; i++) {
		odp_queue_t queue = global->tmo_queue[i];

		if (queue == ODP_QUEUE_INVALID)
			continue;

		if (odp_queue_destroy(queue))
			ODPH_ERR("Queue destroy failed (%u)\n", i);
	}

	if (test_options->group_mode) {
		for (i = 0; i < num_cpu; i++) {
			odp_schedule_group_t group = global->group[i];

			if (group == ODP_SCHED_GROUP_INVALID)
				continue;

			if (odp_schedule_group_destroy(group))
				ODPH_ERR("Schedule group destroy failed (%u)\n", i);
		}
	}
}
/* SIGINT handler: does nothing until the global state exists.
 * NOTE(review): the handler appears truncated — after the NULL check it
 * performs no action, so Ctrl-C cannot actually request the workers to
 * stop. Presumably an exit flag in test_global should be set here. */
static void sig_handler(int signo)
{
	(void)signo;

	if (test_global == NULL)
		return;
}
/* Wake each worker by enqueueing an extra timeout event to its queue, so a
 * worker blocked in the scheduler can notice the stop request. Failures are
 * logged and skipped; the event is freed if the enqueue fails.
 * Fix: the original had dangling 'continue;' statements and was missing the
 * timeout allocation and enqueue calls. */
static void stop_workers(test_global_t *global)
{
	uint32_t i;
	odp_queue_t queue;
	odp_timeout_t tmo;
	odp_event_t ev;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;

	for (i = 0; i < num_cpu; i++) {
		queue = global->tmo_queue[i];
		if (queue == ODP_QUEUE_INVALID)
			continue;

		tmo = odp_timeout_alloc(global->tmo_pool);
		if (tmo == ODP_TIMEOUT_INVALID)
			continue;

		ev = odp_timeout_to_event(tmo);
		if (odp_queue_enq(queue, ev)) {
			ODPH_ERR("Enqueue failed %u\n", i);
			odp_event_free(ev);
		}
	}
}
/* Accumulate per-thread statistics into global->stat_sum.
 * Stats are indexed by ODP thread id; slot 0 (the main thread) is skipped. */
static void sum_stat(test_global_t *global)
{
	uint32_t thr;
	const uint32_t num_cpu = global->test_options.num_cpu;
	test_stat_sum_t *sum = &global->stat_sum;

	memset(sum, 0, sizeof(test_stat_sum_t));

	for (thr = 1; thr <= num_cpu; thr++) {
		sum->rounds += global->stat[thr].rounds;
		sum->tot_nsec += global->stat[thr].tot_nsec;
		sum->work_nsec += global->stat[thr].work_nsec;
	}
}
/* Print per-thread CPU load percentages and summary statistics.
 * Per-thread stats are indexed by ODP thread id (1..num_cpu); slot 0 is
 * the main thread and is skipped. */
static void print_stat(test_global_t *global)
{
	uint32_t i;
	test_options_t *test_options = &global->test_options;
	uint32_t num_cpu = test_options->num_cpu;
	int mode = test_options->mode;
	test_stat_sum_t *sum = &global->stat_sum;
	double sec_ave, work_ave, perc;
	double round_ave = 0.0;
	double rate_ave = 0.0;
	double rate_tot = 0.0;
	double cpu_load = 0.0;
	const double mega = 1000000.0;
	const double giga = 1000000000.0;
	uint32_t num = 0;

	if (num_cpu == 0)
		return;

	/* Per-worker averages, nanoseconds converted to seconds */
	sec_ave = (sum->tot_nsec / giga) / num_cpu;
	work_ave = (sum->work_nsec / giga) / num_cpu;

	printf("\n");
	printf("CPU load from work (percent) per thread:\n");
	printf("----------------------------------------------\n");
	printf(" 1 2 3 4 5 6 7 8 9 10");

	for (i = 1; i < num_cpu + 1; i++) {
		if (global->stat[i].tot_nsec == 0)
			continue;

		/* Start a new output row every 10 values */
		if ((num % 10) == 0)
			printf("\n ");

		perc = 100.0 * ((double)global->stat[i].work_nsec) / global->stat[i].tot_nsec;
		printf("%6.2f ", perc);
		num++;
	}

	if (sec_ave > 0.0) {
		round_ave = (double)sum->rounds / num_cpu;
		cpu_load = 100.0 * (work_ave / sec_ave);

		if (mode) {
			uint64_t data_bytes;

			/* Copy-only modes move half of the memory per round,
			 * sqrt modes read all of it */
			if (mode == MODE_MEMCPY || mode == MODE_COPY_U32 ||
			    mode == (MODE_COPY_U32 | MODE_MEMCPY))
				data_bytes = sum->rounds * test_options->mem_size / 2;
			else
				data_bytes = sum->rounds * test_options->mem_size;

			rate_ave = data_bytes / (sum->work_nsec / giga);
			rate_tot = rate_ave * num_cpu;
		}
	}

	printf("\n\n");
	/* NOTE(review): num_cpu is uint32_t printed with %i — %u would match */
	printf("TOTAL (%i workers)\n", num_cpu);
	printf(" ave time: %.2f sec\n", sec_ave);
	printf(" ave work: %.2f sec\n", work_ave);
	printf(" ave CPU load: %.2f\n", cpu_load);
	printf(" ave rounds per sec: %.2f\n", round_ave / sec_ave);
	printf(" ave data rate: %.2f MB/sec\n", rate_ave / mega);
	printf(" total data rate: %.2f MB/sec\n", rate_tot / mega);
	printf("\n");
}
/* Program entry point: initializes ODP, reserves shared memory, starts
 * worker threads and timers, waits for completion and prints statistics.
 * NOTE(review): this function appears truncated — 'shm', 'shm_global',
 * 'shm_work', 'instance', 'tp' and the 'global' pointer assignment are
 * never declared/assigned, the odp_init_global()/odp_init_local(),
 * odp_schedule_config(), odp_shm_free() and odp_term_local()/
 * odp_term_global() calls are missing, many error branches are orphaned
 * fragments, and the thread_arg init loop has lost its 'for' line.
 * Comments describe visible logic only. */
int main(int argc, char **argv)
{
	odph_helper_options_t helper_options;
	test_global_t *global;
	test_options_t *test_options;
	int i, mode;
	uint32_t num_cpu;
	uint64_t mem_size;
	int shared_timers = 1;

	/* Allow Ctrl-C to request a stop once test_global is set */
	signal(SIGINT, sig_handler);

	/* Let the helper parse (and remove) its own command line options first */
	argc = odph_parse_options(argc, argv);
	if (odph_options(&helper_options)) {
		ODPH_ERR("Reading ODP helper options failed.\n");
		exit(EXIT_FAILURE);
	}

	/* NOTE(review): orphan error branch — odp_init_global() call missing */
	ODPH_ERR("Global init failed.\n");
	exit(EXIT_FAILURE);
	}

	/* NOTE(review): orphan error branch — odp_init_local() call missing */
	ODPH_ERR("Local init failed.\n");
	exit(EXIT_FAILURE);
	}

	/* Reserve shared memory for the global test state */
	shm =
		odp_shm_reserve(
			"Stress global",
			sizeof(test_global_t), ODP_CACHE_LINE_SIZE, 0);
	shm_global = shm;
	/* NOTE(review): orphan error branch — ODP_SHM_INVALID check missing */
	ODPH_ERR("SHM reserve failed.\n");
	exit(EXIT_FAILURE);
	}

	if (global == NULL) {
		ODPH_ERR("SHM addr failed\n");
		exit(EXIT_FAILURE);
	}
	test_global = global;

	memset(global, 0, sizeof(test_global_t));
	/* NOTE(review): loop header missing — presumably for each worker i */
	global->thread_arg[i].global = global;
	global->thread_arg[i].worker_idx = i;
	}

	if (parse_options(argc, argv, &global->test_options))
		exit(EXIT_FAILURE);

	test_options = &global->test_options;
	mode = test_options->mode;

	if (set_num_cpu(global))
		exit(EXIT_FAILURE);

	num_cpu = test_options->num_cpu;

	/* Reserve and initialize the per-worker work memory */
	if (mode) {
		uint64_t num_words;
		uint32_t *word;
		uint32_t num_rand = ODPH_ARRAY_SIZE(pseudo_rand);

		mem_size = test_options->mem_size * num_cpu;
		shm_work = shm;
		/* NOTE(review): orphan error branch — reserve call missing */
		ODPH_ERR("SHM reserve failed.\n");
		exit(EXIT_FAILURE);
	}

	if (global->worker_mem == NULL) {
		ODPH_ERR("SHM addr failed\n");
		exit(EXIT_FAILURE);
	}

	/* Fill the work memory with pseudo random data */
	num_words = mem_size / sizeof(uint32_t);
	word = (uint32_t *)global->worker_mem;
	for (uint64_t j = 0; j < num_words; j++)
		word[j] = pseudo_rand[j % num_rand];
	}

	printf("\n");
	printf("Test parameters\n");
	printf(" num workers %u\n", num_cpu);
	printf(" mode 0x%x\n", mode);
	printf(" group mode %i\n", test_options->group_mode);
	printf(" timer mode %i\n", test_options->timer_mode);
	printf(" mem size per worker %" PRIu64 " bytes\n", test_options->mem_size);

	if (test_options->timer_mode != TMODE_SHARED_REL)
		shared_timers = 0;

	if (create_timeout_pool(global))
		exit(EXIT_FAILURE);

	/* Shared mode: one timer pool serves all workers */
	if (shared_timers) {
		if (create_timer_pool(global, &tp))
			exit(EXIT_FAILURE);
		global->timer_pool = tp;
	}

	if (create_queues(global))
		exit(EXIT_FAILURE);

	start_workers(global, instance);

	if (shared_timers) {
		if (start_shared_timers(global)) {
			ODPH_ERR("Timers did not start. Stopping workers.\n");
			stop_workers(global);
		}
	}

	/* Wait for workers to finish, then report */
	odph_thread_join(global->thread_tbl, num_cpu);

	sum_stat(global);
	print_stat(global);

	if (shared_timers)
		destroy_timers(global);
	destroy_queues(global);

	/* NOTE(review): orphan error branches below — the odp_shm_free(),
	 * odp_term_local() and odp_term_global() calls are missing */
	if (mode) {
		ODPH_ERR("SHM free failed.\n");
		exit(EXIT_FAILURE);
	}
	}

	ODPH_ERR("SHM free failed.\n");
	exit(EXIT_FAILURE);
	}

	ODPH_ERR("Term local failed.\n");
	exit(EXIT_FAILURE);
	}

	ODPH_ERR("Term global failed.\n");
	exit(EXIT_FAILURE);
	}

	return 0;
}
void odp_atomic_init_u32(odp_atomic_u32_t *atom, uint32_t val)
Initialize atomic uint32 variable.
void odp_atomic_add_u32(odp_atomic_u32_t *atom, uint32_t val)
Add to atomic uint32 variable.
uint32_t odp_atomic_load_u32(odp_atomic_u32_t *atom)
Load value of atomic uint32 variable.
void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
Initialize atomic uint64 variable.
uint64_t odp_atomic_fetch_inc_u64(odp_atomic_u64_t *atom)
Fetch and increment atomic uint64 variable.
void odp_barrier_init(odp_barrier_t *barr, int count)
Initialize barrier with thread count.
void odp_barrier_wait(odp_barrier_t *barr)
Synchronize thread execution on barrier.
int odp_cpu_id(void)
CPU identifier.
int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
Default CPU mask for worker threads.
void odp_event_free(odp_event_t event)
Free event.
#define ODP_EVENT_INVALID
Invalid event.
void odp_init_param_init(odp_init_t *param)
Initialize the odp_init_t to default values for all fields.
int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
Thread local ODP initialization.
int odp_init_global(odp_instance_t *instance, const odp_init_t *params, const odp_platform_init_t *platform_params)
Global ODP initialization.
int odp_term_local(void)
Thread local ODP termination.
int odp_term_global(odp_instance_t instance)
Global ODP termination.
uint64_t odp_instance_t
ODP instance ID.
odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *param)
Create a pool.
void odp_pool_param_init(odp_pool_param_t *param)
Initialize pool params.
int odp_pool_destroy(odp_pool_t pool)
Destroy a pool previously created by odp_pool_create()
#define ODP_POOL_INVALID
Invalid pool.
@ ODP_POOL_TIMEOUT
Timeout pool.
void odp_queue_param_init(odp_queue_param_t *param)
Initialize queue params.
#define ODP_QUEUE_INVALID
Invalid queue.
int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
Enqueue an event to a queue.
odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
Queue create.
int odp_queue_destroy(odp_queue_t queue)
Destroy ODP queue.
@ ODP_QUEUE_TYPE_SCHED
Scheduled queue.
#define ODP_SCHED_WAIT
Wait infinitely.
#define ODP_SCHED_SYNC_PARALLEL
Parallel scheduled queues.
int odp_schedule_group_t
Scheduler thread group.
void odp_schedule_config_init(odp_schedule_config_t *config)
Initialize schedule configuration options.
int odp_schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *mask)
Join a schedule group.
int odp_schedule_group_destroy(odp_schedule_group_t group)
Schedule group destroy.
#define ODP_SCHED_GROUP_INVALID
Invalid scheduler group.
#define ODP_SCHED_NO_WAIT
Do not wait.
int odp_schedule_config(const odp_schedule_config_t *config)
Global schedule configuration.
uint64_t odp_schedule_wait_time(uint64_t ns)
Schedule wait time.
int odp_schedule_capability(odp_schedule_capability_t *capa)
Query scheduler capabilities.
odp_schedule_group_t odp_schedule_group_create(const char *name, const odp_thrmask_t *mask)
Schedule group create.
odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
Schedule an event.
#define ODP_SCHED_GROUP_ALL
Group of all threads.
int odp_shm_free(odp_shm_t shm)
Free a contiguous block of shared memory.
#define ODP_SHM_INVALID
Invalid shared memory block.
void * odp_shm_addr(odp_shm_t shm)
Shared memory block address.
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, uint32_t flags)
Reserve a contiguous block of shared memory.
void odp_sys_info_print(void)
Print system info.
#define ODP_THREAD_COUNT_MAX
Maximum number of threads supported in build time.
void odp_thrmask_set(odp_thrmask_t *mask, int thr)
Add thread to mask.
int odp_thread_id(void)
Get thread identifier.
void odp_thrmask_zero(odp_thrmask_t *mask)
Clear entire thread mask.
@ ODP_THREAD_WORKER
Worker thread.
@ ODP_THREAD_CONTROL
Control thread.
odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
Time sum.
#define ODP_TIME_SEC_IN_NS
A second in nanoseconds.
odp_time_t odp_time_local_from_ns(uint64_t ns)
Convert nanoseconds to local time.
odp_time_t odp_time_local(void)
Current local time.
#define ODP_TIME_MSEC_IN_NS
A millisecond in nanoseconds.
int odp_time_cmp(odp_time_t t2, odp_time_t t1)
Compare two times.
uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1)
Time difference in nanoseconds.
int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num)
Start timer pools.
odp_timeout_t odp_timeout_alloc(odp_pool_t pool)
Timeout alloc.
int odp_timer_free(odp_timer_t timer)
Free a timer.
odp_timeout_t odp_timeout_from_event(odp_event_t ev)
Get timeout handle from a ODP_EVENT_TIMEOUT type event.
#define ODP_TIMER_POOL_INVALID
Invalid timer pool handle.
odp_timer_pool_t odp_timer_pool_create(const char *name, const odp_timer_pool_param_t *params)
Create a timer pool.
odp_timer_t odp_timeout_timer(odp_timeout_t tmo)
Return timer handle for the timeout.
int odp_timer_capability(odp_timer_clk_src_t clk_src, odp_timer_capability_t *capa)
Query timer capabilities per clock source.
uint64_t odp_timer_ns_to_tick(odp_timer_pool_t timer_pool, uint64_t ns)
Convert nanoseconds to timer ticks.
int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
Start a timer.
int odp_timer_res_capability(odp_timer_clk_src_t clk_src, odp_timer_res_capability_t *res_capa)
Timer resolution capability.
odp_event_t odp_timeout_to_event(odp_timeout_t tmo)
Convert timeout handle to event handle.
#define ODP_TIMEOUT_INVALID
Invalid timeout handle.
odp_timer_t odp_timer_alloc(odp_timer_pool_t timer_pool, odp_queue_t queue, const void *user_ptr)
Allocate a timer.
#define ODP_CLOCK_DEFAULT
The default clock source.
#define ODP_TIMER_INVALID
Invalid timer handle.
void odp_timer_pool_param_init(odp_timer_pool_param_t *param)
Initialize timer pool parameters.
void odp_timer_pool_destroy(odp_timer_pool_t timer_pool)
Destroy a timer pool.
@ ODP_TIMER_SUCCESS
Timer operation succeeded.
@ ODP_TIMER_TICK_REL
Relative ticks.
Global initialization parameters.
odp_mem_model_t mem_model
Application memory model.
uint32_t num
Number of buffers in the pool.
struct odp_pool_param_t::@127 tmo
Parameters for timeout pools.
uint32_t cache_size
Maximum number of buffers cached locally per thread.
odp_pool_type_t type
Pool type.
odp_schedule_param_t sched
Scheduler parameters.
odp_queue_type_t type
Queue type.
uint32_t max_groups
Maximum number of scheduling groups.
odp_bool_t worker
ODP_SCHED_GROUP_WORKER.
odp_bool_t control
ODP_SCHED_GROUP_CONTROL.
struct odp_schedule_config_t::@143 sched_group
Enable/disable predefined scheduling groups.
odp_bool_t all
ODP_SCHED_GROUP_ALL.
odp_schedule_group_t group
Thread group.
odp_schedule_sync_t sync
Synchronization method.
uint32_t max_pools
Maximum number of timer pools for single shot timers (per clock source)
odp_bool_t queue_type_sched
Scheduled queue destination support.
uint64_t res_ns
Timeout resolution in nanoseconds.
int priv
Thread private timer pool.
uint64_t min_tmo
Minimum relative timeout in nanoseconds.
uint32_t num_timers
Number of timers in the pool.
odp_timer_clk_src_t clk_src
Clock source for timers.
uint64_t max_tmo
Maximum relative timeout in nanoseconds.
Timer resolution capability.
uint64_t max_tmo
Maximum relative timeout in nanoseconds.
uint64_t res_ns
Timeout resolution in nanoseconds.
uint64_t tick
Expiration time in ticks.
odp_event_t tmo_ev
Timeout event.
odp_timer_tick_type_t tick_type
Tick type.