API Reference Manual  1.45.0
odp_dmafwd.c

This tester application can be used to profile the performance of an ODP DMA implementation. The tester workflow consists of three steps: packet reception, copy and forwarding. Packets are first received from the configured interfaces, then copied either with a plain SW memory copy or with a DMA offload copy, and finally the copied packets are echoed back to the sender(s).
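
The DMA-offload path of the tester reduces to a small, repeating pattern: describe the source and destination packets as DMA segments, attach a completion request (a completion event delivered through a scheduled queue, or a transfer ID that is polled later), and start the transfer. The sketch below distills that pattern for a single packet with event completion. It is illustrative only: the helper name copy_one_pkt and its parameter list are not part of the application, and the DMA session, completion event pool, scheduled completion queue and destination packet pool are assumed to exist, i.e. the resources that setup_copy() creates per worker further down.

#include <string.h>
#include <odp_api.h>

/* Illustrative helper (not part of odp_dmafwd.c): copy one received packet
 * with DMA and request an event on completion. */
static int copy_one_pkt(odp_dma_t dma, odp_pool_t compl_pool, odp_queue_t compl_q,
                        odp_pool_t copy_pool, odp_packet_t src)
{
        odp_dma_transfer_param_t trs_param;
        odp_dma_compl_param_t compl_param;
        odp_dma_seg_t src_seg, dst_seg;
        odp_dma_compl_t c_ev;
        const uint32_t len = odp_packet_len(src);
        odp_packet_t dst = odp_packet_alloc(copy_pool, len);

        if (dst == ODP_PACKET_INVALID)
                return -1;

        /* Describe source and destination packets as single DMA segments. */
        memset(&src_seg, 0, sizeof(src_seg));
        memset(&dst_seg, 0, sizeof(dst_seg));
        src_seg.packet = src;
        src_seg.len = len;
        dst_seg.packet = dst;
        dst_seg.len = len;

        odp_dma_transfer_param_init(&trs_param);
        trs_param.src_format = ODP_DMA_FORMAT_PACKET;
        trs_param.dst_format = ODP_DMA_FORMAT_PACKET;
        trs_param.num_src = 1U;
        trs_param.num_dst = 1U;
        trs_param.src_seg = &src_seg;
        trs_param.dst_seg = &dst_seg;

        /* Request a completion event on a scheduled queue. */
        c_ev = odp_dma_compl_alloc(compl_pool);

        if (c_ev == ODP_DMA_COMPL_INVALID) {
                odp_packet_free(dst);
                return -1;
        }

        odp_dma_compl_param_init(&compl_param);
        compl_param.compl_mode = ODP_DMA_COMPL_EVENT;
        compl_param.event = odp_dma_compl_to_event(c_ev);
        compl_param.queue = compl_q;
        compl_param.user_ptr = NULL;

        if (odp_dma_transfer_start(dma, &trs_param, &compl_param) <= 0) {
                odp_event_free(compl_param.event);
                odp_packet_free(dst);
                return -1;
        }

        return 0;
}

The source and destination packets stay in flight until the completion event arrives through the scheduler as an ODP_EVENT_DMA_COMPL event and is examined with odp_dma_compl_result(); the application below tracks the packets of each transfer in a transfer_t record (see init_dma_ev_trs() and send_dma_ev_trs_pkts()).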

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2023 Nokia
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <inttypes.h>
#include <stdlib.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
#define EXIT_NOT_SUP 2
#define PROG_NAME "odp_dmafwd"
#define DELIMITER ","
enum {
SW_COPY = 0U,
DMA_COPY_EV,
DMA_COPY_POLL
};
#define DEF_CPY_TYPE SW_COPY
#define DEF_CNT 32768U
#define DEF_LEN 1024U
#define DEF_WORKERS 1U
#define DEF_TIME 0U
#define MAX_IFS 2U
#define MAX_OUT_QS 32U
#define MAX_BURST 32U
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
#define DIV_IF(a, b) ((b) > 0U ? ((a) / (b)) : 0U)
ODP_STATIC_ASSERT(MAX_IFS < UINT8_MAX, "Too large maximum interface count");
ODP_STATIC_ASSERT(MAX_OUT_QS < UINT8_MAX, "Too large maximum output queue count");
typedef struct {
uint32_t burst_size;
uint32_t num_pkts;
uint32_t pkt_len;
uint32_t cache_size;
} dynamic_defs_t;
typedef enum {
PRS_OK,
PRS_NOK,
PRS_TERM,
PRS_NOT_SUP
} parse_result_t;
typedef struct prog_config_s prog_config_t;
typedef struct {
uint64_t copy_errs;
uint64_t trs;
uint64_t start_errs;
uint64_t trs_errs;
uint64_t buf_alloc_errs;
uint64_t compl_alloc_errs;
uint64_t pkt_alloc_errs;
uint64_t trs_poll_errs;
uint64_t trs_polled;
uint64_t fwd_pkts;
uint64_t discards;
uint64_t sched_cc;
uint64_t tot_cc;
uint64_t sched_rounds;
} stats_t;
typedef struct ODP_ALIGNED_CACHE {
prog_config_t *prog_config;
odp_dma_t dma_handle;
odp_pool_t compl_pool;
odp_pool_t copy_pool;
odp_pool_t trs_pool;
odp_queue_t compl_q;
odp_stash_t inflight_stash;
stats_t stats;
int thr_idx;
} thread_config_t;
typedef struct pktio_s {
odp_pktout_queue_t out_qs[MAX_OUT_QS];
char *name;
odp_pktio_t handle;
uint8_t num_out_qs;
} pktio_t;
typedef struct {
odp_packet_t src_pkts[MAX_BURST];
odp_packet_t dst_pkts[MAX_BURST];
pktio_t *pktio;
int num;
} transfer_t;
/* Function for initializing transfer structures */
typedef transfer_t *(*init_fn_t)(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
odp_dma_seg_t *dst_segs, pktio_t *pktio, thread_config_t *config);
/* Function for starting transfers */
typedef odp_bool_t (*start_fn_t)(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, thread_config_t *config);
/* Function for setting up packets for copy */
typedef void (*pkt_fn_t)(odp_packet_t pkts[], int num, pktio_t *pktio, init_fn_t init_fn,
start_fn_t start_fn, thread_config_t *config);
/* Function for draining and tearing down inflight operations */
typedef void (*drain_fn_t)(thread_config_t *config);
typedef struct prog_config_s {
uint8_t pktio_idx_map[ODP_PKTIO_MAX_INDEX + 1];
odph_thread_t thread_tbl[MAX_WORKERS];
thread_config_t thread_config[MAX_WORKERS];
pktio_t pktios[MAX_IFS];
dynamic_defs_t dyn_defs;
odp_instance_t odp_instance;
odp_barrier_t init_barrier;
odp_barrier_t term_barrier;
odp_atomic_u32_t is_running;
odp_pool_t pktio_pool;
odp_pool_t copy_pool;
odp_pool_t trs_pool;
struct {
init_fn_t init_fn;
start_fn_t start_fn;
pkt_fn_t pkt_fn;
drain_fn_t drain_fn;
};
uint64_t inflight_obj_size;
uint32_t burst_size;
uint32_t num_pkts;
uint32_t pkt_len;
uint32_t cache_size;
uint32_t num_inflight;
uint32_t trs_cache_size;
uint32_t compl_cache_size;
uint32_t stash_cache_size;
double time_sec;
odp_stash_type_t stash_type;
int num_thrs;
uint8_t num_ifs;
uint8_t copy_type;
} prog_config_t;
typedef struct {
odp_packet_t pkts[MAX_BURST * 2U];
pktio_t *pktio;
int num;
} pkt_vec_t;
static prog_config_t *prog_conf;
static void terminate(int signal ODP_UNUSED)
{
odp_atomic_store_u32(&prog_conf->is_running, 0U);
}
static void init_config(prog_config_t *config)
{
uint32_t burst_size;
odp_dma_capability_t dma_capa;
odp_pool_capability_t pool_capa;
odp_pool_param_t pool_param;
thread_config_t *thr;
memset(config, 0, sizeof(*config));
if (odp_dma_capability(&dma_capa) == 0) {
burst_size = ODPH_MIN(dma_capa.max_src_segs, dma_capa.max_dst_segs);
burst_size = ODPH_MIN(burst_size, MAX_BURST);
config->dyn_defs.burst_size = burst_size;
}
if (odp_pool_capability(&pool_capa) == 0) {
config->dyn_defs.num_pkts = pool_capa.pkt.max_num > 0U ?
ODPH_MIN(pool_capa.pkt.max_num, DEF_CNT) : DEF_CNT;
config->dyn_defs.pkt_len = pool_capa.pkt.max_len > 0U ?
ODPH_MIN(pool_capa.pkt.max_len, DEF_LEN) : DEF_LEN;
odp_pool_param_init(&pool_param);
config->dyn_defs.cache_size = pool_param.pkt.cache_size;
}
config->pktio_pool = ODP_POOL_INVALID;
config->copy_pool = ODP_POOL_INVALID;
config->trs_pool = ODP_POOL_INVALID;
config->burst_size = config->dyn_defs.burst_size;
config->num_pkts = config->dyn_defs.num_pkts;
config->pkt_len = config->dyn_defs.pkt_len;
config->cache_size = config->dyn_defs.cache_size;
config->time_sec = DEF_TIME;
config->num_thrs = DEF_WORKERS;
config->copy_type = DEF_CPY_TYPE;
for (int i = 0; i < MAX_WORKERS; ++i) {
thr = &config->thread_config[i];
thr->dma_handle = ODP_DMA_INVALID;
thr->compl_pool = ODP_POOL_INVALID;
thr->compl_q = ODP_QUEUE_INVALID;
thr->inflight_stash = ODP_STASH_INVALID;
}
for (uint32_t i = 0U; i < MAX_IFS; ++i)
config->pktios[i].handle = ODP_PKTIO_INVALID;
}
static void print_usage(dynamic_defs_t *dyn_defs)
{
printf("\n"
"DMA performance tester with packet I/O. Receive and forward packets after\n"
"software copy or DMA offload copy.\n"
"\n"
"Usage: " PROG_NAME " OPTIONS\n"
"\n"
" E.g. " PROG_NAME " -i eth0\n"
" " PROG_NAME " -i eth0 -t 0\n"
" " PROG_NAME " -i eth0 -t 1 -b 15 -l 4096 -c 5\n"
"\n"
"Mandatory OPTIONS:\n"
"\n"
" -i, --interfaces Ethernet interfaces for packet I/O, comma-separated, no\n"
" spaces.\n"
"\n"
"Optional OPTIONS:\n"
"\n"
" -t, --copy_type Type of copy. %u by default.\n"
" 0: SW\n"
" 1: DMA with event completion\n"
" 2: DMA with poll completion\n"
" -b, --burst_size Copy burst size. This many packets are accumulated before\n"
" copy. %u by default.\n"
" -n, --num_pkts Number of packet buffers allocated for packet I/O pool.\n"
" %u by default.\n"
" -l, --pkt_len Maximum size of packet buffers in packet I/O pool. %u by\n"
" default.\n"
" -c, --worker_count Amount of workers. %u by default.\n"
" -C, --cache_size Maximum cache size for pools. %u by default.\n"
" -T, --time_sec Time in seconds to run. 0 means infinite. %u by default.\n"
" -h, --help This help.\n"
"\n", DEF_CPY_TYPE, dyn_defs->burst_size, dyn_defs->num_pkts, dyn_defs->pkt_len,
DEF_WORKERS, dyn_defs->cache_size, DEF_TIME);
}
static void parse_interfaces(prog_config_t *config, const char *optarg)
{
char *tmp_str = strdup(optarg), *tmp;
if (tmp_str == NULL)
return;
tmp = strtok(tmp_str, DELIMITER);
while (tmp && config->num_ifs < MAX_IFS) {
config->pktios[config->num_ifs].name = strdup(tmp);
if (config->pktios[config->num_ifs].name != NULL)
++config->num_ifs;
tmp = strtok(NULL, DELIMITER);
}
free(tmp_str);
}
static odp_bool_t get_stash_capa(odp_stash_capability_t *stash_capa, odp_stash_type_t *stash_type)
{
if (odp_stash_capability(stash_capa, ODP_STASH_TYPE_FIFO) == 0) {
*stash_type = ODP_STASH_TYPE_FIFO;
return true;
}
if (odp_stash_capability(stash_capa, ODP_STASH_TYPE_DEFAULT) == 0) {
*stash_type = ODP_STASH_TYPE_DEFAULT;
return true;
}
return false;
}
static parse_result_t check_options(prog_config_t *config)
{
odp_dma_capability_t dma_capa;
uint32_t burst_size;
odp_pool_capability_t pool_capa;
odp_stash_capability_t stash_capa;
const uint64_t obj_size = sizeof(odp_dma_transfer_id_t);
uint64_t max_num;
if (config->num_ifs == 0U) {
ODPH_ERR("Invalid number of interfaces: %u (min: 1, max: %u)\n", config->num_ifs,
MAX_IFS);
return PRS_NOK;
}
if (config->copy_type != SW_COPY && config->copy_type != DMA_COPY_EV &&
config->copy_type != DMA_COPY_POLL) {
ODPH_ERR("Invalid copy type: %u\n", config->copy_type);
return PRS_NOK;
}
if (config->num_thrs <= 0 || config->num_thrs > MAX_WORKERS) {
ODPH_ERR("Invalid worker count: %d (min: 1, max: %d)\n", config->num_thrs,
MAX_WORKERS);
return PRS_NOK;
}
if (odp_dma_capability(&dma_capa) < 0) {
ODPH_ERR("Error querying DMA capabilities\n");
return PRS_NOK;
}
if ((uint32_t)config->num_thrs > dma_capa.max_sessions) {
ODPH_ERR("Unsupported DMA session count: %d (max: %u)\n", config->num_thrs,
dma_capa.max_sessions);
return PRS_NOT_SUP;
}
burst_size = ODPH_MIN(dma_capa.max_src_segs, dma_capa.max_dst_segs);
burst_size = ODPH_MIN(burst_size, MAX_BURST);
if (config->burst_size == 0U || config->burst_size > burst_size) {
ODPH_ERR("Invalid segment count for DMA: %u (min: 1, max: %u)\n",
config->burst_size, burst_size);
return PRS_NOK;
}
if (config->pkt_len > dma_capa.max_seg_len) {
ODPH_ERR("Invalid packet length for DMA: %u (max: %u)\n", config->pkt_len,
dma_capa.max_seg_len);
return PRS_NOK;
}
config->num_inflight = dma_capa.max_transfers;
if (odp_pool_capability(&pool_capa) < 0) {
ODPH_ERR("Error querying pool capabilities\n");
return PRS_NOK;
}
if (config->cache_size < pool_capa.pkt.min_cache_size ||
config->cache_size > pool_capa.pkt.max_cache_size) {
ODPH_ERR("Invalid pool cache size: %u (min: %u, max: %u)\n", config->cache_size,
pool_capa.pkt.min_cache_size, pool_capa.pkt.max_cache_size);
return PRS_NOK;
}
if (config->copy_type != SW_COPY)
config->trs_cache_size = ODPH_MIN(ODPH_MAX(config->cache_size,
pool_capa.buf.min_cache_size),
pool_capa.buf.max_cache_size);
if (config->copy_type == DMA_COPY_EV) {
if ((dma_capa.compl_mode_mask & ODP_DMA_COMPL_EVENT) == 0U ||
!dma_capa.queue_type_sched) {
ODPH_ERR("Unsupported DMA completion mode: event (mode support: %x, "
"scheduled queue support: %u)\n", dma_capa.compl_mode_mask,
dma_capa.queue_type_sched);
return PRS_NOT_SUP;
}
if ((uint32_t)config->num_thrs > dma_capa.pool.max_pools) {
ODPH_ERR("Invalid amount of DMA completion pools: %d (max: %u)\n",
config->num_thrs, dma_capa.pool.max_pools);
return PRS_NOK;
}
if (config->num_inflight > dma_capa.pool.max_num) {
ODPH_ERR("Invalid amount of DMA completion events: %u (max: %u)\n",
config->num_inflight, dma_capa.pool.max_num);
return PRS_NOK;
}
config->compl_cache_size = ODPH_MIN(ODPH_MAX(config->cache_size,
dma_capa.pool.min_cache_size),
dma_capa.pool.max_cache_size);
} else if (config->copy_type == DMA_COPY_POLL) {
if ((dma_capa.compl_mode_mask & ODP_DMA_COMPL_POLL) == 0U) {
ODPH_ERR("Unsupported DMA completion mode: poll (mode support: %x)\n",
dma_capa.compl_mode_mask);
return PRS_NOT_SUP;
}
if (!get_stash_capa(&stash_capa, &config->stash_type)) {
ODPH_ERR("Error querying stash capabilities\n");
return PRS_NOK;
}
if ((uint32_t)config->num_thrs > stash_capa.max_stashes) {
ODPH_ERR("Invalid amount of stashes: %d (max: %u)\n", config->num_thrs,
stash_capa.max_stashes);
return PRS_NOK;
}
if (obj_size == sizeof(uint8_t)) {
max_num = stash_capa.max_num.u8;
} else if (obj_size == sizeof(uint16_t)) {
max_num = stash_capa.max_num.u16;
} else if (obj_size <= sizeof(uint32_t)) {
max_num = stash_capa.max_num.u32;
} else if (obj_size <= sizeof(uint64_t)) {
max_num = stash_capa.max_num.u64;
} else if (obj_size <= sizeof(odp_u128_t)) {
max_num = stash_capa.max_num.u128;
} else {
ODPH_ERR("Invalid stash object size: %" PRIu64 "\n", obj_size);
return PRS_NOK;
}
if (config->num_inflight > max_num) {
ODPH_ERR("Invalid stash size: %u (max: %" PRIu64 ")\n",
config->num_inflight, max_num);
return PRS_NOK;
}
config->inflight_obj_size = obj_size;
config->stash_cache_size = ODPH_MIN(config->cache_size, stash_capa.max_cache_size);
}
if (config->num_pkts == 0U ||
(pool_capa.pkt.max_num > 0U && config->num_pkts > pool_capa.pkt.max_num)) {
ODPH_ERR("Invalid pool packet count: %u (min: 1, max: %u)\n", config->num_pkts,
pool_capa.pkt.max_num);
return PRS_NOK;
}
if (config->pkt_len == 0U ||
(pool_capa.pkt.max_len > 0U && config->pkt_len > pool_capa.pkt.max_len)) {
ODPH_ERR("Invalid pool packet length: %u (min: 1, max: %u)\n", config->pkt_len,
pool_capa.pkt.max_len);
return PRS_NOK;
}
if (config->num_inflight > pool_capa.buf.max_num) {
ODPH_ERR("Invalid pool buffer count: %u (max: %u)\n", config->num_inflight,
pool_capa.buf.max_num);
return PRS_NOK;
}
return PRS_OK;
}
static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
{
int opt, long_index;
static const struct option longopts[] = {
{ "interfaces", required_argument, NULL, 'i' },
{ "copy_type", required_argument, NULL, 't' },
{ "burst_size", required_argument, NULL, 'b' },
{ "num_pkts", required_argument, NULL, 'n' },
{ "pkt_len", required_argument, NULL, 'l' },
{ "worker_count", required_argument, NULL, 'c' },
{ "cache_size", required_argument, NULL, 'C' },
{ "time_sec", required_argument, NULL, 'T' },
{ "help", no_argument, NULL, 'h' },
{ NULL, 0, NULL, 0 }
};
static const char *shortopts = "i:t:b:n:l:c:C:T:h";
init_config(config);
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
if (opt == -1)
break;
switch (opt) {
case 'i':
parse_interfaces(config, optarg);
break;
case 't':
config->copy_type = atoi(optarg);
break;
case 'b':
config->burst_size = atoi(optarg);
break;
case 'n':
config->num_pkts = atoi(optarg);
break;
case 'l':
config->pkt_len = atoi(optarg);
break;
case 'c':
config->num_thrs = atoi(optarg);
break;
case 'C':
config->cache_size = atoi(optarg);
break;
case 'T':
config->time_sec = atof(optarg);
break;
case 'h':
print_usage(&config->dyn_defs);
return PRS_TERM;
case '?':
default:
print_usage(&config->dyn_defs);
return PRS_NOK;
}
}
return check_options(config);
}
static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
{
struct sigaction action = { .sa_handler = terminate };
if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
sigaddset(&action.sa_mask, SIGTERM) == -1 ||
sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
ODPH_ERR("Error installing signal handler\n");
return PRS_NOK;
}
return parse_options(argc, argv, config);
}
static inline int send_packets(odp_pktout_queue_t queue, odp_packet_t pkts[], int num)
{
int ret = odp_pktout_send(queue, pkts, num);
if (odp_unlikely(ret < num)) {
ret = ret < 0 ? 0 : ret;
odp_packet_free_multi(&pkts[ret], num - ret);
}
return ret;
}
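/* SW copy path: duplicate each received packet into the copy pool, free the
 * original and forward the copies through the interface output queue. */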
static void sw_copy_and_send_packets(odp_packet_t pkts[], int num, pktio_t *pktio,
init_fn_t init_fn ODP_UNUSED, start_fn_t start_fn ODP_UNUSED,
thread_config_t *config)
{
odp_packet_t old_pkt, new_pkt;
odp_pool_t copy_pool = config->copy_pool;
odp_packet_t out_pkts[num];
int num_out_pkts = 0, num_sent;
stats_t *stats = &config->stats;
for (int i = 0; i < num; ++i) {
old_pkt = pkts[i];
new_pkt = odp_packet_copy(old_pkt, copy_pool);
if (new_pkt != ODP_PACKET_INVALID)
out_pkts[num_out_pkts++] = new_pkt;
else
++stats->copy_errs;
odp_packet_free(old_pkt);
}
if (num_out_pkts > 0) {
num_sent = send_packets(pktio->out_qs[config->thr_idx % pktio->num_out_qs],
out_pkts, num_out_pkts);
stats->fwd_pkts += num_sent;
stats->discards += num_out_pkts - num_sent;
}
}
static transfer_t *init_dma_ev_trs(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
odp_dma_seg_t *dst_segs, pktio_t *pktio,
thread_config_t *config)
{
odp_buffer_t buf;
stats_t *stats = &config->stats;
transfer_t *trs;
odp_dma_compl_t c_ev;
buf = odp_buffer_alloc(config->trs_pool);
if (odp_unlikely(buf == ODP_BUFFER_INVALID)) {
++stats->buf_alloc_errs;
return NULL;
}
trs = (transfer_t *)odp_buffer_addr(buf);
trs->num = 0;
trs->pktio = pktio;
odp_dma_transfer_param_init(trs_param);
trs_param->src_format = ODP_DMA_FORMAT_PACKET;
trs_param->dst_format = ODP_DMA_FORMAT_PACKET;
trs_param->num_src = 0U;
trs_param->num_dst = 0U;
trs_param->src_seg = src_segs;
trs_param->dst_seg = dst_segs;
c_ev = odp_dma_compl_alloc(config->compl_pool);
if (odp_unlikely(c_ev == ODP_DMA_COMPL_INVALID)) {
odp_buffer_free(buf);
++stats->compl_alloc_errs;
return NULL;
}
compl_param->compl_mode = ODP_DMA_COMPL_EVENT;
compl_param->event = odp_dma_compl_to_event(c_ev);
compl_param->queue = config->compl_q;
compl_param->user_ptr = buf;
memset(src_segs, 0, sizeof(*src_segs) * MAX_BURST);
memset(dst_segs, 0, sizeof(*dst_segs) * MAX_BURST);
return trs;
}
static transfer_t *init_dma_poll_trs(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, odp_dma_seg_t *src_segs,
odp_dma_seg_t *dst_segs, pktio_t *pktio,
thread_config_t *config)
{
odp_buffer_t buf;
stats_t *stats = &config->stats;
transfer_t *trs;
buf = odp_buffer_alloc(config->trs_pool);
if (odp_unlikely(buf == ODP_BUFFER_INVALID)) {
++stats->buf_alloc_errs;
return NULL;
}
trs = (transfer_t *)odp_buffer_addr(buf);
trs->num = 0;
trs->pktio = pktio;
odp_dma_transfer_param_init(trs_param);
trs_param->src_format = ODP_DMA_FORMAT_PACKET;
trs_param->dst_format = ODP_DMA_FORMAT_PACKET;
trs_param->num_src = 0U;
trs_param->num_dst = 0U;
trs_param->src_seg = src_segs;
trs_param->dst_seg = dst_segs;
compl_param->compl_mode = ODP_DMA_COMPL_POLL;
compl_param->transfer_id = odp_dma_transfer_id_alloc(config->dma_handle);
if (odp_unlikely(compl_param->transfer_id == ODP_DMA_TRANSFER_ID_INVALID)) {
odp_buffer_free(buf);
++stats->compl_alloc_errs;
return NULL;
}
compl_param->user_ptr = buf;
memset(src_segs, 0, sizeof(*src_segs) * MAX_BURST);
memset(dst_segs, 0, sizeof(*dst_segs) * MAX_BURST);
return trs;
}
static odp_bool_t start_dma_ev_trs(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, thread_config_t *config)
{
const int ret = odp_dma_transfer_start(config->dma_handle, trs_param, compl_param);
if (odp_unlikely(ret <= 0)) {
odp_buffer_free(compl_param->user_ptr);
odp_event_free(compl_param->event);
return false;
}
return true;
}
static odp_bool_t start_dma_poll_trs(odp_dma_transfer_param_t *trs_param,
odp_dma_compl_param_t *compl_param, thread_config_t *config)
{
const int ret = odp_dma_transfer_start(config->dma_handle, trs_param, compl_param);
if (odp_unlikely(ret <= 0)) {
odp_buffer_free(compl_param->user_ptr);
odp_dma_transfer_id_free(config->dma_handle, compl_param->transfer_id);
return false;
}
if (odp_unlikely(odp_stash_put(config->inflight_stash, &compl_param->transfer_id, 1) != 1))
/* Should not happen, but make it visible if it somehow does */
ODPH_ABORT("DMA inflight transfer stash overflow, aborting");
return true;
}
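/* DMA copy path: gather up to burst size received packets as source/destination
 * segments of a single DMA transfer and start it with the configured completion
 * mode (event or poll). */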
static void dma_copy(odp_packet_t pkts[], int num, pktio_t *pktio, init_fn_t init_fn,
start_fn_t start_fn, thread_config_t *config)
{
odp_dma_transfer_param_t trs_param;
odp_dma_compl_param_t compl_param;
odp_packet_t pkt;
transfer_t *trs = NULL;
odp_dma_seg_t src_segs[MAX_BURST], dst_segs[MAX_BURST];
uint32_t num_segs = 0U, pkt_len;
odp_pool_t copy_pool = config->copy_pool;
stats_t *stats = &config->stats;
odp_dma_compl_param_init(&compl_param);
for (int i = 0; i < num; ++i) {
pkt = pkts[i];
if (odp_unlikely(trs == NULL)) {
trs = init_fn(&trs_param, &compl_param, src_segs, dst_segs, pktio, config);
if (trs == NULL) {
continue;
}
}
pkt_len = odp_packet_len(pkt);
src_segs[num_segs].packet = pkt;
src_segs[num_segs].len = pkt_len;
dst_segs[num_segs].packet = odp_packet_alloc(copy_pool, pkt_len);
if (odp_unlikely(dst_segs[num_segs].packet == ODP_PACKET_INVALID)) {
++stats->pkt_alloc_errs;
continue;
}
dst_segs[num_segs].len = pkt_len;
trs->src_pkts[num_segs] = src_segs[num_segs].packet;
trs->dst_pkts[num_segs] = dst_segs[num_segs].packet;
++trs->num;
++trs_param.num_src;
++trs_param.num_dst;
++num_segs;
}
if (num_segs > 0U)
if (odp_unlikely(!start_fn(&trs_param, &compl_param, config))) {
odp_packet_free_multi(trs->src_pkts, trs->num);
odp_packet_free_multi(trs->dst_pkts, trs->num);
++stats->start_errs;
}
}
static void drain_events(thread_config_t *config ODP_UNUSED)
{
odp_event_t ev;
odp_event_type_t type;
odp_dma_result_t res;
odp_buffer_t buf;
transfer_t *trs;
while (true) {
ev = odp_schedule(NULL, odp_schedule_wait_time(100U * ODP_TIME_MSEC_IN_NS));
if (ev == ODP_EVENT_INVALID)
break;
type = odp_event_type(ev);
if (type == ODP_EVENT_DMA_COMPL) {
memset(&res, 0, sizeof(res));
odp_dma_compl_result(odp_dma_compl_from_event(ev), &res);
buf = (odp_buffer_t)res.user_ptr;
trs = (transfer_t *)odp_buffer_addr(buf);
odp_packet_free_multi(trs->src_pkts, trs->num);
odp_packet_free_multi(trs->dst_pkts, trs->num);
odp_buffer_free(buf);
}
odp_event_free(ev);
}
}
static void drain_polled(thread_config_t *config)
{
odp_dma_transfer_id_t id;
odp_dma_result_t res;
int ret;
odp_buffer_t buf;
transfer_t *trs;
while (true) {
if (odp_stash_get(config->inflight_stash, &id, 1) != 1)
break;
memset(&res, 0, sizeof(res));
do {
ret = odp_dma_transfer_done(config->dma_handle, id, &res);
} while (ret == 0);
odp_dma_transfer_id_free(config->dma_handle, id);
if (ret < 0)
continue;
buf = (odp_buffer_t)res.user_ptr;
trs = (transfer_t *)odp_buffer_addr(buf);
odp_packet_free_multi(trs->src_pkts, trs->num);
odp_packet_free_multi(trs->dst_pkts, trs->num);
odp_buffer_free(buf);
}
}
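/* Create the packet copy pool and, for DMA copy modes, the per-worker DMA
 * sessions, completion resources and transfer tracking pool. */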
static odp_bool_t setup_copy(prog_config_t *config)
{
odp_pool_param_t pool_param;
thread_config_t *thr;
const odp_dma_param_t dma_param = {
.direction = ODP_DMA_MAIN_TO_MAIN,
.type = ODP_DMA_TYPE_COPY,
.compl_mode_mask = ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_POLL,
.mt_mode = ODP_DMA_MT_SERIAL,
.order = ODP_DMA_ORDER_NONE };
odp_dma_pool_param_t compl_pool_param;
odp_queue_param_t queue_param;
odp_stash_param_t stash_param;
odp_pool_param_init(&pool_param);
pool_param.pkt.seg_len = config->pkt_len;
pool_param.pkt.len = config->pkt_len;
pool_param.pkt.num = config->num_pkts;
pool_param.pkt.cache_size = config->cache_size;
pool_param.type = ODP_POOL_PACKET;
config->copy_pool = odp_pool_create(PROG_NAME "_copy", &pool_param);
if (config->copy_pool == ODP_POOL_INVALID) {
ODPH_ERR("Error creating packet copy pool\n");
return false;
}
if (config->copy_type == SW_COPY) {
config->pkt_fn = sw_copy_and_send_packets;
for (int i = 0; i < config->num_thrs; ++i)
config->thread_config[i].copy_pool = config->copy_pool;
return true;
}
pool_param.buf.num = config->num_inflight;
pool_param.buf.size = sizeof(transfer_t);
pool_param.buf.cache_size = config->trs_cache_size;
pool_param.type = ODP_POOL_BUFFER;
config->trs_pool = odp_pool_create(PROG_NAME "_dma_trs", &pool_param);
if (config->trs_pool == ODP_POOL_INVALID) {
ODPH_ERR("Error creating DMA transfer tracking pool\n");
return false;
}
for (int i = 0; i < config->num_thrs; ++i) {
thr = &config->thread_config[i];
thr->copy_pool = config->copy_pool;
thr->trs_pool = config->trs_pool;
thr->dma_handle = odp_dma_create(PROG_NAME "_dma", &dma_param);
if (thr->dma_handle == ODP_DMA_INVALID) {
ODPH_ERR("Error creating DMA session\n");
return false;
}
if (config->copy_type == DMA_COPY_EV) {
odp_dma_pool_param_init(&compl_pool_param);
compl_pool_param.num = config->num_inflight;
compl_pool_param.cache_size = config->compl_cache_size;
thr->compl_pool = odp_dma_pool_create(PROG_NAME "_dma_compl",
&compl_pool_param);
if (thr->compl_pool == ODP_POOL_INVALID) {
ODPH_ERR("Error creating DMA event completion pool\n");
return false;
}
odp_queue_param_init(&queue_param);
queue_param.type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.prio = odp_schedule_max_prio();
thr->compl_q = odp_queue_create(PROG_NAME "_dma_compl", &queue_param);
if (thr->compl_q == ODP_QUEUE_INVALID) {
ODPH_ERR("Error creating DMA completion queue\n");
return false;
}
config->init_fn = init_dma_ev_trs;
config->start_fn = start_dma_ev_trs;
config->drain_fn = drain_events;
} else {
odp_stash_param_init(&stash_param);
stash_param.type = config->stash_type;
stash_param.num_obj = config->num_inflight;
stash_param.obj_size = config->inflight_obj_size;
stash_param.cache_size = config->stash_cache_size;
stash_param.put_mode = ODP_STASH_OP_LOCAL;
stash_param.get_mode = ODP_STASH_OP_LOCAL;
thr->inflight_stash = odp_stash_create("_dma_inflight", &stash_param);
if (thr->inflight_stash == ODP_STASH_INVALID) {
ODPH_ERR("Error creating DMA inflight transfer stash\n");
return false;
}
config->init_fn = init_dma_poll_trs;
config->start_fn = start_dma_poll_trs;
config->drain_fn = drain_polled;
}
}
config->pkt_fn = dma_copy;
return true;
}
static odp_bool_t setup_pktios(prog_config_t *config)
{
odp_pool_param_t pool_param;
pktio_t *pktio;
odp_pktio_param_t pktio_param;
odp_pktio_capability_t capa;
odp_pktin_queue_param_t pktin_param;
odp_pktout_queue_param_t pktout_param;
uint32_t num_input_qs, num_output_qs;
odp_pool_param_init(&pool_param);
pool_param.pkt.seg_len = config->pkt_len;
pool_param.pkt.len = config->pkt_len;
pool_param.pkt.num = config->num_pkts;
pool_param.pkt.cache_size = config->cache_size;
pool_param.type = ODP_POOL_PACKET;
config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param);
if (config->pktio_pool == ODP_POOL_INVALID) {
ODPH_ERR("Error creating packet I/O pool\n");
return false;
}
for (uint32_t i = 0U; i < config->num_ifs; ++i) {
pktio = &config->pktios[i];
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param);
if (pktio->handle == ODP_PKTIO_INVALID) {
ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name);
return false;
}
config->pktio_idx_map[odp_pktio_index(pktio->handle)] = i;
if (odp_pktio_capability(pktio->handle, &capa) < 0) {
ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name);
return false;
}
num_input_qs = ODPH_MIN((uint32_t)config->num_thrs, capa.max_input_queues);
num_output_qs = ODPH_MIN((uint32_t)config->num_thrs, capa.max_output_queues);
num_output_qs = ODPH_MIN(num_output_qs, MAX_OUT_QS);
odp_pktin_queue_param_init(&pktin_param);
if (num_input_qs > 1) {
pktin_param.hash_enable = true;
pktin_param.hash_proto.proto.ipv4 = 1U;
}
pktin_param.num_queues = num_input_qs;
pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) {
ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name);
return false;
}
odp_pktout_queue_param_init(&pktout_param);
if (num_output_qs == (uint32_t)config->num_thrs)
pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
pktout_param.num_queues = num_output_qs;
pktio->num_out_qs = num_output_qs;
if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) {
ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name);
return false;
}
if (odp_pktout_queue(pktio->handle, pktio->out_qs, num_output_qs) !=
(int)num_output_qs) {
ODPH_ERR("Error querying packet I/O output queues (%s)\n", pktio->name);
return false;
}
if (odp_pktio_start(pktio->handle) < 0) {
ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name);
return false;
}
}
return true;
}
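/* Poll completion path: pop inflight transfer IDs from the stash, check them
 * with odp_dma_transfer_done() and forward the copied packets of completed
 * transfers. Unfinished IDs are pushed back to the stash. */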
static inline void send_dma_poll_trs_pkts(int burst_size, thread_config_t *config)
{
odp_stash_t stash_handle = config->inflight_stash;
odp_dma_transfer_id_t ids[burst_size], id;
int32_t num;
odp_dma_t dma_handle = config->dma_handle;
odp_dma_result_t res;
odp_buffer_t buf;
int ret;
transfer_t *trs;
pktio_t *pktio;
int num_sent;
stats_t *stats = &config->stats;
while (true) {
num = odp_stash_get(stash_handle, &ids, burst_size);
if (num <= 0)
break;
for (int32_t i = 0; i < num; ++i) {
id = ids[i];
ret = odp_dma_transfer_done(dma_handle, id, &res);
if (ret == 0) {
if (odp_unlikely(odp_stash_put(stash_handle, &id, 1) != 1))
/* Should not happen, but make it visible if it somehow
* does */
ODPH_ABORT("DMA inflight transfer stash overflow,"
" aborting");
++stats->trs_polled;
continue;
}
odp_dma_transfer_id_free(dma_handle, id);
if (ret < 0) {
++stats->trs_poll_errs;
continue;
}
buf = (odp_buffer_t)res.user_ptr;
trs = (transfer_t *)odp_buffer_addr(buf);
if (res.success) {
pktio = trs->pktio;
num_sent = send_packets(pktio->out_qs[config->thr_idx %
pktio->num_out_qs],
trs->dst_pkts, trs->num);
++stats->trs;
stats->fwd_pkts += num_sent;
stats->discards += trs->num - num_sent;
} else {
odp_packet_free_multi(trs->dst_pkts, trs->num);
++stats->trs_errs;
}
odp_packet_free_multi(trs->src_pkts, trs->num);
odp_buffer_free(buf);
}
}
}
static inline void send_dma_ev_trs_pkts(odp_dma_compl_t compl_ev, thread_config_t *config)
{
odp_dma_result_t res;
odp_buffer_t buf;
transfer_t *trs;
pktio_t *pktio;
int num_sent;
stats_t *stats = &config->stats;
memset(&res, 0, sizeof(res));
odp_dma_compl_result(compl_ev, &res);
buf = (odp_buffer_t)res.user_ptr;
trs = (transfer_t *)odp_buffer_addr(buf);
if (res.success) {
pktio = trs->pktio;
num_sent = send_packets(pktio->out_qs[config->thr_idx % pktio->num_out_qs],
trs->dst_pkts, trs->num);
++stats->trs;
stats->fwd_pkts += num_sent;
stats->discards += trs->num - num_sent;
} else {
odp_packet_free_multi(trs->dst_pkts, trs->num);
++stats->trs_errs;
}
odp_packet_free_multi(trs->src_pkts, trs->num);
odp_buffer_free(buf);
odp_dma_compl_free(compl_ev);
}
static inline void push_packet(odp_packet_t pkt, pkt_vec_t pkt_vecs[], uint8_t *pktio_idx_map)
{
uint8_t idx = pktio_idx_map[odp_packet_input_index(pkt)];
pkt_vec_t *pkt_vec = &pkt_vecs[idx];
pkt_vec->pkts[pkt_vec->num++] = pkt;
}
static inline void pop_packets(pkt_vec_t *pkt_vec, int num_procd)
{
pkt_vec->num -= num_procd;
for (int i = 0, j = num_procd; i < pkt_vec->num; ++i, ++j)
pkt_vec->pkts[i] = pkt_vec->pkts[j];
}
static void free_pending_packets(pkt_vec_t pkt_vecs[], uint32_t num_ifs)
{
for (uint32_t i = 0U; i < num_ifs; ++i)
odp_packet_free_multi(pkt_vecs[i].pkts, pkt_vecs[i].num);
}
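/* Worker main loop: schedule packet and DMA completion events, accumulate
 * received packets per interface and trigger a copy once a full burst is
 * available. */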
static int process_packets(void *args)
{
thread_config_t *config = args;
const uint8_t num_ifs = config->prog_config->num_ifs;
pkt_vec_t pkt_vecs[num_ifs], *pkt_vec;
odp_atomic_u32_t *is_running = &config->prog_config->is_running;
uint64_t c1, c2, c3, c4, cdiff = 0U, rounds = 0U;
const uint8_t copy_type = config->prog_config->copy_type;
const int burst_size = config->prog_config->burst_size;
odp_event_t evs[burst_size];
odp_event_t ev;
odp_event_type_t type;
int num_evs;
uint8_t *pktio_map = config->prog_config->pktio_idx_map;
stats_t *stats = &config->stats;
init_fn_t init_fn = config->prog_config->init_fn;
start_fn_t start_fn = config->prog_config->start_fn;
pkt_fn_t pkt_fn = config->prog_config->pkt_fn;
for (uint32_t i = 0U; i < num_ifs; ++i) {
pkt_vecs[i].pktio = &config->prog_config->pktios[i];
pkt_vecs[i].num = 0;
}
config->thr_idx = odp_thread_id();
odp_barrier_wait(&config->prog_config->init_barrier);
c1 = odp_cpu_cycles();
while (odp_atomic_load_u32(is_running)) {
c3 = odp_cpu_cycles();
num_evs = odp_schedule_multi_no_wait(NULL, evs, burst_size);
c4 = odp_cpu_cycles();
cdiff += odp_cpu_cycles_diff(c4, c3);
++rounds;
if (copy_type == DMA_COPY_POLL)
send_dma_poll_trs_pkts(burst_size, config);
if (num_evs == 0)
continue;
for (int i = 0; i < num_evs; ++i) {
ev = evs[i];
type = odp_event_type(ev);
if (type == ODP_EVENT_DMA_COMPL) {
send_dma_ev_trs_pkts(odp_dma_compl_from_event(ev), config);
} else if (type == ODP_EVENT_PACKET) {
push_packet(odp_packet_from_event(ev), pkt_vecs, pktio_map);
} else {
++stats->discards;
}
}
for (uint32_t i = 0U; i < num_ifs; ++i) {
pkt_vec = &pkt_vecs[i];
if (pkt_vec->num >= burst_size) {
pkt_fn(pkt_vec->pkts, burst_size, pkt_vec->pktio, init_fn,
start_fn, config);
pop_packets(pkt_vec, burst_size);
}
}
}
c2 = odp_cpu_cycles();
stats->sched_cc = cdiff;
stats->tot_cc = odp_cpu_cycles_diff(c2, c1);
stats->sched_rounds = rounds;
free_pending_packets(pkt_vecs, num_ifs);
odp_barrier_wait(&config->prog_config->term_barrier);
if (config->prog_config->drain_fn)
config->prog_config->drain_fn(config);
return 0;
}
static odp_bool_t setup_workers(prog_config_t *config)
{
odp_cpumask_t cpumask;
int num_workers;
odph_thread_common_param_t thr_common;
odph_thread_param_t thr_param[config->num_thrs];
num_workers = odp_cpumask_default_worker(&cpumask, config->num_thrs);
odph_thread_common_param_init(&thr_common);
thr_common.instance = config->odp_instance;
thr_common.cpumask = &cpumask;
for (int i = 0; i < config->num_thrs; ++i) {
odph_thread_param_init(&thr_param[i]);
thr_param[i].start = process_packets;
thr_param[i].thr_type = ODP_THREAD_WORKER;
config->thread_config[i].prog_config = config;
thr_param[i].arg = &config->thread_config[i];
}
num_workers = odph_thread_create(config->thread_tbl, &thr_common, thr_param, num_workers);
if (num_workers != config->num_thrs) {
ODPH_ERR("Error configuring worker threads\n");
return false;
}
return true;
}
static odp_bool_t setup_test(prog_config_t *config)
{
odp_barrier_init(&config->init_barrier, config->num_thrs + 1);
odp_barrier_init(&config->term_barrier, config->num_thrs + 1);
if (!setup_copy(config))
return false;
if (!setup_pktios(config))
return false;
if (!setup_workers(config))
return false;
odp_barrier_wait(&config->init_barrier);
return true;
}
static void stop_test(prog_config_t *config)
{
for (uint32_t i = 0U; i < config->num_ifs; ++i)
if (config->pktios[i].handle != ODP_PKTIO_INVALID)
(void)odp_pktio_stop(config->pktios[i].handle);
odp_barrier_wait(&config->term_barrier);
(void)odph_thread_join(config->thread_tbl, config->num_thrs);
}
static void teardown(prog_config_t *config)
{
thread_config_t *thr;
for (uint32_t i = 0U; i < config->num_ifs; ++i) {
free(config->pktios[i].name);
if (config->pktios[i].handle != ODP_PKTIO_INVALID)
(void)odp_pktio_close(config->pktios[i].handle);
}
if (config->pktio_pool != ODP_POOL_INVALID)
(void)odp_pool_destroy(config->pktio_pool);
for (int i = 0; i < config->num_thrs; ++i) {
thr = &config->thread_config[i];
if (thr->inflight_stash != ODP_STASH_INVALID)
(void)odp_stash_destroy(thr->inflight_stash);
if (thr->compl_q != ODP_QUEUE_INVALID)
(void)odp_queue_destroy(thr->compl_q);
if (thr->compl_pool != ODP_POOL_INVALID)
(void)odp_pool_destroy(thr->compl_pool);
if (thr->dma_handle != ODP_DMA_INVALID)
(void)odp_dma_destroy(thr->dma_handle);
}
if (config->copy_pool != ODP_POOL_INVALID)
(void)odp_pool_destroy(config->copy_pool);
if (config->trs_pool != ODP_POOL_INVALID)
(void)odp_pool_destroy(config->trs_pool);
}
static void print_stats(const prog_config_t *config)
{
const stats_t *stats;
const char *align1 = config->copy_type == DMA_COPY_EV ? " " : "";
const char *align2 = config->copy_type == SW_COPY ? " " :
config->copy_type == DMA_COPY_EV ? " " :
" ";
printf("\n==================\n\n"
"DMA forwarder done\n\n"
" copy mode: %s\n"
" burst size: %u\n"
" packet length: %u\n"
" max cache size: %u\n", config->copy_type == SW_COPY ? "SW" :
config->copy_type == DMA_COPY_EV ? "DMA-event" : "DMA-poll",
config->burst_size, config->pkt_len, config->cache_size);
for (int i = 0; i < config->num_thrs; ++i) {
stats = &config->thread_config[i].stats;
printf("\n worker %d:\n", i);
if (config->copy_type == SW_COPY) {
printf(" packet copy errors: %" PRIu64 "\n",
stats->copy_errs);
} else {
printf(" successful DMA transfers: %s%" PRIu64 "\n"
" DMA transfer start errors: %s%" PRIu64 "\n"
" DMA transfer errors: %s%" PRIu64 "\n"
" transfer buffer allocation errors: %s%" PRIu64 "\n"
" copy packet allocation errors: %s%" PRIu64 "\n",
align1, stats->trs, align1, stats->start_errs, align1,
stats->trs_errs, align1, stats->buf_alloc_errs, align1,
stats->pkt_alloc_errs);
if (config->copy_type == DMA_COPY_EV)
printf(" completion event allocation errors: %" PRIu64 "\n",
stats->compl_alloc_errs);
else
printf(" transfer ID allocation errors: %" PRIu64 "\n"
" transfer poll errors: %" PRIu64 "\n"
" transfers polled: %" PRIu64 "\n",
stats->compl_alloc_errs, stats->trs_poll_errs,
stats->trs_polled);
}
printf(" packets forwarded:%s%" PRIu64 "\n"
" packets dropped: %s%" PRIu64 "\n"
" call cycles per schedule round:\n"
" total: %" PRIu64 "\n"
" schedule: %" PRIu64 "\n"
" rounds: %" PRIu64 "\n", align2, stats->fwd_pkts, align2,
stats->discards, DIV_IF(stats->tot_cc, stats->sched_rounds),
DIV_IF(stats->sched_cc, stats->sched_rounds), stats->sched_rounds);
}
printf("\n==================\n");
}
int main(int argc, char **argv)
{
odph_helper_options_t odph_opts;
odp_init_t init_param;
odp_instance_t odp_instance;
odp_shm_t shm_cfg = ODP_SHM_INVALID;
int ret = EXIT_SUCCESS;
parse_result_t parse_res;
argc = odph_parse_options(argc, argv);
if (odph_options(&odph_opts) == -1) {
ODPH_ERR("Error while reading ODP helper options, exiting\n");
exit(EXIT_FAILURE);
}
odp_init_param_init(&init_param);
init_param.mem_model = odph_opts.mem_model;
if (odp_init_global(&odp_instance, &init_param, NULL)) {
ODPH_ERR("ODP global init failed, exiting\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("ODP local init failed, exiting\n");
exit(EXIT_FAILURE);
}
shm_cfg = odp_shm_reserve(PROG_NAME "_cfg", sizeof(prog_config_t), ODP_CACHE_LINE_SIZE,
0U);
if (shm_cfg == ODP_SHM_INVALID) {
ODPH_ERR("Error reserving shared memory\n");
ret = EXIT_FAILURE;
goto out;
}
prog_conf = odp_shm_addr(shm_cfg);
if (prog_conf == NULL) {
ODPH_ERR("Error resolving shared memory address\n");
ret = EXIT_FAILURE;
goto out;
}
parse_res = setup_program(argc, argv, prog_conf);
if (parse_res == PRS_NOK) {
ret = EXIT_FAILURE;
goto out_test;
}
if (parse_res == PRS_TERM) {
ret = EXIT_SUCCESS;
goto out_test;
}
if (parse_res == PRS_NOT_SUP) {
ret = EXIT_NOT_SUP;
goto out_test;
}
if (odp_schedule_config(NULL) < 0) {
ODPH_ERR("Error configuring scheduler\n");
ret = EXIT_FAILURE;
goto out_test;
}
prog_conf->odp_instance = odp_instance;
odp_atomic_init_u32(&prog_conf->is_running, 1U);
if (!setup_test(prog_conf)) {
ret = EXIT_FAILURE;
goto out_test;
}
if (prog_conf->time_sec > 0.001) {
struct timespec ts;
ts.tv_sec = prog_conf->time_sec;
ts.tv_nsec = (prog_conf->time_sec - ts.tv_sec) * ODP_TIME_SEC_IN_NS;
nanosleep(&ts, NULL);
odp_atomic_store_u32(&prog_conf->is_running, 0U);
} else {
while (odp_atomic_load_u32(&prog_conf->is_running))
sleep(1U);
}
stop_test(prog_conf);
print_stats(prog_conf);
out_test:
teardown(prog_conf);
out:
if (shm_cfg != ODP_SHM_INVALID)
(void)odp_shm_free(shm_cfg);
if (odp_term_local()) {
ODPH_ERR("ODP local terminate failed, exiting\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("ODP global terminate failed, exiting\n");
exit(EXIT_FAILURE);
}
return ret;
}