This L2 forwarding application can be used as an example as well as a performance test for the different ODP packet I/O modes (direct, queue or scheduled). Note that this example is tuned for performance. As a result, when using scheduled packet input mode with direct or queued output mode and multiple output queues, packet order is not guaranteed. To maintain packet order, use a single worker thread or output interfaces with one output queue.
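For example, "odp_l2fwd -i eth0,eth1,eth2,eth3 -m 0 -t 1" runs in direct input mode for one second and forwards packets between two port pairs (eth0 with eth1, eth2 with eth3); see the usage() help text below for the full option list.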
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <unistd.h>
#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
#define DEFAULT_NUM_PKT (16 * 1024)
#define POOL_PKT_LEN 1536
#define MAX_PKT_BURST 32
#define MAX_QUEUES 32
#define MAX_GROUPS 32
#define MAX_PKTIOS 8
#define DEFAULT_VEC_SIZE MAX_PKT_BURST
#define DEFAULT_VEC_TMO ODP_TIME_MSEC_IN_NS
#define EXTRA_STR_LEN 32
typedef enum pktin_mode_t {
DIRECT_RECV,
PLAIN_QUEUE,
SCHED_PARALLEL,
SCHED_ATOMIC,
SCHED_ORDERED,
} pktin_mode_t;
typedef enum pktout_mode_t {
PKTOUT_DIRECT,
PKTOUT_QUEUE
} pktout_mode_t;
static inline int sched_mode(pktin_mode_t in_mode)
{
return (in_mode == SCHED_PARALLEL) ||
(in_mode == SCHED_ATOMIC) ||
(in_mode == SCHED_ORDERED);
}
#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
strrchr((file_name), '/') + 1 : (file_name))
typedef struct {
uint8_t extra_feat;
uint8_t has_state;
uint8_t prefetch;
uint8_t dst_change;
uint8_t src_change;
uint16_t data_rd;
uint8_t error_check;
uint8_t packet_copy;
uint8_t chksum;
uint8_t verbose_pkt;
unsigned int cpu_count;
int if_count;
int addr_count;
int num_workers;
char **if_names;
odph_ethaddr_t addrs[MAX_PKTIOS];
pktin_mode_t in_mode;
pktout_mode_t out_mode;
int time;
int accuracy;
char *if_str;
int sched_mode;
int num_groups;
int group_mode;
int burst_rx;
int rx_queues;
int pool_per_if;
uint32_t num_pkt;
int flow_control;
bool pause_rx;
bool pause_tx;
bool vector_mode;
uint32_t num_vec;
uint64_t vec_tmo_ns;
uint32_t vec_size;
int verbose;
uint32_t packet_len;
uint32_t seg_len;
int promisc_mode;
int flow_aware;
uint8_t input_ts;
int mtu;
int num_om;
int num_prio;
struct {
odp_packet_tx_compl_mode_t mode;
uint32_t nth;
uint32_t thr_compl_id;
uint32_t tot_compl_id;
} tx_compl;
char *output_map[MAX_PKTIOS];
/* Schedule priority of input queues, one value per interface */
odp_schedule_prio_t prio[MAX_PKTIOS];
} appl_args_t;
typedef union ODP_ALIGNED_CACHE {
struct {
uint64_t packets;
uint64_t rx_drops;
uint64_t tx_drops;
uint64_t tx_c_misses;
uint64_t tx_c_fails;
uint64_t copy_fails;
uint64_t dummy_sum;
} s;
uint8_t padding[ODP_CACHE_LINE_SIZE];
} stats_t;
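/* Note on stats_t above: the union pads each per-worker statistics instance
 * to a full cache line (ODP_ALIGNED_CACHE plus the padding member), so that
 * frequent counter updates by one worker do not false-share a cache line
 * with another worker's counters. */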
typedef struct {
/* Options passed with transmit completion requests */
odp_packet_tx_compl_opt_t opt;
uint32_t init;
uint32_t max;
uint32_t free_head;
uint32_t poll_head;
uint32_t num_act;
uint32_t max_act;
int interval;
int next_req;
} tx_compl_t;
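/* A sketch of how the poll-mode fields above are used (see
 * handle_tx_poll_compl()): completion IDs in [init, max] form a per-thread
 * ring. free_head is the next ID to hand out, poll_head the oldest ID still
 * in flight, and num_act (bounded by max_act) the number of outstanding
 * requests. A request is made for every interval-th packet, tracked across
 * bursts via next_req. */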
typedef struct {
tx_compl_t tx_compl;
} state_t;
typedef struct thread_args_t {
stats_t stats;
state_t state;
struct {
odp_pktin_queue_t pktin;
odp_pktout_queue_t pktout;
odp_queue_t rx_queue;
odp_queue_t tx_queue;
int rx_idx;
int tx_idx;
int rx_queue_idx;
int tx_queue_idx;
} pktio[MAX_PKTIOS];
/* Groups to join */
odp_schedule_group_t group[MAX_GROUPS];
int thr_idx;
int num_pktio;
int num_grp_join;
} thread_args_t;
typedef struct {
odph_thread_t thread_tbl[MAX_WORKERS];
thread_args_t thread_args[MAX_WORKERS];
/* Barriers to synchronize main and workers */
odp_barrier_t init_barrier;
odp_barrier_t term_barrier;
appl_args_t appl;
odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
/* Table of dst ports, used by the non-sched modes */
int dst_port[MAX_PKTIOS];
struct {
odp_pktio_t pktio;
odp_pktin_queue_t pktin[MAX_QUEUES];
odp_pktout_queue_t pktout[MAX_QUEUES];
odp_queue_t rx_q[MAX_QUEUES];
odp_queue_t tx_q[MAX_QUEUES];
odp_queue_t compl_q;
int num_rx_thr;
int num_tx_thr;
int num_rx_queue;
int num_tx_queue;
int next_rx_queue;
int next_tx_queue;
} pktios[MAX_PKTIOS];
/* Destination port lookup table, indexed by pktio index of the API */
uint8_t dst_port_from_idx[ODP_PKTIO_MAX_INDEX + 1];
/* Break workers loop if set to 1 */
odp_atomic_u32_t exit_threads;
uint32_t pkt_len;
uint32_t num_pkt;
uint32_t seg_len;
uint32_t vector_num;
uint32_t vector_max_size;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
} args_t;
static args_t *gbl_args;
static void sig_handler(int signo ODP_UNUSED)
{
if (gbl_args == NULL)
return;
odp_atomic_store_u32(&gbl_args->exit_threads, 1);
}
static int setup_sig_handler(void)
{
struct sigaction action = { .sa_handler = sig_handler };
if (sigemptyset(&action.sa_mask) || sigaction(SIGINT, &action, NULL))
return -1;
return 0;
}
/* Drop packets which input parsing marked as containing errors */
static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
{
odp_packet_t pkt;
unsigned dropped = 0;
unsigned i, j;
for (i = 0, j = 0; i < num; ++i) {
pkt = pkt_tbl[i];
if (odp_unlikely(odp_packet_has_error(pkt))) {
odp_packet_free(pkt); /* Drop */
dropped++;
} else if (odp_unlikely(i != j++)) {
pkt_tbl[j - 1] = pkt;
}
}
return dropped;
}
static inline void prefetch_data(uint8_t prefetch,
odp_packet_t pkt_tbl[], uint32_t num)
{
if (prefetch == 0)
return;
for (uint32_t i = 0; i < num; i++)
odp_packet_prefetch(pkt_tbl[i], 0, prefetch * 64);
}
/* Fill packets' eth addresses according to the destination port */
static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
unsigned num, int dst_port)
{
odp_packet_t pkt;
odph_ethhdr_t *eth;
unsigned i;
if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
return;
for (i = 0; i < num; ++i) {
pkt = pkt_tbl[i];
eth = odp_packet_data(pkt);
if (gbl_args->appl.src_change)
eth->src = gbl_args->port_eth_addr[dst_port];
if (gbl_args->appl.dst_change)
eth->dst = gbl_args->dst_eth_addr[dst_port];
}
}
static inline int event_queue_send(odp_queue_t queue, odp_packet_t *pkt_tbl,
unsigned pkts)
{
int ret;
unsigned sent = 0;
odp_event_t ev_tbl[pkts];
odp_packet_to_event_multi(pkt_tbl, ev_tbl, pkts);
while (sent < pkts) {
ret = odp_queue_enq_multi(queue, &ev_tbl[sent], pkts - sent);
if (odp_unlikely(ret <= 0)) {
/* Failed to send packet events */
if (ret < 0)
break;
}
sent += ret;
}
return sent;
}
static inline void chksum_insert(odp_packet_t *pkt_tbl, int pkts)
{
odp_packet_t pkt;
int i;
for (i = 0; i < pkts; i++) {
pkt = pkt_tbl[i];
odp_packet_l3_chksum_insert(pkt, 1);
odp_packet_l4_chksum_insert(pkt, 1);
}
}
static void print_packets(odp_packet_t *pkt_tbl, int num)
{
odp_packet_t pkt;
uintptr_t data_ptr;
uint32_t bit, align;
for (int i = 0; i < num; i++) {
pkt = pkt_tbl[i];
data_ptr = (uintptr_t)odp_packet_data(pkt);
/* Find the lowest set address bit to determine data alignment */
for (bit = 0, align = 1; bit < 32; bit++, align *= 2)
if (data_ptr & (0x1 << bit))
break;
printf(" Packet data: 0x%" PRIxPTR "\n"
" Packet len: %u\n"
" Packet seg len: %u\n"
" Data align: %u\n"
" Num segments: %i\n"
" Headroom size: %u\n"
" User area size: %u\n\n",
data_ptr, odp_packet_len(pkt), odp_packet_seg_len(pkt),
align, odp_packet_num_segs(pkt), odp_packet_headroom(pkt),
odp_packet_user_area_size(pkt));
}
}
/* Read 64-bit words of packet data to simulate packet processing */
static inline void data_rd(odp_packet_t *pkt_tbl, int num, uint16_t rd_words, stats_t *stats)
{
odp_packet_t pkt;
uint64_t *data;
int i;
uint32_t len, words, j;
uint64_t sum = 0;
for (i = 0; i < num; i++) {
pkt = pkt_tbl[i];
data = odp_packet_data(pkt);
len = odp_packet_seg_len(pkt);
words = rd_words;
if (rd_words * 8 > len)
words = len / 8;
for (j = 0; j < words; j++)
sum += data[j];
}
stats->s.dummy_sum += sum;
}
static inline int copy_packets(odp_packet_t *pkt_tbl, int pkts)
{
odp_packet_t old_pkt, new_pkt;
odp_pool_t pool;
int i;
int copy_fails = 0;
for (i = 0; i < pkts; i++) {
old_pkt = pkt_tbl[i];
pool = odp_packet_pool(old_pkt);
new_pkt = odp_packet_copy(old_pkt, pool);
if (new_pkt != ODP_PACKET_INVALID) {
pkt_tbl[i] = new_pkt;
odp_packet_free(old_pkt);
} else {
copy_fails++;
}
}
return copy_fails;
}
static inline int process_extra_features(const appl_args_t *appl_args, odp_packet_t *pkt_tbl,
int pkts, stats_t *stats)
{
if (odp_unlikely(appl_args->extra_feat)) {
uint16_t rd_words = appl_args->data_rd;
if (appl_args->verbose_pkt)
print_packets(pkt_tbl, pkts);
if (rd_words)
data_rd(pkt_tbl, pkts, rd_words, stats);
if (appl_args->packet_copy) {
int fails;
fails = copy_packets(pkt_tbl, pkts);
stats->s.copy_fails += fails;
}
if (appl_args->chksum)
chksum_insert(pkt_tbl, pkts);
if (appl_args->error_check) {
int rx_drops;
/* Drop packets with errors */
rx_drops = drop_err_pkts(pkt_tbl, pkts);
if (odp_unlikely(rx_drops)) {
stats->s.rx_drops += rx_drops;
if (pkts == rx_drops)
return 0;
pkts -= rx_drops;
}
}
}
return pkts;
}
static inline void handle_tx_event_compl(tx_compl_t *tx_c,
odp_packet_t pkts[],
int num,
int tx_idx, stats_t *stats)
{
odp_packet_t pkt;
int next_req = tx_c->next_req;
const int interval = tx_c->interval;
tx_c->opt.queue = gbl_args->pktios[tx_idx].compl_q;
while (next_req <= num) {
pkt = pkts[next_req - 1];
if (odp_unlikely(odp_packet_tx_compl_request(pkt, &tx_c->opt) < 0)) {
stats->s.tx_c_fails++;
/* Missed one, try requesting on the first packet of the next burst */
next_req = num + 1;
break;
}
next_req += interval;
}
tx_c->next_req = next_req - num;
}
static inline void handle_tx_poll_compl(tx_compl_t *tx_c,
odp_packet_t pkts[],
int num,
int tx_idx,
stats_t *stats)
{
uint32_t num_act = tx_c->num_act, poll_head = tx_c->poll_head, free_head = tx_c->free_head;
const uint32_t max = tx_c->max, init = tx_c->init, max_act = tx_c->max_act;
int next_req = tx_c->next_req;
const int interval = tx_c->interval;
odp_packet_t pkt;
odp_pktio_t pktio = gbl_args->pktios[tx_idx].pktio;
/* Retire completed transmits, oldest first */
while (num_act > 0) {
if (odp_packet_tx_compl_done(pktio, poll_head) < 1)
break;
--num_act;
if (++poll_head > max)
poll_head = init;
}
while (next_req <= num) {
pkt = pkts[next_req - 1];
if (num_act == max_act) {
stats->s.tx_c_misses++;
/* Missed one, try requesting on the first packet of the next burst */
next_req = num + 1;
break;
}
tx_c->opt.compl_id = free_head;
if (odp_unlikely(odp_packet_tx_compl_request(pkt, &tx_c->opt) < 0)) {
stats->s.tx_c_fails++;
/* Missed one, try requesting on the first packet of the next burst */
next_req = num + 1;
break;
}
if (++free_head > max)
free_head = init;
++num_act;
next_req += interval;
}
tx_c->free_head = free_head;
tx_c->poll_head = poll_head;
tx_c->num_act = num_act;
tx_c->next_req = next_req - num;
}
static inline void handle_tx_state(state_t *state,
odp_packet_t pkts[],
int num,
int tx_idx,
stats_t *stats)
{
tx_compl_t *tx_c = &state->tx_compl;
if (tx_c->opt.mode == ODP_PACKET_TX_COMPL_EVENT)
handle_tx_event_compl(tx_c, pkts, num, tx_idx, stats);
else if (tx_c->opt.mode == ODP_PACKET_TX_COMPL_POLL)
handle_tx_poll_compl(tx_c, pkts, num, tx_idx, stats);
}
static inline void handle_state_failure(state_t *state,
odp_packet_t packet)
{
if (odp_packet_has_tx_compl_request(packet) != 0) {
--state->tx_compl.num_act;
--state->tx_compl.free_head;
if (state->tx_compl.free_head == UINT32_MAX ||
state->tx_compl.free_head < state->tx_compl.init)
state->tx_compl.free_head = state->tx_compl.max;
}
}
static inline void send_packets(odp_packet_t *pkt_tbl,
int pkts,
int use_event_queue,
int tx_idx,
odp_queue_t tx_queue,
odp_pktout_queue_t pktout_queue,
state_t *state,
stats_t *stats)
{
int sent;
unsigned int tx_drops;
int i;
odp_packet_t pkt;
if (odp_unlikely(state != NULL))
handle_tx_state(state, pkt_tbl, pkts, tx_idx, stats);
if (odp_unlikely(use_event_queue))
sent = event_queue_send(tx_queue, pkt_tbl, pkts);
else
sent = odp_pktout_send(pktout_queue, pkt_tbl, pkts);
sent = odp_unlikely(sent < 0) ? 0 : sent;
tx_drops = pkts - sent;
if (odp_unlikely(tx_drops)) {
stats->s.tx_drops += tx_drops;
/* Drop rejected packets */
for (i = sent; i < pkts; i++) {
pkt = pkt_tbl[i];
if (odp_unlikely(state != NULL))
handle_state_failure(state, pkt);
odp_packet_free(pkt);
}
}
stats->s.packets += pkts;
}
static int handle_rx_state(state_t *state,
odp_event_t evs[],
int num)
{
if (state->tx_compl.opt.mode != ODP_PACKET_TX_COMPL_EVENT)
return num;
/* Consume a lone transmit completion event */
if (odp_unlikely(num == 1 && odp_event_type(evs[0]) == ODP_EVENT_PACKET_TX_COMPL)) {
odp_event_free(evs[0]);
return 0;
}
return num;
}
static int run_worker_sched_mode_vector(void *arg)
{
int thr;
int i;
int pktio, num_pktio;
uint16_t max_burst;
odp_queue_t tx_queue[MAX_PKTIOS];
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_thrmask_t mask;
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
/* Join non-default groups */
for (i = 0; i < thr_args->num_grp_join; i++) {
if (odp_schedule_group_join(thr_args->group[i], &mask)) {
ODPH_ERR("Join failed: %i\n", i);
return -1;
}
}
num_pktio = thr_args->num_pktio;
if (num_pktio > MAX_PKTIOS) {
ODPH_ERR("Too many pktios %i\n", num_pktio);
return -1;
}
for (pktio = 0; pktio < num_pktio; pktio++) {
tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
pktout[pktio] = thr_args->pktio[pktio].pktout;
}
printf("[%02i] PKTIN_SCHED_%s_VECTOR, %s\n", thr,
(in_mode == SCHED_PARALLEL) ? "PARALLEL" :
((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
(use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
odp_event_t ev_tbl[MAX_PKT_BURST];
int events;
events = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT, ev_tbl, max_burst);
if (events <= 0)
continue;
for (i = 0; i < events; i++) {
odp_packet_vector_t pkt_vec = ODP_PACKET_VECTOR_INVALID;
odp_packet_t *pkt_tbl = NULL;
odp_packet_t pkt;
int src_idx, dst_idx;
int pkts = 0;
if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET) {
pkt = odp_packet_from_event(ev_tbl[i]);
pkt_tbl = &pkt;
pkts = 1;
} else if (odp_event_type(ev_tbl[i]) == ODP_EVENT_PACKET_VECTOR) {
pkt_vec = odp_packet_vector_from_event(ev_tbl[i]);
pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl);
} else if (state != NULL) {
pkts = handle_rx_state(state, ev_tbl, events);
if (pkts <= 0)
continue;
}
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
if (odp_unlikely(pkts == 0)) {
if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
odp_packet_vector_free(pkt_vec);
continue;
}
/* Packets from the same queue are from the same interface */
src_idx = odp_packet_input_index(pkt_tbl[0]);
ODPH_ASSERT(src_idx >= 0);
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue[dst_idx],
pktout[dst_idx], state, stats);
if (pkt_vec != ODP_PACKET_VECTOR_INVALID)
odp_packet_vector_free(pkt_vec);
}
}
/* Make sure that latest stat writes are visible to other threads */
odp_mb_full();
/* Wait until pktio devices are stopped */
odp_barrier_wait(&gbl_args->term_barrier);
/* Free remaining events in queues */
while (1) {
odp_event_t ev;
ev = odp_schedule(NULL, odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
if (ev == ODP_EVENT_INVALID)
break;
odp_event_free(ev);
}
return 0;
}
static int run_worker_sched_mode(void *arg)
{
int pkts;
int thr;
int dst_idx;
int i;
int pktio, num_pktio;
uint16_t max_burst;
odp_queue_t tx_queue[MAX_PKTIOS];
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_thrmask_t mask;
char extra_str[EXTRA_STR_LEN];
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
memset(extra_str, 0, EXTRA_STR_LEN);
odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, thr);
/* Join non-default groups */
for (i = 0; i < thr_args->num_grp_join; i++) {
if (odp_schedule_group_join(thr_args->group[i], &mask)) {
ODPH_ERR("Join failed: %i\n", i);
return -1;
}
if (gbl_args->appl.verbose) {
uint64_t tmp = (uint64_t)(uintptr_t)thr_args->group[i];
printf("[%02i] Joined group 0x%" PRIx64 "\n", thr, tmp);
}
}
if (thr_args->num_grp_join)
snprintf(extra_str, EXTRA_STR_LEN, ", joined %i groups", thr_args->num_grp_join);
else if (gbl_args->appl.num_groups == 0)
snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_ALL");
else if (gbl_args->appl.num_groups)
snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_WORKER");
num_pktio = thr_args->num_pktio;
if (num_pktio > MAX_PKTIOS) {
ODPH_ERR("Too many pktios %i\n", num_pktio);
return -1;
}
for (pktio = 0; pktio < num_pktio; pktio++) {
tx_queue[pktio] = thr_args->pktio[pktio].tx_queue;
pktout[pktio] = thr_args->pktio[pktio].pktout;
}
printf("[%02i] PKTIN_SCHED_%s, %s%s\n", thr,
(in_mode == SCHED_PARALLEL) ? "PARALLEL" :
((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
(use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT", extra_str);
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
int src_idx;
pkts = odp_schedule_multi(NULL, ODP_SCHED_NO_WAIT, ev_tbl, max_burst);
if (pkts <= 0)
continue;
if (odp_unlikely(state != NULL)) {
pkts = handle_rx_state(state, ev_tbl, pkts);
if (pkts <= 0)
continue;
}
odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
if (odp_unlikely(pkts == 0))
continue;
/* Packets from the same queue are from the same interface */
src_idx = odp_packet_input_index(pkt_tbl[0]);
ODPH_ASSERT(src_idx >= 0);
dst_idx = gbl_args->dst_port_from_idx[src_idx];
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue[dst_idx],
pktout[dst_idx], state, stats);
}
/* Make sure that latest stat writes are visible to other threads */
odp_mb_full();
/* Wait until pktio devices are stopped */
odp_barrier_wait(&gbl_args->term_barrier);
/* Free remaining events in queues */
while (1) {
odp_event_t ev;
ev = odp_schedule(NULL, odp_schedule_wait_time(ODP_TIME_SEC_IN_NS));
if (ev == ODP_EVENT_INVALID)
break;
odp_event_free(ev);
}
return 0;
}
static int run_worker_plain_queue_mode(void *arg)
{
int thr;
int pkts;
uint16_t max_burst;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
int dst_idx, num_pktio;
odp_queue_t queue;
odp_pktout_queue_t pktout;
odp_queue_t tx_queue;
int pktio = 0;
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
int i;
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
num_pktio = thr_args->num_pktio;
dst_idx = thr_args->pktio[pktio].tx_idx;
queue = thr_args->pktio[pktio].rx_queue;
pktout = thr_args->pktio[pktio].pktout;
tx_queue = thr_args->pktio[pktio].tx_queue;
printf("[%02i] num pktios %i, PKTIN_QUEUE, %s\n", thr, num_pktio,
(use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
odp_event_t event[MAX_PKT_BURST];
if (num_pktio > 1) {
dst_idx = thr_args->pktio[pktio].tx_idx;
queue = thr_args->pktio[pktio].rx_queue;
pktout = thr_args->pktio[pktio].pktout;
tx_queue = thr_args->pktio[pktio].tx_queue;
pktio++;
if (pktio == num_pktio)
pktio = 0;
}
pkts = odp_queue_deq_multi(queue, event, max_burst);
if (odp_unlikely(pkts <= 0))
continue;
odp_packet_from_event_multi(pkt_tbl, event, pkts);
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
if (odp_unlikely(pkts == 0))
continue;
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue, pktout, state,
stats);
}
/* Make sure that latest stat writes are visible to other threads */
odp_mb_full();
/* Wait until pktio devices are stopped */
odp_barrier_wait(&gbl_args->term_barrier);
/* Free remaining events in queues */
for (i = 0; i < num_pktio; i++) {
odp_event_t ev;
queue = thr_args->pktio[i].rx_queue;
do {
ev = odp_queue_deq(queue);
if (ev != ODP_EVENT_INVALID)
odp_event_free(ev);
} while (ev != ODP_EVENT_INVALID);
}
return 0;
}
static int run_worker_direct_mode(void *arg)
{
int thr;
int pkts;
uint16_t max_burst;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
int dst_idx, num_pktio;
odp_pktin_queue_t pktin;
odp_pktout_queue_t pktout;
odp_queue_t tx_queue;
int pktio = 0;
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
const appl_args_t *appl_args = &gbl_args->appl;
state_t *state = appl_args->has_state ? &thr_args->state : NULL;
int use_event_queue = gbl_args->appl.out_mode;
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
num_pktio = thr_args->num_pktio;
dst_idx = thr_args->pktio[pktio].tx_idx;
pktin = thr_args->pktio[pktio].pktin;
pktout = thr_args->pktio[pktio].pktout;
tx_queue = thr_args->pktio[pktio].tx_queue;
printf("[%02i] num pktios %i, PKTIN_DIRECT, %s\n", thr, num_pktio,
(use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
while (!odp_atomic_load_u32(&gbl_args->exit_threads)) {
if (num_pktio > 1) {
dst_idx = thr_args->pktio[pktio].tx_idx;
pktin = thr_args->pktio[pktio].pktin;
pktout = thr_args->pktio[pktio].pktout;
tx_queue = thr_args->pktio[pktio].tx_queue;
pktio++;
if (pktio == num_pktio)
pktio = 0;
}
pkts = odp_pktin_recv(pktin, pkt_tbl, max_burst);
if (odp_unlikely(pkts <= 0))
continue;
prefetch_data(appl_args->prefetch, pkt_tbl, pkts);
pkts = process_extra_features(appl_args, pkt_tbl, pkts, stats);
if (odp_unlikely(pkts == 0))
continue;
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
send_packets(pkt_tbl, pkts, use_event_queue, dst_idx, tx_queue, pktout, state,
stats);
}
/* Make sure that latest stat writes are visible to other threads */
odp_mb_full();
return 0;
}
static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_pool_t vec_pool,
odp_pktio_capability_t pktio_capa)
{
uint64_t vec_tmo_ns;
uint32_t vec_size;
pktin_param->vector.enable = true;
pktin_param->vector.pool = vec_pool;
if (gbl_args->appl.vec_size == 0)
vec_size = DEFAULT_VEC_SIZE;
else
vec_size = gbl_args->appl.vec_size;
if (vec_size > pktio_capa.vector.max_size || vec_size < pktio_capa.vector.min_size) {
if (gbl_args->appl.vec_size == 0) {
vec_size = (vec_size > pktio_capa.vector.max_size) ?
pktio_capa.vector.max_size : pktio_capa.vector.min_size;
printf("\nWarning: Modified vector size to %u\n\n", vec_size);
} else {
ODPH_ERR("Invalid pktio vector size %u, valid range [%u, %u]\n",
vec_size, pktio_capa.vector.min_size, pktio_capa.vector.max_size);
return -1;
}
}
pktin_param->vector.max_size = vec_size;
if (gbl_args->appl.vec_tmo_ns == 0)
vec_tmo_ns = DEFAULT_VEC_TMO;
else
vec_tmo_ns = gbl_args->appl.vec_tmo_ns;
if (vec_tmo_ns > pktio_capa.vector.max_tmo_ns || vec_tmo_ns < pktio_capa.vector.min_tmo_ns) {
if (gbl_args->appl.vec_tmo_ns == 0) {
vec_tmo_ns = (vec_tmo_ns > pktio_capa.vector.max_tmo_ns) ?
pktio_capa.vector.max_tmo_ns : pktio_capa.vector.min_tmo_ns;
printf("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
} else {
ODPH_ERR("Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
", %" PRIu64 "]\n", vec_tmo_ns,
pktio_capa.vector.min_tmo_ns, pktio_capa.vector.max_tmo_ns);
return -1;
}
}
pktin_param->vector.max_tmo_ns = vec_tmo_ns;
return 0;
}
static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_pool_t pool,
odp_pool_t vec_pool, odp_schedule_group_t group)
{
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
odp_schedule_sync_t sync_mode;
odp_pktio_capability_t pktio_capa;
odp_pktio_config_t config;
odp_pktin_queue_param_t pktin_param;
odp_pktout_queue_param_t pktout_param;
odp_pktio_op_mode_t mode_rx;
odp_pktio_op_mode_t mode_tx;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
odp_pktio_info_t info;
uint8_t *addr;
odp_pktio_param_init(&pktio_param);
if (in_mode == PLAIN_QUEUE)
pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
else if (in_mode != DIRECT_RECV) /* pktin_mode SCHED_* */
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
if (gbl_args->appl.out_mode != PKTOUT_DIRECT)
pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
if (num_rx == 0)
pktio_param.in_mode = ODP_PKTIN_MODE_DISABLED;
if (num_tx == 0)
pktio_param.out_mode = ODP_PKTOUT_MODE_DISABLED;
/* Open a packet IO instance */
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
ODPH_ERR("Pktio open failed: %s\n", dev);
return -1;
}
if (odp_pktio_info(pktio, &info)) {
ODPH_ERR("Pktio info failed: %s\n", dev);
return -1;
}
if (odp_pktio_capability(pktio, &pktio_capa)) {
ODPH_ERR("Pktio capability query failed: %s\n", dev);
return -1;
}
odp_pktio_config_init(&config);
if (gbl_args->appl.input_ts) {
if (!pktio_capa.config.pktin.bit.ts_all) {
ODPH_ERR("Packet input timestamping not supported: %s\n", dev);
return -1;
}
config.pktin.bit.ts_all = 1;
}
config.parser.layer = ODP_PROTO_LAYER_NONE;
if (gbl_args->appl.error_check || gbl_args->appl.chksum)
config.parser.layer = ODP_PROTO_LAYER_ALL;
if (gbl_args->appl.chksum) {
config.pktout.bit.ipv4_chksum_ena = 1;
config.pktout.bit.udp_chksum_ena = 1;
config.pktout.bit.tcp_chksum_ena = 1;
}
if (gbl_args->appl.tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED) {
if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT &&
!pktio_capa.tx_compl.mode_event) {
ODPH_ERR("Transmit event completion not supported: %s\n", dev);
return -1;
}
if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_POLL &&
!pktio_capa.tx_compl.mode_poll) {
ODPH_ERR("Transmit poll completion not supported: %s\n", dev);
return -1;
}
if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT)
config.tx_compl.mode_event = 1;
if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_POLL) {
config.tx_compl.mode_poll = 1;
config.tx_compl.max_compl_id = gbl_args->appl.tx_compl.tot_compl_id;
}
}
if (gbl_args->appl.pause_rx) {
if (!pktio_capa.flow_control.pause_rx) {
ODPH_ERR("Reception of pause frames not supported: %s\n", dev);
return -1;
}
config.flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
}
if (gbl_args->appl.pause_tx) {
if (!pktio_capa.flow_control.pause_tx) {
ODPH_ERR("Transmission of pause frames not supported: %s\n", dev);
return -1;
}
config.flow_control.pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
}
if (odp_pktio_config(pktio, &config)) {
ODPH_ERR("Pktio config failed: %s\n", dev);
return -1;
}
if (gbl_args->appl.promisc_mode && odp_pktio_promisc_mode(pktio) != 1) {
if (!pktio_capa.set_op.op.promisc_mode) {
ODPH_ERR("Promisc mode set not supported: %s\n", dev);
return -1;
}
/* Enable promisc mode */
if (odp_pktio_promisc_mode_set(pktio, true)) {
ODPH_ERR("Promisc mode enable failed: %s\n", dev);
return -1;
}
}
if (gbl_args->appl.mtu) {
uint32_t maxlen_input = pktio_capa.maxlen.max_input ? gbl_args->appl.mtu : 0;
uint32_t maxlen_output = pktio_capa.maxlen.max_output ? gbl_args->appl.mtu : 0;
if (!pktio_capa.set_op.op.maxlen) {
ODPH_ERR("Modifying interface MTU not supported: %s\n", dev);
return -1;
}
if (maxlen_input &&
(maxlen_input < pktio_capa.maxlen.min_input ||
maxlen_input > pktio_capa.maxlen.max_input)) {
ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_input, dev,
pktio_capa.maxlen.min_input, pktio_capa.maxlen.max_input);
return -1;
}
if (maxlen_output &&
(maxlen_output < pktio_capa.maxlen.min_output ||
maxlen_output > pktio_capa.maxlen.max_output)) {
ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_output, dev,
pktio_capa.maxlen.min_output, pktio_capa.maxlen.max_output);
return -1;
}
if (odp_pktio_maxlen_set(pktio, maxlen_input, maxlen_output)) {
ODPH_ERR("Setting MTU failed: %s\n", dev);
return -1;
}
}
odp_pktin_queue_param_init(&pktin_param);
odp_pktout_queue_param_init(&pktout_param);
/* By default use a queue per worker. Sched mode ignores rx side settings. */
mode_rx = ODP_PKTIO_OP_MT_UNSAFE;
mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
if (gbl_args->appl.sched_mode) {
odp_schedule_prio_t prio;
if (gbl_args->appl.num_prio) {
prio = gbl_args->appl.prio[idx];
} else {
prio = odp_schedule_default_prio();
gbl_args->appl.prio[idx] = prio;
}
if (gbl_args->appl.in_mode == SCHED_ATOMIC)
sync_mode = ODP_SCHED_SYNC_ATOMIC;
else if (gbl_args->appl.in_mode == SCHED_ORDERED)
sync_mode = ODP_SCHED_SYNC_ORDERED;
else
sync_mode = ODP_SCHED_SYNC_PARALLEL;
pktin_param.queue_param.sched.prio = prio;
pktin_param.queue_param.sched.sync = sync_mode;
pktin_param.queue_param.sched.group = group;
if (gbl_args->appl.tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT) {
odp_queue_param_t qparam;
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = prio;
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = group;
gbl_args->pktios[idx].compl_q = odp_queue_create(NULL, &qparam);
if (gbl_args->pktios[idx].compl_q == ODP_QUEUE_INVALID) {
ODPH_ERR("Creating completion queue failed: %s\n", dev);
return -1;
}
}
}
if (num_rx > (int)pktio_capa.max_input_queues) {
num_rx = pktio_capa.max_input_queues;
mode_rx = ODP_PKTIO_OP_MT;
printf("Warning: %s: maximum number of input queues: %i\n", dev, num_rx);
}
if (num_rx < gbl_args->appl.num_workers) {
mode_rx = ODP_PKTIO_OP_MT;
printf("Warning: %s: sharing %i input queues between %i workers\n",
dev, num_rx, gbl_args->appl.num_workers);
}
if (num_tx > (int)pktio_capa.max_output_queues) {
num_tx = pktio_capa.max_output_queues;
mode_tx = ODP_PKTIO_OP_MT;
printf("Warning: %s: sharing %i output queues between %i workers\n",
dev, num_tx, gbl_args->appl.num_workers);
}
pktin_param.hash_enable = (num_rx > 1 || gbl_args->appl.flow_aware) ? 1 : 0;
pktin_param.hash_proto.proto.ipv4_udp = 1;
pktin_param.num_queues = num_rx;
pktin_param.op_mode = mode_rx;
pktout_param.op_mode = mode_tx;
pktout_param.num_queues = num_tx;
if (gbl_args->appl.vector_mode) {
if (!pktio_capa.vector.supported) {
ODPH_ERR("Packet vector input not supported: %s\n", dev);
return -1;
}
if (set_pktin_vector_params(&pktin_param, vec_pool, pktio_capa))
return -1;
}
if (odp_pktin_queue_config(pktio, &pktin_param)) {
ODPH_ERR("Input queue config failed: %s\n", dev);
return -1;
}
if (odp_pktout_queue_config(pktio, &pktout_param)) {
ODPH_ERR("Output queue config failed: %s\n", dev);
return -1;
}
if (num_rx > 0) {
if (gbl_args->appl.in_mode == DIRECT_RECV) {
if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx)
!= num_rx) {
ODPH_ERR("Pktin queue query failed: %s\n", dev);
return -1;
}
} else {
if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx)
!= num_rx) {
ODPH_ERR("Pktin event queue query failed: %s\n", dev);
return -1;
}
}
}
if (num_tx > 0) {
if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx)
!= num_tx) {
ODPH_ERR("Pktout queue query failed: %s\n", dev);
return -1;
}
} else {
if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx)
!= num_tx) {
ODPH_ERR("Event queue query failed: %s\n", dev);
return -1;
}
}
}
if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[idx].addr,
ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
ODPH_ERR("Reading interface Ethernet address failed: %s\n", dev);
return -1;
}
addr = gbl_args->port_eth_addr[idx].addr;
printf(" dev: %s, drv: %s, rx_queues: %i, tx_queues: %i, mac: "
"%02x:%02x:%02x:%02x:%02x:%02x\n", dev, info.drv_name, num_rx, num_tx,
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
if (gbl_args->appl.verbose)
odp_pktio_print(pktio);
gbl_args->pktios[idx].num_rx_queue = num_rx;
gbl_args->pktios[idx].num_tx_queue = num_tx;
gbl_args->pktios[idx].pktio = pktio;
return 0;
}
static int print_speed_stats(int num_workers, stats_t **thr_stats,
int duration, int timeout)
{
uint64_t pkts = 0;
uint64_t pkts_prev = 0;
uint64_t pps;
uint64_t rx_drops, tx_drops, tx_c_misses, tx_c_fails, copy_fails;
uint64_t maximum_pps = 0;
int i;
int elapsed = 0;
int stats_enabled = 1;
int loop_forever = (duration == 0);
if (timeout <= 0) {
stats_enabled = 0;
timeout = 1;
}
do {
pkts = 0;
rx_drops = 0;
tx_drops = 0;
tx_c_misses = 0;
tx_c_fails = 0;
copy_fails = 0;
sleep(timeout);
for (i = 0; i < num_workers; i++) {
pkts += thr_stats[i]->s.packets;
rx_drops += thr_stats[i]->s.rx_drops;
tx_drops += thr_stats[i]->s.tx_drops;
tx_c_misses += thr_stats[i]->s.tx_c_misses;
tx_c_fails += thr_stats[i]->s.tx_c_fails;
copy_fails += thr_stats[i]->s.copy_fails;
}
if (stats_enabled) {
pps = (pkts - pkts_prev) / timeout;
if (pps > maximum_pps)
maximum_pps = pps;
printf("%" PRIu64 " pps, %" PRIu64 " max pps, ", pps,
maximum_pps);
if (gbl_args->appl.packet_copy)
printf("%" PRIu64 " copy fails, ", copy_fails);
printf("%" PRIu64 " tx compl misses, %" PRIu64 " tx compl fails, ",
tx_c_misses, tx_c_fails);
printf("%" PRIu64 " rx drops, %" PRIu64 " tx drops\n",
rx_drops, tx_drops);
pkts_prev = pkts;
}
elapsed += timeout;
} while (!odp_atomic_load_u32(&gbl_args->exit_threads) &&
(loop_forever || (elapsed < duration)));
if (stats_enabled)
printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
maximum_pps);
return pkts > 100 ? 0 : -1;
}
static void print_port_mapping(void)
{
int if_count;
int pktio;
if_count = gbl_args->appl.if_count;
printf("\nPort config\n--------------------\n");
for (pktio = 0; pktio < if_count; pktio++) {
const char *dev = gbl_args->appl.if_names[pktio];
printf("Port %i (%s)\n", pktio, dev);
printf(" rx workers %i\n",
gbl_args->pktios[pktio].num_rx_thr);
printf(" tx workers %i\n",
gbl_args->pktios[pktio].num_tx_thr);
printf(" rx queues %i\n",
gbl_args->pktios[pktio].num_rx_queue);
printf(" tx queues %i\n",
gbl_args->pktios[pktio].num_tx_queue);
}
printf("\n");
}
static int find_dest_port(int port)
{
const char *output = gbl_args->appl.output_map[port];
if (output != NULL)
for (int i = 0; i < gbl_args->appl.if_count; i++)
if (strcmp(output, gbl_args->appl.if_names[i]) == 0)
return i;
if (gbl_args->appl.if_count % 2 == 0)
return (port % 2 == 0) ? port + 1 : port - 1;
if (port == gbl_args->appl.if_count - 1)
return 0;
else
return port + 1;
}
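/* Example of the default mapping above: with an even interface count such as
 * "-i eth0,eth1,eth2,eth3", ports pair up (eth0 with eth1, eth2 with eth3).
 * With an odd count, port i forwards to port i + 1 and the last port wraps
 * around to port 0. An explicit '-O' output map overrides this. */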
static void bind_workers(void)
{
int if_count, num_workers;
int rx_idx, tx_idx, thr, pktio, i;
thread_args_t *thr_args;
if_count = gbl_args->appl.if_count;
num_workers = gbl_args->appl.num_workers;
if (gbl_args->appl.sched_mode) {
for (i = 0; i < if_count; i++) {
gbl_args->pktios[i].num_rx_thr = num_workers;
gbl_args->pktios[i].num_tx_thr = num_workers;
}
for (thr = 0; thr < num_workers; thr++) {
thr_args = &gbl_args->thread_args[thr];
thr_args->num_pktio = if_count;
for (i = 0; i < if_count; i++) {
thr_args->pktio[i].rx_idx = i;
thr_args->pktio[i].tx_idx = i;
}
}
} else {
for (rx_idx = 0; rx_idx < if_count; rx_idx++)
gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx);
if (if_count > num_workers) {
thr = 0;
for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
thr_args = &gbl_args->thread_args[thr];
pktio = thr_args->num_pktio;
tx_idx = gbl_args->dst_port[rx_idx];
thr_args->pktio[pktio].rx_idx = rx_idx;
thr_args->pktio[pktio].tx_idx = tx_idx;
thr_args->num_pktio++;
gbl_args->pktios[rx_idx].num_rx_thr++;
gbl_args->pktios[tx_idx].num_tx_thr++;
thr++;
if (thr >= num_workers)
thr = 0;
}
} else {
rx_idx = 0;
for (thr = 0; thr < num_workers; thr++) {
thr_args = &gbl_args->thread_args[thr];
pktio = thr_args->num_pktio;
tx_idx = gbl_args->dst_port[rx_idx];
thr_args->pktio[pktio].rx_idx = rx_idx;
thr_args->pktio[pktio].tx_idx = tx_idx;
thr_args->num_pktio++;
gbl_args->pktios[rx_idx].num_rx_thr++;
gbl_args->pktios[tx_idx].num_tx_thr++;
rx_idx++;
if (rx_idx >= if_count)
rx_idx = 0;
}
}
}
}
static void bind_queues(void)
{
int num_workers;
int thr, i;
num_workers = gbl_args->appl.num_workers;
printf("\nQueue binding (indexes)\n-----------------------\n");
for (thr = 0; thr < num_workers; thr++) {
int rx_idx, tx_idx;
thread_args_t *thr_args = &gbl_args->thread_args[thr];
int num = thr_args->num_pktio;
printf("worker %i\n", thr);
for (i = 0; i < num; i++) {
int rx_queue, tx_queue;
rx_idx = thr_args->pktio[i].rx_idx;
tx_idx = thr_args->pktio[i].tx_idx;
rx_queue = gbl_args->pktios[rx_idx].next_rx_queue;
tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;
thr_args->pktio[i].rx_queue_idx = rx_queue;
thr_args->pktio[i].tx_queue_idx = tx_queue;
thr_args->pktio[i].pktin =
gbl_args->pktios[rx_idx].pktin[rx_queue];
thr_args->pktio[i].rx_queue =
gbl_args->pktios[rx_idx].rx_q[rx_queue];
thr_args->pktio[i].pktout =
gbl_args->pktios[tx_idx].pktout[tx_queue];
thr_args->pktio[i].tx_queue =
gbl_args->pktios[tx_idx].tx_q[tx_queue];
if (!gbl_args->appl.sched_mode)
printf(" rx: pktio %i, queue %i\n",
rx_idx, rx_queue);
printf(" tx: pktio %i, queue %i\n",
tx_idx, tx_queue);
rx_queue++;
tx_queue++;
if (rx_queue >= gbl_args->pktios[rx_idx].num_rx_queue)
rx_queue = 0;
if (tx_queue >= gbl_args->pktios[tx_idx].num_tx_queue)
tx_queue = 0;
gbl_args->pktios[rx_idx].next_rx_queue = rx_queue;
gbl_args->pktios[tx_idx].next_tx_queue = tx_queue;
}
}
printf("\n");
}
static void init_state(const appl_args_t *args, state_t *state, int thr_idx)
{
const uint32_t cnt = args->tx_compl.thr_compl_id + 1;
state->tx_compl.opt.mode = args->tx_compl.mode;
state->tx_compl.init = thr_idx * cnt;
state->tx_compl.max = state->tx_compl.init + cnt - 1;
state->tx_compl.free_head = state->tx_compl.init;
state->tx_compl.poll_head = state->tx_compl.init;
state->tx_compl.num_act = 0;
state->tx_compl.max_act = state->tx_compl.max - state->tx_compl.init + 1;
state->tx_compl.interval = args->tx_compl.nth;
state->tx_compl.next_req = state->tx_compl.interval;
}
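/* Worked example of the ID partitioning above, assuming the hypothetical
 * option "-C 1,5,9" (poll mode, request on every 5th packet, per-thread
 * max completion ID 9): cnt is 10, so worker 0 owns completion IDs 0-9,
 * worker 1 owns 10-19, and so on. Each worker starts with free_head and
 * poll_head at init, num_act 0, and its first request on packet 5. */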
static void init_port_lookup_tbl(void)
{
int rx_idx, if_count;
if_count = gbl_args->appl.if_count;
for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
int dst_port = find_dest_port(rx_idx);
int pktio_idx = odp_pktio_index(gbl_args->pktios[rx_idx].pktio);
if (pktio_idx < 0) {
ODPH_ERR("Reading pktio (%s) index failed: %i\n",
gbl_args->appl.if_names[rx_idx], pktio_idx);
ODPH_ERR("Reading pktio (%s) index failed: %i\n",
gbl_args->appl.if_names[rx_idx], pktio_idx);
exit(EXIT_FAILURE);
}
gbl_args->dst_port_from_idx[pktio_idx] = dst_port;
}
}
static void usage(char *progname)
{
printf("\n"
"OpenDataPlane L2 forwarding application.\n"
"\n"
"Usage: %s [options]\n"
"\n"
" E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
" In the above example,\n"
" eth0 will send pkts to eth1 and vice versa\n"
" eth2 will send pkts to eth3 and vice versa\n"
"\n"
"Mandatory OPTIONS:\n"
" -i, --interface <name> Eth interfaces (comma-separated, no spaces)\n"
" Interface count min 1, max %i\n"
"\n"
"Optional OPTIONS:\n"
" -m, --mode <arg> Packet input mode\n"
" 0: Direct mode: PKTIN_MODE_DIRECT (default)\n"
" 1: Scheduler mode with parallel queues:\n"
" PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n"
" 2: Scheduler mode with atomic queues:\n"
" PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n"
" 3: Scheduler mode with ordered queues:\n"
" PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n"
" 4: Plain queue mode: PKTIN_MODE_QUEUE\n"
" -o, --out_mode <arg> Packet output mode\n"
" 0: Direct mode: PKTOUT_MODE_DIRECT (default)\n"
" 1: Queue mode: PKTOUT_MODE_QUEUE\n"
" -O, --output_map <list> List of destination ports for passed interfaces\n"
" (comma-separated, no spaces). Ordering follows\n"
" the '--interface' option, e.g. passing\n"
" '-i eth0,eth1' and '-O eth0,eth1' would result\n"
" in eth0 and eth1 looping packets back.\n"
" -c, --count <num> CPU count, 0=all available, default=1\n"
" -t, --time <sec> Time in seconds to run.\n"
" -a, --accuracy <sec> Time in seconds get print statistics\n"
" (default is 1 second).\n"
" -d, --dst_change <arg> 0: Don't change packets' dst eth addresses\n"
" 1: Change packets' dst eth addresses (default)\n"
" -s, --src_change <arg> 0: Don't change packets' src eth addresses\n"
" 1: Change packets' src eth addresses (default)\n"
" -r, --dst_addr <addr> Destination addresses (comma-separated, no\n"
" spaces) Requires also the -d flag to be set\n"
" -e, --error_check <arg> 0: Don't check packet errors (default)\n"
" 1: Check packet errors\n"
" -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
" 1: Use checksum offload\n",
NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS);
printf(" -g, --groups <num> Number of new groups to create (1 ... num).\n"
" Interfaces are placed into the groups in round\n"
" robin.\n"
" 0: Use SCHED_GROUP_ALL (default)\n"
" -1: Use SCHED_GROUP_WORKER\n"
" -G, --group_mode <arg> Select how threads join new groups\n"
" (when -g > 0)\n"
" 0: All threads join all created groups\n"
" (default)\n"
" 1: All threads join first N created groups.\n"
" N is number of interfaces (== active\n"
" groups).\n"
" 2: Each thread joins a part of the first N\n"
" groups (in round robin).\n"
" -I, --prio <prio list> Schedule priority of packet input queues.\n"
" Comma separated list of priorities (no spaces).\n"
" A value per interface. All queues of an\n"
" interface have the same priority. Values must\n"
" be between odp_schedule_min_prio and\n"
" odp_schedule_max_prio.\n"
" odp_schedule_default_prio is used by default.\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
" -q, --rx_queues <num> Number of RX queues per interface in scheduler\n"
" mode\n"
" 0: RX queue per worker CPU (default)\n"
" -p, --packet_copy 0: Don't copy packet (default)\n"
" 1: Create and send copy of the received packet.\n"
" Free the original packet.\n"
" -R, --data_rd <num> Number of packet data words (uint64_t) to read\n"
" from every received packet. Number of words is\n"
" rounded down to fit into the first segment of a\n"
" packet. Default is 0.\n"
" -y, --pool_per_if Create a packet (and packet vector) pool per\n"
" interface.\n"
" 0: Share a single pool between all interfaces\n"
" (default)\n"
" 1: Create a pool per interface\n"
" -n, --num_pkt <num> Number of packets per pool. Default is 16k or\n"
" the maximum capability. Use 0 for the default.\n"
" -u, --vector_mode Enable vector mode.\n"
" Supported only with scheduler packet input\n"
" modes (1-3).\n"
" -w, --num_vec <num> Number of vectors per pool.\n"
" Default is num_pkts divided by vec_size.\n"
" -x, --vec_size <num> Vector size (default %i).\n"
" -z, --vec_tmo_ns <ns> Vector timeout in ns (default %llu ns).\n"
" -M, --mtu <len> Interface MTU in bytes.\n"
" -P, --promisc_mode Enable promiscuous mode.\n"
" -l, --packet_len <len> Maximum length of packets supported\n"
" (default %d).\n"
" -L, --seg_len <len> Packet pool segment length\n"
" (default equal to packet length).\n"
" -F, --prefetch <num> Prefetch packet data in 64 byte multiples\n"
" (default 1).\n"
" -f, --flow_aware Enable flow aware scheduling.\n"
" -T, --input_ts Enable packet input timestamping.\n",
DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
printf(" -C, --tx_compl <mode,n,max_id> Enable transmit completion with a specified\n"
" completion mode for nth packet, with maximum\n"
" completion ID per worker thread in case of poll\n"
" completion (comma-separated, no spaces).\n"
" 0: Event completion mode\n"
" 1: Poll completion mode\n"
" -X, --flow_control <mode> Ethernet flow control mode.\n"
" 0: Flow control disabled (default)\n"
" 1: Enable reception of pause frames\n"
" 2: Enable transmission of pause frames\n"
" 3: Enable reception and transmission of pause\n"
" frames\n"
" -v, --verbose Verbose output.\n"
" -V, --verbose_pkt Print debug information on every received\n"
" packet.\n"
" -h, --help Display help and exit.\n\n"
"\n");
}
static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{
int opt;
char *token;
char *tmp_str, *tmp;
size_t str_len, len;
int i;
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
{"time", required_argument, NULL, 't'},
{"accuracy", required_argument, NULL, 'a'},
{"interface", required_argument, NULL, 'i'},
{"mode", required_argument, NULL, 'm'},
{"out_mode", required_argument, NULL, 'o'},
{"output_map", required_argument, NULL, 'O'},
{"dst_addr", required_argument, NULL, 'r'},
{"dst_change", required_argument, NULL, 'd'},
{"src_change", required_argument, NULL, 's'},
{"error_check", required_argument, NULL, 'e'},
{"chksum", required_argument, NULL, 'k'},
{"groups", required_argument, NULL, 'g'},
{"group_mode", required_argument, NULL, 'G'},
{"prio", required_argument, NULL, 'I'},
{"burst_rx", required_argument, NULL, 'b'},
{"rx_queues", required_argument, NULL, 'q'},
{"packet_copy", required_argument, NULL, 'p'},
{"data_rd", required_argument, NULL, 'R'},
{"pool_per_if", required_argument, NULL, 'y'},
{"num_pkt", required_argument, NULL, 'n'},
{"num_vec", required_argument, NULL, 'w'},
{"vec_size", required_argument, NULL, 'x'},
{"vec_tmo_ns", required_argument, NULL, 'z'},
{"vector_mode", no_argument, NULL, 'u'},
{"mtu", required_argument, NULL, 'M'},
{"promisc_mode", no_argument, NULL, 'P'},
{"packet_len", required_argument, NULL, 'l'},
{"seg_len", required_argument, NULL, 'L'},
{"prefetch", required_argument, NULL, 'F'},
{"flow_aware", no_argument, NULL, 'f'},
{"input_ts", no_argument, NULL, 'T'},
{"tx_compl", required_argument, NULL, 'C'},
{"flow_control", required_argument, NULL, 'X'},
{"verbose", no_argument, NULL, 'v'},
{"verbose_pkt", no_argument, NULL, 'V'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
static const char *shortopts = "+c:t:a:i:m:o:O:r:d:s:e:k:g:G:I:"
"b:q:p:R:y:n:l:L:w:x:X:z:M:F:uPfTC:vVh";
appl_args->time = 0;
appl_args->accuracy = 1;
appl_args->cpu_count = 1;
appl_args->dst_change = 1;
appl_args->src_change = 1;
appl_args->num_groups = 0;
appl_args->group_mode = 0;
appl_args->error_check = 0;
appl_args->packet_copy = 0;
appl_args->burst_rx = 0;
appl_args->rx_queues = 0;
appl_args->verbose = 0;
appl_args->verbose_pkt = 0;
appl_args->chksum = 0;
appl_args->pool_per_if = 0;
appl_args->num_pkt = 0;
appl_args->packet_len = POOL_PKT_LEN;
appl_args->seg_len = UINT32_MAX;
appl_args->mtu = 0;
appl_args->promisc_mode = 0;
appl_args->vector_mode = 0;
appl_args->num_vec = 0;
appl_args->vec_size = 0;
appl_args->vec_tmo_ns = 0;
appl_args->flow_aware = 0;
appl_args->input_ts = 0;
appl_args->num_prio = 0;
appl_args->prefetch = 1;
appl_args->data_rd = 0;
appl_args->flow_control = 0;
appl_args->tx_compl.mode = ODP_PACKET_TX_COMPL_DISABLED;
appl_args->tx_compl.nth = 0;
appl_args->tx_compl.thr_compl_id = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, NULL);
if (opt == -1)
break;
switch (opt) {
case 'c':
appl_args->cpu_count = atoi(optarg);
break;
case 't':
appl_args->time = atoi(optarg);
break;
case 'a':
appl_args->accuracy = atoi(optarg);
break;
case 'r':
len = strlen(optarg);
if (len == 0) {
ODPH_ERR("Bad dest address string\n");
exit(EXIT_FAILURE);
}
str_len = len + 1;
tmp_str = malloc(str_len);
if (tmp_str == NULL) {
ODPH_ERR("Dest address malloc() failed\n");
exit(EXIT_FAILURE);
}
memcpy(tmp_str, optarg, str_len);
for (token = strtok(tmp_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
if (i >= MAX_PKTIOS) {
ODPH_ERR("Too many MAC addresses\n");
exit(EXIT_FAILURE);
}
if (odph_eth_addr_parse(&appl_args->addrs[i], token) != 0) {
ODPH_ERR("Invalid MAC address\n");
exit(EXIT_FAILURE);
}
}
appl_args->addr_count = i;
if (appl_args->addr_count < 1) {
ODPH_ERR("Bad dest address count\n");
exit(EXIT_FAILURE);
}
free(tmp_str);
break;
case 'i':
len = strlen(optarg);
if (len == 0) {
ODPH_ERR("Bad pktio interface string\n");
exit(EXIT_FAILURE);
}
str_len = len + 1;
appl_args->if_str = malloc(str_len);
if (appl_args->if_str == NULL) {
ODPH_ERR("Pktio interface malloc() failed\n");
exit(EXIT_FAILURE);
}
memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL;
token = strtok(NULL, ","), i++)
;
appl_args->if_count = i;
if (appl_args->if_count < 1 || appl_args->if_count > MAX_PKTIOS) {
ODPH_ERR("Bad pktio interface count: %i\n", appl_args->if_count);
exit(EXIT_FAILURE);
}
appl_args->if_names = calloc(appl_args->if_count, sizeof(char *));
memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
appl_args->if_names[i] = token;
}
break;
case 'm':
i = atoi(optarg);
if (i == 1)
appl_args->in_mode = SCHED_PARALLEL;
else if (i == 2)
appl_args->in_mode = SCHED_ATOMIC;
else if (i == 3)
appl_args->in_mode = SCHED_ORDERED;
else if (i == 4)
appl_args->in_mode = PLAIN_QUEUE;
else
appl_args->in_mode = DIRECT_RECV;
break;
case 'o':
i = atoi(optarg);
if (i != 0)
appl_args->out_mode = PKTOUT_QUEUE;
break;
case 'O':
if (strlen(optarg) == 0) {
ODPH_ERR("Bad output map string\n");
exit(EXIT_FAILURE);
}
tmp_str = strdup(optarg);
if (tmp_str == NULL) {
ODPH_ERR("Output map string duplication failed\n");
exit(EXIT_FAILURE);
}
token = strtok(tmp_str, ",");
while (token) {
if (appl_args->num_om >= MAX_PKTIOS) {
ODPH_ERR("Bad output map element count\n");
exit(EXIT_FAILURE);
}
appl_args->output_map[appl_args->num_om] = strdup(token);
if (appl_args->output_map[appl_args->num_om] == NULL) {
ODPH_ERR("Output map element duplication failed\n");
exit(EXIT_FAILURE);
}
appl_args->num_om++;
token = strtok(NULL, ",");
}
free(tmp_str);
break;
case 'd':
appl_args->dst_change = atoi(optarg);
break;
case 's':
appl_args->src_change = atoi(optarg);
break;
case 'e':
appl_args->error_check = atoi(optarg);
break;
case 'k':
appl_args->chksum = atoi(optarg);
break;
case 'g':
appl_args->num_groups = atoi(optarg);
break;
case 'G':
appl_args->group_mode = atoi(optarg);
break;
case 'I':
len = strlen(optarg);
if (len == 0) {
ODPH_ERR("Bad priority list\n");
exit(EXIT_FAILURE);
}
str_len = len + 1;
tmp_str = malloc(str_len);
if (tmp_str == NULL) {
ODPH_ERR("Priority list malloc() failed\n");
exit(EXIT_FAILURE);
}
memcpy(tmp_str, optarg, str_len);
token = strtok(tmp_str, ",");
for (i = 0; token != NULL; token = strtok(NULL, ","), i++) {
if (i >= MAX_PKTIOS) {
ODPH_ERR("Too many priorities\n");
exit(EXIT_FAILURE);
}
appl_args->prio[i] = atoi(token);
appl_args->num_prio++;
}
if (appl_args->num_prio == 0) {
ODPH_ERR("Bad priority list\n");
exit(EXIT_FAILURE);
}
free(tmp_str);
break;
case 'b':
appl_args->burst_rx = atoi(optarg);
break;
case 'q':
appl_args->rx_queues = atoi(optarg);
break;
case 'p':
appl_args->packet_copy = atoi(optarg);
break;
case 'R':
appl_args->data_rd = atoi(optarg);
break;
case 'y':
appl_args->pool_per_if = atoi(optarg);
break;
case 'n':
appl_args->num_pkt = atoi(optarg);
break;
case 'l':
appl_args->packet_len = atoi(optarg);
break;
case 'L':
appl_args->seg_len = atoi(optarg);
break;
case 'M':
appl_args->mtu = atoi(optarg);
break;
case 'P':
appl_args->promisc_mode = 1;
break;
case 'u':
appl_args->vector_mode = 1;
break;
case 'w':
appl_args->num_vec = atoi(optarg);
break;
case 'x':
appl_args->vec_size = atoi(optarg);
break;
case 'X':
appl_args->flow_control = atoi(optarg);
if (appl_args->flow_control == 1 || appl_args->flow_control == 3)
appl_args->pause_rx = true;
if (appl_args->flow_control == 2 || appl_args->flow_control == 3)
appl_args->pause_tx = true;
break;
case 'z':
appl_args->vec_tmo_ns = atoi(optarg);
break;
case 'F':
appl_args->prefetch = atoi(optarg);
break;
case 'f':
appl_args->flow_aware = 1;
break;
case 'T':
appl_args->input_ts = 1;
break;
case 'C':
if (strlen(optarg) == 0) {
ODPH_ERR("Bad transmit completion parameter string\n");
exit(EXIT_FAILURE);
}
tmp_str = strdup(optarg);
if (tmp_str == NULL) {
ODPH_ERR("Transmit completion parameter string duplication"
" failed\n");
exit(EXIT_FAILURE);
}
tmp = strtok(tmp_str, ",");
if (tmp == NULL) {
ODPH_ERR("Invalid transmit completion parameter format\n");
exit(EXIT_FAILURE);
}
i = atoi(tmp);
if (i == 0)
appl_args->tx_compl.mode = ODP_PACKET_TX_COMPL_EVENT;
else if (i == 1)
appl_args->tx_compl.mode = ODP_PACKET_TX_COMPL_POLL;
tmp = strtok(NULL, ",");
if (tmp == NULL) {
ODPH_ERR("Invalid transmit completion parameter format\n");
exit(EXIT_FAILURE);
}
appl_args->tx_compl.nth = atoi(tmp);
if (appl_args->tx_compl.mode == ODP_PACKET_TX_COMPL_POLL) {
tmp = strtok(NULL, ",");
if (tmp == NULL) {
ODPH_ERR("Invalid transmit completion parameter format\n");
exit(EXIT_FAILURE);
}
appl_args->tx_compl.thr_compl_id = atoi(tmp);
}
free(tmp_str);
break;
case 'v':
appl_args->verbose = 1;
break;
case 'V':
appl_args->verbose_pkt = 1;
break;
case 'h':
usage(argv[0]);
exit(EXIT_SUCCESS);
break;
default:
break;
}
}
if (appl_args->if_count == 0) {
ODPH_ERR("No pktio interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->num_om && appl_args->num_om != appl_args->if_count) {
ODPH_ERR("Different number of output mappings and pktio interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->num_prio && appl_args->num_prio != appl_args->if_count) {
ODPH_ERR("Different number of priorities and pktio interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->addr_count != 0 && appl_args->addr_count != appl_args->if_count) {
ODPH_ERR("Number of dest addresses differs from number of interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->burst_rx > MAX_PKT_BURST) {
ODPH_ERR("Burst size (%i) too large. Maximum is %i.\n",
appl_args->burst_rx, MAX_PKT_BURST);
exit(EXIT_FAILURE);
}
if (appl_args->tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED &&
appl_args->tx_compl.nth == 0) {
ODPH_ERR("Invalid packet interval for transmit completion: %u\n",
appl_args->tx_compl.nth);
exit(EXIT_FAILURE);
}
if (appl_args->tx_compl.mode == ODP_PACKET_TX_COMPL_EVENT &&
(appl_args->in_mode == PLAIN_QUEUE || appl_args->in_mode == DIRECT_RECV)) {
ODPH_ERR("Transmit event completion mode not supported with plain queue or direct "
"input modes\n");
exit(EXIT_FAILURE);
}
appl_args->tx_compl.tot_compl_id = (appl_args->tx_compl.thr_compl_id + 1) *
appl_args->cpu_count - 1;
if (appl_args->burst_rx == 0)
appl_args->burst_rx = MAX_PKT_BURST;
appl_args->extra_feat = 0;
if (appl_args->error_check || appl_args->chksum ||
appl_args->packet_copy || appl_args->data_rd || appl_args->verbose_pkt)
appl_args->extra_feat = 1;
appl_args->has_state = 0;
if (appl_args->tx_compl.mode != ODP_PACKET_TX_COMPL_DISABLED)
appl_args->has_state = 1;
optind = 1;
}
static void print_options(void)
{
int i;
appl_args_t *appl_args = &gbl_args->appl;
printf("\n"
"odp_l2fwd options\n"
"-----------------\n"
"IF-count: %i\n"
"Using IFs: ", appl_args->if_count);
for (i = 0; i < appl_args->if_count; ++i)
printf(" %s", appl_args->if_names[i]);
printf("\n"
"Mode: ");
if (appl_args->in_mode == DIRECT_RECV)
printf("PKTIN_DIRECT, ");
else if (appl_args->in_mode == PLAIN_QUEUE)
printf("PKTIN_QUEUE, ");
else if (appl_args->in_mode == SCHED_PARALLEL)
printf("PKTIN_SCHED_PARALLEL, ");
else if (appl_args->in_mode == SCHED_ATOMIC)
printf("PKTIN_SCHED_ATOMIC, ");
else if (appl_args->in_mode == SCHED_ORDERED)
printf("PKTIN_SCHED_ORDERED, ");
if (appl_args->out_mode)
printf("PKTOUT_QUEUE\n");
else
printf("PKTOUT_DIRECT\n");
if (appl_args->num_om > 0) {
printf("Output mappings: ");
for (i = 0; i < appl_args->num_om; ++i)
printf(" %s", appl_args->output_map[i]);
printf("\n");
}
printf("MTU: ");
if (appl_args->mtu)
printf("%i bytes\n", appl_args->mtu);
else
printf("interface default\n");
printf("Promisc mode: %s\n", appl_args->promisc_mode ?
"enabled" : "disabled");
if (appl_args->flow_control)
printf("Flow control: %s%s\n",
appl_args->pause_rx ? "rx " : "",
appl_args->pause_tx ? "tx" : "");
printf("Flow aware: %s\n", appl_args->flow_aware ?
"yes" : "no");
printf("Input TS: %s\n", appl_args->input_ts ? "yes" : "no");
printf("Burst size: %i\n", appl_args->burst_rx);
printf("RX queues per IF: %i\n", appl_args->rx_queues);
printf("Number of pools: %i\n", appl_args->pool_per_if ?
appl_args->if_count : 1);
if (appl_args->extra_feat || appl_args->has_state) {
printf("Extra features: %s%s%s%s%s%s\n",
appl_args->error_check ? "error_check " : "",
appl_args->chksum ? "chksum " : "",
appl_args->packet_copy ? "packet_copy " : "",
appl_args->data_rd ? "data_rd" : "",
appl_args->verbose_pkt ? "verbose_pkt" : "");
}
printf("Num worker threads: %i\n", appl_args->num_workers);
printf("CPU mask: %s\n", gbl_args->cpumaskstr);
if (appl_args->num_groups > 0)
printf("num groups: %i\n", appl_args->num_groups);
else if (appl_args->num_groups == 0)
printf("group: ODP_SCHED_GROUP_ALL\n");
else
printf("group: ODP_SCHED_GROUP_WORKER\n");
printf("Packets per pool: %u\n", appl_args->num_pkt);
printf("Packet length: %u\n", appl_args->packet_len);
printf("Segment length: %u\n", appl_args->seg_len == UINT32_MAX ? 0 :
appl_args->seg_len);
printf("Read data: %u bytes\n", appl_args->data_rd * 8);
printf("Prefetch data %u bytes\n", appl_args->prefetch * 64);
printf("Vectors per pool: %u\n", appl_args->num_vec);
printf("Vector size: %u\n", appl_args->vec_size);
printf("Priority per IF: ");
for (i = 0; i < appl_args->if_count; i++)
printf(" %i", appl_args->prio[i]);
printf("\n\n");
}
static void gbl_args_init(args_t *args)
{
int pktio, queue;
memset(args, 0, sizeof(args_t));
odp_atomic_init_u32(&args->exit_threads, 0);
for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
for (queue = 0; queue < MAX_QUEUES; queue++)
args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
args->pktios[pktio].compl_q = ODP_QUEUE_INVALID;
}
}
static void create_groups(int num, odp_schedule_group_t *group)
{
int i;
odp_thrmask_t zero;
odp_thrmask_zero(&zero);
/* Create groups */
for (i = 0; i < num; i++) {
group[i] = odp_schedule_group_create(NULL, &zero);
if (group[i] == ODP_SCHED_GROUP_INVALID) {
ODPH_ERR("Group create failed\n");
exit(EXIT_FAILURE);
}
}
}
static int set_vector_pool_params(odp_pool_param_t *params, const odp_pool_capability_t *pool_capa)
{
uint32_t num_vec, vec_size;
if (gbl_args->appl.vec_size == 0)
vec_size = DEFAULT_VEC_SIZE;
else
vec_size = gbl_args->appl.vec_size;
if (vec_size > pool_capa->vector.max_size) {
if (gbl_args->appl.vec_size == 0) {
vec_size = pool_capa->vector.max_size;
printf("\nWarning: Vector size reduced to %u\n\n", vec_size);
} else {
ODPH_ERR("Vector size too big %u. Maximum is %u.\n",
vec_size, pool_capa->vector.max_size);
return -1;
}
}
if (gbl_args->appl.num_vec == 0) {
uint32_t num_pkt = gbl_args->appl.num_pkt ?
gbl_args->appl.num_pkt : DEFAULT_NUM_PKT;
num_vec = (num_pkt + vec_size - 1) / vec_size;
} else {
num_vec = gbl_args->appl.num_vec;
}
if (pool_capa->vector.max_num && num_vec > pool_capa->vector.max_num) {
if (gbl_args->appl.num_vec == 0) {
num_vec = pool_capa->vector.max_num;
printf("\nWarning: number of vectors reduced to %u\n\n", num_vec);
} else {
ODPH_ERR("Too many vectors (%u) per pool. Maximum is %u.\n",
num_vec, pool_capa->vector.max_num);
return -1;
}
}
params->vector.num = num_vec;
params->vector.max_size = vec_size;
params->type = ODP_POOL_VECTOR;
gbl_args->vector_num = num_vec;
gbl_args->vector_max_size = vec_size;
return 0;
}
int main(int argc, char *argv[])
{
odph_helper_options_t helper_options;
odph_thread_param_t thr_param[MAX_WORKERS];
odph_thread_common_param_t thr_common;
odp_instance_t instance;
odp_init_t init;
odp_shm_t shm;
odp_cpumask_t cpumask;
odp_pool_param_t params;
odp_pool_capability_t pool_capa;
odp_schedule_capability_t sched_capa;
odp_schedule_config_t sched_config;
odp_schedule_group_t group[MAX_GROUPS];
odp_pool_t pool, vec_pool;
odp_pktio_t pktio;
odph_ethaddr_t new_addr;
int i;
int num_workers, num_thr;
int ret;
stats_t *stats[MAX_WORKERS];
int if_count, num_pools, num_vec_pools;
int (*thr_run_func)(void *);
int num_groups, max_groups;
odp_pool_t pool_tbl[MAX_PKTIOS], vec_pool_tbl[MAX_PKTIOS];
uint32_t pkt_len, num_pkt, seg_len;
argc = odph_parse_options(argc, argv);
if (odph_options(&helper_options)) {
ODPH_ERR("Reading ODP helper options failed.\n");
exit(EXIT_FAILURE);
}
if (setup_sig_handler()) {
ODPH_ERR("Signal handler setup failed\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("ODP global init failed.\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("ODP local init failed.\n");
exit(EXIT_FAILURE);
}
/* Reserve memory for args from shared mem */
shm = odp_shm_reserve("shm_args", sizeof(args_t),
ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
ODPH_ERR("Shared mem reserve failed.\n");
exit(EXIT_FAILURE);
}
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL) {
ODPH_ERR("Shared mem addr failed.\n");
exit(EXIT_FAILURE);
}
gbl_args_init(gbl_args);
parse_args(argc, argv, &gbl_args->appl);
if (sched_mode(gbl_args->appl.in_mode))
gbl_args->appl.sched_mode = 1;
num_workers = MAX_WORKERS;
if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
num_workers = gbl_args->appl.cpu_count;
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
odp_cpumask_to_str(&cpumask, gbl_args->cpumaskstr, sizeof(gbl_args->cpumaskstr));
gbl_args->appl.num_workers = num_workers;
print_options();
for (i = 0; i < num_workers; i++)
gbl_args->thread_args[i].thr_idx = i;
if_count = gbl_args->appl.if_count;
num_pools = 1;
if (gbl_args->appl.pool_per_if)
num_pools = if_count;
ODPH_ERR("Pool capability failed\n");
return -1;
}
ODPH_ERR("Too many pools %i\n", num_pools);
return -1;
}
pkt_len = gbl_args->appl.packet_len;
if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len) {
pkt_len = pool_capa.pkt.max_len;
printf("\nWarning: packet length reduced to %u\n\n", pkt_len);
}
if (gbl_args->appl.seg_len == UINT32_MAX)
seg_len = gbl_args->appl.packet_len;
else
seg_len = gbl_args->appl.seg_len;
/* Check whether there are enough segments for the requested packet
 * length, if not adjust to a bigger segment size */
if (seg_len < (pkt_len / pool_capa.pkt.max_segs_per_pkt))
seg_len = pkt_len / pool_capa.pkt.max_segs_per_pkt;
if (pool_capa.pkt.min_seg_len && seg_len < pool_capa.pkt.min_seg_len)
seg_len = pool_capa.pkt.min_seg_len;
if (pool_capa.pkt.max_seg_len && seg_len > pool_capa.pkt.max_seg_len)
seg_len = pool_capa.pkt.max_seg_len;
if ((gbl_args->appl.seg_len != UINT32_MAX) && (seg_len != gbl_args->appl.seg_len))
printf("\nWarning: Segment length requested %d configured %d\n",
gbl_args->appl.seg_len, seg_len);
if (seg_len < gbl_args->appl.data_rd * 8) {
ODPH_ERR("Requested data read length %u exceeds maximum segment length %u\n",
gbl_args->appl.data_rd * 8, seg_len);
return -1;
}
if (gbl_args->appl.num_pkt == 0)
num_pkt = DEFAULT_NUM_PKT;
else
num_pkt = gbl_args->appl.num_pkt;
if (pool_capa.pkt.max_num && num_pkt > pool_capa.pkt.max_num) {
if (gbl_args->appl.num_pkt == 0) {
num_pkt = pool_capa.pkt.max_num;
printf("\nWarning: number of packets reduced to %u\n\n",
num_pkt);
} else {
ODPH_ERR("Too many packets %u. Maximum is %u.\n",
num_pkt, pool_capa.pkt.max_num);
return -1;
}
}
gbl_args->num_pkt = num_pkt;
gbl_args->pkt_len = pkt_len;
gbl_args->seg_len = seg_len;
printf("Resulting pool parameter values:\n");
printf("Packets per pool: %u\n", num_pkt);
printf("Packet length: %u\n", pkt_len);
printf("Segment length: %u\n", seg_len);
/* Create packet pool */
odp_pool_param_init(&params);
params.pkt.seg_len = seg_len;
params.pkt.len = pkt_len;
params.pkt.num = num_pkt;
params.type = ODP_POOL_PACKET;
for (i = 0; i < num_pools; i++) {
pool_tbl[i] = odp_pool_create("packet pool", &params);
if (pool_tbl[i] == ODP_POOL_INVALID) {
ODPH_ERR("Pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
if (gbl_args->appl.verbose)
odp_pool_print(pool_tbl[i]);
}
num_vec_pools = 0;
if (gbl_args->appl.vector_mode) {
if (!sched_mode(gbl_args->appl.in_mode)) {
ODPH_ERR("Vector mode only supports scheduler pktin modes (1-3)\n");
return -1;
}
num_vec_pools = gbl_args->appl.pool_per_if ? if_count : 1;
if (num_vec_pools > (int)pool_capa.vector.max_pools) {
ODPH_ERR("Too many vector pools %i\n", num_vec_pools);
return -1;
}
odp_pool_param_init(&params);
if (set_vector_pool_params(&params, &pool_capa))
return -1;
printf("Vectors per pool: %u\n", gbl_args->vector_num);
printf("Vector size: %u\n", gbl_args->vector_max_size);
for (i = 0; i < num_vec_pools; i++) {
vec_pool_tbl[i] = odp_pool_create("vector pool", &params);
if (vec_pool_tbl[i] == ODP_POOL_INVALID) {
ODPH_ERR("Vector pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
if (gbl_args->appl.verbose)
odp_pool_print(vec_pool_tbl[i]);
}
}
printf("\n");
bind_workers();
ODPH_ERR("Schedule capability failed\n");
exit(EXIT_FAILURE);
}
if (gbl_args->appl.flow_aware) {
} else {
ODPH_ERR("Flow aware mode not supported\n");
exit(EXIT_FAILURE);
}
}
num_groups = gbl_args->appl.num_groups;
/* Predefined groups are enabled by default */
max_groups = sched_capa.max_groups - 3;
if (max_groups > MAX_GROUPS)
max_groups = MAX_GROUPS;
if (num_groups > max_groups) {
ODPH_ERR("Too many groups. Maximum is %i.\n", max_groups);
exit(EXIT_FAILURE);
}
if (num_groups == 0) {
/* Group of all threads */
group[0] = ODP_SCHED_GROUP_ALL;
num_groups = 1;
} else if (num_groups == -1) {
/* Group of all worker threads */
group[0] = ODP_SCHED_GROUP_WORKER;
num_groups = 1;
} else {
create_groups(num_groups, group);
}
pool = pool_tbl[0];
vec_pool = vec_pool_tbl[0];
printf("\nInterfaces\n----------\n");
for (i = 0; i < if_count; ++i) {
const char *dev = gbl_args->appl.if_names[i];
int num_rx, num_tx;
odp_schedule_group_t grp;
num_rx = gbl_args->appl.rx_queues > 0 ? gbl_args->appl.rx_queues : num_workers;
num_tx = num_workers;
if (!gbl_args->appl.sched_mode) {
num_rx = gbl_args->pktios[i].num_rx_thr;
num_tx = gbl_args->pktios[i].num_tx_thr;
}
grp = group[i % num_groups];
if (gbl_args->appl.pool_per_if) {
pool = pool_tbl[i];
vec_pool = vec_pool_tbl[i];
}
if (create_pktio(dev, i, num_rx, num_tx, pool, vec_pool, grp))
exit(EXIT_FAILURE);
if (gbl_args->appl.dst_change) {
memset(&new_addr, 0, sizeof(odph_ethaddr_t));
if (gbl_args->appl.addr_count) {
memcpy(&new_addr, &gbl_args->appl.addrs[i],
sizeof(odph_ethaddr_t));
} else {
new_addr.addr[0] = 0x02;
new_addr.addr[5] = i;
}
gbl_args->dst_eth_addr[i] = new_addr;
}
}
bind_queues();
init_port_lookup_tbl();
if (!gbl_args->appl.sched_mode)
print_port_mapping();
if (gbl_args->appl.in_mode == DIRECT_RECV)
thr_run_func = run_worker_direct_mode;
else if (gbl_args->appl.in_mode == PLAIN_QUEUE)
thr_run_func = run_worker_plain_queue_mode;
else
thr_run_func = gbl_args->appl.vector_mode ?
run_worker_sched_mode_vector : run_worker_sched_mode;
odp_barrier_init(&gbl_args->init_barrier, num_workers + 1);
odp_barrier_init(&gbl_args->term_barrier, num_workers + 1);
odph_thread_common_param_init(&thr_common);
thr_common.instance = instance;
thr_common.cpumask = &cpumask;
thr_common.sync = 1;
for (i = 0; i < num_workers; ++i) {
int j;
int num_join;
int mode = gbl_args->appl.group_mode;
init_state(&gbl_args->appl, &gbl_args->thread_args[i].state, i);
odph_thread_param_init(&thr_param[i]);
thr_param[i].start = thr_run_func;
thr_param[i].arg = &gbl_args->thread_args[i];
thr_param[i].thr_type = ODP_THREAD_WORKER;
gbl_args->thread_args[i].num_grp_join = 0;
if (gbl_args->appl.num_groups > 0) {
num_join = if_count < num_groups ? if_count : num_groups;
if (mode == 0 || mode == 1) {
if (mode == 0)
num_join = num_groups;
gbl_args->thread_args[i].num_grp_join = num_join;
for (j = 0; j < num_join; j++)
gbl_args->thread_args[i].group[j] = group[j];
} else {
if (num_workers >= num_join) {
gbl_args->thread_args[i].num_grp_join = 1;
gbl_args->thread_args[i].group[0] = group[i % num_join];
} else {
int cnt = 0;
for (j = 0; i + j < num_join; j += num_workers) {
gbl_args->thread_args[i].group[cnt] = group[i + j];
cnt++;
}
gbl_args->thread_args[i].num_grp_join = cnt;
}
}
}
stats[i] = &gbl_args->thread_args[i].stats;
}
num_thr = odph_thread_create(gbl_args->thread_tbl, &thr_common,
thr_param, num_workers);
if (num_thr != num_workers) {
ODPH_ERR("Worker create failed: %i\n", num_thr);
exit(EXIT_FAILURE);
}
if (gbl_args->appl.verbose)
odp_shm_print_all();
/* Start packet receive and transmit */
for (i = 0; i < if_count; ++i) {
pktio = gbl_args->pktios[i].pktio;
ret = odp_pktio_start(pktio);
if (ret) {
ODPH_ERR("Pktio start failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
/* Sync to also ensure that workers have started */
odp_barrier_wait(&gbl_args->init_barrier);
ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
gbl_args->appl.accuracy);
for (i = 0; i < if_count; ++i) {
if (odp_pktio_stop(gbl_args->pktios[i].pktio)) {
ODPH_ERR("Pktio stop failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
odp_atomic_store_u32(&gbl_args->exit_threads, 1);
if (gbl_args->appl.in_mode != DIRECT_RECV)
odp_barrier_wait(&gbl_args->term_barrier);
/* Main thread waits for other threads to exit */
odph_thread_join_result_t res[num_workers];
if (odph_thread_join_result(gbl_args->thread_tbl, res, num_workers) != num_workers) {
ODPH_ERR("Worker join failed\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("Worker join failed\n");
exit(EXIT_FAILURE);
}
for (i = 0; i < num_workers; i++) {
if (res[i].is_sig || res[i].ret != 0) {
ODPH_ERR("Worker thread failure%s: %d\n", res[i].is_sig ?
" (signaled)" : "", res[i].ret);
exit(EXIT_FAILURE);
}
}
for (i = 0; i < if_count; ++i) {
pktio = gbl_args->pktios[i].pktio;
if (gbl_args->appl.verbose) {
printf("Pktio %s extra statistics:\n", gbl_args->appl.if_names[i]);
odp_pktio_extra_stats_print(pktio);
}
if (odp_pktio_close(pktio)) {
ODPH_ERR("Pktio close failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
free(gbl_args->appl.if_names);
free(gbl_args->appl.if_str);
for (i = 0; i < gbl_args->appl.num_om; i++)
free(gbl_args->appl.output_map[i]);
gbl_args = NULL;
for (i = 0; i < num_pools; i++) {
if (odp_pool_destroy(pool_tbl[i])) {
ODPH_ERR("Pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
for (i = 0; i < num_vec_pools; i++) {
if (odp_pool_destroy(vec_pool_tbl[i])) {
ODPH_ERR("Vector pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
ODPH_ERR("Shm free failed\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("Term local failed\n");
exit(EXIT_FAILURE);
}
ODPH_ERR("Term global failed\n");
exit(EXIT_FAILURE);
}
return ret;
}
void odp_atomic_init_u32(odp_atomic_u32_t *atom, uint32_t val)
Initialize atomic uint32 variable.
uint32_t odp_atomic_load_u32(odp_atomic_u32_t *atom)
Load value of atomic uint32 variable.
void odp_atomic_store_u32(odp_atomic_u32_t *atom, uint32_t val)
Store value to atomic uint32 variable.
void odp_barrier_init(odp_barrier_t *barr, int count)
Initialize barrier with thread count.
void odp_mb_full(void)
Full memory barrier.
void odp_barrier_wait(odp_barrier_t *barr)
Synchronize thread execution on barrier.
#define ODP_ALIGNED_CACHE
Defines type/struct/variable to be cache line size aligned.
#define odp_unlikely(x)
Branch unlikely taken.
#define ODP_UNUSED
Intentionally unused variables of functions.
#define odp_likely(x)
Branch likely taken.
int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
Default CPU mask for worker threads.
int32_t odp_cpumask_to_str(const odp_cpumask_t *mask, char *str, int32_t size)
Format a string from CPU mask.
#define ODP_CPUMASK_STR_SIZE
The maximum number of characters needed to record any CPU mask as a string (output of odp_cpumask_to_...
void odp_event_free_multi(const odp_event_t event[], int num)
Free multiple events.
void odp_event_free(odp_event_t event)
Free event.
odp_event_type_t odp_event_type(odp_event_t event)
Event type of an event.
#define ODP_EVENT_INVALID
Invalid event.
void odp_init_param_init(odp_init_t *param)
Initialize the odp_init_t to default values for all fields.
int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
Thread local ODP initialization.
int odp_init_global(odp_instance_t *instance, const odp_init_t *params, const odp_platform_init_t *platform_params)
Global ODP initialization.
int odp_term_local(void)
Thread local ODP termination.
int odp_term_global(odp_instance_t instance)
Global ODP termination.
uint64_t odp_instance_t
ODP instance ID.
int odp_pktio_mac_addr(odp_pktio_t pktio, void *mac_addr, int size)
Get the default MAC address of a packet IO interface.
void odp_pktin_queue_param_init(odp_pktin_queue_param_t *param)
Initialize packet input queue parameters.
void odp_pktio_param_init(odp_pktio_param_t *param)
Initialize pktio params.
int odp_pktio_promisc_mode(odp_pktio_t pktio)
Determine if promiscuous mode is enabled for a packet IO interface.
int odp_pktio_close(odp_pktio_t pktio)
Close a packet IO interface.
int odp_pktio_info(odp_pktio_t pktio, odp_pktio_info_t *info)
Retrieve information about a pktio.
int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num)
Direct packet output queues.
int odp_pktio_maxlen_set(odp_pktio_t pktio, uint32_t maxlen_input, uint32_t maxlen_output)
Set maximum frame lengths.
int odp_pktio_promisc_mode_set(odp_pktio_t pktio, odp_bool_t enable)
Set promiscuous mode.
void odp_pktio_extra_stats_print(odp_pktio_t pktio)
Print extra statistics for a packet IO interface.
void odp_pktio_config_init(odp_pktio_config_t *config)
Initialize packet IO configuration options.
int odp_pktin_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
Event queues for packet input.
odp_pktio_t odp_pktio_open(const char *name, odp_pool_t pool, const odp_pktio_param_t *param)
Open a packet IO interface.
int odp_pktio_config(odp_pktio_t pktio, const odp_pktio_config_t *config)
Configure packet IO interface options.
void odp_pktio_print(odp_pktio_t pktio)
Print pktio info to the console.
int odp_pktio_start(odp_pktio_t pktio)
Start packet receive and transmit.
#define ODP_PKTIO_INVALID
Invalid packet IO handle.
int odp_pktin_queue(odp_pktio_t pktio, odp_pktin_queue_t queues[], int num)
Direct packet input queues.
void odp_pktout_queue_param_init(odp_pktout_queue_param_t *param)
Initialize packet output queue parameters.
int odp_pktout_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
Event queues for packet output.
int odp_pktio_stop(odp_pktio_t pktio)
Stop packet receive and transmit.
int odp_pktin_recv(odp_pktin_queue_t queue, odp_packet_t packets[], int num)
Receive packets directly from an interface input queue.
int odp_pktio_index(odp_pktio_t pktio)
Get pktio interface index.
int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa)
Query packet IO interface capabilities.
#define ODP_PKTIO_MAX_INDEX
Maximum packet IO interface index.
int odp_pktout_send(odp_pktout_queue_t queue, const odp_packet_t packets[], int num)
Send packets directly to an interface output queue.
odp_pktio_op_mode_t
Packet IO operation mode.
int odp_pktio_extra_stat_info(odp_pktio_t pktio, odp_pktio_extra_stat_info_t info[], int num)
Get extra statistics counter information for a packet IO interface.
int odp_pktin_queue_config(odp_pktio_t pktio, const odp_pktin_queue_param_t *param)
Configure packet input queues.
int odp_pktout_queue_config(odp_pktio_t pktio, const odp_pktout_queue_param_t *param)
Configure packet output queues.
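A condensed open/configure/start sequence built from the calls above, assuming direct-mode I/O with a single queue in each direction and a previously created packet pool 'pool':

odp_pktio_param_t pktio_param;
odp_pktin_queue_param_t pktin_param;
odp_pktout_queue_param_t pktout_param;
odp_pktio_t pktio;

odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;

pktio = odp_pktio_open("eth0", pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID)
	return -1;

odp_pktin_queue_param_init(&pktin_param);
pktin_param.num_queues = 1;

odp_pktout_queue_param_init(&pktout_param);
pktout_param.num_queues = 1;

if (odp_pktin_queue_config(pktio, &pktin_param) ||
    odp_pktout_queue_config(pktio, &pktout_param) ||
    odp_pktio_start(pktio))
	return -1;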
@ ODP_PKTOUT_MODE_QUEUE
Packet output through event queues.
@ ODP_PKTOUT_MODE_DISABLED
Application will never send to this interface.
@ ODP_PKTIO_OP_MT_UNSAFE
Not multithread safe operation.
@ ODP_PKTIO_OP_MT
Multithread safe operation.
@ ODP_PKTIO_LINK_PAUSE_ON
Pause frame flow control enabled.
@ ODP_PKTIN_MODE_QUEUE
Packet input through plain event queues.
@ ODP_PKTIN_MODE_DISABLED
Application will never receive from this interface.
@ ODP_PKTIN_MODE_SCHED
Packet input through scheduler and scheduled event queues.
void odp_packet_from_event_multi(odp_packet_t pkt[], const odp_event_t ev[], int num)
Convert multiple packet events to packet handles.
int odp_packet_tx_compl_request(odp_packet_t pkt, const odp_packet_tx_compl_opt_t *opt)
Request packet transmit completion.
int odp_packet_input_index(odp_packet_t pkt)
Packet input interface index.
int odp_packet_tx_compl_done(odp_pktio_t pktio, uint32_t compl_id)
Check packet transmit completion.
void odp_packet_to_event_multi(const odp_packet_t pkt[], odp_event_t ev[], int num)
Convert multiple packet handles to events.
uint32_t odp_packet_seg_len(odp_packet_t pkt)
Packet data length following the data pointer.
uint32_t odp_packet_headroom(odp_packet_t pkt)
Packet headroom length.
void odp_packet_prefetch(odp_packet_t pkt, uint32_t offset, uint32_t len)
Packet data prefetch.
odp_packet_vector_t odp_packet_vector_from_event(odp_event_t ev)
Get packet vector handle from event.
int odp_packet_num_segs(odp_packet_t pkt)
Number of segments.
odp_packet_t odp_packet_copy(odp_packet_t pkt, odp_pool_t pool)
Full copy of a packet.
uint32_t odp_packet_user_area_size(odp_packet_t pkt)
User area size.
void * odp_packet_data(odp_packet_t pkt)
Packet data pointer.
odp_packet_tx_compl_mode_t
Packet transmit completion mode.
uint32_t odp_packet_len(odp_packet_t pkt)
Packet data length.
int odp_packet_has_error(odp_packet_t pkt)
Check for all parse errors in packet.
int odp_packet_has_tx_compl_request(odp_packet_t pkt)
Check if packet transmit completion is requested.
odp_packet_t odp_packet_from_event(odp_event_t ev)
Get packet handle from event.
void odp_packet_free(odp_packet_t pkt)
Free packet.
void odp_packet_vector_free(odp_packet_vector_t pktv)
Free packet vector.
void odp_packet_l4_chksum_insert(odp_packet_t pkt, int insert)
Layer 4 checksum insertion override.
#define ODP_PACKET_INVALID
Invalid packet.
uint32_t odp_packet_vector_tbl(odp_packet_vector_t pktv, odp_packet_t **pkt_tbl)
Get packet vector table.
odp_pool_t odp_packet_pool(odp_packet_t pkt)
Packet pool.
#define ODP_PACKET_VECTOR_INVALID
Invalid packet vector.
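A sketch of handling a vector event: unpack the table, process the packets, then free only the vector. Freeing the vector does not free the packets it carried; process_packet() is a hypothetical per-packet handler that takes ownership:

odp_packet_vector_t pktv = odp_packet_vector_from_event(ev);
odp_packet_t *pkt_tbl;
uint32_t i, num;

num = odp_packet_vector_tbl(pktv, &pkt_tbl);

for (i = 0; i < num; i++)
	process_packet(pkt_tbl[i]);	/* hypothetical handler */

odp_packet_vector_free(pktv);	/* frees the vector only, not the packets */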
void odp_packet_l3_chksum_insert(odp_packet_t pkt, int insert)
Layer 3 checksum insertion override.
@ ODP_PROTO_LAYER_ALL
All layers.
@ ODP_PROTO_LAYER_NONE
No layers.
@ ODP_PACKET_TX_COMPL_POLL
Enable packet transmit completion check through polling.
@ ODP_PACKET_TX_COMPL_DISABLED
Disable packet transmit completion.
@ ODP_PACKET_TX_COMPL_EVENT
Enable packet transmit completion event.
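A sketch of poll-mode transmit completion, assuming the pktio capability reports tx_compl.mode_poll support, completion ID 0 is within max_compl_id, and the option struct exposes mode and compl_id fields as in recent ODP releases:

odp_packet_tx_compl_opt_t opt;
int done;

opt.mode = ODP_PACKET_TX_COMPL_POLL;
opt.compl_id = 0;

if (odp_packet_tx_compl_request(pkt, &opt))
	return -1;

/* ... transmit 'pkt' ... */

do {
	done = odp_packet_tx_compl_done(pktio, 0);
} while (done == 0);	/* >0: transmitted, <0: failure */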
odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *param)
Create a pool.
int odp_pool_capability(odp_pool_capability_t *capa)
Query pool capabilities.
void odp_pool_param_init(odp_pool_param_t *param)
Initialize pool params.
int odp_pool_destroy(odp_pool_t pool)
Destroy a pool previously created by odp_pool_create()
void odp_pool_print(odp_pool_t pool)
Print pool info.
#define ODP_POOL_INVALID
Invalid pool.
@ ODP_POOL_VECTOR
Vector event pool.
@ ODP_POOL_PACKET
Packet pool.
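A minimal packet pool creation, clamped against queried capabilities; DEFAULT_NUM_PKT and POOL_PKT_LEN are the application's pool defaults:

odp_pool_capability_t pool_capa;
odp_pool_param_t params;
odp_pool_t pool;
uint32_t num_pkt = DEFAULT_NUM_PKT;

if (odp_pool_capability(&pool_capa))
	return -1;

if (pool_capa.pkt.max_num && num_pkt > pool_capa.pkt.max_num)
	num_pkt = pool_capa.pkt.max_num;

odp_pool_param_init(&params);
params.type = ODP_POOL_PACKET;
params.pkt.num = num_pkt;
params.pkt.len = POOL_PKT_LEN;

pool = odp_pool_create("packet pool", &params);
if (pool == ODP_POOL_INVALID)
	return -1;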
int odp_queue_enq_multi(odp_queue_t queue, const odp_event_t events[], int num)
Enqueue multiple events to a queue.
void odp_queue_param_init(odp_queue_param_t *param)
Initialize queue params.
#define ODP_QUEUE_INVALID
Invalid queue.
odp_event_t odp_queue_deq(odp_queue_t queue)
Dequeue an event from a queue.
odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
Queue create.
int odp_queue_destroy(odp_queue_t queue)
Destroy ODP queue.
int odp_queue_deq_multi(odp_queue_t queue, odp_event_t events[], int num)
Dequeue multiple events from a queue.
@ ODP_QUEUE_TYPE_SCHED
Scheduled queue.
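A plain-queue round trip using the calls above; the application is assumed to fill 'events' and 'num' before enqueueing:

odp_queue_param_t qparam;
odp_queue_t queue;
odp_event_t events[MAX_PKT_BURST];
int num = 0;	/* number of valid events, filled by the application */
int sent;

odp_queue_param_init(&qparam);	/* defaults give a plain queue */

queue = odp_queue_create("work queue", &qparam);
if (queue == ODP_QUEUE_INVALID)
	return -1;

sent = odp_queue_enq_multi(queue, events, num);
if (sent < num)
	return -1;	/* partial enqueue; real code would retry or free the rest */

num = odp_queue_deq_multi(queue, events, MAX_PKT_BURST);

odp_queue_destroy(queue);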
typedef int odp_schedule_sync_t
Scheduler synchronization method.
int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[], int num)
Schedule, do not wait for events.
#define ODP_SCHED_SYNC_PARALLEL
Parallel scheduled queues.
typedef int odp_schedule_prio_t
Scheduling priority level.
typedef int odp_schedule_group_t
Scheduler thread group.
void odp_schedule_config_init(odp_schedule_config_t *config)
Initialize schedule configuration options.
int odp_schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *mask)
Join a schedule group.
#define ODP_SCHED_SYNC_ATOMIC
Atomic queue synchronization.
#define ODP_SCHED_SYNC_ORDERED
Ordered queue synchronization.
#define ODP_SCHED_GROUP_WORKER
Group of all worker threads.
#define ODP_SCHED_GROUP_INVALID
Invalid scheduler group.
#define ODP_SCHED_NO_WAIT
Do not wait.
int odp_schedule_default_prio(void)
Default scheduling priority level.
void odp_schedule_pause(void)
Pause scheduling.
int odp_schedule_config(const odp_schedule_config_t *config)
Global schedule configuration.
uint64_t odp_schedule_wait_time(uint64_t ns)
Schedule wait time.
int odp_schedule_capability(odp_schedule_capability_t *capa)
Query scheduler capabilities.
odp_schedule_group_t odp_schedule_group_create(const char *name, const odp_thrmask_t *mask)
Schedule group create.
odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
Schedule an event.
void odp_schedule_resume(void)
Resume scheduling.
#define ODP_SCHED_GROUP_ALL
Group of all threads.
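The scheduler calls above combine into a worker receive loop. A sketch using the no-wait variant, where 'exit_threads' is a hypothetical termination flag:

odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
int pkts;

while (!exit_threads) {
	pkts = odp_schedule_multi_no_wait(NULL, ev_tbl, MAX_PKT_BURST);
	if (pkts <= 0)
		continue;

	odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts);
	/* ... process and forward pkt_tbl ... */
}

/* before exit: stop receiving new events and drain any prefetched ones */
odp_schedule_pause();
while ((ev_tbl[0] = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) != ODP_EVENT_INVALID)
	odp_event_free(ev_tbl[0]);
odp_schedule_resume();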
void odp_shm_print_all(void)
Print all shared memory blocks.
int odp_shm_free(odp_shm_t shm)
Free a contiguous block of shared memory.
#define ODP_SHM_INVALID
Invalid shared memory block.
void * odp_shm_addr(odp_shm_t shm)
Shared memory block address.
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, uint32_t flags)
Reserve a contiguous block of shared memory.
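Shared memory ties application state to a name visible across ODP threads. A sketch reserving a cache-line aligned block for the application's argument struct:

odp_shm_t shm;
args_t *args;

shm = odp_shm_reserve("appl args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID)
	return -1;

args = odp_shm_addr(shm);
if (args == NULL)
	return -1;

memset(args, 0, sizeof(args_t));	/* requires <string.h> */

/* ... and at teardown ... */
if (odp_shm_free(shm))
	return -1;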
void odp_sys_info_print(void)
Print system info.
void odp_thrmask_set(odp_thrmask_t *mask, int thr)
Add thread to mask.
int odp_thread_id(void)
Get thread identifier.
void odp_thrmask_zero(odp_thrmask_t *mask)
Clear entire thread mask.
@ ODP_THREAD_WORKER
Worker thread.
@ ODP_THREAD_CONTROL
Control thread.
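Thread masks are how a worker names itself to the scheduler. A sketch of a worker joining a schedule group created elsewhere as 'group':

odp_thrmask_t mask;

odp_thrmask_zero(&mask);
odp_thrmask_set(&mask, odp_thread_id());

if (odp_schedule_group_join(group, &mask))
	return -1;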
uint64_t odp_time_to_ns(odp_time_t time)
Convert time to nanoseconds.
odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1)
Time difference.
#define ODP_TIME_SEC_IN_NS
A second in nanoseconds.
odp_time_t odp_time_local(void)
Current local time.
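A sketch of timing a test interval with the local time API:

odp_time_t t1, t2;
uint64_t nsec;

t1 = odp_time_local();

/* ... run the measured workload ... */

t2 = odp_time_local();
nsec = odp_time_to_ns(odp_time_diff(t2, t1));

printf("elapsed %.3f sec\n", nsec / (double)ODP_TIME_SEC_IN_NS);	/* requires <stdio.h> */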
Global initialization parameters.
odp_mem_model_t mem_model
Application memory model.
odp_feature_t not_used
Unused features.
Packet transmit completion request options.
Packet input queue parameters.
uint32_t num_queues
Number of input queues to be created.
odp_pktio_op_mode_t op_mode
Operation mode.
odp_queue_param_t queue_param
Queue parameters.
odp_pktin_hash_proto_t hash_proto
Protocol field selection for hashing.
odp_bool_t hash_enable
Enable flow hashing.
odp_pktin_vector_config_t vector
Packet input vector configuration.
uint64_t max_tmo_ns
Maximum timeout in nanoseconds for the producer to wait for the vector of packets.
uint64_t min_tmo_ns
Minimum value allowed to be configured to odp_pktin_vector_config_t::max_tmo_ns.
uint32_t min_size
Minimum value allowed to be configured to odp_pktin_vector_config_t::max_size.
uint32_t max_size
Maximum number of packets that can be accumulated into a packet vector by a producer.
odp_support_t supported
Packet input vector availability.
odp_bool_t enable
Enable packet input vector.
uint32_t max_size
Maximum number of packets in a vector.
uint64_t max_tmo_ns
Maximum time to wait for packets.
odp_pool_t pool
Vector pool.
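These fields combine into a typical flow-hashed, vector-enabled input setup. A sketch, assuming 'vec_pool' is a vector pool created with type ODP_POOL_VECTOR:

odp_pktin_queue_param_t pktin_param;

odp_pktin_queue_param_init(&pktin_param);
pktin_param.num_queues = 4;
pktin_param.hash_enable = 1;
pktin_param.hash_proto.proto.ipv4_udp = 1;
pktin_param.vector.enable = 1;
pktin_param.vector.pool = vec_pool;
pktin_param.vector.max_size = DEFAULT_VEC_SIZE;
pktin_param.vector.max_tmo_ns = DEFAULT_VEC_TMO;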
odp_bool_t queue_type_sched
Scheduled queue support.
uint32_t mode_poll
Packet transmit completion mode ODP_PACKET_TX_COMPL_POLL support.
odp_pktio_set_op_t set_op
Supported set operations.
uint32_t max_output
Maximum valid value for 'maxlen_output'.
odp_pktin_vector_capability_t vector
Packet input vector capability.
uint32_t max_input_queues
Maximum number of input queues.
uint32_t max_input
Maximum valid value for 'maxlen_input'.
struct odp_pktio_capability_t::@110 flow_control
Supported flow control modes.
odp_pktio_config_t config
Supported pktio configuration options.
uint32_t min_output
Minimum valid value for 'maxlen_output'.
uint32_t max_output_queues
Maximum number of output queues.
uint32_t max_compl_id
Maximum supported completion ID value.
struct odp_pktio_capability_t::@108 tx_compl
Supported packet Tx completion options.
uint32_t min_input
Minimum valid value for 'maxlen_input'.
uint32_t mode_event
Packet transmit completion mode ODP_PACKET_TX_COMPL_EVENT support.
struct odp_pktio_capability_t::@107 maxlen
Supported frame lengths for odp_pktio_maxlen_set()
uint32_t pause_tx
Generation of traditional Ethernet pause frames.
uint32_t pause_rx
Reception of traditional Ethernet pause frames.
Packet IO configuration options.
uint32_t max_compl_id
Maximum completion index.
uint32_t mode_event
Enable packet transmit completion events.
odp_pktio_link_pause_t pause_tx
Transmission of flow control frames.
odp_pktout_config_opt_t pktout
Packet output configuration options bit field.
struct odp_pktio_config_t::@102 flow_control
Link flow control configuration.
uint32_t mode_poll
Enable packet transmit completion check through polling.
odp_pktio_link_pause_t pause_rx
Reception of flow control frames.
odp_pktio_parser_config_t parser
Packet input parser configuration.
struct odp_pktio_config_t::@103 tx_compl
Packet transmit completion configuration.
odp_pktin_config_opt_t pktin
Packet input configuration options bit field.
const char * drv_name
Packet IO driver name (implementation specific)
odp_pktin_mode_t in_mode
Packet input mode.
odp_pktout_mode_t out_mode
Packet output mode.
odp_proto_layer_t layer
Protocol parsing level in packet input.
Packet output queue parameters.
odp_pktio_op_mode_t op_mode
Operation mode.
uint32_t num_queues
Number of output queues to be created.
struct odp_pool_capability_t::@122 pkt
Packet pool capabilities.
uint32_t max_num
Maximum number of buffers of any size.
uint32_t max_segs_per_pkt
Maximum number of segments per packet.
struct odp_pool_capability_t::@124 vector
Vector pool capabilities.
uint32_t max_size
Maximum buffer data size in bytes.
uint32_t min_seg_len
Minimum packet segment data length in bytes.
uint32_t max_pools
Maximum number of pools of any type (odp_pool_type_t)
uint32_t max_seg_len
Maximum packet segment data length in bytes.
uint32_t max_len
Maximum packet data length in bytes.
uint32_t num
Number of buffers in the pool.
odp_pool_type_t type
Pool type.
uint32_t len
Minimum length of 'num' packets.
uint32_t max_size
Maximum number of handles (such as odp_packet_t) in a vector.
uint32_t seg_len
Minimum number of packet data bytes that can be stored in the first segment of a newly allocated packet.
struct odp_pool_param_t::@126 pkt
Parameters for packet pools.
struct odp_pool_param_t::@128 vector
Parameters for vector pools.
odp_schedule_param_t sched
Scheduler parameters.
odp_queue_type_t type
Queue type.
uint32_t max_flow_id
Maximum flow ID per queue.
uint32_t max_groups
Maximum number of scheduling groups.
uint32_t max_flow_id
Maximum flow ID per queue.
odp_schedule_group_t group
Thread group.
odp_schedule_prio_t prio
Priority level.
odp_schedule_sync_t sync
Synchronization method.
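A sketch filling the scheduler-related queue parameters for an atomic scheduled queue served by the worker group:

odp_queue_param_t qparam;

odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_WORKER;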
uint32_t tm
Traffic Manager APIs, e.g., odp_tm_xxx()
uint32_t crypto
Crypto APIs, e.g., odp_crypto_xxx()
uint32_t ipsec
IPsec APIs, e.g., odp_ipsec_xxx()
uint32_t timer
Timer APIs, e.g., odp_timer_xxx(), odp_timeout_xxx()
uint32_t cls
Classifier APIs, e.g., odp_cls_xxx(), odp_cos_xxx()
struct odp_feature_t::@148 feat
Individual feature bits.
uint32_t compress
Compression APIs, e.g., odp_comp_xxx()
uint64_t ts_all
Timestamp all packets on packet input.
struct odp_pktin_config_opt_t::@100 bit
Option flags.
uint32_t ipv4_udp
IPv4 addresses and UDP port numbers.
struct odp_pktin_hash_proto_t::@99 proto
Protocol header fields for hashing.
struct odp_pktio_set_op_t::@104 op
Operation flags.
uint32_t maxlen
Maximum frame length.
uint32_t promisc_mode
Promiscuous mode.
struct odp_pktout_config_opt_t::@101 bit
Option flags for packet output.
uint64_t no_packet_refs
Packet references not used on packet output.
uint64_t ipv4_chksum_ena
Enable IPv4 header checksum insertion.
uint64_t tcp_chksum_ena
Enable TCP checksum insertion.
uint64_t udp_chksum_ena
Enable UDP checksum insertion.
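A sketch enabling input timestamping and output checksum insertion through these option bits, assuming odp_pktio_capability() has confirmed support for each:

odp_pktio_config_t config;

odp_pktio_config_init(&config);
config.pktin.bit.ts_all = 1;
config.pktout.bit.ipv4_chksum_ena = 1;
config.pktout.bit.udp_chksum_ena = 1;
config.pktout.bit.tcp_chksum_ena = 1;

if (odp_pktio_config(pktio, &config))
	return -1;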