API Reference Manual  1.45.0
odp_traffic_mgmt.c

Traffic manager API example application

/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2015 EZchip Semiconductor Ltd.
* Copyright (c) 2015-2018 Linaro Limited
* Copyright (c) 2022 Marvell
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <execinfo.h>
#include <inttypes.h>
#include <signal.h>
#include <sys/resource.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>
#define NUM_SVC_CLASSES 4
#define USERS_PER_SVC_CLASS 2
#define APPS_PER_USER 2
#define TM_QUEUES_PER_APP 2
#define NUM_USERS (USERS_PER_SVC_CLASS * NUM_SVC_CLASSES)
#define NUM_TM_QUEUES (NUM_USERS * APPS_PER_USER * TM_QUEUES_PER_APP)
#define TM_QUEUES_PER_USER (TM_QUEUES_PER_APP * APPS_PER_USER)
#define TM_QUEUES_PER_CLASS (USERS_PER_SVC_CLASS * TM_QUEUES_PER_USER)
#define MAX_NODES_PER_LEVEL (NUM_USERS * APPS_PER_USER)
#define KBPS 1000
#define MBPS 1000000
#define PERCENT(percent) (100 * percent)
#define FALSE 0
#define TRUE 1
#define RANDOM_BUF_LEN 1024
typedef struct {
odp_tm_shaper_params_t shaper_params;
odp_tm_threshold_params_t threshold_params;
} profile_params_set_t;
typedef struct {
odp_tm_shaper_t shaper_profile;
odp_tm_threshold_t threshold_profile;
} profile_set_t;
/* Global ODP init parameters: install the overridable abort hook so a fatal
 * ODP error terminates through odp_override_abort(). */
static const odp_init_t ODP_INIT_PARAMS = {
	.abort_fn = odp_override_abort
};
static profile_params_set_t COMPANY_PROFILE_PARAMS = {
.shaper_params = {
.commit_rate = 50 * MBPS, .commit_burst = 1000000,
.peak_rate = 0, .peak_burst = 0,
.dual_rate = FALSE, .shaper_len_adjust = 20
},
.threshold_params = {
.max_pkts = 100000, .enable_max_pkts = TRUE,
.max_bytes = 10000000, .enable_max_bytes = TRUE
},
.wred_params = {
.min_threshold = PERCENT(70),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(70),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(70),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
}
};
static profile_params_set_t COS0_PROFILE_PARAMS = {
.shaper_params = {
.commit_rate = 1 * MBPS, .commit_burst = 100000,
.peak_rate = 4 * MBPS, .peak_burst = 200000,
.dual_rate = TRUE, .shaper_len_adjust = 20
},
.threshold_params = {
.max_pkts = 10000, .enable_max_pkts = TRUE,
.max_bytes = 1000000, .enable_max_bytes = TRUE
},
.wred_params = {
.min_threshold = PERCENT(80),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(50),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(80),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(50),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(60),
.med_threshold = PERCENT(80),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
}
};
static profile_params_set_t COS1_PROFILE_PARAMS = {
.shaper_params = {
.commit_rate = 500 * KBPS, .commit_burst = 50000,
.peak_rate = 1500 * KBPS, .peak_burst = 150000,
.dual_rate = TRUE, .shaper_len_adjust = 20
},
.threshold_params = {
.max_pkts = 5000, .enable_max_pkts = TRUE,
.max_bytes = 500000, .enable_max_bytes = TRUE
},
.wred_params = {
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(90),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(50),
.med_threshold = PERCENT(80),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
}
};
static profile_params_set_t COS2_PROFILE_PARAMS = {
.shaper_params = {
.commit_rate = 200 * KBPS, .commit_burst = 20000,
.peak_rate = 400 * KBPS, .peak_burst = 40000,
.dual_rate = TRUE, .shaper_len_adjust = 20
},
.threshold_params = {
.max_pkts = 1000, .enable_max_pkts = TRUE,
.max_bytes = 100000, .enable_max_bytes = TRUE
},
.wred_params = {
.min_threshold = PERCENT(50),
.med_threshold = PERCENT(80),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(50),
.med_threshold = PERCENT(80),
.med_drop_prob = PERCENT(70),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(70),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
}
};
static profile_params_set_t COS3_PROFILE_PARAMS = {
.shaper_params = {
.commit_rate = 100 * KBPS, .commit_burst = 5000,
.peak_rate = 0, .peak_burst = 0,
.dual_rate = FALSE, .shaper_len_adjust = 20
},
.threshold_params = {
.max_pkts = 400, .enable_max_pkts = TRUE,
.max_bytes = 60000, .enable_max_bytes = TRUE
},
.wred_params = {
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(70),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(40),
.med_threshold = PERCENT(70),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
.min_threshold = PERCENT(30),
.med_threshold = PERCENT(60),
.med_drop_prob = PERCENT(80),
.max_drop_prob = PERCENT(100),
.enable_wred = TRUE,
.use_byte_fullness = FALSE,
},
}
};
/* Profile handles created once by init_profile_sets(). */
static profile_set_t COMPANY_PROFILE_SET;
static profile_set_t COS_PROFILE_SETS[NUM_SVC_CLASSES];
static profile_set_t USER_PROFILE_SETS[NUM_SVC_CLASSES];
static profile_set_t APP_PROFILE_SETS[NUM_SVC_CLASSES][APPS_PER_USER];

/* The TM system and packet pool used by the whole example. */
static odp_tm_t odp_tm_test;
static odp_pool_t odp_pool;

/* tm_queue handles per service class, filled in config_example_user();
 * next_queue_nums[c] is the next free slot in queue_num_tbls[c]. */
static odp_tm_queue_t queue_num_tbls[NUM_SVC_CLASSES][TM_QUEUES_PER_CLASS];
static uint32_t next_queue_nums[NUM_SVC_CLASSES];

/* Buffer of random bytes consumed by random_8()/random_16(); refilled from
 * odp_random_data() when next_rand_byte reaches the end. */
static uint8_t random_buf[RANDOM_BUF_LEN];
static uint32_t next_rand_byte;

/* Counters of packets enqueued to / emitted by the TM system. */
static odp_atomic_u32_t atomic_pkts_into_tm;
static odp_atomic_u32_t atomic_pkts_from_tm;

/* Command-line controlled settings (-n count, -q quiet). */
static uint32_t g_num_pkts_to_send = 100;
static uint8_t g_print_tm_stats = TRUE;

static void tester_egress_fcn(odp_packet_t odp_pkt);

/* Shaper rate/burst limits supported by every TM level, derived from the
 * TM capabilities in create_and_config_tm() and used by clamp_rate()/
 * clamp_burst(). */
static uint64_t tm_shaper_min_rate;
static uint64_t tm_shaper_max_rate;
static uint32_t tm_shaper_min_burst;
static uint32_t tm_shaper_max_burst;
/* Clamp a shaper rate (bps) into the range supported by all TM levels.
 * A zero rate is passed through untouched (it means "no rate configured").
 * Logs an INFO line whenever clamping actually changed the value. */
static uint64_t
clamp_rate(uint64_t rate)
{
	uint64_t clamped;

	if (rate == 0)
		return 0;

	clamped = ODPH_MIN(ODPH_MAX(rate, tm_shaper_min_rate), tm_shaper_max_rate);

	if (clamped != rate)
		printf("INFO: Clamped shaper rate from %" PRIu64 " bps"
		       " to %" PRIu64 " bps\n", rate, clamped);

	return clamped;
}
/* Clamp a shaper burst (bits) into the range supported by all TM levels.
 * A zero burst is passed through untouched (it means "no burst configured").
 *
 * Fix: the INFO message was missing the spaces around "bits", producing
 * e.g. "from 1000bits to 2000bits". */
static uint32_t
clamp_burst(uint32_t burst)
{
	uint32_t clamped;

	if (burst == 0)
		return 0;

	clamped = ODPH_MIN(ODPH_MAX(burst, tm_shaper_min_burst), tm_shaper_max_burst);

	if (clamped != burst)
		printf("INFO: Clamped shaper burst from %" PRIu32 " bits to %" PRIu32 " bits\n",
		       burst, clamped);

	return clamped;
}
/* Returns the number of errors encountered. */
/* Create one shaper profile, one threshold profile and one WRED profile per
 * packet color from the given parameter set, storing the resulting handles
 * in *profile_set.
 *
 * base_name/name_idx form the profile name ("base" when name_idx is 0,
 * otherwise "base-idx"); the WRED profiles get "-<color>" appended.
 * shaper_scale multiplies the configured rates/bursts (clamped to the TM
 * capability range) and threshold_scale multiplies the queue thresholds,
 * so the same parameter table can serve levels with different fan-in. */
static uint32_t create_profile_set(profile_params_set_t *profile_params_set,
				   profile_set_t *profile_set,
				   const char *base_name,
				   uint32_t name_idx,
				   uint32_t shaper_scale,
				   uint32_t threshold_scale)
{
	odp_tm_threshold_params_t threshold_params, *thresholds;
	odp_tm_shaper_params_t shaper_params, *shaper;
	odp_tm_wred_params_t wred_params, *wred;
	uint32_t err_cnt, color;
	char name[ODP_TM_NAME_LEN], wred_name[ODP_TM_NAME_LEN];

	err_cnt = 0;
	if (name_idx == 0)
		snprintf(name, sizeof(name), "%s", base_name);
	else
		snprintf(name, sizeof(name), "%s-%" PRIu32,
			 base_name, name_idx);

	/* Shaper profile: scale rates and bursts, clamping to what every TM
	 * level supports. */
	odp_tm_shaper_params_init(&shaper_params);
	shaper = &profile_params_set->shaper_params;
	shaper_params.commit_rate = clamp_rate(shaper->commit_rate *
					       shaper_scale);
	shaper_params.peak_rate = clamp_rate(shaper->peak_rate *
					     shaper_scale);
	shaper_params.commit_burst = clamp_burst(shaper->commit_burst *
						 shaper_scale);
	shaper_params.peak_burst = clamp_burst(shaper->peak_burst *
					       shaper_scale);
	shaper_params.dual_rate = shaper->dual_rate;
	shaper_params.shaper_len_adjust = shaper->shaper_len_adjust;
	profile_set->shaper_profile = odp_tm_shaper_create(name,
							   &shaper_params);
	if (profile_set->shaper_profile == ODP_TM_INVALID)
		err_cnt++;

	/* Threshold profile: scale both packet and byte limits. */
	odp_tm_threshold_params_init(&threshold_params);
	thresholds = &profile_params_set->threshold_params;
	threshold_params.max_pkts = thresholds->max_pkts * threshold_scale;
	threshold_params.max_bytes = thresholds->max_bytes * threshold_scale;
	threshold_params.enable_max_pkts = thresholds->enable_max_pkts;
	threshold_params.enable_max_bytes = thresholds->enable_max_bytes;
	profile_set->threshold_profile =
		odp_tm_threshold_create(name, &threshold_params);
	if (profile_set->threshold_profile == ODP_TM_INVALID)
		err_cnt++;

	/* One WRED profile per packet color, copied from the parameter set. */
	for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
		snprintf(wred_name, sizeof(wred_name), "%s-%" PRIu32,
			 name, color);
		odp_tm_wred_params_init(&wred_params);
		wred = &profile_params_set->wred_params[color];
		wred_params.min_threshold = wred->min_threshold;
		wred_params.med_threshold = wred->med_threshold;
		wred_params.med_drop_prob = wred->med_drop_prob;
		wred_params.max_drop_prob = wred->max_drop_prob;
		wred_params.enable_wred = wred->enable_wred;
		wred_params.use_byte_fullness = wred->use_byte_fullness;
		profile_set->wred_profiles[color] =
			odp_tm_wred_create(wred_name, &wred_params);
		if (profile_set->wred_profiles[color] == ODP_TM_INVALID)
			err_cnt++;
	}

	return err_cnt;
}
/* Returns the number of errors encountered. */
static uint32_t init_profile_sets(void)
{
uint32_t class_shaper_scale, class_threshold_scale, user_shaper_scale;
uint32_t user_threshold_scale, err_cnt, app_idx;
class_shaper_scale = TM_QUEUES_PER_CLASS / 2;
class_threshold_scale = TM_QUEUES_PER_CLASS;
user_shaper_scale = TM_QUEUES_PER_USER / 2;
user_threshold_scale = TM_QUEUES_PER_USER;
err_cnt = 0;
err_cnt += create_profile_set(&COMPANY_PROFILE_PARAMS,
&COMPANY_PROFILE_SET,
"CompanyProfiles", 0, 1, 1);
err_cnt += create_profile_set(&COS0_PROFILE_PARAMS,
&COS_PROFILE_SETS[0], "ServiceClass0", 0,
class_shaper_scale,
class_threshold_scale);
err_cnt += create_profile_set(&COS1_PROFILE_PARAMS,
&COS_PROFILE_SETS[1], "ServiceClass1", 0,
class_shaper_scale,
class_threshold_scale);
err_cnt += create_profile_set(&COS2_PROFILE_PARAMS,
&COS_PROFILE_SETS[2], "ServiceClass2", 0,
class_shaper_scale,
class_threshold_scale);
err_cnt += create_profile_set(&COS3_PROFILE_PARAMS,
&COS_PROFILE_SETS[3], "ServiceClass3", 0,
class_shaper_scale,
class_threshold_scale);
err_cnt += create_profile_set(&COS0_PROFILE_PARAMS,
&USER_PROFILE_SETS[0], "UserSvc0", 0,
user_shaper_scale, user_threshold_scale);
err_cnt += create_profile_set(&COS1_PROFILE_PARAMS,
&USER_PROFILE_SETS[1], "UserSvc1", 0,
user_shaper_scale, user_threshold_scale);
err_cnt += create_profile_set(&COS2_PROFILE_PARAMS,
&USER_PROFILE_SETS[2], "UserSvc2", 0,
user_shaper_scale, user_threshold_scale);
err_cnt += create_profile_set(&COS3_PROFILE_PARAMS,
&USER_PROFILE_SETS[3], "UserSvc3", 0,
user_shaper_scale, user_threshold_scale);
for (app_idx = 0; app_idx < APPS_PER_USER; app_idx++) {
err_cnt += create_profile_set(&COS0_PROFILE_PARAMS,
&APP_PROFILE_SETS[0][app_idx],
"AppSvc0", app_idx + 1, 1, 1);
err_cnt += create_profile_set(&COS1_PROFILE_PARAMS,
&APP_PROFILE_SETS[1][app_idx],
"AppSvc1", app_idx + 1, 1, 1);
err_cnt += create_profile_set(&COS2_PROFILE_PARAMS,
&APP_PROFILE_SETS[2][app_idx],
"AppSvc2", app_idx + 1, 1, 1);
err_cnt += create_profile_set(&COS3_PROFILE_PARAMS,
&APP_PROFILE_SETS[3][app_idx],
"AppSvc3", app_idx + 1, 1, 1);
}
return err_cnt;
}
/* Create one "user" node under the given class node, plus its tm_queues
 * (APPS_PER_USER apps x TM_QUEUES_PER_APP queues), recording each queue in
 * queue_num_tbls[svc_class]. Returns 0 on success, negative on failure.
 *
 * Fixes: the results of odp_tm_node_create(), odp_tm_node_connect() and
 * odp_tm_queue_create() were previously ignored, so a failure would have
 * been silently carried forward as an invalid handle. */
static int config_example_user(odp_tm_node_t cos_tm_node,
			       uint8_t svc_class,
			       uint32_t user_num)
{
	odp_tm_queue_params_t tm_queue_params;
	odp_tm_node_params_t tm_node_params;
	odp_tm_queue_t tm_queue;
	odp_tm_node_t user_tm_node;
	profile_set_t *profile_set;
	uint32_t app_idx, queue_idx, svc_class_queue_num;
	char user_name[ODP_TM_NAME_LEN];
	int rc;

	profile_set = &USER_PROFILE_SETS[svc_class];
	odp_tm_node_params_init(&tm_node_params);
	tm_node_params.max_fanin = 64;
	tm_node_params.shaper_profile = profile_set->shaper_profile;
	tm_node_params.threshold_profile = profile_set->threshold_profile;
	tm_node_params.wred_profile[ODP_PACKET_GREEN] =
		profile_set->wred_profiles[0];
	tm_node_params.wred_profile[ODP_PACKET_YELLOW] =
		profile_set->wred_profiles[1];
	tm_node_params.wred_profile[ODP_PACKET_RED] =
		profile_set->wred_profiles[2];
	tm_node_params.level = 2;

	snprintf(user_name, sizeof(user_name), "Subscriber-%" PRIu32, user_num);
	user_tm_node = odp_tm_node_create(odp_tm_test, user_name,
					  &tm_node_params);
	if (user_tm_node == ODP_TM_INVALID) {
		printf("Error: failed to create node '%s'\n", user_name);
		return -1;
	}

	rc = odp_tm_node_connect(user_tm_node, cos_tm_node);
	if (rc < 0)
		return rc;

	for (app_idx = 0; app_idx < APPS_PER_USER; app_idx++) {
		profile_set = &APP_PROFILE_SETS[svc_class][app_idx];
		for (queue_idx = 0; queue_idx < TM_QUEUES_PER_APP;
		     queue_idx++) {
			odp_tm_queue_params_init(&tm_queue_params);
			tm_queue_params.shaper_profile =
				profile_set->shaper_profile;
			tm_queue_params.threshold_profile =
				profile_set->threshold_profile;
			/* Queue priority mirrors the service class number. */
			tm_queue_params.priority = svc_class;
			tm_queue_params.wred_profile[ODP_PACKET_GREEN] =
				profile_set->wred_profiles[ODP_PACKET_GREEN];
			tm_queue_params.wred_profile[ODP_PACKET_YELLOW] =
				profile_set->wred_profiles[ODP_PACKET_YELLOW];
			tm_queue_params.wred_profile[ODP_PACKET_RED] =
				profile_set->wred_profiles[ODP_PACKET_RED];

			tm_queue = odp_tm_queue_create(odp_tm_test,
						       &tm_queue_params);
			if (tm_queue == ODP_TM_INVALID) {
				printf("Error: failed to create tm_queue\n");
				return -1;
			}

			rc = odp_tm_queue_connect(tm_queue, user_tm_node);
			if (rc < 0)
				return rc;

			/* Record the queue for the traffic generator. */
			svc_class_queue_num = next_queue_nums[svc_class]++;
			queue_num_tbls[svc_class][svc_class_queue_num] =
				tm_queue;
		}
	}

	return 0;
}
/* Build the 3-level hierarchy: a root "company" node (level 0), one node
 * per service class under it (level 1), and USERS_PER_SVC_CLASS user
 * subtrees per class (level 2, via config_example_user). Finally connects
 * the company node to the TM egress (ODP_TM_ROOT). Always returns 0. */
static int config_company_node(const char *company_name)
{
	odp_tm_node_params_t tm_node_params;
	profile_set_t *profile_set;
	odp_tm_node_t company_tm_node, cos_tm_node;
	uint32_t cos_idx, user_idx;
	char cos_node_name[ODP_TM_NAME_LEN];

	profile_set = &COMPANY_PROFILE_SET;
	odp_tm_node_params_init(&tm_node_params);
	tm_node_params.max_fanin = 64;
	tm_node_params.shaper_profile = profile_set->shaper_profile;
	tm_node_params.threshold_profile = profile_set->threshold_profile;
	tm_node_params.wred_profile[ODP_PACKET_GREEN] =
		profile_set->wred_profiles[0];
	tm_node_params.wred_profile[ODP_PACKET_YELLOW] =
		profile_set->wred_profiles[1];
	tm_node_params.wred_profile[ODP_PACKET_RED] =
		profile_set->wred_profiles[2];
	tm_node_params.level = 0;
	company_tm_node = odp_tm_node_create(odp_tm_test, company_name,
					     &tm_node_params);

	for (cos_idx = 0; cos_idx < NUM_SVC_CLASSES; cos_idx++) {
		odp_tm_node_params_init(&tm_node_params);
		profile_set = &COS_PROFILE_SETS[cos_idx];
		tm_node_params.max_fanin = 64;
		tm_node_params.shaper_profile = profile_set->shaper_profile;
		tm_node_params.threshold_profile =
			profile_set->threshold_profile;
		tm_node_params.level = 1;
		tm_node_params.wred_profile[ODP_PACKET_GREEN] =
			profile_set->wred_profiles[ODP_PACKET_GREEN];
		tm_node_params.wred_profile[ODP_PACKET_YELLOW] =
			profile_set->wred_profiles[ODP_PACKET_YELLOW];
		tm_node_params.wred_profile[ODP_PACKET_RED] =
			profile_set->wred_profiles[ODP_PACKET_RED];

		snprintf(cos_node_name, sizeof(cos_node_name),
			 "%s-Class-%" PRIu32, company_name, cos_idx);
		cos_tm_node = odp_tm_node_create(odp_tm_test, cos_node_name,
						 &tm_node_params);
		odp_tm_node_connect(cos_tm_node, company_tm_node);

		/* User numbers encode the class in the high bits. */
		for (user_idx = 0; user_idx < USERS_PER_SVC_CLASS; user_idx++)
			config_example_user(cos_tm_node, cos_idx,
					    cos_idx * 256 + user_idx);
	}

	odp_tm_node_connect(company_tm_node, ODP_TM_ROOT);
	return 0;
}
static int create_and_config_tm(void)
{
odp_tm_requirements_t requirements;
uint32_t level, err_cnt;
odp_tm_requirements_init(&requirements);
requirements.max_tm_queues = 10 * NUM_TM_QUEUES;
requirements.num_levels = 3;
requirements.tm_queue_shaper_needed = true;
requirements.tm_queue_wred_needed = true;
for (level = 0; level < 3; level++) {
per_level = &requirements.per_level[level];
per_level->max_num_tm_nodes = MAX_NODES_PER_LEVEL;
per_level->max_fanin_per_node = 64;
per_level->max_priority = 3;
per_level->min_weight = 1;
per_level->max_weight = 255;
per_level->tm_node_shaper_needed = true;
per_level->tm_node_wred_needed = true;
per_level->tm_node_dual_slope_needed = true;
per_level->fair_queuing_needed = true;
per_level->weights_needed = true;
}
egress.egress_kind = ODP_TM_EGRESS_FN;
egress.egress_fcn = tester_egress_fcn;
odp_tm_test = odp_tm_create("TM test", &requirements, &egress);
if (odp_tm_test == ODP_TM_INVALID) {
printf("Error: failed to create TM\n");
return -1;
}
if (odp_tm_capability(odp_tm_test, &tm_capa) != 0) {
printf("Error: failed to get tm capability");
return -1;
}
tm_shaper_min_rate = tm_capa.per_level[0].min_rate;
tm_shaper_max_rate = tm_capa.per_level[0].max_rate;
tm_shaper_min_burst = tm_capa.per_level[0].min_burst;
tm_shaper_max_burst = tm_capa.per_level[0].max_burst;
for (level = 1; level < tm_capa.max_levels; level++) {
odp_tm_level_capabilities_t *level_capa = &tm_capa.per_level[level];
if (level_capa->min_rate > tm_shaper_min_rate)
tm_shaper_min_rate = level_capa->min_rate;
if (level_capa->min_burst > tm_shaper_min_burst)
tm_shaper_min_burst = level_capa->min_burst;
if (level_capa->max_rate < tm_shaper_max_rate)
tm_shaper_max_rate = level_capa->max_rate;
if (level_capa->max_burst < tm_shaper_max_burst)
tm_shaper_max_burst = level_capa->max_burst;
}
if (tm_shaper_min_rate > tm_shaper_max_rate ||
tm_shaper_min_burst > tm_shaper_max_burst) {
printf("Error: No shaper rate supported by all TM levels");
return -1;
}
err_cnt = init_profile_sets();
if (err_cnt != 0)
printf("%s init_profile_sets encountered %" PRIu32 " errors\n",
__func__, err_cnt);
config_company_node("TestCompany");
return err_cnt;
}
/* Return the next pseudo-random byte, refilling random_buf from
 * odp_random_data() whenever the buffer is exhausted. */
static uint32_t random_8(void)
{
	if (next_rand_byte >= RANDOM_BUF_LEN) {
		odp_random_data(random_buf, RANDOM_BUF_LEN, 1);
		next_rand_byte = 0;
	}

	return random_buf[next_rand_byte++];
}
static uint32_t random_16(void)
{
uint8_t byte1, byte2;
if ((RANDOM_BUF_LEN - 1) <= next_rand_byte) {
odp_random_data(random_buf, RANDOM_BUF_LEN, 1);
next_rand_byte = 0;
}
byte1 = random_buf[next_rand_byte++];
byte2 = random_buf[next_rand_byte++];
return (((uint16_t)byte1) << 8) | ((uint16_t)byte2);
}
/* Pick a service class for the next packet from one random byte.
 * Distribution (out of 256): class 0 ~25/256, class 1 ~25/256,
 * class 2 ~101/256, class 3 ~105/256 — identical to the original mapping.
 *
 * Fix: the original repeated the lower bound of each range redundantly and
 * ended with an unreachable duplicate `else` branch; a simple ascending
 * threshold chain expresses the same mapping. */
static uint32_t pkt_service_class(void)
{
	uint32_t rand8;

	/* Make most of the traffic use service class 3 to increase the amount
	 * of delayed traffic so as to stimulate more interesting behaviors.
	 */
	rand8 = random_8();

	if (rand8 <= 24)
		return 0;
	else if (rand8 <= 49)
		return 1;
	else if (rand8 <= 150)
		return 2;
	else
		return 3;
}
/* Allocate a packet of pkt_len bytes and give it a random color and
 * drop-eligibility (~224/256 green, ~24/256 yellow, ~8/256 red;
 * ~240/256 drop eligible). Returns ODP_PACKET_INVALID on alloc failure.
 *
 * Fix: on allocation failure the original returned the literal 0, which is
 * not guaranteed to equal ODP_PACKET_INVALID (handles are opaque). */
static odp_packet_t make_odp_packet(uint16_t pkt_len)
{
	odp_packet_t odp_pkt;
	uint8_t rand8a, rand8b, pkt_color, drop_eligible;

	rand8a = random_8();
	rand8b = random_8();
	pkt_color = (rand8a < 224) ? 0 : ((rand8a < 248) ? 1 : 2);
	drop_eligible = (rand8b < 240) ? 1 : 0;

	odp_pkt = odp_packet_alloc(odp_pool, pkt_len);
	if (odp_pkt == ODP_PACKET_INVALID) {
		printf("%s odp_packet_alloc failure *******\n", __func__);
		return ODP_PACKET_INVALID;
	}

	odp_packet_color_set(odp_pkt, pkt_color);
	odp_packet_drop_eligible_set(odp_pkt, drop_eligible);
	return odp_pkt;
}
/* TM egress callback: invoked by the TM system for every packet it emits.
 * The example only counts emissions; the packet itself is unused. */
void tester_egress_fcn(odp_packet_t odp_pkt ODP_UNUSED)
{
	odp_atomic_inc_u32(&atomic_pkts_from_tm);
}
static int traffic_generator(uint32_t pkts_to_send)
{
odp_pool_param_t pool_params;
odp_tm_queue_t tm_queue;
odp_bool_t tm_is_idle;
uint32_t svc_class, queue_num, pkt_len, pkts_into_tm;
uint32_t pkts_from_tm, pkt_cnt, millisecs, odp_tm_enq_errs;
int rc;
memset(&pool_params, 0, sizeof(odp_pool_param_t));
pool_params.type = ODP_POOL_PACKET;
pool_params.pkt.num = pkts_to_send + 10;
pool_params.pkt.len = 1600;
pool_params.pkt.seg_len = 0;
pool_params.pkt.uarea_size = 0;
odp_pool = odp_pool_create("MyPktPool", &pool_params);
odp_tm_enq_errs = 0;
pkt_cnt = 0;
while (pkt_cnt < pkts_to_send) {
svc_class = pkt_service_class();
queue_num = random_16() & (TM_QUEUES_PER_CLASS - 1);
tm_queue = queue_num_tbls[svc_class][queue_num];
pkt_len = ((uint32_t)((random_8() & 0x7F) + 2)) * 32;
pkt_len = ODPH_MIN(pkt_len, 1500u);
pkt = make_odp_packet(pkt_len);
pkt_cnt++;
rc = odp_tm_enq(tm_queue, pkt);
if (rc < 0) {
odp_tm_enq_errs++;
continue;
}
odp_atomic_inc_u32(&atomic_pkts_into_tm);
}
printf("%s odp_tm_enq_errs=%" PRIu32 "\n", __func__, odp_tm_enq_errs);
/* Wait until the main traffic mgmt worker thread is idle and has no
* outstanding events (i.e. no timers, empty work queue, etc), but
* not longer than 60 seconds.
*/
for (millisecs = 0; millisecs < 600000; millisecs++) {
usleep(100);
tm_is_idle = odp_tm_is_idle(odp_tm_test);
if (tm_is_idle)
break;
}
if (!tm_is_idle)
printf("%s WARNING stopped waiting for the TM system "
"to be IDLE!\n", __func__);
/* Wait for up to 2 seconds for pkts_from_tm to match pkts_into_tm. */
for (millisecs = 0; millisecs < 2000; millisecs++) {
usleep(1000);
pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
if (pkts_into_tm <= pkts_from_tm)
break;
}
return 0;
}
/* Parse the command line. Supported flags:
 *   -n <count>  number of packets to send (sets g_num_pkts_to_send)
 *   -q          quiet: suppress TM stats (clears g_print_tm_stats)
 * Returns 0 on success, -1 on any unsupported or malformed argument. */
static int process_cmd_line_options(uint32_t argc, char *argv[])
{
	uint32_t idx = 1;

	while (idx < argc) {
		char *opt = argv[idx++];

		/* Only '-' flag style options are supported. */
		if (!opt || opt[0] != '-')
			return -1;

		switch (opt[1]) {
		case 'n':
			if (argc <= idx)
				return -1;
			g_num_pkts_to_send = atoi(argv[idx++]);
			break;

		case 'q':
			g_print_tm_stats = FALSE;
			break;

		default:
			printf("Unrecognized cmd line option '%s'\n", opt);
			return -1;
		}
	}

	return 0;
}
/* Fatal-signal handler: name the signal, dump a backtrace to stderr, flush
 * everything and abort (so a core is produced under the raised rlimit).
 *
 * Fixes: backtrace() was passed the magic size 100 although bt_array holds
 * 128 entries — use the actual array size; `signal` is an int, so print it
 * with %d rather than %u (format/argument type mismatch). */
static void signal_handler(int signal)
{
	size_t num_stack_frames;
	const char *signal_name;
	void *bt_array[128];

	switch (signal) {
	case SIGILL:
		signal_name = "SIGILL"; break;
	case SIGFPE:
		signal_name = "SIGFPE"; break;
	case SIGSEGV:
		signal_name = "SIGSEGV"; break;
	case SIGTERM:
		signal_name = "SIGTERM"; break;
	case SIGBUS:
		signal_name = "SIGBUS"; break;
	default:
		signal_name = "UNKNOWN"; break;
	}

	num_stack_frames = backtrace(bt_array,
				     sizeof(bt_array) / sizeof(bt_array[0]));
	printf("Received signal=%d (%s) exiting.", signal, signal_name);
	backtrace_symbols_fd(bt_array, num_stack_frames, fileno(stderr));
	fflush(NULL);
	sync();
	abort();
}
static int destroy_tm_queues(void)
{
int i;
int class;
int ret;
for (i = 0; i < NUM_SVC_CLASSES; i++)
for (class = 0; class < TM_QUEUES_PER_CLASS; class++) {
odp_tm_queue_t tm_queue;
tm_queue = queue_num_tbls[i][class];
ret = odp_tm_queue_info(tm_queue, &info);
if (ret) {
printf("Err: odp_tm_queue_info %d\n", ret);
return -1;
}
if (ret) {
printf("Err: odp_tm_node_disconnect %d\n", ret);
return -1;
}
ret = odp_tm_queue_disconnect(tm_queue);
if (ret) {
printf("odp_tm_queue_disconnect %d\n", ret);
return -1;
}
ret = odp_tm_queue_destroy(tm_queue);
if (ret) {
printf("odp_tm_queue_destroy %d\n", ret);
return -1;
}
}
return 0;
}
int main(int argc, char *argv[])
{
struct sigaction signal_action;
struct rlimit rlimit;
uint32_t pkts_into_tm, pkts_from_tm;
odp_instance_t instance;
int rc;
memset(&signal_action, 0, sizeof(signal_action));
signal_action.sa_handler = signal_handler;
sigfillset(&signal_action.sa_mask);
sigaction(SIGILL, &signal_action, NULL);
sigaction(SIGFPE, &signal_action, NULL);
sigaction(SIGSEGV, &signal_action, NULL);
sigaction(SIGTERM, &signal_action, NULL);
sigaction(SIGBUS, &signal_action, NULL);
getrlimit(RLIMIT_CORE, &rlimit);
rlimit.rlim_cur = rlimit.rlim_max;
setrlimit(RLIMIT_CORE, &rlimit);
rc = odp_init_global(&instance, &ODP_INIT_PARAMS, NULL);
if (rc != 0) {
printf("Error: odp_init_global() failed, rc = %d\n", rc);
return -1;
}
if (rc != 0) {
printf("Error: odp_init_local() failed, rc = %d\n", rc);
return -1;
}
if (process_cmd_line_options(argc, argv) < 0)
return -1;
rc = create_and_config_tm();
if (rc != 0)
return rc;
/* Start TM */
rc = odp_tm_start(odp_tm_test);
if (rc != 0) {
printf("Error: odp_tm_start() failed, rc=%d\n", rc);
return -1;
}
odp_random_data(random_buf, RANDOM_BUF_LEN, 1);
next_rand_byte = 0;
odp_atomic_init_u32(&atomic_pkts_into_tm, 0);
odp_atomic_init_u32(&atomic_pkts_from_tm, 0);
traffic_generator(g_num_pkts_to_send);
pkts_into_tm = odp_atomic_load_u32(&atomic_pkts_into_tm);
pkts_from_tm = odp_atomic_load_u32(&atomic_pkts_from_tm);
printf("pkts_into_tm=%" PRIu32 " pkts_from_tm=%" PRIu32 "\n",
pkts_into_tm, pkts_from_tm);
odp_tm_stats_print(odp_tm_test);
/* Stop TM */
rc = odp_tm_stop(odp_tm_test);
if (rc != 0) {
printf("Error: odp_tm_stop() failed, rc = %d\n", rc);
return -1;
}
rc = destroy_tm_queues();
if (rc != 0) {
printf("Error: destroy_tm_queues() failed, rc = %d\n", rc);
return -1;
}
rc = odp_pool_destroy(odp_pool);
if (rc != 0) {
printf("Error: odp_pool_destroy() failed, rc = %d\n", rc);
return -1;
}
rc = odp_tm_destroy(odp_tm_test);
if (rc != 0) {
printf("Error: odp_tm_destroy() failed, rc = %d\n", rc);
return -1;
}
if (rc != 0) {
printf("Error: odp_term_local() failed, rc = %d\n", rc);
return -1;
}
rc = odp_term_global(instance);
if (rc != 0) {
printf("Error: odp_term_global() failed, rc = %d\n", rc);
return -1;
}
printf("Quit\n");
return 0;
}
void odp_atomic_init_u32(odp_atomic_u32_t *atom, uint32_t val)
Initialize atomic uint32 variable.
uint32_t odp_atomic_load_u32(odp_atomic_u32_t *atom)
Load value of atomic uint32 variable.
void odp_atomic_inc_u32(odp_atomic_u32_t *atom)
Increment atomic uint32 variable.
#define ODP_UNUSED
Intentionally unused variables of functions.
Definition: spec/hints.h:54
int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
Thread local ODP initialization.
void odp_override_abort(void) ODP_NORETURN
ODP abort function.
int odp_override_log(odp_log_level_t level, const char *fmt,...)
ODP log function.
int odp_init_global(odp_instance_t *instance, const odp_init_t *params, const odp_platform_init_t *platform_params)
Global ODP initialization.
int odp_term_local(void)
Thread local ODP termination.
int odp_term_global(odp_instance_t instance)
Global ODP termination.
uint64_t odp_instance_t
ODP instance ID.
void odp_packet_color_set(odp_packet_t pkt, odp_packet_color_t color)
Set packet color.
#define ODP_NUM_PACKET_COLORS
Maximum number of packet colors which accommodates ODP_PACKET_GREEN, ODP_PACKET_YELLOW and ODP_PACKET...
void odp_packet_drop_eligible_set(odp_packet_t pkt, odp_bool_t status)
Set drop eligible status.
odp_packet_t odp_packet_alloc(odp_pool_t pool, uint32_t len)
Allocate a packet from a packet pool.
void odp_packet_shaper_len_adjust_set(odp_packet_t pkt, int8_t adj)
Set shaper length adjustment.
#define ODP_PACKET_INVALID
Invalid packet.
@ ODP_PACKET_YELLOW
Packet is yellow.
@ ODP_PACKET_RED
Packet is red.
@ ODP_PACKET_GREEN
Packet is green.
odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *param)
Create a pool.
int odp_pool_destroy(odp_pool_t pool)
Destroy a pool previously created by odp_pool_create()
@ ODP_POOL_PACKET
Packet pool.
int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
Generate random byte data.
int odp_bool_t
Use odp boolean type to have it well-defined and known size, regardless which compiler is used as thi...
@ ODP_THREAD_CONTROL
Control thread.
int odp_tm_destroy(odp_tm_t tm)
Destroy a TM system.
odp_tm_node_t odp_tm_node_create(odp_tm_t tm, const char *name, const odp_tm_node_params_t *params)
Create an tm_node with a specific set of implemented strict priority levels as given by the prioritie...
odp_bool_t odp_tm_is_idle(odp_tm_t tm)
The odp_tm_is_idle function is used to determine if the specified ODP traffic management system still...
odp_tm_threshold_t odp_tm_threshold_create(const char *name, const odp_tm_threshold_params_t *params)
odp_tm_threshold_create() creates a queue threshold profile object, which can subsequently be attache...
odp_tm_wred_t odp_tm_wred_create(const char *name, const odp_tm_wred_params_t *params)
odp_tm_wred_create() creates a WRED (Weighted Random Early Detection) profile object,...
odp_tm_handle_t odp_tm_t
Each odp_tm_t value represents a specific TM system.
odp_tm_t odp_tm_create(const char *name, odp_tm_requirements_t *requirements, odp_tm_egress_t *egress)
Create/instantiate a TM Packet Scheduling system.
odp_tm_handle_t odp_tm_wred_t
Each odp_tm_wred_t value is an opaque ODP handle representing a specific WRED profile usable across a...
void odp_tm_egress_init(odp_tm_egress_t *egress)
Initialize Egress record.
void odp_tm_node_params_init(odp_tm_node_params_t *params)
Initialize TM node parameters.
odp_tm_handle_t odp_tm_node_t
Each odp_tm_node_t value is an opaque ODP handle representing a specific tm_node within a specific TM...
int odp_tm_node_disconnect(odp_tm_node_t src_tm_node)
Disconnect a tm_node to tm_node linkage.
void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
Initialize TM queue parameters.
int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt)
Send packet to TM system.
#define ODP_TM_NAME_LEN
Maximum traffic manager name length, including the null character.
int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
The odp_tm_queue_connect() function connects the indicated tm_queue to a parent tm_node or to the egr...
odp_tm_queue_t odp_tm_queue_create(odp_tm_t tm, const odp_tm_queue_params_t *params)
TM queue create.
odp_tm_handle_t odp_tm_queue_t
Each odp_tm_queue_t value is an opaque ODP handle representing a specific tm_queue within a specific ...
void odp_tm_shaper_params_init(odp_tm_shaper_params_t *params)
Initialize TM shaper parameters.
odp_tm_shaper_t odp_tm_shaper_create(const char *name, const odp_tm_shaper_params_t *params)
odp_tm_shaper_create() creates a shaper profile object, which can subsequently be attached to any num...
int odp_tm_start(odp_tm_t tm)
Start a TM system.
int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
Connects two tm_nodes.
#define ODP_TM_INVALID
The ODP_TM_INVALID constant can be used with any ODP TM handle type and indicates that this value doe...
odp_tm_handle_t odp_tm_threshold_t
Each odp_tm_threshold_t value is an opaque ODP handle representing a specific queue threshold profile...
void odp_tm_threshold_params_init(odp_tm_threshold_params_t *params)
Initialize TM threshold parameters.
int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
Destroy an tm_queue object.
int odp_tm_capability(odp_tm_t tm, odp_tm_capabilities_t *capabilities)
Query Specific TM Capabilities.
int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue)
Disconnect a tm_queue from a tm_system.
void odp_tm_stats_print(odp_tm_t tm)
The odp_tm_stats_print function is used to write implementation-defined information about the specifi...
void odp_tm_wred_params_init(odp_tm_wred_params_t *params)
Initialize TM WRED parameters.
int odp_tm_queue_info(odp_tm_queue_t tm_queue, odp_tm_queue_info_t *info)
Get tm_queue Info.
int odp_tm_stop(odp_tm_t tm)
Stop a TM system.
#define ODP_TM_ROOT
Constant that is used to refer to the egress/root node of the TM subsystem's tree/hierarchy of nodes.
void odp_tm_requirements_init(odp_tm_requirements_t *requirements)
Initialize Requirements record fields to their default values.
odp_tm_handle_t odp_tm_shaper_t
Each odp_tm_shaper_t value is an opaque ODP handle representing a specific shaper profile usable acro...
The OpenDataPlane API.
Global initialization parameters.
odp_log_func_t log_fn
Replacement for the default log fn.
Pool parameters.
uint32_t uarea_size
Minimum user area size in bytes.
uint32_t num
Number of buffers in the pool.
odp_pool_type_t type
Pool type.
struct odp_pool_param_t::@123 pkt
Parameters for packet pools.
uint32_t len
Minimum length of 'num' packets.
uint32_t seg_len
Minimum number of packet data bytes that can be stored in the first segment of a newly allocated pack...
TM Capabilities Record.
odp_tm_level_capabilities_t per_level[ODP_TM_MAX_LEVELS]
The per_level array specifies the TM system capabilities that can vary based upon the tm_node level.
uint8_t max_levels
max_levels specifies that maximum number of levels of hierarchical scheduling allowed by this TM Syst...
The odp_tm_egress_t type is used to describe that type of "egress spigot" associated with this TM sys...
odp_tm_egress_kind_t egress_kind
Union discriminator.
odp_tm_egress_fcn_t egress_fcn
Output to user func.
uint64_t max_rate
Maximum allowed value for odp_tm_shaper_params_t::commit_rate and odp_tm_shaper_params_t::peak_rate w...
uint64_t min_rate
Minimum allowed value for odp_tm_shaper_params_t::commit_rate and odp_tm_shaper_params_t::peak_rate w...
uint32_t max_burst
Maximum allowed value for odp_tm_shaper_params_t::commit_burst and odp_tm_shaper_params_t::peak_burst...
uint32_t min_burst
Minimum allowed value for odp_tm_shaper_params_t::commit_burst and odp_tm_shaper_params_t::peak_burst...
odp_bool_t tm_node_dual_slope_needed
tm_node_dual_slope_needed indicates that the tm_nodes at this level are expected to use the dual slop...
odp_bool_t tm_node_wred_needed
tm_node_wred_needed indicates that the tm_nodes at this level are expected to participate in some for...
uint32_t max_num_tm_nodes
max_num_tm_nodes specifies the maximum number of tm_nodes required at this level.
uint32_t max_weight
max_weight only has significance when the weights_supported field below is true, in which case it spe...
odp_bool_t weights_needed
weights_needed indicates that the tm_node schedulers at this level are expected to have different weight...
uint32_t max_fanin_per_node
max_fanin_per_node specifies the maximum number of fan_in links to any given scheduler (whether weig...
odp_bool_t tm_node_shaper_needed
tm_node_shaper_needed indicates that the tm_nodes at this level are expected to do TM shaping.
uint8_t max_priority
max_priority specifies the maximum number of strict priority levels that will be used by any tm_node ...
uint32_t min_weight
min_weight only has significance when the weights_supported field below is true, in which case it spe...
odp_bool_t fair_queuing_needed
fair_queuing_needed indicates that the tm_node schedulers at this level are expected to implement WFQ...
odp_tm_shaper_t shaper_profile
The shaper profile to be associated with this tm_node.
uint32_t max_fanin
The max_fanin sets the maximum number of src tm_queues and producer tm_nodes that can be simultaneous...
odp_tm_threshold_t threshold_profile
The threshold profile to be used in setting the max queue fullness for WRED and/or tail drop.
uint8_t level
The level (or tm_node stage) sets the level for this tm_node. It must be in range 0....
odp_tm_wred_t wred_profile[ODP_NUM_PACKET_COLORS]
The WRED profile(s) to be associated with this tm_node.
The odp_tm_queue_info_t record type is used to return various bits of information about a given tm_qu...
odp_tm_node_t next_tm_node
The next_tm_node is the "next" node in the tree - i.e.
odp_tm_threshold_t threshold_profile
The threshold profile to be used in setting the max queue fullness for WRED and/or tail drop.
uint8_t priority
The strict priority level assigned to packets in this tm_queue - in other words all packets associate...
odp_tm_wred_t wred_profile[ODP_NUM_PACKET_COLORS]
The WRED profile(s) to be associated with this tm_queue.
odp_tm_shaper_t shaper_profile
The shaper profile to be associated with this tm_queue.
TM Requirements Record.
uint32_t max_tm_queues
max_tm_queues specifies the maximum number of tm_queues that will be used for this TM System.
odp_bool_t tm_queue_wred_needed
tm_queue_wred_needed indicates that the tm_queues are expected to participate in some form of Random ...
uint8_t num_levels
num_levels specifies that number of levels of hierarchical scheduling that will be used.
odp_tm_level_requirements_t per_level[ODP_TM_MAX_LEVELS]
The per_level array specifies the TM system requirements that can vary based upon the tm_node level.
odp_bool_t tm_queue_shaper_needed
tm_queue_shaper_needed indicates that the tm_queues are expected to do TM shaping.
uint32_t commit_burst
The commit burst tolerance for this shaper profile.
odp_bool_t dual_rate
If dual_rate is TRUE it indicates the desire for the implementation to use dual rate shaping for pack...
int8_t shaper_len_adjust
The shaper_len_adjust is a value between -128 and 127 which is directly added to the frame_len of a p...
uint64_t commit_rate
The committed information rate for this shaper profile.
uint32_t peak_burst
The peak burst tolerance for this shaper profile.
uint64_t peak_rate
The peak information rate for this shaper profile.
TM threshold parameters.
uint64_t max_bytes
max byte cnt for this threshold profile
odp_bool_t enable_max_bytes
TRUE if max_bytes is valid.
odp_bool_t enable_max_pkts
TRUE if max_pkts is valid.
uint64_t max_pkts
max pkt cnt for this threshold profile
odp_tm_percent_t med_drop_prob
The med_drop_prob is only used when dual-slope WRED is being used, in which case med_drop_prob MUST b...
odp_bool_t use_byte_fullness
When use_byte_fullness is true then WRED will use queue memory usage as the fullness criterion,...
odp_tm_percent_t med_threshold
The meaning of med_threshold depends upon whether single-slope or dual-slope WRED is being used or no...
odp_bool_t enable_wred
When enable_wred is false, all tm_queues and tm_nodes that are attached to this profile will not take...
odp_tm_percent_t max_drop_prob
The max_drop_prob equals the drop probability when the queue fullness almost equals 100%.
odp_tm_percent_t min_threshold
When min_threshold is set to zero then single-slope WRED is enabled, as described in the description ...