#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <odp_api.h>
#include <odp/helper/odph_api.h>

#include <odp_l3fwd_db.h>
#define JHASH_GOLDEN_RATIO 0x9e3779b9
#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
#define FWD_BJ3_MIX(a, b, c) \
{ \
        a -= c; a ^= rot(c, 4);  c += b; \
        b -= a; b ^= rot(a, 6);  a += c; \
        c -= b; c ^= rot(b, 8);  b += a; \
        a -= c; a ^= rot(c, 16); c += b; \
        b -= a; b ^= rot(a, 19); a += c; \
        c -= b; c ^= rot(b, 4);  b += a; \
}
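/**
 * Compute a hash over the IPv4 flow key
 *
 * One round of Bob Jenkins' lookup3 mix (FWD_BJ3_MIX) is run over the
 * source and destination addresses, with the destination offset by
 * JHASH_GOLDEN_RATIO. Callers mask the result with (bucket count - 1),
 * so the bucket count must be a power of two.
 */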
static inline
uint64_t l3fwd_calc_hash(ipv4_tuple5_t *key)
{
        uint64_t l4_ports = 0;
        uint32_t dst_ip, src_ip;

        src_ip = key->src_ip;
        dst_ip = key->dst_ip + JHASH_GOLDEN_RATIO;
        FWD_BJ3_MIX(src_ip, dst_ip, l4_ports);

        return l4_ports;
}
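/**
 * Parse a text string of the form "a.b.c.d" or "a.b.c.d/w"
 *
 * On success *addr holds the address in host byte order and *depth the
 * prefix length (32 when no "/w" qualifier is given).
 *
 * Illustrative usage (the address below is an example, not taken from
 * this file):
 *
 *      uint32_t net, depth;
 *
 *      if (parse_ipv4_string("10.1.1.0/24", &net, &depth) == 0)
 *              printf("base %x depth %u\n", net, depth);
 *
 * @return 0 on success, -1 on malformed input
 */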
static inline
int parse_ipv4_string(char *ipaddress, uint32_t *addr, uint32_t *depth)
{
        int b[4], qualifier = 32, converted;
        uint32_t addr_le;

        if (strchr(ipaddress, '/')) {
                converted = sscanf(ipaddress, "%d.%d.%d.%d/%d",
                                   &b[3], &b[2], &b[1], &b[0], &qualifier);
                if (converted != 5)
                        return -1;
        } else {
                converted = sscanf(ipaddress, "%d.%d.%d.%d",
                                   &b[3], &b[2], &b[1], &b[0]);
                if (converted != 4)
                        return -1;
        }

        if ((b[0] > 255) || (b[1] > 255) || (b[2] > 255) || (b[3] > 255))
                return -1;
        if (!qualifier || (qualifier > 32))
                return -1;

        addr_le = b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24;
        *addr = odp_le_to_cpu_32(addr_le);
        *depth = qualifier;

        return 0;
}
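/**
 * Format an IPv4 subnet as "a.b.c.d/w" into caller-provided buffer b
 *
 * Returns b so the call can be used directly as a printf() argument.
 */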
static inline
char *ipv4_subnet_str(char *b, ip_addr_range_t *range)
{
        sprintf(b, "%d.%d.%d.%d/%d",
                0xFF & ((range->addr) >> 24),
                0xFF & ((range->addr) >> 16),
                0xFF & ((range->addr) >> 8),
                0xFF & ((range->addr) >> 0),
                range->depth);
        return b;
}
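/**
 * Format a MAC address as "XX:XX:XX:XX:XX:XX" into caller-provided buffer b
 *
 * Returns b so the call can be used directly as a printf() argument.
 */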
static inline
char *mac_addr_str(char *b, odph_ethaddr_t *mac)
{
        uint8_t *byte = mac->addr;

        sprintf(b, "%02X:%02X:%02X:%02X:%02X:%02X",
                byte[0], byte[1], byte[2], byte[3], byte[4], byte[5]);
        return b;
}
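/*
 * Flow lookup cache: a hash table whose buckets hold singly linked chains
 * of flow entries. Buckets and flow entries are carved out of a single
 * shared memory reservation made in create_fwd_hash_cache() below.
 */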
typedef struct flow_entry_s {
        ipv4_tuple5_t key;              /**< flow lookup key */
        struct flow_entry_s *next;      /**< next entry in the bucket chain */
        fwd_db_entry_t *fwd_entry;      /**< forwarding DB entry for this flow */
} flow_entry_t;
typedef struct flow_bucket_s {
        odp_rwlock_t lock;      /**< bucket lock */
        flow_entry_t *next;     /**< first flow entry in the bucket */
} flow_bucket_t;
typedef struct flow_table_s {
        flow_entry_t *flows;    /**< pre-allocated flow entry store */
        flow_bucket_t *bucket;  /**< hash bucket array */
        uint32_t bkt_cnt;       /**< number of buckets */
        uint32_t flow_cnt;      /**< number of pre-allocated flow entries */
        uint32_t next_flow;     /**< index of the next unused flow entry */
} flow_table_t;

static flow_table_t fwd_lookup_cache;
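/**
 * Reserve and initialize the flow lookup cache
 *
 * One shared memory block holds the bucket array followed by the flow
 * entry store. If the first reservation fails, a smaller flow count is
 * tried before giving up.
 */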
static void create_fwd_hash_cache(void)
{
        odp_shm_t hash_shm;
        flow_bucket_t *bucket = NULL;
        flow_entry_t *flows;
        uint32_t bucket_count, flow_count, size;
        uint32_t i;

        flow_count = FWD_MAX_FLOW_COUNT;
        bucket_count = flow_count / FWD_DEF_BUCKET_ENTRIES;

        /* Reserve memory for the bucket array followed by the flow store */
        size = sizeof(flow_bucket_t) * bucket_count +
               sizeof(flow_entry_t) * flow_count;
        hash_shm = odp_shm_reserve("flow_table", size, ODP_CACHE_LINE_SIZE, 0);
        if (hash_shm != ODP_SHM_INVALID)
                bucket = odp_shm_addr(hash_shm);

        if (!bucket) {
                /* Try again with a smaller flow count */
                flow_count /= 4;
                bucket_count = flow_count / FWD_DEF_BUCKET_ENTRIES;
                size = sizeof(flow_bucket_t) * bucket_count +
                       sizeof(flow_entry_t) * flow_count;
                hash_shm = odp_shm_reserve("flow_table", size,
                                           ODP_CACHE_LINE_SIZE, 0);
                if (hash_shm == ODP_SHM_INVALID) {
                        ODPH_ERR("Error: shared mem reserve failed.\n");
                        exit(EXIT_FAILURE);
                }

                bucket = odp_shm_addr(hash_shm);
                if (!bucket) {
                        ODPH_ERR("Error: shared mem alloc failed.\n");
                        exit(EXIT_FAILURE);
                }
        }

        /* Flow entries follow the bucket array in the same reservation */
        size = sizeof(flow_bucket_t) * bucket_count;
        flows = (flow_entry_t *)(void *)((char *)bucket + size);

        fwd_lookup_cache.bucket = bucket;
        fwd_lookup_cache.bkt_cnt = bucket_count;
        fwd_lookup_cache.flows = flows;
        fwd_lookup_cache.flow_cnt = flow_count;

        /* Initialize the buckets */
        for (i = 0; i < bucket_count; i++) {
                bucket = &fwd_lookup_cache.bucket[i];
                odp_rwlock_init(&bucket->lock);
                bucket->next = NULL;
        }

        memset(flows, 0, sizeof(flow_entry_t) * flow_count);
        fwd_lookup_cache.next_flow = 0;
}
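/**
 * Allocate the next unused entry from the preallocated flow store
 *
 * @return Pointer to a free flow entry, or NULL when the cache is full
 */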
static inline flow_entry_t *get_new_flow(void)
{
        uint32_t next;
        flow_entry_t *flow = NULL;

        next = fwd_lookup_cache.next_flow;
        if (next < fwd_lookup_cache.flow_cnt) {
                flow = &fwd_lookup_cache.flows[next];
                fwd_lookup_cache.next_flow++;
        }

        return flow;
}
static inline
int match_key_flow(ipv4_tuple5_t *key, flow_entry_t *flow)
{
        if (key->hi64 == flow->key.hi64 && key->lo64 == flow->key.lo64)
                return 1;

        return 0;
}
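/**
 * Search a bucket's chain for an entry matching the given 5-tuple key
 *
 * @return Matching flow entry, or NULL when the key is not cached
 */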
static inline
flow_entry_t *lookup_fwd_cache(ipv4_tuple5_t *key, flow_bucket_t *bucket)
{
        flow_entry_t *rst;

        odp_rwlock_read_lock(&bucket->lock);
        for (rst = bucket->next; rst != NULL; rst = rst->next) {
                if (match_key_flow(key, rst))
                        break;
        }
        odp_rwlock_read_unlock(&bucket->lock);

        return rst;
}
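/**
 * Cache a forwarding DB entry under the given key
 *
 * A fresh flow entry is taken from the store and linked at the head of
 * the bucket's chain.
 *
 * @return New flow entry, or NULL if entry is NULL or the store is exhausted
 */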
static inline
flow_entry_t *insert_fwd_cache(ipv4_tuple5_t *key,
                               flow_bucket_t *bucket,
                               fwd_db_entry_t *entry)
{
        flow_entry_t *flow;

        if (!entry)
                return NULL;

        flow = get_new_flow();
        if (!flow)
                return NULL;

        flow->key = *key;
        flow->fwd_entry = entry;

        odp_rwlock_write_lock(&bucket->lock);
        flow->next = bucket->next;
        bucket->next = flow;
        odp_rwlock_write_unlock(&bucket->lock);

        return flow;
}
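/**
 * Create the lookup cache and warm it with all hosts of every route
 *
 * Each forwarding DB entry covers 2^(32 - depth) host addresses; a /24
 * route, for example, pre-inserts 256 destination keys. Pre-population
 * stops early if the flow store fills up.
 */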
void init_fwd_hash_cache(void)
{
        fwd_db_entry_t *entry;
        flow_entry_t *flow;
        flow_bucket_t *bucket;
        ipv4_tuple5_t key;
        uint64_t hash;
        uint32_t i, nb_hosts;

        create_fwd_hash_cache();

        memset(&key, 0, sizeof(key));
        for (entry = fwd_db->list; NULL != entry; entry = entry->next) {
                nb_hosts = 1 << (32 - entry->subnet.depth);
                for (i = 0; i < nb_hosts; i++) {
                        key.dst_ip = entry->subnet.addr + i;
                        hash = l3fwd_calc_hash(&key);
                        hash &= fwd_lookup_cache.bkt_cnt - 1;
                        bucket = &fwd_lookup_cache.bucket[hash];
                        flow = lookup_fwd_cache(&key, bucket);
                        if (flow)
                                continue;

                        flow = insert_fwd_cache(&key, bucket, entry);
                        if (!flow)
                                return;
                }
        }
}
/** Global pointer to the forwarding DB */
fwd_db_t *fwd_db;

void init_fwd_db(void)
{
        odp_shm_t shm;

        shm = odp_shm_reserve("shm_fwd_db", sizeof(fwd_db_t),
                              ODP_CACHE_LINE_SIZE, 0);
        if (shm == ODP_SHM_INVALID) {
                ODPH_ERR("Error: shared mem reserve failed.\n");
                exit(EXIT_FAILURE);
        }

        fwd_db = odp_shm_addr(shm);
        if (fwd_db == NULL) {
                ODPH_ERR("Error: shared mem alloc failed.\n");
                exit(EXIT_FAILURE);
        }
        memset(fwd_db, 0, sizeof(*fwd_db));
}
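/**
 * Add a forwarding DB entry parsed from a comma separated route string
 *
 * Tokens are, in order: destination subnet, output interface name and
 * destination MAC, e.g. a string shaped like
 * "10.0.0.0/24,eth0,08:00:27:00:00:01" (illustrative values only).
 *
 * @return 0 on success, -1 on failure
 */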
int create_fwd_db_entry(char *input, char **oif, uint8_t **dst_mac)
{
        int pos = 0;
        char *local, *str, *save, *token;
        fwd_db_entry_t *entry = &fwd_db->array[fwd_db->index];

        *oif = NULL;
        *dst_mac = NULL;

        /* Verify we haven't run out of space */
        if (MAX_DB <= fwd_db->index)
                return -1;

        /* Make a local copy */
        local = malloc(strlen(input) + 1);
        if (NULL == local)
                return -1;
        strcpy(local, input);

        /* Parse tokens separated by ',' */
        str = local;
        save = NULL;
        while (NULL != (token = strtok_r(str, ",", &save))) {
                str = NULL;

                switch (pos) {
                case 0:
                        parse_ipv4_string(token,
                                          &entry->subnet.addr,
                                          &entry->subnet.depth);
                        break;
                case 1:
                        odph_strcpy(entry->oif, token, OIF_LEN);
                        *oif = entry->oif;
                        break;
                case 2:
                        if (odph_eth_addr_parse(&entry->dst_mac, token) < 0) {
                                free(local);
                                return -1;
                        }
                        *dst_mac = entry->dst_mac.addr;
                        break;
                default:
                        printf("ERROR: extra token \"%s\" at position %d\n",
                               token, pos);
                        break;
                }
                pos++;
        }

        /* Add the route to the list */
        fwd_db->index++;
        entry->next = fwd_db->list;
        fwd_db->list = entry;

        free(local);
        return 0;
}
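/**
 * Set the output port id and source MAC for every forwarding DB entry
 * whose output interface name matches intf.
 */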
void resolve_fwd_db(char *intf, int portid, uint8_t *mac)
{
        fwd_db_entry_t *entry;

        for (entry = fwd_db->list; NULL != entry; entry = entry->next) {
                if (strcmp(intf, entry->oif))
                        continue;

                entry->oif_id = portid;
                memcpy(entry->src_mac.addr, mac, ODPH_ETHADDR_LEN);
        }
}
void dump_fwd_db_entry(fwd_db_entry_t *entry)
{
        char subnet_str[MAX_STRING];
        char mac_str[MAX_STRING];

        mac_addr_str(mac_str, &entry->dst_mac);
        printf("%-32s%-32s%-16s\n",
               ipv4_subnet_str(subnet_str, &entry->subnet),
               entry->oif, mac_str);
}
void dump_fwd_db(void)
{
        fwd_db_entry_t *entry;

        printf("Routing table\n"
               "-----------------\n"
               "%-32s%-32s%-16s\n",
               "subnet", "next_hop", "dest_mac");

        for (entry = fwd_db->list; NULL != entry; entry = entry->next)
                dump_fwd_db_entry(entry);
}
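/**
 * Find the forwarding DB entry for a destination address
 *
 * The lookup cache is consulted first; on a miss the DB list is walked,
 * matching the destination against each entry's prefix mask (a depth of
 * 24 gives mask 0xFFFFFF00), and the result is inserted into the cache.
 */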
fwd_db_entry_t *find_fwd_db_entry(ipv4_tuple5_t *key)
{
        fwd_db_entry_t *entry;
        flow_entry_t *flow;
        flow_bucket_t *bucket;
        uint64_t hash;
        ipv4_tuple5_t newkey;

        /* Look up on the destination address only */
        newkey.hi64 = 0;
        newkey.lo64 = 0;
        newkey.dst_ip = key->dst_ip;
        key = &newkey;

        /* Try the lookup cache first */
        hash = l3fwd_calc_hash(key);
        hash &= fwd_lookup_cache.bkt_cnt - 1;
        bucket = &fwd_lookup_cache.bucket[hash];
        flow = lookup_fwd_cache(key, bucket);
        if (flow)
                return flow->fwd_entry;

        /* Fall back to a linear walk of the forwarding DB */
        for (entry = fwd_db->list; NULL != entry; entry = entry->next) {
                uint32_t mask;

                mask = ((1u << entry->subnet.depth) - 1) <<
                       (32 - entry->subnet.depth);

                if (entry->subnet.addr == (key->dst_ip & mask))
                        break;
        }

        insert_fwd_cache(key, bucket, entry);
        return entry;
}