#include <sys/queue.h>

/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Number of tbl8 groups. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries (256 groups x 256 entries). */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)

/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif

/** @internal Bitmask with the valid and ext_entry/valid_group fields set. */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300

/** Bitmask used to indicate a successful lookup. */
#define RTE_LPM_LOOKUP_SUCCESS          0x0100

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
struct rte_lpm_tbl24_entry {
	/* Stores the next hop or the group index (gindex) into tbl8. */
	union {
		uint8_t next_hop;
		uint8_t tbl8_gindex;
	};
	/* Using a single uint8_t to store 3 values. */
	uint8_t valid     :1; /**< Validation flag. */
	uint8_t ext_entry :1; /**< External entry (points into tbl8). */
	uint8_t depth     :6; /**< Rule depth. */
};

/** @internal Tbl8 entry structure. */
struct rte_lpm_tbl8_entry {
	uint8_t next_hop; /**< Next hop. */
	/* Using a single uint8_t to store 3 values. */
	uint8_t valid       :1; /**< Validation flag. */
	uint8_t valid_group :1; /**< Group validation flag. */
	uint8_t depth       :6; /**< Rule depth. */
};
#else
/** @internal Tbl24 entry structure (big-endian field order). */
struct rte_lpm_tbl24_entry {
	uint8_t depth     :6;
	uint8_t ext_entry :1;
	uint8_t valid     :1;
	union {
		uint8_t tbl8_gindex;
		uint8_t next_hop;
	};
};

/** @internal Tbl8 entry structure (big-endian field order). */
struct rte_lpm_tbl8_entry {
	uint8_t depth       :6;
	uint8_t valid_group :1;
	uint8_t valid       :1;
	uint8_t next_hop;
};
#endif
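/*
 * Illustrative note, not part of the original header: read as a
 * uint16_t on a little-endian machine, each entry above has next_hop
 * (or the tbl8 group index) in bits 0-7, the valid flag in bit 8 and
 * the ext_entry/valid_group flag in bit 9. That layout is why
 * RTE_LPM_LOOKUP_SUCCESS is 0x0100 and RTE_LPM_VALID_EXT_ENTRY_BITMASK
 * is 0x0300. A minimal decoding sketch (hypothetical helper name):
 */
static inline void
example_decode_entry(uint16_t tbl_entry)
{
	uint8_t next_hop = (uint8_t)tbl_entry; /* low byte */
	int valid = (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) != 0;
	int ext_entry = (tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK;

	(void)next_hop; (void)valid; (void)ext_entry;
}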
/** @internal Rule structure. */
struct rte_lpm_rule {
	uint32_t ip;       /**< Rule IP address. */
	uint8_t  next_hop; /**< Rule next hop. */
};

/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Number of rules used so far. */
	uint32_t first_rule; /**< Index of the first rule of a given depth. */
};

/** @internal LPM structure. */
struct rte_lpm {
	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
	uint32_t max_rules;          /**< Max. rules per lpm. */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */

	/* LPM tables. */
	struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
			__rte_cache_aligned; /**< LPM tbl8 table. */
	struct rte_lpm_rule rules_tbl[0] \
			__rte_cache_aligned; /**< LPM rules (flexible array). */
};
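/*
 * Illustrative sketch, not part of the original header: how an IPv4
 * address maps onto the two tables above. The top 24 bits index tbl24
 * directly; when a tbl24 entry is extended, its low byte names a tbl8
 * group and the low 8 bits of the address select an entry within it.
 * Helper name is hypothetical.
 */
static inline void
example_lpm_indexing(uint32_t ip, uint8_t tbl8_gindex)
{
	unsigned tbl24_index = ip >> 8;         /* top 24 bits */
	unsigned tbl8_index = (uint8_t)ip +     /* low 8 bits ... */
			(unsigned)tbl8_gindex * /* ... inside the group */
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	(void)tbl24_index; (void)tbl8_index;
}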
/**
 * Create an LPM object.
 *
 * Returns a handle to the LPM object on success, NULL otherwise.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
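/*
 * Usage sketch, added for illustration: create a table that can hold up
 * to 1024 rules on any NUMA socket. The name, sizes and helper name are
 * illustrative; SOCKET_ID_ANY comes from rte_memory.h.
 */
static inline struct rte_lpm *
example_lpm_create(void)
{
	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
			1024, 0);

	/* A NULL return means creation failed (e.g. out of memory). */
	return lpm;
}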
/**
 * Add a rule to the LPM table.
 *
 * Returns 0 on success, a negative value otherwise.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
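/*
 * Usage sketch, added for illustration: install 192.168.1.0/24 with
 * next hop id 5. The address is built by hand here; the helper name
 * and values are hypothetical.
 */
static inline int
example_lpm_add_route(struct rte_lpm *lpm)
{
	uint32_t ip = (192u << 24) | (168u << 16) | (1u << 8) | 0u;

	/* 0 on success, negative errno-style value on failure. */
	return rte_lpm_add(lpm, ip, 24, 5);
}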
/**
 * Look an IP address up in the LPM table.
 *
 * Returns 0 on lookup hit, -EINVAL on invalid parameters,
 * -ENOENT on lookup miss.
 */
static inline int
rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint16_t tbl_entry;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry. */
	memcpy(&tbl_entry, &lpm->tbl24[tbl24_index], sizeof(uint16_t));

	/* Copy tbl8 entry (only if needed). */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		memcpy(&tbl_entry, &lpm->tbl8[tbl8_index], sizeof(uint16_t));
	}

	*next_hop = (uint8_t)tbl_entry;
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
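/*
 * Usage sketch, added for illustration: a single lookup. The helper
 * name and the 0 used as a "no route" value are hypothetical.
 */
static inline uint8_t
example_lpm_lookup_one(struct rte_lpm *lpm, uint32_t ip)
{
	uint8_t next_hop = 0;

	/* 0 on hit; negative (-ENOENT) when no rule covers ip. */
	if (rte_lpm_lookup(lpm, ip, &next_hop) < 0)
		return 0; /* caller-defined "no route" value */
	return next_hop;
}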
/**
 * Look several IP addresses up in the LPM table in one call.
 * Each output entry is the raw 16-bit table entry; test
 * RTE_LPM_LOOKUP_SUCCESS to distinguish hits from misses.
 */
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)

static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint16_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output. */
		memcpy(&next_hops[i], &lpm->tbl24[tbl24_indexes[i]],
				sizeof(uint16_t));

		/* Overwrite output with tbl8 entry if needed. */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					((uint8_t)next_hops[i] *
					 RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			memcpy(&next_hops[i], &lpm->tbl8[tbl8_index],
					sizeof(uint16_t));
		}
	}
	return 0;
}
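/*
 * Usage sketch, added for illustration: bulk lookup over a batch of
 * four addresses. Because the bulk API returns raw 16-bit entries, the
 * caller tests RTE_LPM_LOOKUP_SUCCESS before using the low byte. The
 * helper name and batch size are hypothetical.
 */
static inline void
example_lpm_lookup_burst(const struct rte_lpm *lpm,
		const uint32_t ips[4], uint8_t hops[4])
{
	uint16_t next_hops[4];
	unsigned i;

	rte_lpm_lookup_bulk(lpm, ips, next_hops, 4);

	for (i = 0; i < 4; i++)
		hops[i] = (next_hops[i] & RTE_LPM_LOOKUP_SUCCESS) ?
				(uint8_t)next_hops[i] : 0 /* no route */;
}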
/* Mask of the low byte in each of four 16-bit results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ff00ff00ff00ff)

/**
 * Look four IP addresses up at once using SSE intrinsics.
 * On a miss for lane i, hop[i] is set to the default value defv.
 */
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
		uint16_t defv)
{
	__m128i i24;
	rte_xmm_t i8;
	uint16_t tbl[4];
	uint64_t idx, pt;

	const __m128i mask8 =
		_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);

	/*
	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK replicated for 4 entries
	 * as one 64-bit value (0x0300030003000300).
	 */
	const uint64_t mask_xv =
		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);

	/*
	 * RTE_LPM_LOOKUP_SUCCESS replicated for 4 entries
	 * as one 64-bit value (0x0100010001000100).
	 */
	const uint64_t mask_v =
		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);

	/* Get 4 indexes for tbl24[]. */
	i24 = _mm_srli_epi32(ip, CHAR_BIT);

	/* Extract values from tbl24[]. */
	idx = _mm_cvtsi128_si64(i24);
	i24 = _mm_srli_si128(i24, sizeof(uint64_t));

	memcpy(&tbl[0], &lpm->tbl24[(uint32_t)idx], sizeof(uint16_t));
	memcpy(&tbl[1], &lpm->tbl24[idx >> 32], sizeof(uint16_t));

	idx = _mm_cvtsi128_si64(i24);

	memcpy(&tbl[2], &lpm->tbl24[(uint32_t)idx], sizeof(uint16_t));
	memcpy(&tbl[3], &lpm->tbl24[idx >> 32], sizeof(uint16_t));

	/* Get 4 indexes for tbl8[]. */
	i8.x = _mm_and_si128(ip, mask8);

	pt = (uint64_t)tbl[0] |
		(uint64_t)tbl[1] << 16 |
		(uint64_t)tbl[2] << 32 |
		(uint64_t)tbl[3] << 48;

	/* Search successfully finished for all 4 IP addresses. */
	if (likely((pt & mask_xv) == mask_v)) {
		uintptr_t ph = (uintptr_t)hop;
		*(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
		return;
	}

	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[0] = i8.u32[0] +
			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		memcpy(&tbl[0], &lpm->tbl8[i8.u32[0]], sizeof(uint16_t));
	}
	if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[1] = i8.u32[1] +
			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		memcpy(&tbl[1], &lpm->tbl8[i8.u32[1]], sizeof(uint16_t));
	}
	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[2] = i8.u32[2] +
			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		memcpy(&tbl[2], &lpm->tbl8[i8.u32[2]], sizeof(uint16_t));
	}
	if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[3] = i8.u32[3] +
			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		memcpy(&tbl[3], &lpm->tbl8[i8.u32[3]], sizeof(uint16_t));
	}

	/* Fall back to the default value on any remaining miss. */
	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
}
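/*
 * Usage sketch, added for illustration: vector lookup of four
 * addresses at once. The addresses are packed into one __m128i; 255 is
 * an arbitrary "no route" default returned through defv on a miss.
 * The helper name is hypothetical.
 */
static inline void
example_lpm_lookup_x4(const struct rte_lpm *lpm,
		const uint32_t a[4], uint16_t hop[4])
{
	__m128i ip = _mm_set_epi32(a[3], a[2], a[1], a[0]);

	rte_lpm_lookupx4(lpm, ip, hop, 255);
}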
/* Remaining public API. */

/** Find an existing LPM object by name and return a pointer to it. */
struct rte_lpm *
rte_lpm_find_existing(const char *name);

/** Check if a rule is present in the LPM table and return its next hop. */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop);

/** Delete a rule from the LPM table. */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);

/** Delete all rules from the LPM table. */
void
rte_lpm_delete_all(struct rte_lpm *lpm);

/** Free an LPM object. */
void
rte_lpm_free(struct rte_lpm *lpm);
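/*
 * End-to-end sketch tying the API together (illustrative only; names,
 * sizes and next-hop ids are hypothetical, error handling shortened).
 * Create a table, add one /24 rule, look an address up, then delete
 * the rule and release the table.
 */
static inline void
example_lpm_lifecycle(void)
{
	uint8_t next_hop;
	uint32_t ip = (10u << 24) | (0u << 16) | (0u << 8) | 1u; /* 10.0.0.1 */
	struct rte_lpm *lpm = rte_lpm_create("demo", SOCKET_ID_ANY, 256, 0);

	if (lpm == NULL)
		return;

	if (rte_lpm_add(lpm, ip & 0xffffff00u, 24, 7) == 0 &&
			rte_lpm_lookup(lpm, ip, &next_hop) == 0) {
		/* next_hop == 7 here: the /24 rule matched. */
	}

	rte_lpm_delete(lpm, ip & 0xffffff00u, 24);
	rte_lpm_free(lpm);
}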