34 #ifndef _RTE_IP_FRAG_H_ 35 #define _RTE_IP_FRAG_H_ 85 TAILQ_ENTRY(ip_frag_pkt) lru;
86 struct ip_frag_key key;
91 struct ip_frag frags[IP_MAX_FRAG_NUM];
94 #define IP_FRAG_DEATH_ROW_LEN 32 97 struct rte_ip_frag_death_row { 125 struct ip_pkt_list lru;
127 struct ip_frag_pkt pkt[0];
/*
 * IPv6 fragment extension header "frag_data" field layout:
 * upper 13 bits = fragment offset (in 8-octet units), shifted left by 3;
 * bit 0 = More-Fragments flag; bits 1-2 reserved.
 */
#define	RTE_IPV6_EHDR_MF_SHIFT			0
#define	RTE_IPV6_EHDR_MF_MASK			1
#define	RTE_IPV6_EHDR_FO_SHIFT			3
#define	RTE_IPV6_EHDR_FO_MASK			(~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))

/* Bits of frag_data that carry meaningful fragmentation state. */
#define	RTE_IPV6_FRAG_USED_MASK			\
	(RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)

/* Extract the More-Fragments flag / fragment offset from frag_data. */
#define	RTE_IPV6_GET_MF(x)				((x) & RTE_IPV6_EHDR_MF_MASK)
#define	RTE_IPV6_GET_FO(x)				((x) >> RTE_IPV6_EHDR_FO_SHIFT)

/* Compose frag_data from a (pre-shifted) fragment offset and MF flag. */
#define	RTE_IPV6_SET_FRAG_DATA(fo, mf)	\
	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))

/* IPv6 fragment extension header (RFC 8200, section 4.5). */
struct ipv6_extension_fragment {
	uint8_t next_header;            /**< Next header type */
	uint8_t reserved;               /**< Reserved */
	uint16_t frag_data;             /**< All fragmentation data */
	uint32_t id;                    /**< Packet ID */
} __attribute__((__packed__));
213 uint16_t nb_pkts_out,
242 struct ipv6_extension_fragment *frag_hdr);
255 static inline struct ipv6_extension_fragment *
256 rte_ipv6_frag_get_ipv6_fragment_header(
struct ipv6_hdr *hdr)
258 if (hdr->
proto == IPPROTO_FRAGMENT) {
259 return (
struct ipv6_extension_fragment *) ++hdr;
290 uint16_t nb_pkts_out, uint16_t mtu_size,
326 rte_ipv4_frag_pkt_is_fragmented(
const struct ipv4_hdr * hdr) {
327 uint16_t flag_offset, ip_flag, ip_ofs;
330 ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
331 ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
333 return ip_flag != 0 || ip_ofs != 0;
/**
 * Dump fragmentation table statistics to the given file stream.
 *
 * @param f
 *   File stream to write statistics to.
 * @param tbl
 *   Fragmentation table to dump statistics from.
 */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl);
static uint16_t rte_be_to_cpu_16(uint16_t x)
int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
TAILQ_HEAD(rte_driver_list, rte_driver)
#define IP_FRAG_DEATH_ROW_LEN
struct ip_frag_pkt * last
int32_t rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
#define __rte_cache_aligned