@@ -77,27 +77,38 @@ fib_parse_packet(struct rte_mbuf *mbuf,
*/
#if !defined FIB_SEND_MULTI
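+/* Per-packet processing: run the RFC1812 checks when enabled (these can
+ * set the hop to BAD_PORT) and rewrite the Ethernet source/destination
+ * MAC addresses for the resolved next hop.
+ */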
static inline void
-fib_send_single(int nb_tx, struct lcore_conf *qconf,
- struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
+process_packet(struct rte_mbuf *pkt, uint16_t *hop)
{
- int32_t j;
struct rte_ether_hdr *eth_hdr;
- for (j = 0; j < nb_tx; j++) {
- /* Run rfc1812 if packet is ipv4 and checks enabled. */
+ /* Run rfc1812 if packet is ipv4 and checks enabled. */
#if defined DO_RFC_1812_CHECKS
- rfc1812_process((struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
- pkts_burst[j], struct rte_ether_hdr *) + 1),
- &hops[j], pkts_burst[j]->packet_type);
+ rfc1812_process(
+ (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
+ pkt, struct rte_ether_hdr *) +
+ 1),
+ hop, pkt->packet_type,
+ pkt->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK);
#endif
- /* Set MAC addresses. */
- eth_hdr = rte_pktmbuf_mtod(pkts_burst[j],
- struct rte_ether_hdr *);
- *(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[hops[j]];
- rte_ether_addr_copy(&ports_eth_addr[hops[j]],
- &eth_hdr->src_addr);
+ /* Set MAC addresses. */
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+ *(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[*hop];
+ rte_ether_addr_copy(&ports_eth_addr[*hop], &eth_hdr->src_addr);
+}
+
+static inline void
+fib_send_single(int nb_tx, struct lcore_conf *qconf,
+ struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
+{
+ int32_t j;
+ for (j = 0; j < nb_tx; j++) {
+ process_packet(pkts_burst[j], &hops[j]);
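+ /* Free packets that failed the RFC1812 checks. */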
+ if (hops[j] == BAD_PORT) {
+ rte_pktmbuf_free(pkts_burst[j]);
+ continue;
+ }
/* Send single packet. */
send_single_packet(qconf, pkts_burst[j], hops[j]);
}
@@ -261,7 +272,7 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
uint32_t ipv4_arr[MAX_PKT_BURST];
uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
- uint16_t nh;
+ uint16_t nh, hops[MAX_PKT_BURST];
uint8_t type_arr[MAX_PKT_BURST];
uint32_t ipv4_cnt, ipv6_cnt;
uint32_t ipv4_arr_assem, ipv6_arr_assem;
@@ -350,7 +361,13 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
else
nh = (uint16_t)hopsv6[ipv6_arr_assem++];
- if (nh != FIB_DEFAULT_HOP)
- events[i].mbuf->port = nh;
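+ /* Keep the mbuf's current port when the lookup returns the
+ * default hop.
+ */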
+ hops[i] = nh != FIB_DEFAULT_HOP ?
+ nh :
+ events[i].mbuf->port;
+ process_packet(events[i].mbuf, &hops[i]);
+ events[i].mbuf->port = hops[i] != BAD_PORT ?
+ hops[i] :
+ events[i].mbuf->port;
}
if (flags & L3FWD_EVENT_TX_ENQ) {
@@ -418,14 +435,12 @@ fib_event_main_loop_tx_q_burst(__rte_unused void *dummy)
}
static __rte_always_inline void
-fib_process_event_vector(struct rte_event_vector *vec)
+fib_process_event_vector(struct rte_event_vector *vec, uint8_t *type_arr,
+ uint8_t **ipv6_arr, uint64_t *hopsv4, uint64_t *hopsv6,
+ uint32_t *ipv4_arr, uint16_t *hops)
{
- uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
- uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
uint32_t ipv4_arr_assem, ipv6_arr_assem;
struct rte_mbuf **mbufs = vec->mbufs;
- uint32_t ipv4_arr[MAX_PKT_BURST];
- uint8_t type_arr[MAX_PKT_BURST];
uint32_t ipv4_cnt, ipv6_cnt;
struct lcore_conf *lconf;
uint16_t nh;
@@ -463,16 +478,10 @@ fib_process_event_vector(struct rte_event_vector *vec)
/* Lookup IPv6 hops if IPv6 packets are present. */
if (ipv6_cnt > 0)
- rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct, ipv6_arr,
- hopsv6, ipv6_cnt);
-
- if (vec->attr_valid) {
- nh = type_arr[0] ? (uint16_t)hopsv4[0] : (uint16_t)hopsv6[0];
- if (nh != FIB_DEFAULT_HOP)
- vec->port = nh;
- else
- vec->attr_valid = 0;
- }
+ rte_fib6_lookup_bulk(
+ lconf->ipv6_lookup_struct,
+ (uint8_t(*)[RTE_FIB6_IPV6_ADDR_SIZE])ipv6_arr, hopsv6,
+ ipv6_cnt);
/* Assign ports looked up in fib depending on IPv4 or IPv6 */
for (i = 0; i < vec->nb_elem; i++) {
@@ -481,9 +490,26 @@ fib_process_event_vector(struct rte_event_vector *vec)
else
nh = (uint16_t)hopsv6[ipv6_arr_assem++];
if (nh != FIB_DEFAULT_HOP)
- mbufs[i]->port = nh;
- event_vector_attr_validate(vec, mbufs[i]);
+ hops[i] = nh;
+ else
+ hops[i] = vec->attr_valid ? vec->port :
+ vec->mbufs[i]->port;
}
+
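+ /* Rewrite packet headers: the multi-send build processes FWDSTEP
+ * packets per iteration and the remainder one at a time.
+ */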
+#if defined FIB_SEND_MULTI
+ uint16_t k;
+ k = RTE_ALIGN_FLOOR(vec->nb_elem, FWDSTEP);
+
+ for (i = 0; i != k; i += FWDSTEP)
+ processx4_step3(&vec->mbufs[i], &hops[i]);
+ for (; i < vec->nb_elem; i++)
+ process_packet(vec->mbufs[i], &hops[i]);
+#else
+ for (i = 0; i < vec->nb_elem; i++)
+ process_packet(vec->mbufs[i], &hops[i]);
+#endif
+
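+ /* Apply the resolved hops to the vector: set egress ports and free
+ * any packets marked BAD_PORT.
+ */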
+ process_event_vector(vec, hops);
}
static __rte_always_inline void
@@ -496,7 +522,32 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
const uint8_t event_d_id = evt_rsrc->event_d_id;
const uint16_t deq_len = evt_rsrc->deq_depth;
struct rte_event events[MAX_PKT_BURST];
+ uint8_t *type_arr, **ipv6_arr, *ptr;
int nb_enq = 0, nb_deq = 0, i;
+ uint64_t *hopsv4, *hopsv6;
+ uint32_t *ipv4_arr;
+ uint16_t *hops;
+ uintptr_t mem;
+
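+ /* Single allocation carved into per-element scratch arrays: IPv4
+ * addresses, packet-type flags, IPv4/IPv6 hop results, final hops,
+ * the IPv6 pointer table and its address storage.
+ */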
+ mem = (uintptr_t)rte_zmalloc(
+ "vector_fib",
+ (sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint64_t) +
+ sizeof(uint64_t) + sizeof(uint16_t) + sizeof(uint8_t *) +
+ (sizeof(uint8_t) * RTE_FIB6_IPV6_ADDR_SIZE)) *
+ evt_rsrc->vector_size,
+ RTE_CACHE_LINE_SIZE);
+ if (mem == 0)
+ return;
+ ipv4_arr = (uint32_t *)mem;
+ type_arr = (uint8_t *)&ipv4_arr[evt_rsrc->vector_size];
+ hopsv4 = (uint64_t *)&type_arr[evt_rsrc->vector_size];
+ hopsv6 = (uint64_t *)&hopsv4[evt_rsrc->vector_size];
+ hops = (uint16_t *)&hopsv6[evt_rsrc->vector_size];
+ ipv6_arr = (uint8_t **)&hops[evt_rsrc->vector_size];
+
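+ /* Point each IPv6 entry at its own RTE_FIB6_IPV6_ADDR_SIZE slot in
+ * the trailing address storage.
+ */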
+ ptr = (uint8_t *)&ipv6_arr[evt_rsrc->vector_size];
+ for (i = 0; i < evt_rsrc->vector_size; i++)
+ ipv6_arr[i] = &ptr[RTE_FIB6_IPV6_ADDR_SIZE * i];
if (event_p_id < 0)
return;
@@ -519,10 +570,9 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
events[i].op = RTE_EVENT_OP_FORWARD;
}
- fib_process_event_vector(events[i].vec);
-
- if (flags & L3FWD_EVENT_TX_DIRECT)
- event_vector_txq_set(events[i].vec, 0);
+ fib_process_event_vector(events[i].vec, type_arr,
+ ipv6_arr, hopsv4, hopsv6,
+ ipv4_arr, hops);
}
if (flags & L3FWD_EVENT_TX_ENQ) {