[v5,21/24] net/ngbe: support full-featured Tx path
Commit Message
Add the full-featured transmit function, which supports checksum, TSO,
tunnel parsing, etc.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/features/ngbe.ini | 3 +
doc/guides/nics/ngbe.rst | 3 +-
drivers/net/ngbe/meson.build | 2 +
drivers/net/ngbe/ngbe_ethdev.c | 16 +-
drivers/net/ngbe/ngbe_ethdev.h | 3 +
drivers/net/ngbe/ngbe_rxtx.c | 639 ++++++++++++++++++++++++++++++
drivers/net/ngbe/ngbe_rxtx.h | 56 +++
7 files changed, 720 insertions(+), 2 deletions(-)
Comments
On 6/2/21 12:41 PM, Jiawen Wu wrote:
> Add the full-featured transmit function, which supports checksum, TSO,
> tunnel parsing, etc.
The patch should advertise corresponding offloads support in features,
in dev_info Tx offloads.
Tx offloads require a Tx prepare implementation. Can you really skip it?
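For reference, a Tx prepare callback in the style of sibling Intel-family
PMDs (e.g. ixgbe_prep_pkts) might look roughly like the sketch below.
ngbe_prep_pkts and NGBE_TX_OFFLOAD_NOTSUP_MASK are illustrative names only,
not part of this patch; it assumes <rte_net.h> and <rte_errno.h> are
included:

uint16_t
ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i, ret;
	struct rte_mbuf *m;

	RTE_SET_USED(tx_queue);

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];

		/* Reject offload requests the hardware cannot handle */
		if (m->ol_flags & NGBE_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		/* HW checksum/TSO needs the pseudo-header checksum set */
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}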
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
> doc/guides/nics/features/ngbe.ini | 3 +
> doc/guides/nics/ngbe.rst | 3 +-
> drivers/net/ngbe/meson.build | 2 +
> drivers/net/ngbe/ngbe_ethdev.c | 16 +-
> drivers/net/ngbe/ngbe_ethdev.h | 3 +
> drivers/net/ngbe/ngbe_rxtx.c | 639 ++++++++++++++++++++++++++++++
> drivers/net/ngbe/ngbe_rxtx.h | 56 +++
> 7 files changed, 720 insertions(+), 2 deletions(-)
>
> diff --git a/doc/guides/nics/features/ngbe.ini b/doc/guides/nics/features/ngbe.ini
> index e24d8d0b55..443c6691a3 100644
> --- a/doc/guides/nics/features/ngbe.ini
> +++ b/doc/guides/nics/features/ngbe.ini
> @@ -9,10 +9,13 @@ Link status = Y
> Link status event = Y
> Jumbo frame = Y
> Scattered Rx = Y
> +TSO = Y
> CRC offload = P
> VLAN offload = P
> L3 checksum offload = P
> L4 checksum offload = P
> +Inner L3 checksum = P
> +Inner L4 checksum = P
> Packet type parsing = Y
> Multiprocess aware = Y
> Linux = Y
> diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
> index e999e0b580..cf3fafabd8 100644
> --- a/doc/guides/nics/ngbe.rst
> +++ b/doc/guides/nics/ngbe.rst
> @@ -12,9 +12,10 @@ Features
>
> - Packet type information
> - Checksum offload
> +- TSO offload
> - Jumbo frames
> - Link state information
> -- Scattered and gather for RX
> +- Scattered and gather for TX and RX
TX -> Tx
>
> Prerequisites
> -------------
> diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build
> index fd571399b3..069e648a36 100644
> --- a/drivers/net/ngbe/meson.build
> +++ b/drivers/net/ngbe/meson.build
> @@ -16,5 +16,7 @@ sources = files(
> 'ngbe_rxtx.c',
> )
>
> +deps += ['security']
> +
> includes += include_directories('base')
>
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 260bca0e4f..1a6419e5a4 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -110,7 +110,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
>
> eth_dev->dev_ops = &ngbe_eth_dev_ops;
> eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
> - eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
> + eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
>
> /*
> * For secondary processes, we don't initialise any further as primary
> @@ -118,6 +118,20 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
> * RX and TX function.
> */
> if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> + struct ngbe_tx_queue *txq;
> + /* TX queue function in primary, set by last queue initialized
TX -> Tx
> + * Tx queue may not be initialized by the primary process
> + */
> + if (eth_dev->data->tx_queues) {
> + uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
> + txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
> + ngbe_set_tx_function(eth_dev, txq);
> + } else {
> + /* Use default TX function if we get here */
> + PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
> + "Using default TX function.");
> + }
> +
> ngbe_set_rx_function(eth_dev);
>
> return 0;
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index 1e21db5e25..035b1ad5c8 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -86,6 +86,9 @@ uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
> uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
> struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>
> +uint16_t ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts);
> +
> uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts);
>
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index f633718237..3f3f2cab06 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -8,6 +8,7 @@
> #include <stdint.h>
> #include <rte_ethdev.h>
> #include <ethdev_driver.h>
> +#include <rte_security_driver.h>
> #include <rte_malloc.h>
>
> #include "ngbe_logs.h"
> @@ -15,6 +16,18 @@
> #include "ngbe_ethdev.h"
> #include "ngbe_rxtx.h"
>
> +/* Bit mask to indicate which bits are required to build the Tx context */
> +static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
> + PKT_TX_OUTER_IPV6 |
> + PKT_TX_OUTER_IPV4 |
> + PKT_TX_IPV6 |
> + PKT_TX_IPV4 |
> + PKT_TX_VLAN_PKT |
> + PKT_TX_L4_MASK |
> + PKT_TX_TCP_SEG |
> + PKT_TX_TUNNEL_MASK |
> + PKT_TX_OUTER_IP_CKSUM);
> +
Can we add offloads one by one in separate patches?
It would simplify review a lot. Right now it is very
hard to understand if you lost something or not.
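As an illustration of what this mask admits: an application requesting TSO
on a plain IPv4/TCP packet would set flags and header lengths along these
lines before the burst call (a sketch with example header sizes; the helper
name is hypothetical):

static void
request_ipv4_tcp_tso(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = 1448;	/* example MSS */
}

All of these bits fall inside NGBE_TX_OFFLOAD_MASK, so they reach the
context-descriptor setup below.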
> /*
> * Prefetch a cache line into all cache levels.
> */
> @@ -248,10 +261,608 @@ ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> return nb_tx;
> }
>
> +static inline void
> +ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
> + volatile struct ngbe_tx_ctx_desc *ctx_txd,
> + uint64_t ol_flags, union ngbe_tx_offload tx_offload,
> + __rte_unused uint64_t *mdata)
> +{
> + union ngbe_tx_offload tx_offload_mask;
> + uint32_t type_tucmd_mlhl;
> + uint32_t mss_l4len_idx;
> + uint32_t ctx_idx;
> + uint32_t vlan_macip_lens;
> + uint32_t tunnel_seed;
> +
> + ctx_idx = txq->ctx_curr;
> + tx_offload_mask.data[0] = 0;
> + tx_offload_mask.data[1] = 0;
> +
> + /* Specify which HW CTX to upload. */
> + mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
> + type_tucmd_mlhl = NGBE_TXD_CTXT;
> +
> + tx_offload_mask.ptid |= ~0;
> + type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
> +
> + /* check if TCP segmentation required for this packet */
> + if (ol_flags & PKT_TX_TCP_SEG) {
> + tx_offload_mask.l2_len |= ~0;
> + tx_offload_mask.l3_len |= ~0;
> + tx_offload_mask.l4_len |= ~0;
> + tx_offload_mask.tso_segsz |= ~0;
> + mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
> + mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
> + } else { /* no TSO, check if hardware checksum is needed */
> + if (ol_flags & PKT_TX_IP_CKSUM) {
> + tx_offload_mask.l2_len |= ~0;
> + tx_offload_mask.l3_len |= ~0;
> + }
> +
> + switch (ol_flags & PKT_TX_L4_MASK) {
> + case PKT_TX_UDP_CKSUM:
> + mss_l4len_idx |=
> + NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
> + tx_offload_mask.l2_len |= ~0;
> + tx_offload_mask.l3_len |= ~0;
> + break;
> + case PKT_TX_TCP_CKSUM:
> + mss_l4len_idx |=
> + NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
> + tx_offload_mask.l2_len |= ~0;
> + tx_offload_mask.l3_len |= ~0;
> + break;
> + case PKT_TX_SCTP_CKSUM:
> + mss_l4len_idx |=
> + NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
> + tx_offload_mask.l2_len |= ~0;
> + tx_offload_mask.l3_len |= ~0;
> + break;
> + default:
> + break;
> + }
> + }
> +
> + vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
> +
> + if (ol_flags & PKT_TX_TUNNEL_MASK) {
> + tx_offload_mask.outer_tun_len |= ~0;
> + tx_offload_mask.outer_l2_len |= ~0;
> + tx_offload_mask.outer_l3_len |= ~0;
> + tx_offload_mask.l2_len |= ~0;
> + tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
> + tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
> +
> + switch (ol_flags & PKT_TX_TUNNEL_MASK) {
> + case PKT_TX_TUNNEL_IPIP:
> + /* for non UDP / GRE tunneling, set to 0b */
> + break;
> + default:
> + PMD_TX_LOG(ERR, "Tunnel type not supported");
> + return;
> + }
> + vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
> + } else {
> + tunnel_seed = 0;
> + vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
> + }
> +
> + if (ol_flags & PKT_TX_VLAN_PKT) {
> + tx_offload_mask.vlan_tci |= ~0;
> + vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
> + }
> +
> + txq->ctx_cache[ctx_idx].flags = ol_flags;
> + txq->ctx_cache[ctx_idx].tx_offload.data[0] =
> + tx_offload_mask.data[0] & tx_offload.data[0];
> + txq->ctx_cache[ctx_idx].tx_offload.data[1] =
> + tx_offload_mask.data[1] & tx_offload.data[1];
> + txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
> +
> + ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
> + ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
> + ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
> + ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
> +}
> +
> +/*
> + * Check which hardware context can be used. Use the existing match
> + * or create a new context descriptor.
> + */
> +static inline uint32_t
> +what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
> + union ngbe_tx_offload tx_offload)
> +{
> + /* If match with the current used context */
> + if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
> + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
> + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
> + & tx_offload.data[0])) &&
> + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
> + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
> + & tx_offload.data[1]))))
> + return txq->ctx_curr;
> +
> + /* What if match with the next context */
> + txq->ctx_curr ^= 1;
> + if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
> + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
> + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
> + & tx_offload.data[0])) &&
> + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
> + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
> + & tx_offload.data[1]))))
> + return txq->ctx_curr;
> +
> + /* Mismatch, use the previous context */
> + return NGBE_CTX_NUM;
> +}
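The two if-blocks above are identical except for which ctx_curr slot they
test, and the final comment is a little misleading: returning NGBE_CTX_NUM
means "no cached context matched, write a new context descriptor", not "use
the previous context". The match test could be factored into a helper (an
illustrative refactor, not part of the patch):

static inline bool
ngbe_ctx_match(const struct ngbe_ctx_info *info, uint64_t flags,
	       union ngbe_tx_offload tx_offload)
{
	return info->flags == flags &&
	       info->tx_offload.data[0] ==
			(info->tx_offload_mask.data[0] & tx_offload.data[0]) &&
	       info->tx_offload.data[1] ==
			(info->tx_offload_mask.data[1] & tx_offload.data[1]);
}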
> +
> +static inline uint32_t
> +tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
> +{
> + uint32_t tmp = 0;
> +
> + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
> + tmp |= NGBE_TXD_CC;
> + tmp |= NGBE_TXD_L4CS;
> + }
> + if (ol_flags & PKT_TX_IP_CKSUM) {
> + tmp |= NGBE_TXD_CC;
> + tmp |= NGBE_TXD_IPCS;
> + }
> + if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
> + tmp |= NGBE_TXD_CC;
> + tmp |= NGBE_TXD_EIPCS;
> + }
> + if (ol_flags & PKT_TX_TCP_SEG) {
> + tmp |= NGBE_TXD_CC;
> + /* implies IPv4 cksum */
> + if (ol_flags & PKT_TX_IPV4)
> + tmp |= NGBE_TXD_IPCS;
> + tmp |= NGBE_TXD_L4CS;
> + }
> + if (ol_flags & PKT_TX_VLAN_PKT)
> + tmp |= NGBE_TXD_CC;
> +
> + return tmp;
> +}
> +
> +static inline uint32_t
> +tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
> +{
> + uint32_t cmdtype = 0;
> +
> + if (ol_flags & PKT_TX_VLAN_PKT)
> + cmdtype |= NGBE_TXD_VLE;
> + if (ol_flags & PKT_TX_TCP_SEG)
> + cmdtype |= NGBE_TXD_TSE;
> + if (ol_flags & PKT_TX_MACSEC)
> + cmdtype |= NGBE_TXD_LINKSEC;
> + return cmdtype;
> +}
> +
> +static inline uint8_t
> +tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
> +{
> + bool tun;
> +
> + if (ptype)
> + return ngbe_encode_ptype(ptype);
> +
> + /* Only support flags in NGBE_TX_OFFLOAD_MASK */
> + tun = !!(oflags & PKT_TX_TUNNEL_MASK);
> +
> + /* L2 level */
> + ptype = RTE_PTYPE_L2_ETHER;
> + if (oflags & PKT_TX_VLAN)
> + ptype |= RTE_PTYPE_L2_ETHER_VLAN;
> +
> + /* L3 level */
> + if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
> + ptype |= RTE_PTYPE_L3_IPV4;
> + else if (oflags & (PKT_TX_OUTER_IPV6))
> + ptype |= RTE_PTYPE_L3_IPV6;
> +
> + if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
> + ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
> + else if (oflags & (PKT_TX_IPV6))
> + ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
> +
> + /* L4 level */
> + switch (oflags & (PKT_TX_L4_MASK)) {
> + case PKT_TX_TCP_CKSUM:
> + ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
> + break;
> + case PKT_TX_UDP_CKSUM:
> + ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
> + break;
> + case PKT_TX_SCTP_CKSUM:
> + ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
> + break;
> + }
> +
> + if (oflags & PKT_TX_TCP_SEG)
> + ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
> +
> + /* Tunnel */
> + switch (oflags & PKT_TX_TUNNEL_MASK) {
> + case PKT_TX_TUNNEL_VXLAN:
> + ptype |= RTE_PTYPE_L2_ETHER |
> + RTE_PTYPE_L3_IPV4 |
> + RTE_PTYPE_TUNNEL_VXLAN;
> + ptype |= RTE_PTYPE_INNER_L2_ETHER;
> + break;
> + case PKT_TX_TUNNEL_GRE:
> + ptype |= RTE_PTYPE_L2_ETHER |
> + RTE_PTYPE_L3_IPV4 |
> + RTE_PTYPE_TUNNEL_GRE;
> + ptype |= RTE_PTYPE_INNER_L2_ETHER;
> + break;
> + case PKT_TX_TUNNEL_GENEVE:
> + ptype |= RTE_PTYPE_L2_ETHER |
> + RTE_PTYPE_L3_IPV4 |
> + RTE_PTYPE_TUNNEL_GENEVE;
> + ptype |= RTE_PTYPE_INNER_L2_ETHER;
> + break;
> + case PKT_TX_TUNNEL_VXLAN_GPE:
> + ptype |= RTE_PTYPE_L2_ETHER |
> + RTE_PTYPE_L3_IPV4 |
> + RTE_PTYPE_TUNNEL_VXLAN_GPE;
> + break;
> + case PKT_TX_TUNNEL_IPIP:
> + case PKT_TX_TUNNEL_IP:
> + ptype |= RTE_PTYPE_L2_ETHER |
> + RTE_PTYPE_L3_IPV4 |
> + RTE_PTYPE_TUNNEL_IP;
> + break;
> + }
> +
> + return ngbe_encode_ptype(ptype);
> +}
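For example, a VXLAN-encapsulated TSO packet carrying PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IPV4 | PKT_TX_IPV4 resolves above to
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN |
RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4 | RTE_PTYPE_INNER_L4_TCP
before being folded into the 8-bit PTID by ngbe_encode_ptype().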
> +
> #ifndef DEFAULT_TX_FREE_THRESH
> #define DEFAULT_TX_FREE_THRESH 32
> #endif
>
> +/* Reset transmit descriptors after they have been used */
> +static inline int
> +ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
> +{
> + struct ngbe_tx_entry *sw_ring = txq->sw_ring;
> + volatile struct ngbe_tx_desc *txr = txq->tx_ring;
> + uint16_t last_desc_cleaned = txq->last_desc_cleaned;
> + uint16_t nb_tx_desc = txq->nb_tx_desc;
> + uint16_t desc_to_clean_to;
> + uint16_t nb_tx_to_clean;
> + uint32_t status;
> +
> + /* Determine the last descriptor needing to be cleaned */
> + desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
> + if (desc_to_clean_to >= nb_tx_desc)
> + desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
> +
> + /* Check to make sure the last descriptor to clean is done */
> + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
> + status = txr[desc_to_clean_to].dw3;
> + if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
> + PMD_TX_LOG(DEBUG,
> + "TX descriptor %4u is not done"
> + "(port=%d queue=%d)",
> + desc_to_clean_to,
> + txq->port_id, txq->queue_id);
> + if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
> + ngbe_set32_masked(txq->tdc_reg_addr,
> + NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
> + /* Failed to clean any descriptors, better luck next time */
> + return -(1);
> + }
> +
> + /* Figure out how many descriptors will be cleaned */
> + if (last_desc_cleaned > desc_to_clean_to)
> + nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
> + desc_to_clean_to);
> + else
> + nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
> + last_desc_cleaned);
> +
> + PMD_TX_LOG(DEBUG,
> + "Cleaning %4u TX descriptors: %4u to %4u "
> + "(port=%d queue=%d)",
> + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
> + txq->port_id, txq->queue_id);
> +
> + /*
> + * The last descriptor to clean is done, so that means all the
> + * descriptors from the last descriptor that was cleaned
> + * up to the last descriptor with the RS bit set
> + * are done. Only reset the threshold descriptor.
> + */
> + txr[desc_to_clean_to].dw3 = 0;
> +
> + /* Update the txq to reflect the last descriptor that was cleaned */
> + txq->last_desc_cleaned = desc_to_clean_to;
> + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
> +
> + /* No Error */
> + return 0;
> +}
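A worked example of the wrap-around arithmetic: with nb_tx_desc = 512,
last_desc_cleaned = 500 and tx_free_thresh = 32, desc_to_clean_to first
computes 532 and wraps to 20 (before the last_id indirection, which may push
it further, to the end of a multi-descriptor packet); if that descriptor's
DD bit is set, nb_tx_to_clean = (512 - 500) + 20 = 32 descriptors are
reclaimed in a single call.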
> +
> +uint16_t
> +ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct ngbe_tx_queue *txq;
> + struct ngbe_tx_entry *sw_ring;
> + struct ngbe_tx_entry *txe, *txn;
> + volatile struct ngbe_tx_desc *txr;
> + volatile struct ngbe_tx_desc *txd;
> + struct rte_mbuf *tx_pkt;
> + struct rte_mbuf *m_seg;
> + uint64_t buf_dma_addr;
> + uint32_t olinfo_status;
> + uint32_t cmd_type_len;
> + uint32_t pkt_len;
> + uint16_t slen;
> + uint64_t ol_flags;
> + uint16_t tx_id;
> + uint16_t tx_last;
> + uint16_t nb_tx;
> + uint16_t nb_used;
> + uint64_t tx_ol_req;
> + uint32_t ctx = 0;
> + uint32_t new_ctx;
> + union ngbe_tx_offload tx_offload;
> +
> + tx_offload.data[0] = 0;
> + tx_offload.data[1] = 0;
> + txq = tx_queue;
> + sw_ring = txq->sw_ring;
> + txr = txq->tx_ring;
> + tx_id = txq->tx_tail;
> + txe = &sw_ring[tx_id];
> +
> + /* Determine if the descriptor ring needs to be cleaned. */
> + if (txq->nb_tx_free < txq->tx_free_thresh)
> + ngbe_xmit_cleanup(txq);
> +
> + rte_prefetch0(&txe->mbuf->pool);
> +
> + /* TX loop */
> + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> + new_ctx = 0;
> + tx_pkt = *tx_pkts++;
> + pkt_len = tx_pkt->pkt_len;
> +
> + /*
> + * Determine how many (if any) context descriptors
> + * are needed for offload functionality.
> + */
> + ol_flags = tx_pkt->ol_flags;
> +
> + /* If hardware offload required */
> + tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
> + if (tx_ol_req) {
> + tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
> + tx_pkt->packet_type);
> + tx_offload.l2_len = tx_pkt->l2_len;
> + tx_offload.l3_len = tx_pkt->l3_len;
> + tx_offload.l4_len = tx_pkt->l4_len;
> + tx_offload.vlan_tci = tx_pkt->vlan_tci;
> + tx_offload.tso_segsz = tx_pkt->tso_segsz;
> + tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
> + tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
> + tx_offload.outer_tun_len = 0;
> +
> +
> + /* Check if a new context needs to be built or the existing one can be reused */
> + ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
> + /* Only allocate context descriptor if required */
> + new_ctx = (ctx == NGBE_CTX_NUM);
> + ctx = txq->ctx_curr;
> + }
> +
> + /*
> + * Keep track of how many descriptors are used this loop
> + * This will always be the number of segments + the number of
> + * Context descriptors required to transmit the packet
> + */
> + nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
> +
> + /*
> + * The number of descriptors that must be allocated for a
> + * packet is the number of segments of that packet, plus 1
> + * Context Descriptor for the hardware offload, if any.
> + * Determine the last TX descriptor to allocate in the TX ring
> + * for the packet, starting from the current position (tx_id)
> + * in the ring.
> + */
> + tx_last = (uint16_t)(tx_id + nb_used - 1);
> +
> + /* Circular ring */
> + if (tx_last >= txq->nb_tx_desc)
> + tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
> +
> + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
> + " tx_first=%u tx_last=%u",
> + (uint16_t)txq->port_id,
> + (uint16_t)txq->queue_id,
> + (uint32_t)pkt_len,
> + (uint16_t)tx_id,
> + (uint16_t)tx_last);
> +
> + /*
> + * Make sure there are enough TX descriptors available to
> + * transmit the entire packet.
> + * nb_used better be less than or equal to txq->tx_free_thresh
> + */
> + if (nb_used > txq->nb_tx_free) {
> + PMD_TX_LOG(DEBUG,
> + "Not enough free TX descriptors "
> + "nb_used=%4u nb_free=%4u "
> + "(port=%d queue=%d)",
> + nb_used, txq->nb_tx_free,
> + txq->port_id, txq->queue_id);
> +
> + if (ngbe_xmit_cleanup(txq) != 0) {
> + /* Could not clean any descriptors */
> + if (nb_tx == 0)
> + return 0;
> + goto end_of_tx;
> + }
> +
> + /* nb_used better be <= txq->tx_free_thresh */
> + if (unlikely(nb_used > txq->tx_free_thresh)) {
> + PMD_TX_LOG(DEBUG,
> + "The number of descriptors needed to "
> + "transmit the packet exceeds the "
> + "RS bit threshold. This will impact "
> + "performance."
> + "nb_used=%4u nb_free=%4u "
> + "tx_free_thresh=%4u. "
> + "(port=%d queue=%d)",
> + nb_used, txq->nb_tx_free,
> + txq->tx_free_thresh,
> + txq->port_id, txq->queue_id);
> + /*
> + * Loop here until there are enough TX
> + * descriptors or until the ring cannot be
> + * cleaned.
> + */
> + while (nb_used > txq->nb_tx_free) {
> + if (ngbe_xmit_cleanup(txq) != 0) {
> + /*
> + * Could not clean any
> + * descriptors
> + */
> + if (nb_tx == 0)
> + return 0;
> + goto end_of_tx;
> + }
> + }
> + }
> + }
> +
> + /*
> + * By now there are enough free TX descriptors to transmit
> + * the packet.
> + */
> +
> + /*
> + * Set common flags of all TX Data Descriptors.
> + *
> + * The following bits must be set in the first Data Descriptor
> + * and are ignored in the other ones:
> + * - NGBE_TXD_FCS
> + *
> + * The following bits must only be set in the last Data
> + * Descriptor:
> + * - NGBE_TXD_EOP
> + */
> + cmd_type_len = NGBE_TXD_FCS;
> +
> + olinfo_status = 0;
> + if (tx_ol_req) {
> + if (ol_flags & PKT_TX_TCP_SEG) {
> + /* when TSO is on, paylen in descriptor is
> + * not the packet len but the tcp payload len
> + */
> + pkt_len -= (tx_offload.l2_len +
> + tx_offload.l3_len + tx_offload.l4_len);
> + pkt_len -=
> + (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
> + ? tx_offload.outer_l2_len +
> + tx_offload.outer_l3_len : 0;
> + }
> +
> + /*
> + * Setup the TX Advanced Context Descriptor if required
> + */
> + if (new_ctx) {
> + volatile struct ngbe_tx_ctx_desc *ctx_txd;
> +
> + ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
> + &txr[tx_id];
> +
> + txn = &sw_ring[txe->next_id];
> + rte_prefetch0(&txn->mbuf->pool);
> +
> + if (txe->mbuf != NULL) {
> + rte_pktmbuf_free_seg(txe->mbuf);
> + txe->mbuf = NULL;
> + }
> +
> + ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
> + tx_offload,
> + rte_security_dynfield(tx_pkt));
> +
> + txe->last_id = tx_last;
> + tx_id = txe->next_id;
> + txe = txn;
> + }
> +
> + /*
> + * Setup the TX Advanced Data Descriptor,
> + * This path will go through
> + * whatever new/reuse the context descriptor
> + */
> + cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
> + olinfo_status |=
> + tx_desc_cksum_flags_to_olinfo(ol_flags);
> + olinfo_status |= NGBE_TXD_IDX(ctx);
> + }
> +
> + olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
> +
> + m_seg = tx_pkt;
> + do {
> + txd = &txr[tx_id];
> + txn = &sw_ring[txe->next_id];
> + rte_prefetch0(&txn->mbuf->pool);
> +
> + if (txe->mbuf != NULL)
> + rte_pktmbuf_free_seg(txe->mbuf);
> + txe->mbuf = m_seg;
> +
> + /*
> + * Set up Transmit Data Descriptor.
> + */
> + slen = m_seg->data_len;
> + buf_dma_addr = rte_mbuf_data_iova(m_seg);
> + txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
> + txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
> + txd->dw3 = rte_cpu_to_le_32(olinfo_status);
> + txe->last_id = tx_last;
> + tx_id = txe->next_id;
> + txe = txn;
> + m_seg = m_seg->next;
> + } while (m_seg != NULL);
> +
> + /*
> + * The last packet data descriptor needs End Of Packet (EOP)
> + */
> + cmd_type_len |= NGBE_TXD_EOP;
> + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
> +
> + txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
> + }
> +
> +end_of_tx:
> +
> + rte_wmb();
> +
> + /*
> + * Set the Transmit Descriptor Tail (TDT)
> + */
> + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
> + (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
> + (uint16_t)tx_id, (uint16_t)nb_tx);
> + ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
> + txq->tx_tail = tx_id;
> +
> + return nb_tx;
> +}
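Once device start lands (see the patch-ordering concern below), this path
is exercised through the normal burst API; a minimal usage sketch, assuming
port_id, queue_id and pkts are already set up by the application:

uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

/* The PMD may accept fewer packets than requested, e.g. when the ring
 * is full and ngbe_xmit_cleanup() cannot free any descriptors yet.
 */
while (nb_sent < nb_pkts)
	nb_sent += rte_eth_tx_burst(port_id, queue_id,
				    pkts + nb_sent, nb_pkts - nb_sent);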
> +
> /*********************************************************************
> *
> * RX functions
> @@ -1123,6 +1734,31 @@ static const struct ngbe_txq_ops def_txq_ops = {
> .reset = ngbe_reset_tx_queue,
> };
>
> +/* Takes an ethdev and a queue and sets up the tx function to be used based on
> + * the queue parameters. Used in tx_queue_setup by primary process and then
> + * in dev_init by secondary process when attaching to an existing ethdev.
> + */
> +void __rte_cold
> +ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
> +{
> + /* Use a simple Tx queue (no offloads, no multi segs) if possible */
> + if (txq->offloads == 0 &&
> + txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
> + PMD_INIT_LOG(DEBUG, "Using simple tx code path");
> + dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
> + } else {
> + PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
> + PMD_INIT_LOG(DEBUG,
> + " - offloads = 0x%" PRIx64,
> + txq->offloads);
> + PMD_INIT_LOG(DEBUG,
> + " - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
> + (unsigned long)txq->tx_free_thresh,
> + (unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
> + dev->tx_pkt_burst = ngbe_xmit_pkts;
> + }
> +}
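So a queue only gets the simple path when it is configured with no
per-queue offloads and a large enough free threshold. An illustrative queue
setup that would select the simple path (assuming RTE_PMD_NGBE_TX_MAX_BURST
is 32, as in sibling drivers):

struct rte_eth_txconf txconf = {
	.offloads = 0,		/* no per-queue Tx offloads */
	.tx_free_thresh = 32,	/* >= RTE_PMD_NGBE_TX_MAX_BURST */
};

int ret = rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				 rte_eth_dev_socket_id(port_id), &txconf);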
> +
> uint64_t
> ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
> {
> @@ -1262,6 +1898,9 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
> PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
> txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
>
> + /* set up scalar TX function as appropriate */
> + ngbe_set_tx_function(dev, txq);
> +
> txq->ops->reset(txq);
>
> dev->data->tx_queues[queue_idx] = txq;
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index 4b8596b24a..2cb98e2497 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -135,8 +135,35 @@ struct ngbe_tx_ctx_desc {
> __le32 dw3; /* w.mss_l4len_idx */
> };
>
> +/* @ngbe_tx_ctx_desc.dw0 */
> +#define NGBE_TXD_IPLEN(v) LS(v, 0, 0x1FF) /* ip/fcoe header end */
> +#define NGBE_TXD_MACLEN(v) LS(v, 9, 0x7F) /* desc mac len */
> +#define NGBE_TXD_VLAN(v) LS(v, 16, 0xFFFF) /* vlan tag */
> +
> +/* @ngbe_tx_ctx_desc.dw1 */
> +/*** bit 0-31, when NGBE_TXD_DTYP_FCOE=0 ***/
> +#define NGBE_TXD_IPSEC_SAIDX(v) LS(v, 0, 0x3FF) /* ipsec SA index */
> +#define NGBE_TXD_ETYPE(v) LS(v, 11, 0x1) /* tunnel type */
> +#define NGBE_TXD_ETYPE_UDP LS(0, 11, 0x1)
> +#define NGBE_TXD_ETYPE_GRE LS(1, 11, 0x1)
> +#define NGBE_TXD_EIPLEN(v) LS(v, 12, 0x7F) /* tunnel ip header */
> +#define NGBE_TXD_DTYP_FCOE MS(16, 0x1) /* FCoE/IP descriptor */
> +#define NGBE_TXD_ETUNLEN(v) LS(v, 21, 0xFF) /* tunnel header */
> +#define NGBE_TXD_DECTTL(v) LS(v, 29, 0xF) /* decrease ip TTL */
> +
> +/* @ngbe_tx_ctx_desc.dw2 */
> +#define NGBE_TXD_IPSEC_ESPLEN(v) LS(v, 1, 0x1FF) /* ipsec ESP length */
> +#define NGBE_TXD_SNAP MS(10, 0x1) /* SNAP indication */
> +#define NGBE_TXD_TPID_SEL(v) LS(v, 11, 0x7) /* vlan tag index */
> +#define NGBE_TXD_IPSEC_ESP MS(14, 0x1) /* ipsec type: esp=1 ah=0 */
> +#define NGBE_TXD_IPSEC_ESPENC MS(15, 0x1) /* ESP encrypt */
> +#define NGBE_TXD_CTXT MS(20, 0x1) /* context descriptor */
> +#define NGBE_TXD_PTID(v) LS(v, 24, 0xFF) /* packet type */
> /* @ngbe_tx_ctx_desc.dw3 */
> #define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */
> +#define NGBE_TXD_IDX(v) LS(v, 4, 0x1) /* ctxt desc index */
> +#define NGBE_TXD_L4LEN(v) LS(v, 8, 0xFF) /* l4 header length */
> +#define NGBE_TXD_MSS(v) LS(v, 16, 0xFFFF) /* l4 MSS */
>
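The LS()/MS() helpers used in these definitions are not part of this hunk;
judging by their usage they are shift-and-mask macros defined elsewhere in
the driver, presumably along the lines of:

#define MS(shift, mask)		((uint32_t)(mask) << (shift))
#define LS(value, shift, mask)	(((uint32_t)(value) & (uint32_t)(mask)) << (shift))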
> /**
> * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
> @@ -250,11 +277,34 @@ enum ngbe_ctx_num {
> NGBE_CTX_NUM = 2, /**< CTX NUMBER */
> };
>
> +/** Offload features */
> +union ngbe_tx_offload {
> + uint64_t data[2];
> + struct {
> + uint64_t ptid:8; /**< Packet Type Identifier. */
> + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> + uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> + uint64_t tso_segsz:16; /**< TCP TSO segment size */
> + uint64_t vlan_tci:16;
> + /**< VLAN Tag Control Identifier (CPU order). */
> +
> + /* fields for TX offloading of tunnels */
> + uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
> + uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
> + uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
> + };
> +};
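The bit-fields are sized to pack exactly into the two 64-bit words of
data[] (8 + 7 + 9 + 8 + 16 + 16 = 64 bits in the first word, 8 + 8 + 16 =
32 bits in the second), which is what lets what_ctx_update() compare whole
contexts with two integer compares. A compile-time check would make that
invariant explicit, e.g.:

_Static_assert(sizeof(union ngbe_tx_offload) == 2 * sizeof(uint64_t),
	       "ngbe_tx_offload bit-fields must fit in data[2]");

(or RTE_BUILD_BUG_ON() in DPDK style).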
> +
> /**
> * Structure to check if new context need be built
> */
> struct ngbe_ctx_info {
> uint64_t flags; /**< ol_flags for context build. */
> + /**< tx offload: vlan, tso, l2-l3-l4 lengths. */
> + union ngbe_tx_offload tx_offload;
> + /** compare mask for tx offload. */
> + union ngbe_tx_offload tx_offload_mask;
> };
>
> /**
> @@ -298,6 +348,12 @@ struct ngbe_txq_ops {
> void (*reset)(struct ngbe_tx_queue *txq);
> };
>
> +/* Takes an ethdev and a queue and sets up the tx function to be used based on
> + * the queue parameters. Used in tx_queue_setup by primary process and then
> + * in dev_init by secondary process when attaching to an existing ethdev.
> + */
> +void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);
> +
> void ngbe_set_rx_function(struct rte_eth_dev *dev);
>
> uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
>
On 6/14/21 10:22 PM, Andrew Rybchenko wrote:
> On 6/2/21 12:41 PM, Jiawen Wu wrote:
>> Add the full-featured transmit function, which supports checksum, TSO,
>> tunnel parsing, etc.
>
> The patch should advertise corresponding offloads support in features,
> in dev_info Tx offloads.
>
> Tx offloads require a Tx prepare implementation. Can you really skip it?
BTW, I've realized that the patch is dead code since you can't use it
without device start implemented in the next patch.
I.e. patches order is wrong.
@@ -9,10 +9,13 @@ Link status = Y
Link status event = Y
Jumbo frame = Y
Scattered Rx = Y
+TSO = Y
CRC offload = P
VLAN offload = P
L3 checksum offload = P
L4 checksum offload = P
+Inner L3 checksum = P
+Inner L4 checksum = P
Packet type parsing = Y
Multiprocess aware = Y
Linux = Y
@@ -12,9 +12,10 @@ Features
- Packet type information
- Checksum offload
+- TSO offload
- Jumbo frames
- Link state information
-- Scattered and gather for RX
+- Scatter and gather for Tx and Rx
Prerequisites
-------------
@@ -16,5 +16,7 @@ sources = files(
'ngbe_rxtx.c',
)
+deps += ['security']
+
includes += include_directories('base')
@@ -110,7 +110,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
eth_dev->dev_ops = &ngbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
- eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
+ eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -118,6 +118,20 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
* RX and TX function.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ struct ngbe_tx_queue *txq;
+ /* Tx queue function in primary, set by last queue initialized;
+ * the Tx queue may not be initialized by the primary process
+ */
+ if (eth_dev->data->tx_queues) {
+ uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
+ txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
+ ngbe_set_tx_function(eth_dev, txq);
+ } else {
+ /* Use default Tx function if we get here */
+ PMD_INIT_LOG(NOTICE, "No Tx queues configured yet. "
+ "Using default Tx function.");
+ }
+
ngbe_set_rx_function(eth_dev);
return 0;
@@ -86,6 +86,9 @@ uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_malloc.h>
#include "ngbe_logs.h"
@@ -15,6 +16,18 @@
#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
+/* Bit mask indicating which ol_flags bits require building a Tx context */
+static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
+ PKT_TX_OUTER_IPV6 |
+ PKT_TX_OUTER_IPV4 |
+ PKT_TX_IPV6 |
+ PKT_TX_IPV4 |
+ PKT_TX_VLAN_PKT |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG |
+ PKT_TX_TUNNEL_MASK |
+ PKT_TX_OUTER_IP_CKSUM);
+
/*
* Prefetch a cache line into all cache levels.
*/
@@ -248,10 +261,608 @@ ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+static inline void
+ngbe_set_xmit_ctx(struct ngbe_tx_queue *txq,
+ volatile struct ngbe_tx_ctx_desc *ctx_txd,
+ uint64_t ol_flags, union ngbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
+{
+ union ngbe_tx_offload tx_offload_mask;
+ uint32_t type_tucmd_mlhl;
+ uint32_t mss_l4len_idx;
+ uint32_t ctx_idx;
+ uint32_t vlan_macip_lens;
+ uint32_t tunnel_seed;
+
+ ctx_idx = txq->ctx_curr;
+ tx_offload_mask.data[0] = 0;
+ tx_offload_mask.data[1] = 0;
+
+ /* Specify which HW CTX to upload. */
+ mss_l4len_idx = NGBE_TXD_IDX(ctx_idx);
+ type_tucmd_mlhl = NGBE_TXD_CTXT;
+
+ tx_offload_mask.ptid |= ~0;
+ type_tucmd_mlhl |= NGBE_TXD_PTID(tx_offload.ptid);
+
+ /* check if TCP segmentation required for this packet */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ tx_offload_mask.l4_len |= ~0;
+ tx_offload_mask.tso_segsz |= ~0;
+ mss_l4len_idx |= NGBE_TXD_MSS(tx_offload.tso_segsz);
+ mss_l4len_idx |= NGBE_TXD_L4LEN(tx_offload.l4_len);
+ } else { /* no TSO, check if hardware checksum is needed */
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ }
+
+ switch (ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ mss_l4len_idx |=
+ NGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_TCP_CKSUM:
+ mss_l4len_idx |=
+ NGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ mss_l4len_idx |=
+ NGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
+ tx_offload_mask.l2_len |= ~0;
+ tx_offload_mask.l3_len |= ~0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ vlan_macip_lens = NGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
+
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ tx_offload_mask.outer_tun_len |= ~0;
+ tx_offload_mask.outer_l2_len |= ~0;
+ tx_offload_mask.outer_l3_len |= ~0;
+ tx_offload_mask.l2_len |= ~0;
+ tunnel_seed = NGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
+ tunnel_seed |= NGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
+
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non UDP / GRE tunneling, set to 0b */
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
+ return;
+ }
+ vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.outer_l2_len);
+ } else {
+ tunnel_seed = 0;
+ vlan_macip_lens |= NGBE_TXD_MACLEN(tx_offload.l2_len);
+ }
+
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ tx_offload_mask.vlan_tci |= ~0;
+ vlan_macip_lens |= NGBE_TXD_VLAN(tx_offload.vlan_tci);
+ }
+
+ txq->ctx_cache[ctx_idx].flags = ol_flags;
+ txq->ctx_cache[ctx_idx].tx_offload.data[0] =
+ tx_offload_mask.data[0] & tx_offload.data[0];
+ txq->ctx_cache[ctx_idx].tx_offload.data[1] =
+ tx_offload_mask.data[1] & tx_offload.data[1];
+ txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
+
+ ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
+ ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
+ ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
+ ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
+}
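
As an illustration of how the context-descriptor dwords are packed (a sketch,
assuming the driver's LS(v, shift, mask) helper expands to
((v) & (mask)) << (shift), which is not shown in this patch): a TSO request
with a 20-byte TCP header and an MSS of 1448 on context slot 0 yields

    dw3 = NGBE_TXD_IDX(0) | NGBE_TXD_L4LEN(20) | NGBE_TXD_MSS(1448)
        = (0 << 4) | (20 << 8) | (1448 << 16)
        = 0x05a81400;

with NGBE_TXD_CTXT set in dw2 to mark the descriptor as a context descriptor.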
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_ctx_update(struct ngbe_tx_queue *txq, uint64_t flags,
+ union ngbe_tx_offload tx_offload)
+{
+ /* If match with the current used context */
+ if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* Otherwise, check whether the other cached context matches */
+ txq->ctx_curr ^= 1;
+ if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
+
+ /* Neither context matches: a new context descriptor must be built */
+ return NGBE_CTX_NUM;
+}
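
Each queue caches NGBE_CTX_NUM (two) hardware contexts; the XOR on ctx_curr
simply flips between slot 0 and slot 1. The caller's contract, as used in
ngbe_xmit_pkts() below, is:

    ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
    new_ctx = (ctx == NGBE_CTX_NUM); /* neither cached slot matched */
    ctx = txq->ctx_curr;             /* slot the context will occupy */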
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
+{
+ uint32_t tmp = 0;
+
+ if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
+ tmp |= NGBE_TXD_CC;
+ tmp |= NGBE_TXD_L4CS;
+ }
+ if (ol_flags & PKT_TX_IP_CKSUM) {
+ tmp |= NGBE_TXD_CC;
+ tmp |= NGBE_TXD_IPCS;
+ }
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ tmp |= NGBE_TXD_CC;
+ tmp |= NGBE_TXD_EIPCS;
+ }
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ tmp |= NGBE_TXD_CC;
+ /* implies IPv4 cksum */
+ if (ol_flags & PKT_TX_IPV4)
+ tmp |= NGBE_TXD_IPCS;
+ tmp |= NGBE_TXD_L4CS;
+ }
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ tmp |= NGBE_TXD_CC;
+
+ return tmp;
+}
+
+static inline uint32_t
+tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
+{
+ uint32_t cmdtype = 0;
+
+ if (ol_flags & PKT_TX_VLAN_PKT)
+ cmdtype |= NGBE_TXD_VLE;
+ if (ol_flags & PKT_TX_TCP_SEG)
+ cmdtype |= NGBE_TXD_TSE;
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= NGBE_TXD_LINKSEC;
+ return cmdtype;
+}
+
+static inline uint8_t
+tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
+{
+ bool tun;
+
+ if (ptype)
+ return ngbe_encode_ptype(ptype);
+
+ /* Only support flags in NGBE_TX_OFFLOAD_MASK */
+ tun = !!(oflags & PKT_TX_TUNNEL_MASK);
+
+ /* L2 level */
+ ptype = RTE_PTYPE_L2_ETHER;
+ if (oflags & PKT_TX_VLAN)
+ ptype |= RTE_PTYPE_L2_ETHER_VLAN;
+
+ /* L3 level */
+ if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
+ ptype |= RTE_PTYPE_L3_IPV4;
+ else if (oflags & (PKT_TX_OUTER_IPV6))
+ ptype |= RTE_PTYPE_L3_IPV6;
+
+ if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
+ else if (oflags & (PKT_TX_IPV6))
+ ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
+
+ /* L4 level */
+ switch (oflags & (PKT_TX_L4_MASK)) {
+ case PKT_TX_TCP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+ break;
+ case PKT_TX_UDP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
+ break;
+ case PKT_TX_SCTP_CKSUM:
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
+ break;
+ }
+
+ if (oflags & PKT_TX_TCP_SEG)
+ ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
+
+ /* Tunnel */
+ switch (oflags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_VXLAN:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GRE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_GENEVE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_GENEVE;
+ ptype |= RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case PKT_TX_TUNNEL_VXLAN_GPE:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_VXLAN_GPE;
+ break;
+ case PKT_TX_TUNNEL_IPIP:
+ case PKT_TX_TUNNEL_IP:
+ ptype |= RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 |
+ RTE_PTYPE_TUNNEL_IP;
+ break;
+ }
+
+ return ngbe_encode_ptype(ptype);
+}
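
For example (illustration only), a plain IPv4/TCP checksum request with no
packet type hint resolves as:

    uint8_t ptid = tx_desc_ol_flags_to_ptid(PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                                            PKT_TX_TCP_CKSUM, 0);
    /* internally: RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
     * folded into the 8-bit hardware PTID by ngbe_encode_ptype() (not shown)
     */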
+
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
+/* Reset transmit descriptors after they have been used */
+static inline int
+ngbe_xmit_cleanup(struct ngbe_tx_queue *txq)
+{
+ struct ngbe_tx_entry *sw_ring = txq->sw_ring;
+ volatile struct ngbe_tx_desc *txr = txq->tx_ring;
+ uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+ uint16_t nb_tx_desc = txq->nb_tx_desc;
+ uint16_t desc_to_clean_to;
+ uint16_t nb_tx_to_clean;
+ uint32_t status;
+
+ /* Determine the last descriptor needing to be cleaned */
+ desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
+ if (desc_to_clean_to >= nb_tx_desc)
+ desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
+
+ /* Check to make sure the last descriptor to clean is done */
+ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+ status = txr[desc_to_clean_to].dw3;
+ if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
+ PMD_TX_LOG(DEBUG,
+ "TX descriptor %4u is not done"
+ "(port=%d queue=%d)",
+ desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+ if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
+ ngbe_set32_masked(txq->tdc_reg_addr,
+ NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
+ /* Failed to clean any descriptors, better luck next time */
+ return -(1);
+ }
+
+ /* Figure out how many descriptors will be cleaned */
+ if (last_desc_cleaned > desc_to_clean_to)
+ nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
+ desc_to_clean_to);
+ else
+ nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
+ last_desc_cleaned);
+
+ PMD_TX_LOG(DEBUG,
+ "Cleaning %4u TX descriptors: %4u to %4u "
+ "(port=%d queue=%d)",
+ nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+ txq->port_id, txq->queue_id);
+
+ /*
+ * The last descriptor to clean is done, so that means all the
+ * descriptors from the last descriptor that was cleaned
+ * up to the last descriptor with the RS bit set
+ * are done. Only reset the threshold descriptor.
+ */
+ txr[desc_to_clean_to].dw3 = 0;
+
+ /* Update the txq to reflect the last descriptor that was cleaned */
+ txq->last_desc_cleaned = desc_to_clean_to;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
+
+ /* No Error */
+ return 0;
+}
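
A worked example of the wrap-around accounting above (hypothetical numbers):
with nb_tx_desc = 512, tx_free_thresh = 32 and last_desc_cleaned = 500,

    desc_to_clean_to = 500 + 32 = 532  ->  532 - 512 = 20 (ring wrap)
    /* assuming sw_ring[20].last_id == 20 and its DD bit is set */
    nb_tx_to_clean   = (512 - 500) + 20 = 32

so 32 descriptors are reclaimed and nb_tx_free grows accordingly.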
+
+uint16_t
+ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ngbe_tx_queue *txq;
+ struct ngbe_tx_entry *sw_ring;
+ struct ngbe_tx_entry *txe, *txn;
+ volatile struct ngbe_tx_desc *txr;
+ volatile struct ngbe_tx_desc *txd;
+ struct rte_mbuf *tx_pkt;
+ struct rte_mbuf *m_seg;
+ uint64_t buf_dma_addr;
+ uint32_t olinfo_status;
+ uint32_t cmd_type_len;
+ uint32_t pkt_len;
+ uint16_t slen;
+ uint64_t ol_flags;
+ uint16_t tx_id;
+ uint16_t tx_last;
+ uint16_t nb_tx;
+ uint16_t nb_used;
+ uint64_t tx_ol_req;
+ uint32_t ctx = 0;
+ uint32_t new_ctx;
+ union ngbe_tx_offload tx_offload;
+
+ tx_offload.data[0] = 0;
+ tx_offload.data[1] = 0;
+ txq = tx_queue;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+ tx_id = txq->tx_tail;
+ txe = &sw_ring[tx_id];
+
+ /* Determine if the descriptor ring needs to be cleaned. */
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ ngbe_xmit_cleanup(txq);
+
+ rte_prefetch0(&txe->mbuf->pool);
+
+ /* TX loop */
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ new_ctx = 0;
+ tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+
+ /*
+ * Determine how many (if any) context descriptors
+ * are needed for offload functionality.
+ */
+ ol_flags = tx_pkt->ol_flags;
+
+ /* If hardware offload required */
+ tx_ol_req = ol_flags & NGBE_TX_OFFLOAD_MASK;
+ if (tx_ol_req) {
+ tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
+ tx_pkt->packet_type);
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.vlan_tci = tx_pkt->vlan_tci;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.outer_tun_len = 0;
+
+ /* Build a new context if required, or reuse an existing one */
+ ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
+ /* Only allocate context descriptor if required */
+ new_ctx = (ctx == NGBE_CTX_NUM);
+ ctx = txq->ctx_curr;
+ }
+
+ /*
+ * Keep track of how many descriptors are used this loop
+ * This will always be the number of segments + the number of
+ * Context descriptors required to transmit the packet
+ */
+ nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
+
+ /*
+ * The number of descriptors that must be allocated for a
+ * packet is the number of segments of that packet, plus 1
+ * Context Descriptor for the hardware offload, if any.
+ * Determine the last TX descriptor to allocate in the TX ring
+ * for the packet, starting from the current position (tx_id)
+ * in the ring.
+ */
+ tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ /* Circular ring */
+ if (tx_last >= txq->nb_tx_desc)
+ tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+ " tx_first=%u tx_last=%u",
+ (uint16_t)txq->port_id,
+ (uint16_t)txq->queue_id,
+ (uint32_t)pkt_len,
+ (uint16_t)tx_id,
+ (uint16_t)tx_last);
+
+ /*
+ * Make sure there are enough TX descriptors available to
+ * transmit the entire packet.
+ * nb_used better be less than or equal to txq->tx_free_thresh
+ */
+ if (nb_used > txq->nb_tx_free) {
+ PMD_TX_LOG(DEBUG,
+ "Not enough free TX descriptors "
+ "nb_used=%4u nb_free=%4u "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->port_id, txq->queue_id);
+
+ if (ngbe_xmit_cleanup(txq) != 0) {
+ /* Could not clean any descriptors */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+
+ /* nb_used better be <= txq->tx_free_thresh */
+ if (unlikely(nb_used > txq->tx_free_thresh)) {
+ PMD_TX_LOG(DEBUG,
+ "The number of descriptors needed to "
+ "transmit the packet exceeds the "
+ "RS bit threshold. This will impact "
+ "performance."
+ "nb_used=%4u nb_free=%4u "
+ "tx_free_thresh=%4u. "
+ "(port=%d queue=%d)",
+ nb_used, txq->nb_tx_free,
+ txq->tx_free_thresh,
+ txq->port_id, txq->queue_id);
+ /*
+ * Loop here until there are enough TX
+ * descriptors or until the ring cannot be
+ * cleaned.
+ */
+ while (nb_used > txq->nb_tx_free) {
+ if (ngbe_xmit_cleanup(txq) != 0) {
+ /*
+ * Could not clean any
+ * descriptors
+ */
+ if (nb_tx == 0)
+ return 0;
+ goto end_of_tx;
+ }
+ }
+ }
+ }
+
+ /*
+ * By now there are enough free TX descriptors to transmit
+ * the packet.
+ */
+
+ /*
+ * Set common flags of all TX Data Descriptors.
+ *
+ * The following bits must be set in the first Data Descriptor
+ * and are ignored in the other ones:
+ * - NGBE_TXD_FCS
+ *
+ * The following bits must only be set in the last Data
+ * Descriptor:
+ * - NGBE_TXD_EOP
+ */
+ cmd_type_len = NGBE_TXD_FCS;
+
+ olinfo_status = 0;
+ if (tx_ol_req) {
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ /* when TSO is on, the paylen in the descriptor is
+ * not the packet len but the tcp payload len
+ */
+ pkt_len -= (tx_offload.l2_len +
+ tx_offload.l3_len + tx_offload.l4_len);
+ pkt_len -=
+ (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
+ ? tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len : 0;
+ }
+
+ /*
+ * Set up the Tx Advanced Context Descriptor if required
+ */
+ if (new_ctx) {
+ volatile struct ngbe_tx_ctx_desc *ctx_txd;
+
+ ctx_txd = (volatile struct ngbe_tx_ctx_desc *)
+ &txr[tx_id];
+
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+
+ ngbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
+
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ }
+
+ /*
+ * Set up the Tx Advanced Data Descriptor.
+ * This path is taken whether a new context descriptor
+ * was just built or an existing one is reused
+ */
+ cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
+ olinfo_status |=
+ tx_desc_cksum_flags_to_olinfo(ol_flags);
+ olinfo_status |= NGBE_TXD_IDX(ctx);
+ }
+
+ olinfo_status |= NGBE_TXD_PAYLEN(pkt_len);
+
+ m_seg = tx_pkt;
+ do {
+ txd = &txr[tx_id];
+ txn = &sw_ring[txe->next_id];
+ rte_prefetch0(&txn->mbuf->pool);
+
+ if (txe->mbuf != NULL)
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = m_seg;
+
+ /*
+ * Set up Transmit Data Descriptor.
+ */
+ slen = m_seg->data_len;
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
+ txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
+ txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
+ txd->dw3 = rte_cpu_to_le_32(olinfo_status);
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ m_seg = m_seg->next;
+ } while (m_seg != NULL);
+
+ /*
+ * The last packet data descriptor needs End Of Packet (EOP)
+ */
+ cmd_type_len |= NGBE_TXD_EOP;
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
+
+ txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
+ }
+
+end_of_tx:
+
+ rte_wmb();
+
+ /*
+ * Set the Transmit Descriptor Tail (TDT)
+ */
+ PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+ (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
+ (uint16_t)tx_id, (uint16_t)nb_tx);
+ ngbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
+ txq->tx_tail = tx_id;
+
+ return nb_tx;
+}
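
For reference, a minimal caller-side sketch (illustrative values; m is an
already-built single-segment or chained mbuf) of what must be filled in
before handing a TSO packet to this function through the usual burst API:

    m->ol_flags  = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
    m->l2_len    = sizeof(struct rte_ether_hdr);
    m->l3_len    = sizeof(struct rte_ipv4_hdr);
    m->l4_len    = sizeof(struct rte_tcp_hdr); /* no TCP options */
    m->tso_segsz = 1448;
    nb_sent = rte_eth_tx_burst(port_id, queue_id, &m, 1);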
+
/*********************************************************************
*
* RX functions
@@ -1123,6 +1734,31 @@ static const struct ngbe_txq_ops def_txq_ops = {
.reset = ngbe_reset_tx_queue,
};
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void __rte_cold
+ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
+{
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (txq->offloads == 0 &&
+ txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST) {
+ PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_burst = ngbe_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
+ PMD_INIT_LOG(DEBUG,
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
+ PMD_INIT_LOG(DEBUG,
+ " - tx_free_thresh = %lu [RTE_PMD_NGBE_TX_MAX_BURST=%lu]",
+ (unsigned long)txq->tx_free_thresh,
+ (unsigned long)RTE_PMD_NGBE_TX_MAX_BURST);
+ dev->tx_pkt_burst = ngbe_xmit_pkts;
+ }
+}
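
In other words, a queue configured with no Tx offloads and a sufficiently
large free threshold keeps the simple path; anything else selects the
full-featured path. A sketch (assuming RTE_PMD_NGBE_TX_MAX_BURST is 32, as
defined elsewhere in the driver):

    struct rte_eth_txconf txconf = {
        .offloads = 0,        /* no per-queue Tx offloads */
        .tx_free_thresh = 32, /* >= RTE_PMD_NGBE_TX_MAX_BURST */
    };
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
    /* -> ngbe_set_tx_function() selects ngbe_xmit_pkts_simple */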
+
uint64_t
ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
{
@@ -1262,6 +1898,9 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+ /* set up scalar TX function as appropriate */
+ ngbe_set_tx_function(dev, txq);
+
txq->ops->reset(txq);
dev->data->tx_queues[queue_idx] = txq;
@@ -135,8 +135,35 @@ struct ngbe_tx_ctx_desc {
__le32 dw3; /* w.mss_l4len_idx */
};
+/* @ngbe_tx_ctx_desc.dw0 */
+#define NGBE_TXD_IPLEN(v) LS(v, 0, 0x1FF) /* ip/fcoe header end */
+#define NGBE_TXD_MACLEN(v) LS(v, 9, 0x7F) /* desc mac len */
+#define NGBE_TXD_VLAN(v) LS(v, 16, 0xFFFF) /* vlan tag */
+
+/* @ngbe_tx_ctx_desc.dw1 */
+/*** bit 0-31, when NGBE_TXD_DTYP_FCOE=0 ***/
+#define NGBE_TXD_IPSEC_SAIDX(v) LS(v, 0, 0x3FF) /* ipsec SA index */
+#define NGBE_TXD_ETYPE(v) LS(v, 11, 0x1) /* tunnel type */
+#define NGBE_TXD_ETYPE_UDP LS(0, 11, 0x1)
+#define NGBE_TXD_ETYPE_GRE LS(1, 11, 0x1)
+#define NGBE_TXD_EIPLEN(v) LS(v, 12, 0x7F) /* tunnel ip header */
+#define NGBE_TXD_DTYP_FCOE MS(16, 0x1) /* FCoE/IP descriptor */
+#define NGBE_TXD_ETUNLEN(v) LS(v, 21, 0xFF) /* tunnel header */
+#define NGBE_TXD_DECTTL(v) LS(v, 29, 0xF) /* decrease ip TTL */
+
+/* @ngbe_tx_ctx_desc.dw2 */
+#define NGBE_TXD_IPSEC_ESPLEN(v) LS(v, 1, 0x1FF) /* ipsec ESP length */
+#define NGBE_TXD_SNAP MS(10, 0x1) /* SNAP indication */
+#define NGBE_TXD_TPID_SEL(v) LS(v, 11, 0x7) /* vlan tag index */
+#define NGBE_TXD_IPSEC_ESP MS(14, 0x1) /* ipsec type: esp=1 ah=0 */
+#define NGBE_TXD_IPSEC_ESPENC MS(15, 0x1) /* ESP encrypt */
+#define NGBE_TXD_CTXT MS(20, 0x1) /* context descriptor */
+#define NGBE_TXD_PTID(v) LS(v, 24, 0xFF) /* packet type */
/* @ngbe_tx_ctx_desc.dw3 */
#define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */
+#define NGBE_TXD_IDX(v) LS(v, 4, 0x1) /* ctxt desc index */
+#define NGBE_TXD_L4LEN(v) LS(v, 8, 0xFF) /* l4 header length */
+#define NGBE_TXD_MSS(v) LS(v, 16, 0xFFFF) /* l4 MSS */
/**
* Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
@@ -250,11 +277,34 @@ enum ngbe_ctx_num {
NGBE_CTX_NUM = 2, /**< CTX NUMBER */
};
+/** Offload features */
+union ngbe_tx_offload {
+ uint64_t data[2];
+ struct {
+ uint64_t ptid:8; /**< Packet Type Identifier. */
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+ uint64_t vlan_tci:16;
+ /**< VLAN Tag Control Identifier (CPU order). */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
+ uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
+ uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
+ };
+};
+
/**
* Structure to check if new context need be built
*/
struct ngbe_ctx_info {
uint64_t flags; /**< ol_flags for context build. */
+ /** Tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union ngbe_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union ngbe_tx_offload tx_offload_mask;
};
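
The two raw 64-bit words of the union let the match in what_ctx_update() run
as two integer compares instead of per-field checks; only fields whose bits
are set in tx_offload_mask take part. The invariant, as maintained by
ngbe_set_xmit_ctx() (illustration only):

    /* stored when the context is built, for i in {0, 1}: */
    cache->tx_offload.data[i] = tx_offload_mask.data[i] & tx_offload.data[i];
    /* reuse test against a later packet's offloads: */
    match = (cache->tx_offload.data[i] ==
             (cache->tx_offload_mask.data[i] & new_offload.data[i]));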
/**
@@ -298,6 +348,12 @@ struct ngbe_txq_ops {
void (*reset)(struct ngbe_tx_queue *txq);
};
+/* Takes an ethdev and a queue and sets up the tx function to be used based on
+ * the queue parameters. Used in tx_queue_setup by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);
+
void ngbe_set_rx_function(struct rte_eth_dev *dev);
uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);