[1/3] net/igc: code refactoring
Checks
Commit Message
Move the structures related to the Rx/Tx queues from igc_txrx.c to igc_txrx.h
to make the code cleaner and allow the variables to be used more conveniently.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/igc/igc_txrx.c | 118 ---------------------------------------------
drivers/net/igc/igc_txrx.h | 115 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 115 insertions(+), 118 deletions(-)
Comments
> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Tuesday, December 20, 2022 11:41 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH 1/3] net/igc: code refactoring
>
> Move related structures for Rx/Tx queue from igc_txrx.c to igc_txrx.h to
> make code cleaner and variables used more conveniently.
Not sure if this is necessary.
If a structure is only used internally, keeping it internal should be OK.
Otherwise, a reason needs to be given for why it is being exposed.
>
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
> drivers/net/igc/igc_txrx.c | 118 ---------------------------------------------
> drivers/net/igc/igc_txrx.h | 115
> +++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 115 insertions(+), 118 deletions(-)
>
> diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c index
> ffd219b..c462e91 100644
> --- a/drivers/net/igc/igc_txrx.c
> +++ b/drivers/net/igc/igc_txrx.c
> @@ -93,124 +93,6 @@
>
> #define IGC_TX_OFFLOAD_NOTSUP_MASK
> (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
>
> -/**
> - * Structure associated with each descriptor of the RX ring of a RX queue.
> - */
> -struct igc_rx_entry {
> - struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> -};
> -
> -/**
> - * Structure associated with each RX queue.
> - */
> -struct igc_rx_queue {
> - struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring.
> */
> - volatile union igc_adv_rx_desc *rx_ring;
> - /**< RX ring virtual address. */
> - uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
> - volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
> - volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
> - struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
> - struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> */
> - struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet.
> */
> - uint16_t nb_rx_desc; /**< number of RX descriptors. */
> - uint16_t rx_tail; /**< current value of RDT register. */
> - uint16_t nb_rx_hold; /**< number of held free RX desc. */
> - uint16_t rx_free_thresh; /**< max free RX desc to hold. */
> - uint16_t queue_id; /**< RX queue index. */
> - uint16_t reg_idx; /**< RX queue register index. */
> - uint16_t port_id; /**< Device port identifier. */
> - uint8_t pthresh; /**< Prefetch threshold register. */
> - uint8_t hthresh; /**< Host threshold register. */
> - uint8_t wthresh; /**< Write-back threshold register. */
> - uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
> - uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
> - uint32_t flags; /**< RX flags. */
> - uint64_t offloads; /**< offloads of
> RTE_ETH_RX_OFFLOAD_* */
> -};
> -
> -/** Offload features */
> -union igc_tx_offload {
> - uint64_t data;
> - struct {
> - uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> - uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> - uint64_t vlan_tci:16;
> - /**< VLAN Tag Control Identifier(CPU order). */
> - uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> - uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> - /* uint64_t unused:8; */
> - };
> -};
> -
> -/*
> - * Compare mask for igc_tx_offload.data,
> - * should be in sync with igc_tx_offload layout.
> - */
> -#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3
> header mask. */
> -#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan
> mask. */
> -#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP
> header mask. */
> -#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO
> segsz mask. */
> -/** Mac + IP + TCP + Mss mask. */
> -#define TX_TSO_CMP_MASK \
> - (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> TX_TSO_MSS_CMP_MASK)
> -
> -/**
> - * Structure to check if new context need be built
> - */
> -struct igc_advctx_info {
> - uint64_t flags; /**< ol_flags related to context build. */
> - /** tx offload: vlan, tso, l2-l3-l4 lengths. */
> - union igc_tx_offload tx_offload;
> - /** compare mask for tx offload. */
> - union igc_tx_offload tx_offload_mask;
> -};
> -
> -/**
> - * Hardware context number
> - */
> -enum {
> - IGC_CTX_0 = 0, /**< CTX0 */
> - IGC_CTX_1 = 1, /**< CTX1 */
> - IGC_CTX_NUM = 2, /**< CTX_NUM */
> -};
> -
> -/**
> - * Structure associated with each descriptor of the TX ring of a TX queue.
> - */
> -struct igc_tx_entry {
> - struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> - uint16_t next_id; /**< Index of next descriptor in ring. */
> - uint16_t last_id; /**< Index of last scattered descriptor. */
> -};
> -
> -/**
> - * Structure associated with each TX queue.
> - */
> -struct igc_tx_queue {
> - volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> - uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
> - struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
> - volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
> - uint32_t txd_type; /**< Device-specific TXD type */
> - uint16_t nb_tx_desc; /**< number of TX descriptors. */
> - uint16_t tx_tail; /**< Current value of TDT register. */
> - uint16_t tx_head;
> - /**< Index of first used TX descriptor. */
> - uint16_t queue_id; /**< TX queue index. */
> - uint16_t reg_idx; /**< TX queue register index. */
> - uint16_t port_id; /**< Device port identifier. */
> - uint8_t pthresh; /**< Prefetch threshold register. */
> - uint8_t hthresh; /**< Host threshold register. */
> - uint8_t wthresh; /**< Write-back threshold register. */
> - uint8_t ctx_curr;
> -
> - /**< Start context position for transmit queue. */
> - struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> - /**< Hardware context history.*/
> - uint64_t offloads; /**< offloads of
> RTE_ETH_TX_OFFLOAD_* */
> -};
> -
> static inline uint64_t
> rx_desc_statuserr_to_pkt_flags(uint32_t statuserr) { diff --git
> a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h index
> 02a0a05..5731761 100644
> --- a/drivers/net/igc/igc_txrx.h
> +++ b/drivers/net/igc/igc_txrx.h
> @@ -11,6 +11,121 @@
> extern "C" {
> #endif
>
> +struct igc_rx_entry {
> + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor.
> */ };
> +
> +/**
> + * Structure associated with each RX queue.
> + */
> +struct igc_rx_queue {
> + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring.
> */
> + volatile union igc_adv_rx_desc *rx_ring;
> + /**< RX ring virtual address. */
> + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
> + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
> + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
> + struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
> + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> */
> + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet.
> */
> + uint16_t nb_rx_desc; /**< number of RX descriptors. */
> + uint16_t rx_tail; /**< current value of RDT register. */
> + uint16_t nb_rx_hold; /**< number of held free RX desc. */
> + uint16_t rx_free_thresh; /**< max free RX desc to hold. */
> + uint16_t queue_id; /**< RX queue index. */
> + uint16_t reg_idx; /**< RX queue register index. */
> + uint16_t port_id; /**< Device port identifier. */
> + uint8_t pthresh; /**< Prefetch threshold register. */
> + uint8_t hthresh; /**< Host threshold register. */
> + uint8_t wthresh; /**< Write-back threshold register. */
> + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
> + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
> + uint32_t flags; /**< RX flags. */
> + uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_*
> */
> +};
> +
> +/** Offload features */
> +union igc_tx_offload {
> + uint64_t data;
> + struct {
> + uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> + uint64_t vlan_tci:16;
> + /**< VLAN Tag Control Identifier(CPU order). */
> + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> + uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> + /* uint64_t unused:8; */
> + };
> +};
> +
> +/**
> + * Compare mask for igc_tx_offload.data,
> + * should be in sync with igc_tx_offload layout.
> + */
> +#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3
> header mask. */
> +#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask.
> */
> +#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP
> header mask. */
> +#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO
> segsz mask. */
> +/** Mac + IP + TCP + Mss mask. */
> +#define TX_TSO_CMP_MASK \
> + (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> TX_TSO_MSS_CMP_MASK)
> +
> +/**
> + * Structure to check if new context need be built */ struct
> +igc_advctx_info {
> + uint64_t flags; /**< ol_flags related to context build. */
> + /** tx offload: vlan, tso, l2-l3-l4 lengths. */
> + union igc_tx_offload tx_offload;
> + /** compare mask for tx offload. */
> + union igc_tx_offload tx_offload_mask;
> +};
> +
> +/**
> + * Hardware context number
> + */
> +enum {
> + IGC_CTX_0 = 0, /**< CTX0 */
> + IGC_CTX_1 = 1, /**< CTX1 */
> + IGC_CTX_NUM = 2, /**< CTX_NUM */
> +};
> +
> +/**
> + * Structure associated with each descriptor of the TX ring of a TX queue.
> + */
> +struct igc_tx_entry {
> + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> + uint16_t next_id; /**< Index of next descriptor in ring. */
> + uint16_t last_id; /**< Index of last scattered descriptor. */ };
> +
> +/**
> + * Structure associated with each TX queue.
> + */
> +struct igc_tx_queue {
> + volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
> + struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
> + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
> + uint32_t txd_type; /**< Device-specific TXD type */
> + uint16_t nb_tx_desc; /**< number of TX descriptors. */
> + uint16_t tx_tail; /**< Current value of TDT register. */
> + uint16_t tx_head;
> + /**< Index of first used TX descriptor. */
> + uint16_t queue_id; /**< TX queue index. */
> + uint16_t reg_idx; /**< TX queue register index. */
> + uint16_t port_id; /**< Device port identifier. */
> + uint8_t pthresh; /**< Prefetch threshold register. */
> + uint8_t hthresh; /**< Host threshold register. */
> + uint8_t wthresh; /**< Write-back threshold register. */
> + uint8_t ctx_curr;
> +
> + /**< Start context position for transmit queue. */
> + struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> + /**< Hardware context history.*/
> + uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_*
> */
> +};
> +
> /*
> * RX/TX function prototypes
> */
> --
> 2.9.5
Hi Qi,
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Tuesday, January 17, 2023 10:25 AM
> To: Su, Simei <simei.su@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>
> Subject: RE: [PATCH 1/3] net/igc: code refactoring
>
>
>
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Tuesday, December 20, 2022 11:41 AM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> > <junfeng.guo@intel.com>
> > Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> > <simei.su@intel.com>
> > Subject: [PATCH 1/3] net/igc: code refactoring
> >
> > Move related structures for Rx/Tx queue from igc_txrx.c to igc_txrx.h
> > to make code cleaner and variables used more conveniently.
>
> Not sure if this is necessary.
> If a structure only be used internally, keep it internally should be OK.
> Otherwise need to give a reason why to expose it.
OK. I will rework the commit log to give a detailed reason in v2.
Thanks,
Simei
>
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> > drivers/net/igc/igc_txrx.c | 118
> > ---------------------------------------------
> > drivers/net/igc/igc_txrx.h | 115
> > +++++++++++++++++++++++++++++++++++++++++++
> > 2 files changed, 115 insertions(+), 118 deletions(-)
> >
> > diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
> > index
> > ffd219b..c462e91 100644
> > --- a/drivers/net/igc/igc_txrx.c
> > +++ b/drivers/net/igc/igc_txrx.c
> > @@ -93,124 +93,6 @@
> >
> > #define IGC_TX_OFFLOAD_NOTSUP_MASK
> > (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
> >
> > -/**
> > - * Structure associated with each descriptor of the RX ring of a RX queue.
> > - */
> > -struct igc_rx_entry {
> > - struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> > -};
> > -
> > -/**
> > - * Structure associated with each RX queue.
> > - */
> > -struct igc_rx_queue {
> > - struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring.
> > */
> > - volatile union igc_adv_rx_desc *rx_ring;
> > - /**< RX ring virtual address. */
> > - uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
> > - volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
> > - volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
> > - struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
> > - struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> > */
> > - struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet.
> > */
> > - uint16_t nb_rx_desc; /**< number of RX descriptors. */
> > - uint16_t rx_tail; /**< current value of RDT register. */
> > - uint16_t nb_rx_hold; /**< number of held free RX desc. */
> > - uint16_t rx_free_thresh; /**< max free RX desc to hold. */
> > - uint16_t queue_id; /**< RX queue index. */
> > - uint16_t reg_idx; /**< RX queue register index. */
> > - uint16_t port_id; /**< Device port identifier. */
> > - uint8_t pthresh; /**< Prefetch threshold register. */
> > - uint8_t hthresh; /**< Host threshold register. */
> > - uint8_t wthresh; /**< Write-back threshold register. */
> > - uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
> > - uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
> > - uint32_t flags; /**< RX flags. */
> > - uint64_t offloads; /**< offloads of
> > RTE_ETH_RX_OFFLOAD_* */
> > -};
> > -
> > -/** Offload features */
> > -union igc_tx_offload {
> > - uint64_t data;
> > - struct {
> > - uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > - uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> > - uint64_t vlan_tci:16;
> > - /**< VLAN Tag Control Identifier(CPU order). */
> > - uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> > - uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> > - /* uint64_t unused:8; */
> > - };
> > -};
> > -
> > -/*
> > - * Compare mask for igc_tx_offload.data,
> > - * should be in sync with igc_tx_offload layout.
> > - */
> > -#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3
> > header mask. */
> > -#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan
> > mask. */
> > -#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP
> > header mask. */
> > -#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO
> > segsz mask. */
> > -/** Mac + IP + TCP + Mss mask. */
> > -#define TX_TSO_CMP_MASK \
> > - (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> > TX_TSO_MSS_CMP_MASK)
> > -
> > -/**
> > - * Structure to check if new context need be built
> > - */
> > -struct igc_advctx_info {
> > - uint64_t flags; /**< ol_flags related to context build. */
> > - /** tx offload: vlan, tso, l2-l3-l4 lengths. */
> > - union igc_tx_offload tx_offload;
> > - /** compare mask for tx offload. */
> > - union igc_tx_offload tx_offload_mask;
> > -};
> > -
> > -/**
> > - * Hardware context number
> > - */
> > -enum {
> > - IGC_CTX_0 = 0, /**< CTX0 */
> > - IGC_CTX_1 = 1, /**< CTX1 */
> > - IGC_CTX_NUM = 2, /**< CTX_NUM */
> > -};
> > -
> > -/**
> > - * Structure associated with each descriptor of the TX ring of a TX queue.
> > - */
> > -struct igc_tx_entry {
> > - struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> > - uint16_t next_id; /**< Index of next descriptor in ring. */
> > - uint16_t last_id; /**< Index of last scattered descriptor. */
> > -};
> > -
> > -/**
> > - * Structure associated with each TX queue.
> > - */
> > -struct igc_tx_queue {
> > - volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> > - uint64_t tx_ring_phys_addr; /**< TX ring DMA address.
> */
> > - struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
> > - volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
> > - uint32_t txd_type; /**< Device-specific TXD type
> */
> > - uint16_t nb_tx_desc; /**< number of TX descriptors.
> */
> > - uint16_t tx_tail; /**< Current value of TDT register. */
> > - uint16_t tx_head;
> > - /**< Index of first used TX descriptor. */
> > - uint16_t queue_id; /**< TX queue index. */
> > - uint16_t reg_idx; /**< TX queue register index. */
> > - uint16_t port_id; /**< Device port identifier. */
> > - uint8_t pthresh; /**< Prefetch threshold register. */
> > - uint8_t hthresh; /**< Host threshold register. */
> > - uint8_t wthresh; /**< Write-back threshold register.
> */
> > - uint8_t ctx_curr;
> > -
> > - /**< Start context position for transmit queue. */
> > - struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> > - /**< Hardware context history.*/
> > - uint64_t offloads; /**< offloads of
> > RTE_ETH_TX_OFFLOAD_* */
> > -};
> > -
> > static inline uint64_t
> > rx_desc_statuserr_to_pkt_flags(uint32_t statuserr) { diff --git
> > a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h index
> > 02a0a05..5731761 100644
> > --- a/drivers/net/igc/igc_txrx.h
> > +++ b/drivers/net/igc/igc_txrx.h
> > @@ -11,6 +11,121 @@
> > extern "C" {
> > #endif
> >
> > +struct igc_rx_entry {
> > + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor.
> > */ };
> > +
> > +/**
> > + * Structure associated with each RX queue.
> > + */
> > +struct igc_rx_queue {
> > + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring.
> > */
> > + volatile union igc_adv_rx_desc *rx_ring;
> > + /**< RX ring virtual address. */
> > + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
> > + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
> > + volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
> > + struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
> > + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> > */
> > + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet.
> > */
> > + uint16_t nb_rx_desc; /**< number of RX descriptors. */
> > + uint16_t rx_tail; /**< current value of RDT register. */
> > + uint16_t nb_rx_hold; /**< number of held free RX desc. */
> > + uint16_t rx_free_thresh; /**< max free RX desc to hold. */
> > + uint16_t queue_id; /**< RX queue index. */
> > + uint16_t reg_idx; /**< RX queue register index. */
> > + uint16_t port_id; /**< Device port identifier. */
> > + uint8_t pthresh; /**< Prefetch threshold register. */
> > + uint8_t hthresh; /**< Host threshold register. */
> > + uint8_t wthresh; /**< Write-back threshold register. */
> > + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
> > + uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
> > + uint32_t flags; /**< RX flags. */
> > + uint64_t offloads; /**< offloads of
> RTE_ETH_RX_OFFLOAD_*
> > */
> > +};
> > +
> > +/** Offload features */
> > +union igc_tx_offload {
> > + uint64_t data;
> > + struct {
> > + uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > + uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> > + uint64_t vlan_tci:16;
> > + /**< VLAN Tag Control Identifier(CPU order). */
> > + uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> > + uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> > + /* uint64_t unused:8; */
> > + };
> > +};
> > +
> > +/**
> > + * Compare mask for igc_tx_offload.data,
> > + * should be in sync with igc_tx_offload layout.
> > + */
> > +#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3
> > header mask. */
> > +#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan
> mask.
> > */
> > +#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP
> > header mask. */
> > +#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO
> > segsz mask. */
> > +/** Mac + IP + TCP + Mss mask. */
> > +#define TX_TSO_CMP_MASK \
> > + (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> > TX_TSO_MSS_CMP_MASK)
> > +
> > +/**
> > + * Structure to check if new context need be built */ struct
> > +igc_advctx_info {
> > + uint64_t flags; /**< ol_flags related to context build. */
> > + /** tx offload: vlan, tso, l2-l3-l4 lengths. */
> > + union igc_tx_offload tx_offload;
> > + /** compare mask for tx offload. */
> > + union igc_tx_offload tx_offload_mask; };
> > +
> > +/**
> > + * Hardware context number
> > + */
> > +enum {
> > + IGC_CTX_0 = 0, /**< CTX0 */
> > + IGC_CTX_1 = 1, /**< CTX1 */
> > + IGC_CTX_NUM = 2, /**< CTX_NUM */
> > +};
> > +
> > +/**
> > + * Structure associated with each descriptor of the TX ring of a TX queue.
> > + */
> > +struct igc_tx_entry {
> > + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> > + uint16_t next_id; /**< Index of next descriptor in ring. */
> > + uint16_t last_id; /**< Index of last scattered descriptor. */ };
> > +
> > +/**
> > + * Structure associated with each TX queue.
> > + */
> > +struct igc_tx_queue {
> > + volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> > + uint64_t tx_ring_phys_addr; /**< TX ring DMA address.
> */
> > + struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
> > + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
> > + uint32_t txd_type; /**< Device-specific TXD type
> */
> > + uint16_t nb_tx_desc; /**< number of TX descriptors.
> */
> > + uint16_t tx_tail; /**< Current value of TDT register. */
> > + uint16_t tx_head;
> > + /**< Index of first used TX descriptor. */
> > + uint16_t queue_id; /**< TX queue index. */
> > + uint16_t reg_idx; /**< TX queue register index. */
> > + uint16_t port_id; /**< Device port identifier. */
> > + uint8_t pthresh; /**< Prefetch threshold register. */
> > + uint8_t hthresh; /**< Host threshold register. */
> > + uint8_t wthresh; /**< Write-back threshold register.
> */
> > + uint8_t ctx_curr;
> > +
> > + /**< Start context position for transmit queue. */
> > + struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> > + /**< Hardware context history.*/
> > + uint64_t offloads; /**< offloads of
> RTE_ETH_TX_OFFLOAD_*
> > */
> > +};
> > +
> > /*
> > * RX/TX function prototypes
> > */
> > --
> > 2.9.5
@@ -93,124 +93,6 @@
#define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igc_rx_entry {
- struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igc_rx_queue {
- struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
- volatile union igc_adv_rx_desc *rx_ring;
- /**< RX ring virtual address. */
- uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
- volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
- volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
- struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
- struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
- struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
- uint16_t nb_rx_desc; /**< number of RX descriptors. */
- uint16_t rx_tail; /**< current value of RDT register. */
- uint16_t nb_rx_hold; /**< number of held free RX desc. */
- uint16_t rx_free_thresh; /**< max free RX desc to hold. */
- uint16_t queue_id; /**< RX queue index. */
- uint16_t reg_idx; /**< RX queue register index. */
- uint16_t port_id; /**< Device port identifier. */
- uint8_t pthresh; /**< Prefetch threshold register. */
- uint8_t hthresh; /**< Host threshold register. */
- uint8_t wthresh; /**< Write-back threshold register. */
- uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
- uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
- uint32_t flags; /**< RX flags. */
- uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
-};
-
-/** Offload features */
-union igc_tx_offload {
- uint64_t data;
- struct {
- uint64_t l3_len:9; /**< L3 (IP) Header Length. */
- uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
- uint64_t vlan_tci:16;
- /**< VLAN Tag Control Identifier(CPU order). */
- uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
- uint64_t tso_segsz:16; /**< TCP TSO segment size. */
- /* uint64_t unused:8; */
- };
-};
-
-/*
- * Compare mask for igc_tx_offload.data,
- * should be in sync with igc_tx_offload layout.
- */
-#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
-#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
-#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
-#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
-/** Mac + IP + TCP + Mss mask. */
-#define TX_TSO_CMP_MASK \
- (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
-
-/**
- * Structure to check if new context need be built
- */
-struct igc_advctx_info {
- uint64_t flags; /**< ol_flags related to context build. */
- /** tx offload: vlan, tso, l2-l3-l4 lengths. */
- union igc_tx_offload tx_offload;
- /** compare mask for tx offload. */
- union igc_tx_offload tx_offload_mask;
-};
-
-/**
- * Hardware context number
- */
-enum {
- IGC_CTX_0 = 0, /**< CTX0 */
- IGC_CTX_1 = 1, /**< CTX1 */
- IGC_CTX_NUM = 2, /**< CTX_NUM */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igc_tx_entry {
- struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
- uint16_t next_id; /**< Index of next descriptor in ring. */
- uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igc_tx_queue {
- volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
- uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
- struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
- volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
- uint32_t txd_type; /**< Device-specific TXD type */
- uint16_t nb_tx_desc; /**< number of TX descriptors. */
- uint16_t tx_tail; /**< Current value of TDT register. */
- uint16_t tx_head;
- /**< Index of first used TX descriptor. */
- uint16_t queue_id; /**< TX queue index. */
- uint16_t reg_idx; /**< TX queue register index. */
- uint16_t port_id; /**< Device port identifier. */
- uint8_t pthresh; /**< Prefetch threshold register. */
- uint8_t hthresh; /**< Host threshold register. */
- uint8_t wthresh; /**< Write-back threshold register. */
- uint8_t ctx_curr;
-
- /**< Start context position for transmit queue. */
- struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
- /**< Hardware context history.*/
- uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
-};
-
static inline uint64_t
rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
{
@@ -11,6 +11,121 @@
extern "C" {
#endif
+struct igc_rx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igc_rx_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ volatile union igc_adv_rx_desc *rx_ring;
+ /**< RX ring virtual address. */
+ uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
+ volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
+ volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
+ struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
+ struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+ struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint16_t nb_rx_desc; /**< number of RX descriptors. */
+ uint16_t rx_tail; /**< current value of RDT register. */
+ uint16_t nb_rx_hold; /**< number of held free RX desc. */
+ uint16_t rx_free_thresh; /**< max free RX desc to hold. */
+ uint16_t queue_id; /**< RX queue index. */
+ uint16_t reg_idx; /**< RX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
+ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
+ uint32_t flags; /**< RX flags. */
+ uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
+};
+
+/** Offload features */
+union igc_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint64_t vlan_tci:16;
+ /**< VLAN Tag Control Identifier(CPU order). */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size. */
+ /* uint64_t unused:8; */
+ };
+};
+
+/**
+ * Compare mask for igc_tx_offload.data,
+ * should be in sync with igc_tx_offload layout.
+ */
+#define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
+#define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
+#define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
+#define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
+/** Mac + IP + TCP + Mss mask. */
+#define TX_TSO_CMP_MASK \
+ (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
+
+/**
+ * Structure to check if new context need be built
+ */
+struct igc_advctx_info {
+ uint64_t flags; /**< ol_flags related to context build. */
+ /** tx offload: vlan, tso, l2-l3-l4 lengths. */
+ union igc_tx_offload tx_offload;
+ /** compare mask for tx offload. */
+ union igc_tx_offload tx_offload_mask;
+};
+
+/**
+ * Hardware context number
+ */
+enum {
+ IGC_CTX_0 = 0, /**< CTX0 */
+ IGC_CTX_1 = 1, /**< CTX1 */
+ IGC_CTX_NUM = 2, /**< CTX_NUM */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igc_tx_entry {
+ struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+ uint16_t next_id; /**< Index of next descriptor in ring. */
+ uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igc_tx_queue {
+ volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
+ uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
+ struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
+ volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
+ uint32_t txd_type; /**< Device-specific TXD type */
+ uint16_t nb_tx_desc; /**< number of TX descriptors. */
+ uint16_t tx_tail; /**< Current value of TDT register. */
+ uint16_t tx_head;
+ /**< Index of first used TX descriptor. */
+ uint16_t queue_id; /**< TX queue index. */
+ uint16_t reg_idx; /**< TX queue register index. */
+ uint16_t port_id; /**< Device port identifier. */
+ uint8_t pthresh; /**< Prefetch threshold register. */
+ uint8_t hthresh; /**< Host threshold register. */
+ uint8_t wthresh; /**< Write-back threshold register. */
+ uint8_t ctx_curr;
+
+ /**< Start context position for transmit queue. */
+ struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
+ /**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
+};
+
/*
* RX/TX function prototypes
*/