[v3,1/1] net/mana: add vlan tagging support
Checks
Commit Message
For tx path, use LONG_PACKET_FORMAT if vlan tag is present. For rx,
extract vlan id from oob, put into mbuf and set the vlan flags in
mbuf.
Signed-off-by: Wei Hu <weh@microsoft.com>
---
v3:
- Adjust the pkt_idx position in the code so it will be executed even when
adding vlan header fails.
v2:
- Use existing vlan tag processing macros.
- Add vlan header back if vlan_strip flag is not set on the receiving path.
drivers/net/mana/mana.c | 3 +++
drivers/net/mana/mana.h | 4 ++++
drivers/net/mana/rx.c | 22 ++++++++++++++++++----
drivers/net/mana/tx.c | 21 ++++++++++++++++++---
4 files changed, 43 insertions(+), 7 deletions(-)
Comments
> Subject: [PATCH v3 1/1] net/mana: add vlan tagging support
>
> For tx path, use LONG_PACKET_FORMAT if vlan tag is present. For rx, extract vlan
> id from oob, put into mbuf and set the vlan flags in mbuf.
>
> Signed-off-by: Wei Hu <weh@microsoft.com>
Acked-by: Long Li <longli@microsoft.com>
> ---
>
> v3:
> - Adjust the pkt_idx position in the code so it will be executed even when adding
> vlan header fails.
>
> v2:
> - Use existing vlan tag processing macros.
> - Add vlan header back if vlan_strip flag is not set on the receiving path.
>
> drivers/net/mana/mana.c | 3 +++
> drivers/net/mana/mana.h | 4 ++++
> drivers/net/mana/rx.c | 22 ++++++++++++++++++----
> drivers/net/mana/tx.c | 21 ++++++++++++++++++---
> 4 files changed, 43 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
> index 2df2461d2f..68c625258e 100644
> --- a/drivers/net/mana/mana.c
> +++ b/drivers/net/mana/mana.c
> @@ -94,6 +94,9 @@ mana_dev_configure(struct rte_eth_dev *dev)
> return -EINVAL;
> }
>
> + priv->vlan_strip = !!(dev_conf->rxmode.offloads &
> + RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
> +
> priv->num_queues = dev->data->nb_rx_queues;
>
> manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
> diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
> index 3626925871..37f654f0e6 100644
> --- a/drivers/net/mana/mana.h
> +++ b/drivers/net/mana/mana.h
> @@ -21,10 +21,12 @@ struct mana_shared_data {
> #define MANA_MAX_MAC_ADDR 1
>
> #define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
> + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
> RTE_ETH_RX_OFFLOAD_CHECKSUM | \
> RTE_ETH_RX_OFFLOAD_RSS_HASH)
>
> #define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
> + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
> RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
> RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
> RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
> @@ -345,6 +347,8 @@ struct mana_priv {
> /* IB device port */
> uint8_t dev_port;
>
> + uint8_t vlan_strip;
> +
> struct ibv_context *ib_ctx;
> struct ibv_pd *ib_pd;
> struct ibv_pd *ib_parent_pd;
> diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
> index 16e647baf5..0c26702b73 100644
> --- a/drivers/net/mana/rx.c
> +++ b/drivers/net/mana/rx.c
> @@ -532,10 +532,6 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> 			mbuf->hash.rss = oob->packet_info[pkt_idx].packet_hash;
> 		}
>
> - pkts[pkt_received++] = mbuf;
> - rxq->stats.packets++;
> - rxq->stats.bytes += mbuf->data_len;
> -
> pkt_idx++;
> /* Move on the next completion if all packets are processed */
> 		if (pkt_idx >= RX_COM_OOB_NUM_PACKETINFO_SEGMENTS) {
> @@ -543,6 +539,24 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
> i++;
> }
>
> + if (oob->rx_vlan_tag_present) {
> +			mbuf->ol_flags |=
> +				RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
> + mbuf->vlan_tci = oob->rx_vlan_id;
> +
> + if (!priv->vlan_strip && rte_vlan_insert(&mbuf)) {
> + DRV_LOG(ERR, "vlan insert failed");
> + rxq->stats.errors++;
> + rte_pktmbuf_free(mbuf);
> +
> + goto drop;
> + }
> + }
> +
> + pkts[pkt_received++] = mbuf;
> + rxq->stats.packets++;
> + rxq->stats.bytes += mbuf->data_len;
> +
> drop:
> rxq->desc_ring_tail++;
> 		if (rxq->desc_ring_tail >= rxq->num_desc)
> diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
> index 58c4a1d976..272a28bcba 100644
> --- a/drivers/net/mana/tx.c
> +++ b/drivers/net/mana/tx.c
> @@ -254,7 +254,18 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> }
>
> /* Fill in the oob */
> - tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
> + if (m_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
> +			tx_oob.short_oob.packet_format = LONG_PACKET_FORMAT;
> + tx_oob.long_oob.inject_vlan_prior_tag = 1;
> + tx_oob.long_oob.priority_code_point =
> + RTE_VLAN_TCI_PRI(m_pkt->vlan_tci);
> + tx_oob.long_oob.drop_eligible_indicator =
> + RTE_VLAN_TCI_DEI(m_pkt->vlan_tci);
> + tx_oob.long_oob.vlan_identifier =
> + RTE_VLAN_TCI_ID(m_pkt->vlan_tci);
> + } else {
> +			tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
> + }
> tx_oob.short_oob.tx_is_outer_ipv4 =
> m_pkt->ol_flags & RTE_MBUF_F_TX_IPV4 ? 1 : 0;
> tx_oob.short_oob.tx_is_outer_ipv6 =
> @@ -409,8 +420,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>
> work_req.sgl = sgl.gdma_sgl;
> work_req.num_sgl_elements = m_pkt->nb_segs;
> - work_req.inline_oob_size_in_bytes =
> - sizeof(struct transmit_short_oob_v2);
> +		if (tx_oob.short_oob.packet_format == SHORT_PACKET_FORMAT)
> + work_req.inline_oob_size_in_bytes =
> + sizeof(struct transmit_short_oob_v2);
> + else
> + work_req.inline_oob_size_in_bytes =
> + sizeof(struct transmit_oob_v2);
> work_req.inline_oob_data = &tx_oob;
> work_req.flags = 0;
> work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
> --
> 2.34.1
Recheck-request: iol-broadcom-Performance
I am seeing some increased throughput variance on this NIC recently
(may be related to upgrading device firmware). The Community Lab team
will look into it. This failure can be ignored, of course, and should be
overwritten once the retest is picked up.
On 3/13/2024 5:57 PM, Long Li wrote:
>> Subject: [PATCH v3 1/1] net/mana: add vlan tagging support
>>
>> For tx path, use LONG_PACKET_FORMAT if vlan tag is present. For rx, extract vlan
>> id from oob, put into mbuf and set the vlan flags in mbuf.
>>
>> Signed-off-by: Wei Hu <weh@microsoft.com>
> Acked-by: Long Li <longli@microsoft.com>
>
Applied to dpdk-next-net/main, thanks.
@@ -94,6 +94,9 @@ mana_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
+ priv->vlan_strip = !!(dev_conf->rxmode.offloads &
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+
priv->num_queues = dev->data->nb_rx_queues;
manadv_set_context_attr(priv->ib_ctx, MANADV_CTX_ATTR_BUF_ALLOCATORS,
@@ -21,10 +21,12 @@ struct mana_shared_data {
#define MANA_MAX_MAC_ADDR 1
#define MANA_DEV_RX_OFFLOAD_SUPPORT ( \
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
RTE_ETH_RX_OFFLOAD_CHECKSUM | \
RTE_ETH_RX_OFFLOAD_RSS_HASH)
#define MANA_DEV_TX_OFFLOAD_SUPPORT ( \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
@@ -345,6 +347,8 @@ struct mana_priv {
/* IB device port */
uint8_t dev_port;
+ uint8_t vlan_strip;
+
struct ibv_context *ib_ctx;
struct ibv_pd *ib_pd;
struct ibv_pd *ib_parent_pd;
@@ -532,10 +532,6 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
mbuf->hash.rss = oob->packet_info[pkt_idx].packet_hash;
}
- pkts[pkt_received++] = mbuf;
- rxq->stats.packets++;
- rxq->stats.bytes += mbuf->data_len;
-
pkt_idx++;
/* Move on the next completion if all packets are processed */
if (pkt_idx >= RX_COM_OOB_NUM_PACKETINFO_SEGMENTS) {
@@ -543,6 +539,24 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
i++;
}
+ if (oob->rx_vlan_tag_present) {
+ mbuf->ol_flags |=
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = oob->rx_vlan_id;
+
+ if (!priv->vlan_strip && rte_vlan_insert(&mbuf)) {
+ DRV_LOG(ERR, "vlan insert failed");
+ rxq->stats.errors++;
+ rte_pktmbuf_free(mbuf);
+
+ goto drop;
+ }
+ }
+
+ pkts[pkt_received++] = mbuf;
+ rxq->stats.packets++;
+ rxq->stats.bytes += mbuf->data_len;
+
drop:
rxq->desc_ring_tail++;
if (rxq->desc_ring_tail >= rxq->num_desc)
@@ -254,7 +254,18 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Fill in the oob */
- tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+ if (m_pkt->ol_flags & RTE_MBUF_F_TX_VLAN) {
+ tx_oob.short_oob.packet_format = LONG_PACKET_FORMAT;
+ tx_oob.long_oob.inject_vlan_prior_tag = 1;
+ tx_oob.long_oob.priority_code_point =
+ RTE_VLAN_TCI_PRI(m_pkt->vlan_tci);
+ tx_oob.long_oob.drop_eligible_indicator =
+ RTE_VLAN_TCI_DEI(m_pkt->vlan_tci);
+ tx_oob.long_oob.vlan_identifier =
+ RTE_VLAN_TCI_ID(m_pkt->vlan_tci);
+ } else {
+ tx_oob.short_oob.packet_format = SHORT_PACKET_FORMAT;
+ }
tx_oob.short_oob.tx_is_outer_ipv4 =
m_pkt->ol_flags & RTE_MBUF_F_TX_IPV4 ? 1 : 0;
tx_oob.short_oob.tx_is_outer_ipv6 =
@@ -409,8 +420,12 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
work_req.sgl = sgl.gdma_sgl;
work_req.num_sgl_elements = m_pkt->nb_segs;
- work_req.inline_oob_size_in_bytes =
- sizeof(struct transmit_short_oob_v2);
+ if (tx_oob.short_oob.packet_format == SHORT_PACKET_FORMAT)
+ work_req.inline_oob_size_in_bytes =
+ sizeof(struct transmit_short_oob_v2);
+ else
+ work_req.inline_oob_size_in_bytes =
+ sizeof(struct transmit_oob_v2);
work_req.inline_oob_data = &tx_oob;
work_req.flags = 0;
work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;