[v1] net/iavf: fix function pointer in multi-process
Checks
Commit Message
This patch uses the RXDID index value to select the handler function at
call time, instead of saving a function pointer chosen from the Receive
Flex Descriptor profile ID.
Otherwise the secondary process would run with a wrong function address
inherited from the primary process.
Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
Cc: stable@dpdk.org
Signed-off-by: Steve Yang <stevex.yang@intel.com>
---
drivers/net/iavf/iavf_rxtx.c | 40 +++++++++++++++++++-----------------
drivers/net/iavf/iavf_rxtx.h | 2 --
2 files changed, 21 insertions(+), 21 deletions(-)
Comments
> -----Original Message-----
> From: Steve Yang <stevex.yang@intel.com>
> Sent: Monday, February 28, 2022 5:49 PM
> To: dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Yang,
> SteveX <stevex.yang@intel.com>; stable@dpdk.org
> Subject: [PATCH v1] net/iavf: fix function pointer in multi-process
>
> This patch uses the index value to call the function, instead of the function
> pointer assignment to save the selection of Receive Flex Descriptor profile ID.
>
> Otherwise the secondary process will run with wrong function address from
> primary process.
>
> Fixes: 12b435bf8f2f ("net/iavf: support flex desc metadata extraction")
> Cc: stable@dpdk.org
>
> Signed-off-by: Steve Yang <stevex.yang@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
@@ -475,54 +475,56 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
#endif
}
+static const
+iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [IAVF_RXDID_COMMS_AUX_VLAN] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+ [IAVF_RXDID_COMMS_AUX_IPV4] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+ [IAVF_RXDID_COMMS_AUX_IPV6] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+ [IAVF_RXDID_COMMS_AUX_IPV6_FLOW] =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+ [IAVF_RXDID_COMMS_AUX_TCP] = iavf_rxd_to_pkt_fields_by_comms_aux_v1,
+ [IAVF_RXDID_COMMS_AUX_IP_OFFSET] =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+ [IAVF_RXDID_COMMS_IPSEC_CRYPTO] =
+ iavf_rxd_to_pkt_fields_by_comms_aux_v2,
+ [IAVF_RXDID_COMMS_OVS_1] = iavf_rxd_to_pkt_fields_by_comms_ovs,
+};
+
static void
iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case IAVF_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case IAVF_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case IAVF_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case IAVF_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag =
rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case IAVF_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case IAVF_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag =
rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
rxq->xtr_ol_flag =
rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
- rxq->rxd_to_pkt_fields =
- iavf_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case IAVF_RXDID_COMMS_OVS_1:
- rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
break;
default:
/* update this according to the RXDID for FLEX_DESC_NONE */
- rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
break;
}
@@ -1483,7 +1485,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
&rxq->stats.ipsec_crypto);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
@@ -1627,7 +1629,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
&rxq->stats.ipsec_crypto);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
first_seg->ol_flags |= pkt_flags;
@@ -1885,7 +1887,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
&rxq->stats.ipsec_crypto);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -217,8 +217,6 @@ struct iavf_rx_queue {
uint8_t proto_xtr; /* protocol extraction type */
uint64_t xtr_ol_flag;
/* flexible descriptor metadata extraction offload flag */
- iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
- /* handle flexible descriptor by RXDID */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
};