From patchwork Wed Jun 30 06:53:43 2021
X-Patchwork-Submitter: "Xu, Ting"
X-Patchwork-Id: 95037
X-Patchwork-Delegate: qi.z.zhang@intel.com
From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com,
 qiming.yang@intel.com, Ting Xu
Date: Wed, 30 Jun 2021 14:53:43 +0800
Message-Id: <20210630065344.50352-7-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210630065344.50352-1-ting.xu@intel.com>
References: <20210601014034.36100-1-ting.xu@intel.com>
 <20210630065344.50352-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v4 6/7] net/iavf: check Tx packet with correct UP
 and queue

Add a check in the Tx packet preparation function to guarantee that a
packet with a specific user priority is distributed to the correct Tx
queue, according to the configured Tx queue TC mapping.

Signed-off-by: Ting Xu
---
 drivers/net/iavf/iavf.h      | 10 +++++++++
 drivers/net/iavf/iavf_rxtx.c | 43 ++++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_tm.c   | 13 +++++++++++
 3 files changed, 66 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index feb8337b55..b3bd078111 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -86,6 +86,8 @@
 
 #define IAVF_BITS_PER_BYTE 8
 
+#define IAVF_VLAN_TAG_PCP_OFFSET 13
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
@@ -165,6 +167,13 @@ struct iavf_tm_conf {
 	bool committed;
 };
 
+/* Struct to store queue TC mapping. Queue is continuous in one TC */
+struct iavf_qtc_map {
+	uint8_t tc;
+	uint16_t start_queue_id;
+	uint16_t queue_count;
+};
+
 /* Structure to store private data specific for VF instance. */
 struct iavf_info {
 	uint16_t num_queue_pairs;
@@ -213,6 +222,7 @@ struct iavf_info {
 	bool lv_enabled;
 
 	struct virtchnl_qos_cap_list *qos_cap;
+	struct iavf_qtc_map *qtc_map;
 	struct iavf_tm_conf tm_conf;
 };
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0361af0d85..eb6d83a165 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2342,14 +2342,49 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+/* Check if the packet with vlan user priority is transmitted in the
+ * correct queue.
+ */
+static int
+iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, uint8_t tc, struct rte_mbuf *m)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	uint16_t up;
+
+	up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
+
+	if (!(vf->qos_cap->cap[tc].tc_prio & BIT(up))) {
+		PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
+			   up, txq->queue_id);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	       uint16_t nb_pkts)
 {
 	int i, ret;
+	uint8_t tc = 0;
 	uint64_t ol_flags;
 	struct rte_mbuf *m;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id];
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	if (vf->tm_conf.committed) {
+		for (i = 0; i < vf->qos_cap->num_elem; i++) {
+			if (txq->queue_id >= vf->qtc_map[i].start_queue_id &&
+			    txq->queue_id < (vf->qtc_map[i].start_queue_id +
+			    vf->qtc_map[i].queue_count))
+				break;
+		}
+		tc = i;
+	}
 
 	for (i = 0; i < nb_pkts; i++) {
 		m = tx_pkts[i];
@@ -2385,6 +2420,14 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			rte_errno = -ret;
 			return i;
 		}
+
+		if (ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
+			ret = iavf_check_vlan_up2tc(txq, tc, m);
+			if (ret != 0) {
+				rte_errno = -ret;
+				return i;
+			}
+		}
 	}
 
 	return i;
diff --git a/drivers/net/iavf/iavf_tm.c b/drivers/net/iavf/iavf_tm.c
index a8fc142c89..185b37b970 100644
--- a/drivers/net/iavf/iavf_tm.c
+++ b/drivers/net/iavf/iavf_tm.c
@@ -653,6 +653,7 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	struct virtchnl_queue_tc_mapping *q_tc_mapping;
 	struct iavf_tm_node_list *queue_list = &vf->tm_conf.queue_list;
 	struct iavf_tm_node *tm_node;
+	struct iavf_qtc_map *qtc_map;
 	uint16_t size;
 	int index = 0, node_committed = 0;
 	int i, ret_val = IAVF_SUCCESS;
@@ -675,6 +676,7 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 	q_tc_mapping->vsi_id = vf->vsi.vsi_id;
 	q_tc_mapping->num_tc = vf->qos_cap->num_elem;
 	q_tc_mapping->num_queue_pairs = vf->num_queue_pairs;
+
 	TAILQ_FOREACH(tm_node, queue_list, node) {
 		if (tm_node->tc >= q_tc_mapping->num_tc) {
 			PMD_DRV_LOG(ERR, "TC%d is not enabled", tm_node->tc);
@@ -692,15 +694,26 @@ static int iavf_hierarchy_commit(struct rte_eth_dev *dev,
 		goto fail_clear;
 	}
 
+	/* store the queue TC mapping info */
+	qtc_map = rte_zmalloc("qtc_map",
+			      sizeof(struct iavf_qtc_map) * q_tc_mapping->num_tc, 0);
+	if (!qtc_map)
+		return IAVF_ERR_NO_MEMORY;
+
 	for (i = 0; i < q_tc_mapping->num_tc; i++) {
 		q_tc_mapping->tc[i].req.start_queue_id = index;
 		index += q_tc_mapping->tc[i].req.queue_count;
+		qtc_map[i].tc = i;
+		qtc_map[i].start_queue_id =
+			q_tc_mapping->tc[i].req.start_queue_id;
+		qtc_map[i].queue_count = q_tc_mapping->tc[i].req.queue_count;
 	}
 
 	ret_val = iavf_set_q_tc_map(dev, q_tc_mapping, size);
 	if (ret_val)
 		goto fail_clear;
 
+	vf->qtc_map = qtc_map;
 	vf->tm_conf.committed = true;
 	return ret_val;
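
[Editor's note] For readers following the logic, the new check reduces to
three steps: extract the PCP (user priority) from the top three bits of the
VLAN TCI, map the Tx queue id to its TC through the stored qtc_map, and test
the TC's priority bitmap. Below is a minimal standalone sketch of just that
logic, not driver code: the map entries and tc_prio bitmaps are made-up
stand-ins for vf->qtc_map and vf->qos_cap->cap[tc].tc_prio.

#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_PCP_OFFSET 13	/* PCP lives in the top 3 bits of the TCI */
#define BIT(n) (1U << (n))

struct qtc_map {
	uint8_t tc;
	uint16_t start_queue_id;
	uint16_t queue_count;
};

/* Illustrative mapping: TC0 owns queues 0-3, TC1 owns queues 4-7. */
static const struct qtc_map map[] = {
	{ .tc = 0, .start_queue_id = 0, .queue_count = 4 },
	{ .tc = 1, .start_queue_id = 4, .queue_count = 4 },
};

/* Illustrative per-TC priority bitmaps: TC0 carries UP 0-3, TC1 UP 4-7. */
static const uint8_t tc_prio[] = { 0x0F, 0xF0 };

/* The queue-to-TC lookup that iavf_prep_pkts runs once per burst.
 * (Like the driver loop, this assumes every queue falls inside some TC.) */
static uint8_t
queue_to_tc(uint16_t queue_id)
{
	uint8_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (queue_id >= map[i].start_queue_id &&
		    queue_id < map[i].start_queue_id + map[i].queue_count)
			break;
	return i;
}

/* Mirror of iavf_check_vlan_up2tc: reject a UP the queue's TC doesn't carry. */
static int
check_vlan_up2tc(uint16_t queue_id, uint16_t vlan_tci)
{
	uint16_t up = vlan_tci >> VLAN_TAG_PCP_OFFSET;
	uint8_t tc = queue_to_tc(queue_id);

	if (!(tc_prio[tc] & BIT(up))) {
		printf("UP %u not allowed on queue %u (TC%u)\n",
		       up, queue_id, tc);
		return -1;
	}
	return 0;
}

int
main(void)
{
	/* TCI 0x6005 -> PCP 3: allowed on queue 1 (TC0), rejected on queue 5. */
	printf("queue 1: %d\n", check_vlan_up2tc(1, 0x6005));	/* 0 */
	printf("queue 5: %d\n", check_vlan_up2tc(5, 0x6005));	/* -1 */
	return 0;
}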
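
[Editor's note] Since iavf_prep_pkts is wired up as the PMD's tx_pkt_prepare
callback, the check reaches applications through rte_eth_tx_prepare(): a
mis-queued UP shows up as a short return count with rte_errno set. A
hypothetical caller (port/queue/mbuf setup assumed elsewhere) might look like:

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Prepare a burst before transmit; log the first rejected packet, then
 * send only the packets that passed the PMD's checks. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);

	if (nb_prep < nb)
		printf("packet %u rejected: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}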