From patchwork Fri Sep 26 06:03:19 2014
X-Patchwork-Submitter: Jingjing Wu
X-Patchwork-Id: 541
From: Jingjing Wu
To: dev@dpdk.org
Date: Fri, 26 Sep 2014 14:03:19 +0800
Message-Id: <1411711418-12881-2-git-send-email-jingjing.wu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1411711418-12881-1-git-send-email-jingjing.wu@intel.com>
References: <1411711418-12881-1-git-send-email-jingjing.wu@intel.com>
Subject: [dpdk-dev] [PATCH v3 01/20] i40e: set up and initialize flow director
List-Id: patches and discussions about DPDK

Set up Fortville resources to support flow director. This includes:
 - allocate and set up queue pair 0 for flow director
 - create the flow director VSI
 - reserve a memzone for the flow director programming packet

Signed-off-by: Jingjing Wu
Acked-by: Chen Jing D(Mark)
Acked-by: Helin Zhang
---
 lib/librte_pmd_i40e/Makefile      |   2 +
 lib/librte_pmd_i40e/i40e_ethdev.c |  77 +++++++++++--
 lib/librte_pmd_i40e/i40e_ethdev.h |  30 +++++-
 lib/librte_pmd_i40e/i40e_fdir.c   | 220 ++++++++++++++++++++++++++++++++++++++
 lib/librte_pmd_i40e/i40e_rxtx.c   | 127 ++++++++++++++++++++++
 5 files changed, 444 insertions(+), 12 deletions(-)
 create mode 100644 lib/librte_pmd_i40e/i40e_fdir.c

diff --git a/lib/librte_pmd_i40e/Makefile b/lib/librte_pmd_i40e/Makefile
index 4b31675..fdb9e26 100644
--- a/lib/librte_pmd_i40e/Makefile
+++ b/lib/librte_pmd_i40e/Makefile
@@ -87,6 +87,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+
 # this lib depends upon:
 DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
 DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index a00d6ca..d0413cb 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -779,6 +779,12 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	i40e_vsi_queues_bind_intr(vsi);
 	i40e_vsi_enable_queues_intr(vsi);
 
+	/* enable FDIR MSIX interrupt */
+	if (pf->flags & I40E_FLAG_FDIR) {
+		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi);
+		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
+	}
+
 	/* Enable all queues which have been configured */
 	ret = i40e_vsi_switch_queues(vsi, TRUE);
 	if (ret != I40E_SUCCESS) {
@@ -2587,16 +2593,30 @@ i40e_vsi_setup(struct i40e_pf *pf,
 	case I40E_VSI_SRIOV :
 		vsi->nb_qps = pf->vf_nb_qps;
 		break;
+	case I40E_VSI_FDIR:
+		vsi->nb_qps = pf->fdir_nb_qps;
+		break;
 	default:
 		goto fail_mem;
 	}
-	ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
-				vsi->seid, ret);
-		goto fail_mem;
-	}
-	vsi->base_queue = ret;
+	/*
+	 * The filter status descriptor is reported in rx queue 0, while the
+	 * tx queue for fdir filter programming has no such constraint and
+	 * can be any queue.
+	 * To simplify, the FDIR VSI uses queue pair 0.  To guarantee it gets
+	 * queue pair 0, queue allocation needs to be done before this
+	 * function is called.
+	 */
+	if (type != I40E_VSI_FDIR) {
+		ret = i40e_res_pool_alloc(&pf->qp_pool, vsi->nb_qps);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "VSI %d allocate queue failed %d",
+					vsi->seid, ret);
+			goto fail_mem;
+		}
+		vsi->base_queue = ret;
+	} else
+		vsi->base_queue = I40E_FDIR_QUEUE_ID;
 
 	/* VF has MSIX interrupt in VF range, don't allocate here */
 	if (type != I40E_VSI_SRIOV) {
@@ -2728,8 +2748,24 @@ i40e_vsi_setup(struct i40e_pf *pf,
 		 * Since VSI is not created yet, only configure parameter,
 		 * will add vsi below.
 		 */
-	}
-	else {
+	} else if (type == I40E_VSI_FDIR) {
+		vsi->uplink_seid = uplink_vsi->uplink_seid;
+		ctxt.pf_num = hw->pf_id;
+		ctxt.vf_num = 0;
+		ctxt.uplink_seid = vsi->uplink_seid;
+		ctxt.connection_type = 0x1; /* regular data port */
+		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+		ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
+						I40E_DEFAULT_TCMAP);
+		if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Failed to configure "
+					"TC queue mapping\n");
+			goto fail_msix_alloc;
+		}
+		ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+		ctxt.info.valid_sections |=
+			rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
+	} else {
 		PMD_DRV_LOG(ERR, "VSI: Not support other type VSI yet");
 		goto fail_msix_alloc;
 	}
@@ -2915,8 +2951,16 @@ i40e_pf_setup(struct i40e_pf *pf)
 		PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
 		return ret;
 	}
-
-	/* VSI setup */
+	if (pf->flags & I40E_FLAG_FDIR) {
+		/* allocate the queue first, so FDIR gets queue pair 0 */
+		ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
+		if (ret != I40E_FDIR_QUEUE_ID) {
+			PMD_DRV_LOG(ERR, "queue allocation fails for FDIR:"
+					" ret = %d", ret);
+			pf->flags &= ~I40E_FLAG_FDIR;
+		}
+	}
+	/* main VSI setup */
 	vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, NULL, 0);
 	if (!vsi) {
 		PMD_DRV_LOG(ERR, "Setup of main vsi failed");
@@ -2926,9 +2970,20 @@ i40e_pf_setup(struct i40e_pf *pf)
 	dev_data->nb_rx_queues = vsi->nb_qps;
 	dev_data->nb_tx_queues = vsi->nb_qps;
 
+	/* setup FDIR after main vsi created */
+	if (pf->flags & I40E_FLAG_FDIR) {
+		ret = i40e_fdir_setup(pf);
+		if (ret != I40E_SUCCESS) {
+			PMD_DRV_LOG(ERR, "Failed to setup flow director\n");
+			pf->flags &= ~I40E_FLAG_FDIR;
+		}
+	}
+
 	/* Configure filter control */
 	memset(&settings, 0, sizeof(settings));
 	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
+	if (pf->flags & I40E_FLAG_FDIR)
+		settings.enable_fdir = TRUE;
 	/* Enable ethtype and macvlan filters */
 	settings.enable_ethtype = TRUE;
 	settings.enable_macvlan = TRUE;
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h
index 64deef2..a2b1578 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.h
+++ b/lib/librte_pmd_i40e/i40e_ethdev.h
@@ -46,11 +46,12 @@
 /* number of VSIs and queue default setting */
 #define I40E_MAX_QP_NUM_PER_VF    16
 #define I40E_DEFAULT_QP_NUM_VMDQ  64
-#define I40E_DEFAULT_QP_NUM_FDIR  64
+#define I40E_DEFAULT_QP_NUM_FDIR  1
 #define I40E_UINT32_BIT_SIZE      (CHAR_BIT * sizeof(uint32_t))
 #define I40E_VFTA_SIZE            (4096 / I40E_UINT32_BIT_SIZE)
 /* Default TC traffic in case DCB is not enabled */
 #define I40E_DEFAULT_TCMAP        0x1
+#define I40E_FDIR_QUEUE_ID        0
 
 /* i40e flags */
 #define I40E_FLAG_RSS             (1ULL << 0)
@@ -189,6 +190,27 @@ struct i40e_pf_vf {
 };
 
 /*
+ * A structure used to define fields of FDIR related info.
+ */
+struct i40e_fdir_info {
+	struct i40e_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
+	uint16_t match_counter_index;  /* statistic counter index used for fdir */
+	struct i40e_tx_queue *txq;
+	struct i40e_rx_queue *rxq;
+	void *prg_pkt;                 /* memory for fdir program packet */
+	uint64_t dma_addr;             /* physical address of packet memory */
+	/*
+	 * The rule of how the byte stream is extracted as flexible payload:
+	 * for each payload layer, the setting can have up to three elements.
+	 */
+	struct {
+		uint8_t offset;        /* offset in words from the beginning of payload */
+		uint8_t size;          /* size in words */
+	} flex_set[3][3];
+
+};
+
+/*
  * Structure to store private data specific for PF instance.
  */
 struct i40e_pf {
@@ -216,6 +238,7 @@ struct i40e_pf {
 	uint16_t vmdq_nb_qps; /* The number of queue pairs of VMDq */
 	uint16_t vf_nb_qps; /* The number of queue pairs of VF */
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
+	struct i40e_fdir_info fdir; /* flow director info */
 };
 
 enum pending_msg {
@@ -312,6 +335,11 @@ void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi);
 int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
 			   struct i40e_vsi_vlan_pvid_info *info);
 int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
+enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf,
+					unsigned int socket_id);
+enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf,
+					unsigned int socket_id);
+int i40e_fdir_setup(struct i40e_pf *pf);
 
 /* I40E_DEV_PRIVATE_TO */
 #define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c
new file mode 100644
index 0000000..5872494
--- /dev/null
+++ b/lib/librte_pmd_i40e/i40e_fdir.c
@@ -0,0 +1,220 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "i40e_logs.h"
+#include "i40e/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#define I40E_FDIR_MZ_NAME    "FDIR_MEMZONE"
+#define I40E_FDIR_PKT_LEN    512
+#define I40E_COUNTER_PF      2
+/* Statistic counter index for one pf */
+#define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
+
+static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
+
+static int
+i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
+{
+	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
+	struct i40e_hmc_obj_rxq rx_ctx;
+	int err = I40E_SUCCESS;
+
+	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+	/* Init the RX queue in hardware */
+	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rx_ctx.hbuff = 0;
+	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
+	rx_ctx.qlen = rxq->nb_rx_desc;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+	rx_ctx.dsize = 1;
+#endif
+	rx_ctx.dtype = i40e_header_split_none;
+	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
+	rx_ctx.rxmax = ETHER_MAX_LEN;
+	rx_ctx.tphrdesc_ena = 1;
+	rx_ctx.tphwdesc_ena = 1;
+	rx_ctx.tphdata_ena = 1;
+	rx_ctx.tphhead_ena = 1;
+	rx_ctx.lrxqthresh = 2;
+	rx_ctx.crcstrip = 0;
+	rx_ctx.l2tsel = 1;
+	rx_ctx.showiv = 1;
+	rx_ctx.prefena = 1;
+
+	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
+	if (err != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
+		return err;
+	}
+	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
+	if (err != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
+		return err;
+	}
+	rxq->qrx_tail = hw->hw_addr +
+		I40E_QRX_TAIL(rxq->vsi->base_queue);
+
+	rte_wmb();
+	/* Init the RX tail register. */
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+
+	return err;
+}
+
+/*
+ * i40e_fdir_setup - reserve and initialize the Flow Director resources
+ * @pf: board private structure
+ */
+int
+i40e_fdir_setup(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi;
+	int err = I40E_SUCCESS;
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz = NULL;
+	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+
+	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
+			" num_filters_best_effort = %u.\n",
+			hw->func_caps.fd_filters_guaranteed,
+			hw->func_caps.fd_filters_best_effort);
+
+	vsi = pf->fdir.fdir_vsi;
+	if (vsi) {
+		PMD_DRV_LOG(ERR, "FDIR vsi pointer needs "
+				"to be null before creation.");
+		return I40E_ERR_BAD_PTR;
+	}
+	/* make new FDIR VSI */
+	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
+	if (!vsi) {
+		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
+		return I40E_ERR_NO_AVAILABLE_VSI;
+	}
+	pf->fdir.fdir_vsi = vsi;
+
+	/* FDIR tx queue setup */
+	err = i40e_fdir_setup_tx_resources(pf, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
+		goto fail_setup_tx;
+	}
+
+	/* FDIR rx queue setup */
+	err = i40e_fdir_setup_rx_resources(pf, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
+		goto fail_setup_rx;
+	}
+
+	err = i40e_tx_queue_init(pf->fdir.txq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
+		goto fail_mem;
+	}
+
+	/* need to switch on the queue before dev start */
+	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to do FDIR TX switch on.");
+		goto fail_mem;
+	}
+
+	/* Init the rx queue in hardware */
+	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
+		goto fail_mem;
+	}
+
+	/* switch on rx queue */
+	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
+		goto fail_mem;
+	}
+
+	/* reserve memory for the fdir programming packet */
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
+			eth_dev->driver->pci_drv.name,
+			I40E_FDIR_MZ_NAME,
+			eth_dev->data->port_id);
+	mz = rte_memzone_lookup(z_name);
+	if (!mz) {
+		mz = rte_memzone_reserve(z_name,
+				I40E_FDIR_PKT_LEN,
+				rte_socket_id(),
+				0);
+		if (!mz) {
+			PMD_DRV_LOG(ERR, "Cannot init memzone for "
+					"flow director program packet.");
+			err = I40E_ERR_NO_MEMORY;
+			goto fail_mem;
+		}
+	}
+	pf->fdir.prg_pkt = mz->addr;
+	pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
+	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
+	PMD_DRV_LOG(INFO, "FDIR set up successfully, with programming queue %u.\n",
+			vsi->base_queue);
+	return I40E_SUCCESS;
+
+fail_mem:
+	i40e_dev_rx_queue_release(pf->fdir.rxq);
+	pf->fdir.rxq = NULL;
+fail_setup_rx:
+	i40e_dev_tx_queue_release(pf->fdir.txq);
+	pf->fdir.txq = NULL;
+fail_setup_tx:
+	i40e_vsi_release(vsi);
+	pf->fdir.fdir_vsi = NULL;
+	return err;
+}
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 099699c..4435367 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -2107,6 +2107,8 @@ i40e_tx_queue_init(struct i40e_tx_queue *txq)
 	tx_ctx.base = txq->tx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
 	tx_ctx.qlen = txq->nb_tx_desc;
 	tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[0]);
+	if (vsi->type == I40E_VSI_FDIR)
+		tx_ctx.fd_ena = TRUE;
 
 	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
 	if (err != I40E_SUCCESS) {
@@ -2323,3 +2325,128 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
 		i40e_reset_rx_queue(dev->data->rx_queues[i]);
 	}
 }
+
+enum i40e_status_code
+i40e_fdir_setup_tx_resources(struct i40e_pf *pf,
+			unsigned int socket_id)
+{
+	struct i40e_tx_queue *txq;
+	const struct rte_memzone *tz = NULL;
+	uint32_t ring_size;
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+
+#define I40E_FDIR_NUM_TX_DESC  I40E_MIN_RING_DESC
+	if (!pf) {
+		PMD_DRV_LOG(ERR, "PF is not available");
+		return I40E_ERR_BAD_PTR;
+	}
+
+	/* Allocate the TX queue data structure. */
+	txq = rte_zmalloc_socket("i40e fdir tx queue",
+				sizeof(struct i40e_tx_queue),
+				CACHE_LINE_SIZE,
+				socket_id);
+	if (!txq) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+				"tx queue structure\n");
+		return I40E_ERR_NO_MEMORY;
+	}
+
+	/* Allocate TX hardware ring descriptors. */
+	ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
+	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+	tz = i40e_ring_dma_zone_reserve(dev,
+					"fdir_tx_ring",
+					I40E_FDIR_QUEUE_ID,
+					ring_size,
+					socket_id);
+	if (!tz) {
+		i40e_dev_tx_queue_release(txq);
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX\n");
+		return I40E_ERR_NO_MEMORY;
+	}
+
+	txq->nb_tx_desc = I40E_FDIR_NUM_TX_DESC;
+	txq->queue_id = I40E_FDIR_QUEUE_ID;
+	txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+	txq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#else
+	txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
+#endif
+	txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+	/*
+	 * No need to allocate software ring and reset for the fdir
+	 * program queue, just mark the queue as already configured.
+	 */
+	txq->q_set = TRUE;
+	pf->fdir.txq = txq;
+
+	return I40E_SUCCESS;
+}
+
+enum i40e_status_code
+i40e_fdir_setup_rx_resources(struct i40e_pf *pf,
+			unsigned int socket_id)
+{
+	struct i40e_rx_queue *rxq;
+	const struct rte_memzone *rz = NULL;
+	uint32_t ring_size;
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+
+#define I40E_FDIR_NUM_RX_DESC  I40E_MIN_RING_DESC
+	if (!pf) {
+		PMD_DRV_LOG(ERR, "PF is not available");
+		return I40E_ERR_BAD_PTR;
+	}
+
+	/* Allocate the RX queue data structure. */
+	rxq = rte_zmalloc_socket("i40e fdir rx queue",
+				sizeof(struct i40e_rx_queue),
+				CACHE_LINE_SIZE,
+				socket_id);
+	if (!rxq) {
+		PMD_DRV_LOG(ERR, "Failed to allocate memory for "
+				"rx queue structure\n");
+		return I40E_ERR_NO_MEMORY;
+	}
+
+	/* Allocate RX hardware ring descriptors. */
+	ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
+	ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+
+	rz = i40e_ring_dma_zone_reserve(dev,
+					"fdir_rx_ring",
+					I40E_FDIR_QUEUE_ID,
+					ring_size,
+					socket_id);
+	if (!rz) {
+		i40e_dev_rx_queue_release(rxq);
+		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX\n");
+		return I40E_ERR_NO_MEMORY;
+	}
+
+	rxq->nb_rx_desc = I40E_FDIR_NUM_RX_DESC;
+	rxq->queue_id = I40E_FDIR_QUEUE_ID;
+	rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
+	rxq->vsi = pf->fdir.fdir_vsi;
+
+#ifdef RTE_LIBRTE_XEN_DOM0
+	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#else
+	rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
+#endif
+	rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
+
+	/*
+	 * No need to allocate software ring and reset for the fdir
+	 * rx queue, just mark the queue as already configured.
+	 */
+	rxq->q_set = TRUE;
+	pf->fdir.rxq = rxq;
+
+	return I40E_SUCCESS;
+}
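
---
For reference, not part of the patch itself: a minimal, hedged C sketch of the
lookup-then-reserve memzone idiom that i40e_fdir_setup() uses for the flow
director programming packet. The zone name "example_fdir_prg_pkt" is purely
illustrative; the 512-byte length mirrors I40E_FDIR_PKT_LEN above, and
mz->phys_addr is the field name of this DPDK era (later releases rename it).

#include <inttypes.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

int
main(int argc, char **argv)
{
	const struct rte_memzone *mz;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Reuse the zone if a previous reservation with this name already
	 * exists; otherwise reserve a fresh one on the local NUMA socket. */
	mz = rte_memzone_lookup("example_fdir_prg_pkt");
	if (mz == NULL)
		mz = rte_memzone_reserve("example_fdir_prg_pkt", 512,
					 rte_socket_id(), 0);
	if (mz == NULL) {
		printf("memzone reservation failed\n");
		return -1;
	}

	/* The virtual address is where the driver builds the programming
	 * packet; the physical address is what is handed to the hardware. */
	printf("zone at %p, phys 0x%" PRIx64 "\n",
	       mz->addr, (uint64_t)mz->phys_addr);
	return 0;
}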