From patchwork Fri Sep 27 17:04:17 2019
X-Patchwork-Submitter: "Cao, Yahui"
X-Patchwork-Id: 59998
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Yahui Cao
To: Qiming Yang, Wenzhuo Lu
Cc: dev@dpdk.org, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao
Date: Sat, 28 Sep 2019 01:04:17 +0800
Message-Id: <20190927170424.71348-2-yahui.cao@intel.com>
In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com>
References: <20190924215243.81474-1-yahui.cao@intel.com>
 <20190927170424.71348-1-yahui.cao@intel.com>
Subject: [dpdk-dev] [PATCH v4 1/8] net/ice: enable flow director engine

From: Beilei Xing

Enable the flow director engine, including initialization and teardown:
- Control VSI creation and release.
- Queue pair allocation, setup and release.
- Programming packet creation and release.
- FDIR profile creation and release.
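
As an illustration of what the engine enables, here is a minimal,
self-contained sketch of driving it from an application through the
generic rte_flow API once the series is applied. It is not part of the
patch; the port ID, queue index and UDP port are illustrative
assumptions.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch only: match IPv4/UDP packets with destination port 4789 and
 * steer them to Rx queue 3. All values are assumptions. */
static int
fdir_queue_rule(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
                .hdr = { .dst_port = RTE_BE16(4789) },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xffff) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error flow_err;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err))
                return -1; /* no flow engine accepted the rule */
        return rte_flow_create(port_id, &attr, pattern, actions, &flow_err)
                ? 0 : -1;
}
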
Signed-off-by: Beilei Xing --- drivers/net/ice/Makefile | 1 + drivers/net/ice/ice_ethdev.c | 107 ++++++-- drivers/net/ice/ice_ethdev.h | 19 ++ drivers/net/ice/ice_fdir_filter.c | 225 +++++++++++++++++ drivers/net/ice/ice_rxtx.c | 403 ++++++++++++++++++++++++++++++ drivers/net/ice/ice_rxtx.h | 9 + drivers/net/ice/meson.build | 3 +- 7 files changed, 747 insertions(+), 20 deletions(-) create mode 100644 drivers/net/ice/ice_fdir_filter.c diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index 4a279f196..f7e185288 100644 --- a/drivers/net/ice/Makefile +++ b/drivers/net/ice/Makefile @@ -62,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c endif SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_fdir_filter.c ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) CC_AVX2_SUPPORT=1 else diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index 3abdaffbc..4747ef1a5 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -1387,10 +1387,20 @@ ice_pf_sw_init(struct rte_eth_dev *dev) else PMD_DRV_LOG(NOTICE, "Protocol extraction is disabled"); + if (hw->func_caps.fd_fltr_guar > 0 || + hw->func_caps.fd_fltr_best_effort > 0) { + pf->flags |= ICE_FLAG_FDIR; + pf->fdir_nb_qps = ICE_DEFAULT_QP_NUM_FDIR; + pf->lan_nb_qps = pf->lan_nb_qp_max - pf->fdir_nb_qps; + } else { + pf->fdir_nb_qps = 0; + } + pf->fdir_qp_offset = 0; + return 0; } -static struct ice_vsi * +struct ice_vsi * ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -1402,6 +1412,7 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) struct rte_ether_addr mac_addr; uint16_t max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; uint8_t tc_bitmap = 0x1; + uint16_t cfg; /* hw->num_lports = 1 in NIC mode */ vsi = rte_zmalloc(NULL, sizeof(struct ice_vsi), 0); @@ -1425,14 +1436,10 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) pf->flags |= ICE_FLAG_RSS_AQ_CAPABLE; memset(&vsi_ctx, 0, sizeof(vsi_ctx)); - /* base_queue in used in queue mapping of VSI add/update command. - * Suppose vsi->base_queue is 0 now, don't consider SRIOV, VMDQ - * cases in the first stage. Only Main VSI. 
- */ - vsi->base_queue = 0; switch (type) { case ICE_VSI_PF: vsi->nb_qps = pf->lan_nb_qps; + vsi->base_queue = 1; ice_vsi_config_default_rss(&vsi_ctx.info); vsi_ctx.alloc_from_pool = true; vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; @@ -1446,6 +1453,18 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) vsi_ctx.info.vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING; vsi_ctx.info.q_opt_rss = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF | ICE_AQ_VSI_Q_OPT_RSS_TPLZ; + + /* FDIR */ + cfg = ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); + cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; + vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); + vsi_ctx.info.max_fd_fltr_dedicated = + rte_cpu_to_le_16(hw->func_caps.fd_fltr_guar); + vsi_ctx.info.max_fd_fltr_shared = + rte_cpu_to_le_16(hw->func_caps.fd_fltr_best_effort); + /* Enable VLAN/UP trip */ ret = ice_vsi_config_tc_queue_mapping(vsi, &vsi_ctx.info, @@ -1458,6 +1477,28 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) goto fail_mem; } + break; + case ICE_VSI_CTRL: + vsi->nb_qps = pf->fdir_nb_qps; + vsi->base_queue = ICE_FDIR_QUEUE_ID; + vsi_ctx.alloc_from_pool = true; + vsi_ctx.flags = ICE_AQ_VSI_TYPE_PF; + + cfg = ICE_AQ_VSI_PROP_FLOW_DIR_VALID; + vsi_ctx.info.valid_sections |= rte_cpu_to_le_16(cfg); + cfg = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE; + vsi_ctx.info.fd_options = rte_cpu_to_le_16(cfg); + vsi_ctx.info.sw_id = hw->port_info->sw_id; + ret = ice_vsi_config_tc_queue_mapping(vsi, + &vsi_ctx.info, + ICE_DEFAULT_TCMAP); + if (ret) { + PMD_INIT_LOG(ERR, + "tc queue mapping with vsi failed, " + "err = %d", + ret); + goto fail_mem; + } break; default: /* for other types of VSI */ @@ -1476,6 +1517,14 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) } vsi->msix_intr = ret; vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); + } else if (type == ICE_VSI_CTRL) { + ret = ice_res_pool_alloc(&pf->msix_pool, 1); + if (ret < 0) { + PMD_DRV_LOG(ERR, "VSI %d get heap failed %d", + vsi->vsi_id, ret); + } + vsi->msix_intr = ret; + vsi->nb_msix = 1; } else { vsi->msix_intr = 0; vsi->nb_msix = 0; @@ -1491,20 +1540,22 @@ ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type) pf->vsis_allocated = vsi_ctx.vsis_allocd; pf->vsis_unallocated = vsi_ctx.vsis_unallocated; - /* MAC configuration */ - rte_memcpy(pf->dev_addr.addr_bytes, - hw->port_info->mac.perm_addr, - ETH_ADDR_LEN); + if (type == ICE_VSI_PF) { + /* MAC configuration */ + rte_memcpy(pf->dev_addr.addr_bytes, + hw->port_info->mac.perm_addr, + ETH_ADDR_LEN); - rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN); - ret = ice_add_mac_filter(vsi, &mac_addr); - if (ret != ICE_SUCCESS) - PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); + rte_memcpy(&mac_addr, &pf->dev_addr, RTE_ETHER_ADDR_LEN); + ret = ice_add_mac_filter(vsi, &mac_addr); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to add dflt MAC filter"); - rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); - ret = ice_add_mac_filter(vsi, &mac_addr); - if (ret != ICE_SUCCESS) - PMD_INIT_LOG(ERR, "Failed to add MAC filter"); + rte_memcpy(&mac_addr, &broadcast, RTE_ETHER_ADDR_LEN); + ret = ice_add_mac_filter(vsi, &mac_addr); + if (ret != ICE_SUCCESS) + PMD_INIT_LOG(ERR, "Failed to add MAC filter"); + } /* At the beginning, only TC0. */ /* What we need here is the maximam number of the TX queues. 
@@ -1542,7 +1593,9 @@ ice_send_driver_ver(struct ice_hw *hw) static int ice_pf_setup(struct ice_pf *pf) { + struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi; + uint16_t unused; /* Clear all stats counters */ pf->offset_loaded = FALSE; @@ -1551,6 +1604,13 @@ ice_pf_setup(struct ice_pf *pf) memset(&pf->internal_stats, 0, sizeof(struct ice_eth_stats)); memset(&pf->internal_stats_offset, 0, sizeof(struct ice_eth_stats)); + /* force guaranteed filter pool for PF */ + ice_alloc_fd_guar_item(hw, &unused, + hw->func_caps.fd_fltr_guar); + /* force shared filter pool for PF */ + ice_alloc_fd_shrd_item(hw, &unused, + hw->func_caps.fd_fltr_best_effort); + vsi = ice_setup_vsi(pf, ICE_VSI_PF); if (!vsi) { PMD_INIT_LOG(ERR, "Failed to add vsi for PF"); @@ -1999,7 +2059,7 @@ ice_dev_init(struct rte_eth_dev *dev) return ret; } -static int +int ice_release_vsi(struct ice_vsi *vsi) { struct ice_hw *hw; @@ -2081,6 +2141,9 @@ ice_dev_stop(struct rte_eth_dev *dev) /* disable all queue interrupts */ ice_vsi_disable_queues_intr(main_vsi); + if (pf->fdir.fdir_vsi) + ice_vsi_disable_queues_intr(pf->fdir.fdir_vsi); + /* Clear all queues and release mbufs */ ice_clear_queues(dev); @@ -2424,6 +2487,12 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) /* Enable interrupts for all the queues */ ice_vsi_enable_queues_intr(vsi); + /* Enable FDIR MSIX interrupt */ + if (pf->fdir.fdir_vsi) { + ice_vsi_queues_bind_intr(pf->fdir.fdir_vsi); + ice_vsi_enable_queues_intr(pf->fdir.fdir_vsi); + } + rte_intr_enable(intr_handle); return 0; diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 478652cbb..39f8ab761 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -247,6 +247,17 @@ TAILQ_HEAD(ice_flow_list, rte_flow); struct ice_flow_parser_node; TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); +/** + * A structure used to define fields of a FDIR related info. 
+ */ +struct ice_fdir_info { + struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + struct ice_tx_queue *txq; + struct ice_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physical address of packet memory */ +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -267,6 +278,9 @@ struct ice_pf { uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ uint16_t base_queue; /* The base queue pairs index in the device */ uint8_t *proto_xtr; /* Protocol extraction type for all queues */ + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + struct ice_fdir_info fdir; /* flow director info */ struct ice_hw_port_stats stats_offset; struct ice_hw_port_stats stats; /* internal packet statistics, it should be excluded from the total */ @@ -352,6 +366,11 @@ struct ice_vsi_vlan_pvid_info { #define ICE_PF_TO_ETH_DEV(pf) \ (((struct ice_pf *)pf)->adapter->eth_dev) +struct ice_vsi * +ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type); +int +ice_release_vsi(struct ice_vsi *vsi); + static inline int ice_align_floor(int n) { diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c new file mode 100644 index 000000000..0fb3054f5 --- /dev/null +++ b/drivers/net/ice/ice_fdir_filter.c @@ -0,0 +1,225 @@ +#include <stdio.h> +#include <rte_flow.h> +#include "base/ice_fdir.h" +#include "base/ice_flow.h" +#include "base/ice_type.h" +#include "ice_ethdev.h" +#include "ice_rxtx.h" +#include "ice_generic_flow.h" + +static const struct rte_memzone * +ice_memzone_reserve(const char *name, uint32_t len, int socket_id) +{ + return rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, + ICE_RING_BASE_ALIGN); +} + +#define ICE_FDIR_MZ_NAME "FDIR_MEMZONE" + +static int +ice_fdir_prof_alloc(struct ice_hw *hw) +{ + enum ice_fltr_ptype ptype, fltr_ptype; + + if (!hw->fdir_prof) { + hw->fdir_prof = (struct ice_fd_hw_prof **) + ice_malloc(hw, ICE_FLTR_PTYPE_MAX * + sizeof(*hw->fdir_prof)); + if (!hw->fdir_prof) + return -ENOMEM; + } + for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) { + if (!hw->fdir_prof[ptype]) { + hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *) + ice_malloc(hw, sizeof(**hw->fdir_prof)); + if (!hw->fdir_prof[ptype]) + goto fail_mem; + } + } + return 0; + +fail_mem: + for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_ptype < ptype; + fltr_ptype++) + rte_free(hw->fdir_prof[fltr_ptype]); + rte_free(hw->fdir_prof); + return -ENOMEM; +} + +/* + * ice_fdir_setup - reserve and initialize the Flow Director resources + * @pf: board private structure + */ +static int +ice_fdir_setup(struct ice_pf *pf) +{ + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + const struct rte_memzone *mz = NULL; + char z_name[RTE_MEMZONE_NAMESIZE]; + struct ice_vsi *vsi; + int err = ICE_SUCCESS; + + if ((pf->flags & ICE_FLAG_FDIR) == 0) { + PMD_INIT_LOG(ERR, "HW doesn't support FDIR"); + return -ENOTSUP; + } + + PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u," + " fd_fltr_best_effort = %u.", + hw->func_caps.fd_fltr_guar, + hw->func_caps.fd_fltr_best_effort); + + if (pf->fdir.fdir_vsi) { + PMD_DRV_LOG(INFO, "FDIR initialization has been done."); + return ICE_SUCCESS; + } + + /* make new FDIR VSI */ + vsi = ice_setup_vsi(pf, ICE_VSI_CTRL); + if (!vsi) { + PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI."); + return -EINVAL;
+ } + pf->fdir.fdir_vsi = vsi; + + /*Fdir tx queue setup*/ + err = ice_fdir_setup_tx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources."); + goto fail_setup_tx; + } + + /*Fdir rx queue setup*/ + err = ice_fdir_setup_rx_resources(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources."); + goto fail_setup_rx; + } + + err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue."); + goto fail_mem; + } + + err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id); + if (err) { + PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue."); + goto fail_mem; + } + + /* reserve memory for the fdir programming packet */ + snprintf(z_name, sizeof(z_name), "ICE_%s_%d", + ICE_FDIR_MZ_NAME, + eth_dev->data->port_id); + mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY); + if (!mz) { + PMD_DRV_LOG(ERR, "Cannot init memzone for " + "flow director program packet."); + err = -ENOMEM; + goto fail_mem; + } + pf->fdir.prg_pkt = mz->addr; + pf->fdir.dma_addr = mz->iova; + + err = ice_fdir_prof_alloc(hw); + if (err) { + PMD_DRV_LOG(ERR, "Cannot allocate memory for " + "flow director profile."); + err = -ENOMEM; + goto fail_mem; + } + + PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.", + vsi->base_queue); + return ICE_SUCCESS; + +fail_mem: + ice_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; +fail_setup_rx: + ice_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; +fail_setup_tx: + ice_release_vsi(vsi); + pf->fdir.fdir_vsi = NULL; + return err; +} + +static void +ice_fdir_prof_free(struct ice_hw *hw) +{ + enum ice_fltr_ptype ptype; + + for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) + rte_free(hw->fdir_prof[ptype]); + + rte_free(hw->fdir_prof); +} + +/* + * ice_fdir_teardown - release the Flow Director resources + * @pf: board private structure + */ +static void +ice_fdir_teardown(struct ice_pf *pf) +{ + struct rte_eth_dev *eth_dev = pf->adapter->eth_dev; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi; + int err; + + vsi = pf->fdir.fdir_vsi; + if (!vsi) + return; + + err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id); + if (err) + PMD_DRV_LOG(ERR, "Failed to stop TX queue."); + + err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id); + if (err) + PMD_DRV_LOG(ERR, "Failed to stop RX queue."); + + ice_tx_queue_release(pf->fdir.txq); + pf->fdir.txq = NULL; + ice_rx_queue_release(pf->fdir.rxq); + pf->fdir.rxq = NULL; + ice_release_vsi(vsi); + pf->fdir.fdir_vsi = NULL; + ice_fdir_prof_free(hw); +} + +static int +ice_fdir_init(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + + return ice_fdir_setup(pf); +} + +static void +ice_fdir_uninit(struct ice_adapter *ad) +{ + struct ice_pf *pf = &ad->pf; + + ice_fdir_teardown(pf); +} + +static struct ice_flow_engine ice_fdir_engine = { + .init = ice_fdir_init, + .uninit = ice_fdir_uninit, + .type = ICE_FLOW_ENGINE_FDIR, +}; + +RTE_INIT(ice_fdir_engine_register) +{ + ice_register_flow_engine(&ice_fdir_engine); +} diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index af96c0f41..7ee01374a 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -513,6 +513,179 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +static enum ice_status +ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq) +{ + struct ice_vsi *vsi = rxq->vsi; + struct ice_hw *hw = ICE_VSI_TO_HW(vsi); + 
uint32_t rxdid = ICE_RXDID_COMMS_GENERIC; + struct ice_rlan_ctx rx_ctx; + enum ice_status err; + uint32_t regval; + + rxq->rx_hdr_len = 0; + rxq->rx_buf_len = 1024; + + memset(&rx_ctx, 0, sizeof(rx_ctx)); + + rx_ctx.base = rxq->rx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + rx_ctx.qlen = rxq->nb_rx_desc; + rx_ctx.dbuf = rxq->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S; + rx_ctx.dtype = 0; /* No Header Split mode */ +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + rx_ctx.dsize = 1; /* 32B descriptors */ +#endif + rx_ctx.rxmax = RTE_ETHER_MAX_LEN; + /* TPH: Transaction Layer Packet (TLP) processing hints */ + rx_ctx.tphrdesc_ena = 1; + rx_ctx.tphwdesc_ena = 1; + rx_ctx.tphdata_ena = 1; + rx_ctx.tphhead_ena = 1; + /* Low Receive Queue Threshold defined in 64 descriptors units. + * When the number of free descriptors goes below the lrxqthresh, + * an immediate interrupt is triggered. + */ + rx_ctx.lrxqthresh = 2; + /*default use 32 byte descriptor, vlan tag extract to L2TAG2(1st)*/ + rx_ctx.l2tsel = 1; + rx_ctx.showiv = 0; + rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; + + /* Enable Flexible Descriptors in the queue context which + * allows this driver to select a specific receive descriptor format + */ + regval = (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & + QRXFLXP_CNTXT_RXDID_IDX_M; + + /* increasing context priority to pick up profile ID; + * default is 0x01; setting to 0x03 to ensure profile + * is programming if prev context is of same priority + */ + regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & + QRXFLXP_CNTXT_RXDID_PRIO_M; + + ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval); + + err = ice_clear_rxq_ctx(hw, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to clear Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + err = ice_write_rxq_ctx(hw, &rx_ctx, rxq->reg_idx); + if (err) { + PMD_DRV_LOG(ERR, "Failed to write Lan Rx queue (%u) context", + rxq->queue_id); + return -EINVAL; + } + + rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx); + + /* Init the Rx tail register*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + return 0; +} + +int +ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + rxq = pf->fdir.rxq; + if (!rxq || !rxq->q_set) { + PMD_DRV_LOG(ERR, "FDIR RX queue %u not available or setup", + rx_queue_id); + return -EINVAL; + } + + err = ice_fdir_program_hw_rx_queue(rxq); + if (err) { + PMD_DRV_LOG(ERR, "fail to program FDIR RX queue %u", + rx_queue_id); + return -EIO; + } + + /* Init the RX tail register. 
*/ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + + err = ice_switch_rx_queue(hw, rxq->reg_idx, TRUE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u on", + rx_queue_id); + + ice_reset_rx_queue(rxq); + return -EINVAL; + } + + return 0; +} + +int +ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_tx_queue *txq; + int err; + struct ice_vsi *vsi; + struct ice_hw *hw; + struct ice_aqc_add_tx_qgrp txq_elem; + struct ice_tlan_ctx tx_ctx; + + PMD_INIT_FUNC_TRACE(); + + txq = pf->fdir.txq; + if (!txq || !txq->q_set) { + PMD_DRV_LOG(ERR, "FDIR TX queue %u is not available or setup", + tx_queue_id); + return -EINVAL; + } + + vsi = txq->vsi; + hw = ICE_VSI_TO_HW(vsi); + + memset(&txq_elem, 0, sizeof(txq_elem)); + memset(&tx_ctx, 0, sizeof(tx_ctx)); + txq_elem.num_txqs = 1; + txq_elem.txqs[0].txq_id = rte_cpu_to_le_16(txq->reg_idx); + + tx_ctx.base = txq->tx_ring_dma / ICE_QUEUE_BASE_ADDR_UNIT; + tx_ctx.qlen = txq->nb_tx_desc; + tx_ctx.pf_num = hw->pf_id; + tx_ctx.vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; + tx_ctx.src_vsi = vsi->vsi_id; + tx_ctx.port_num = hw->port_info->lport; + tx_ctx.tso_ena = 1; /* tso enable */ + tx_ctx.tso_qnum = txq->reg_idx; /* index for tso state structure */ + tx_ctx.legacy_int = 1; /* Legacy or Advanced Host Interface */ + + ice_set_ctx((uint8_t *)&tx_ctx, txq_elem.txqs[0].txq_ctx, + ice_tlan_ctx_info); + + txq->qtx_tail = hw->hw_addr + QTX_COMM_DBELL(txq->reg_idx); + + /* Init the Tx tail register*/ + ICE_PCI_REG_WRITE(txq->qtx_tail, 0); + + /* Fix me, we assume TC always 0 here */ + err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1, + &txq_elem, sizeof(txq_elem), NULL); + if (err) { + PMD_DRV_LOG(ERR, "Failed to add FDIR txq"); + return -EIO; + } + /* store the schedule node id */ + txq->q_teid = txq_elem.txqs[0].q_teid; + + return 0; +} + /* Free all mbufs for descriptors in tx queue */ static void _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq) @@ -618,6 +791,63 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) return 0; } +int +ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_rx_queue *rxq; + int err; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + rxq = pf->fdir.rxq; + + err = ice_switch_rx_queue(hw, rxq->reg_idx, FALSE); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch FDIR RX queue %u off", + rx_queue_id); + return -EINVAL; + } + ice_rx_queue_release_mbufs(rxq); + + return 0; +} + +int +ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_tx_queue *txq; + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_vsi *vsi = pf->main_vsi; + enum ice_status status; + uint16_t q_ids[1]; + uint32_t q_teids[1]; + uint16_t q_handle = tx_queue_id; + + txq = pf->fdir.txq; + if (!txq) { + PMD_DRV_LOG(ERR, "TX queue %u is not available", + tx_queue_id); + return -EINVAL; + } + vsi = txq->vsi; + + q_ids[0] = txq->reg_idx; + q_teids[0] = txq->q_teid; + + /* Fix me, we assume TC always 0 here */ + status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle, + q_ids, q_teids, ICE_NO_RESET, 0, NULL); + if (status != ICE_SUCCESS) { + PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue"); + return -EINVAL; + } + + ice_tx_queue_release_mbufs(txq); + + return 
0; +} + int ice_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1133,6 +1363,11 @@ ice_rxd_to_pkt_fields(struct rte_mbuf *mb, xtr->type = ice_rxdid_to_proto_xtr_type(desc->rxdid); xtr->magic = PROTO_XTR_MAGIC_ID; } + + if (desc->flow_id != 0xFFFFFFFF) { + mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id); + } #endif } @@ -1687,6 +1922,128 @@ ice_free_queues(struct rte_eth_dev *dev) dev->data->nb_tx_queues = 0; } +#define ICE_FDIR_NUM_TX_DESC ICE_MIN_RING_DESC +#define ICE_FDIR_NUM_RX_DESC ICE_MIN_RING_DESC + +int +ice_fdir_setup_tx_resources(struct ice_pf *pf) +{ + struct ice_tx_queue *txq; + const struct rte_memzone *tz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return -EINVAL; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the TX queue data structure. */ + txq = rte_zmalloc_socket("ice fdir tx queue", + sizeof(struct ice_tx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!txq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "tx queue structure."); + return -ENOMEM; + } + + /* Allocate TX hardware ring descriptors. */ + ring_size = sizeof(struct ice_tx_desc) * ICE_FDIR_NUM_TX_DESC; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + + tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring", + ICE_FDIR_QUEUE_ID, ring_size, + ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!tz) { + ice_tx_queue_release(txq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX."); + return -ENOMEM; + } + + txq->nb_tx_desc = ICE_FDIR_NUM_TX_DESC; + txq->queue_id = ICE_FDIR_QUEUE_ID; + txq->reg_idx = pf->fdir.fdir_vsi->base_queue; + txq->vsi = pf->fdir.fdir_vsi; + + txq->tx_ring_dma = tz->iova; + txq->tx_ring = (struct ice_tx_desc *)tz->addr; + /* + * don't need to allocate software ring and reset for the fdir + * program queue just set the queue has been configured. + */ + txq->q_set = TRUE; + pf->fdir.txq = txq; + + txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs; + + return ICE_SUCCESS; +} + +int +ice_fdir_setup_rx_resources(struct ice_pf *pf) +{ + struct ice_rx_queue *rxq; + const struct rte_memzone *rz = NULL; + uint32_t ring_size; + struct rte_eth_dev *dev; + + if (!pf) { + PMD_DRV_LOG(ERR, "PF is not available"); + return -EINVAL; + } + + dev = pf->adapter->eth_dev; + + /* Allocate the RX queue data structure. */ + rxq = rte_zmalloc_socket("ice fdir rx queue", + sizeof(struct ice_rx_queue), + RTE_CACHE_LINE_SIZE, + SOCKET_ID_ANY); + if (!rxq) { + PMD_DRV_LOG(ERR, "Failed to allocate memory for " + "rx queue structure."); + return -ENOMEM; + } + + /* Allocate RX hardware ring descriptors. */ + ring_size = sizeof(union ice_rx_flex_desc) * ICE_FDIR_NUM_RX_DESC; + ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); + + rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring", + ICE_FDIR_QUEUE_ID, ring_size, + ICE_RING_BASE_ALIGN, SOCKET_ID_ANY); + if (!rz) { + ice_rx_queue_release(rxq); + PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX."); + return -ENOMEM; + } + + rxq->nb_rx_desc = ICE_FDIR_NUM_RX_DESC; + rxq->queue_id = ICE_FDIR_QUEUE_ID; + rxq->reg_idx = pf->fdir.fdir_vsi->base_queue; + rxq->vsi = pf->fdir.fdir_vsi; + + rxq->rx_ring_dma = rz->iova; + memset(rz->addr, 0, ICE_FDIR_NUM_RX_DESC * + sizeof(union ice_rx_flex_desc)); + rxq->rx_ring = (union ice_rx_flex_desc *)rz->addr; + + /* + * Don't need to allocate software ring and reset for the fdir + * rx queue, just set the queue has been configured. 
+ */ + rxq->q_set = TRUE; + pf->fdir.rxq = rxq; + + rxq->rx_rel_mbufs = _ice_rx_queue_release_mbufs; + + return ICE_SUCCESS; +} + uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -3128,3 +3485,49 @@ ice_set_default_ptype_table(struct rte_eth_dev *dev) for (i = 0; i < ICE_MAX_PKT_TYPE; i++) ad->ptype_tbl[i] = ice_get_default_pkt_type(i); } + +#define ICE_FDIR_MAX_WAIT_US 10000 + +int +ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc) +{ + struct ice_tx_queue *txq = pf->fdir.txq; + volatile struct ice_fltr_desc *fdirdp; + volatile struct ice_tx_desc *txdp; + uint32_t td_cmd; + uint16_t i; + + fdirdp = (volatile struct ice_fltr_desc *) + (&txq->tx_ring[txq->tx_tail]); + fdirdp->qidx_compq_space_stat = fdir_desc->qidx_compq_space_stat; + fdirdp->dtype_cmd_vsi_fdid = fdir_desc->dtype_cmd_vsi_fdid; + + txdp = &txq->tx_ring[txq->tx_tail + 1]; + txdp->buf_addr = rte_cpu_to_le_64(pf->fdir.dma_addr); + td_cmd = ICE_TX_DESC_CMD_EOP | + ICE_TX_DESC_CMD_RS | + ICE_TX_DESC_CMD_DUMMY; + + txdp->cmd_type_offset_bsz = + ice_build_ctob(td_cmd, 0, ICE_FDIR_PKT_LEN, 0); + + txq->tx_tail += 2; + if (txq->tx_tail >= txq->nb_tx_desc) + txq->tx_tail = 0; + /* Update the tx tail register */ + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + for (i = 0; i < ICE_FDIR_MAX_WAIT_US; i++) { + if ((txdp->cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) == + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + break; + rte_delay_us(1); + } + if (i >= ICE_FDIR_MAX_WAIT_US) { + PMD_DRV_LOG(ERR, + "Failed to program FDIR filter: time out to get DD on tx queue."); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 25b3822df..c29a2fc45 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -37,6 +37,8 @@ #define ICE_TX_MAX_FREE_BUF_SZ 64 #define ICE_DESCS_PER_LOOP 4 +#define ICE_FDIR_PKT_LEN 512 + typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); @@ -149,10 +151,16 @@ int ice_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); int ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); int ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); int ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int ice_fdir_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id); +int ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); void ice_rx_queue_release(void *rxq); void ice_tx_queue_release(void *txq); void ice_clear_queues(struct rte_eth_dev *dev); void ice_free_queues(struct rte_eth_dev *dev); +int ice_fdir_setup_tx_resources(struct ice_pf *pf); +int ice_fdir_setup_rx_resources(struct ice_pf *pf); uint16_t ice_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); uint16_t ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, @@ -190,4 +198,5 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); #endif /* _ICE_RXTX_H_ */ diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index 6828170a9..908a2cab7 100644 --- a/drivers/net/ice/meson.build +++ 
b/drivers/net/ice/meson.build @@ -10,7 +10,8 @@ sources = files( 'ice_ethdev.c', 'ice_rxtx.c', 'ice_switch_filter.c', - 'ice_generic_flow.c' + 'ice_generic_flow.c', + 'ice_fdir_filter.c' ) deps += ['hash']

From patchwork Fri Sep 27 17:04:18 2019
X-Patchwork-Submitter: "Cao, Yahui"
X-Patchwork-Id: 59999
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Yahui Cao
To: Qiming Yang, Wenzhuo Lu
Cc: dev@dpdk.org, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao
Date: Sat, 28 Sep 2019 01:04:18 +0800
Message-Id: <20190927170424.71348-3-yahui.cao@intel.com>
In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com>
References: <20190924215243.81474-1-yahui.cao@intel.com>
 <20190927170424.71348-1-yahui.cao@intel.com>
Subject: [dpdk-dev] [PATCH v4 2/8] net/ice: configure HW FDIR rule

From: Beilei Xing

This patch adds a HW FDIR rule to the FDIR HW table without adding an FDIR filter: it programs the flow profile (input set) and the flow entries that later filters rely on.
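
The core of this patch is translating an input-set bitmap into the
ordered field list that gets programmed into a flow profile (see
ice_fdir_input_set_parse() and ice_fdir_input_set_conf() in the diff
below). The following standalone sketch mirrors that translation with
hypothetical enums and bit values, so the sentinel-terminated field
array logic can be tried in isolation:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's field enum and ICE_INSET_*
 * bits; only the translation logic itself is taken from the patch. */
enum fld { FLD_IPV4_SA, FLD_IPV4_DA, FLD_UDP_DST_PORT, FLD_MAX };

#define INSET_IPV4_SRC (1ULL << 0)
#define INSET_IPV4_DST (1ULL << 1)
#define INSET_UDP_DST  (1ULL << 2)

static const struct { uint64_t inset; enum fld fld; } inset_map[] = {
        { INSET_IPV4_SRC, FLD_IPV4_SA },
        { INSET_IPV4_DST, FLD_IPV4_DA },
        { INSET_UDP_DST,  FLD_UDP_DST_PORT },
};

int main(void)
{
        uint64_t input_set = INSET_IPV4_SRC | INSET_UDP_DST;
        enum fld field[FLD_MAX];
        size_t i, j = 0;

        /* Pre-fill with the sentinel, as the driver does with
         * ICE_FLOW_FIELD_IDX_MAX. */
        for (i = 0; i < FLD_MAX; i++)
                field[i] = FLD_MAX;
        /* Collect every field whose input-set bits are fully present. */
        for (i = 0; i < sizeof(inset_map) / sizeof(inset_map[0]); i++)
                if ((input_set & inset_map[i].inset) == inset_map[i].inset)
                        field[j++] = inset_map[i].fld;
        /* The profile-programming loop then walks to the sentinel. */
        for (i = 0; field[i] != FLD_MAX; i++)
                printf("program flow field %d\n", (int)field[i]);
        return 0;
}
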
Signed-off-by: Beilei Xing --- drivers/net/ice/ice_ethdev.h | 1 + drivers/net/ice/ice_fdir_filter.c | 255 +++++++++++++++++++++++++++++- 2 files changed, 255 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 39f8ab761..e85374946 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -281,6 +281,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; struct ice_hw_port_stats stats; /* internal packet statistics, it should be excluded from the total */ diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 0fb3054f5..81a4f1a97 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -164,6 +164,56 @@ ice_fdir_prof_free(struct ice_hw *hw) rte_free(hw->fdir_prof); } +/* Remove a profile for some filter type */ +static void +ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fd_hw_prof *hw_prof; + uint64_t prof_id; + uint16_t vsi_num; + int i; + + if (!hw->fdir_prof || !hw->fdir_prof[ptype]) + return; + + hw_prof = hw->fdir_prof[ptype]; + + prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX; + for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) { + if (hw_prof->entry_h[i][is_tunnel]) { + vsi_num = ice_get_hw_vsi_num(hw, + hw_prof->vsi_h[i]); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, + vsi_num, ptype); + ice_flow_rem_entry(hw, + hw_prof->entry_h[i][is_tunnel]); + hw_prof->entry_h[i][is_tunnel] = 0; + } + } + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + rte_free(hw_prof->fdir_seg[is_tunnel]); + hw_prof->fdir_seg[is_tunnel] = NULL; + + for (i = 0; i < hw_prof->cnt; i++) + hw_prof->vsi_h[i] = 0; + pf->hw_prof_cnt[ptype][is_tunnel] = 0; +} + +/* Remove all created profiles */ +static void +ice_fdir_prof_rm_all(struct ice_pf *pf) +{ + enum ice_fltr_ptype ptype; + + for (ptype = ICE_FLTR_PTYPE_NONF_NONE; + ptype < ICE_FLTR_PTYPE_MAX; + ptype++) { + ice_fdir_prof_rm(pf, ptype, false); + ice_fdir_prof_rm(pf, ptype, true); + } +} + /* * ice_fdir_teardown - release the Flow Director resources * @pf: board private structure @@ -192,9 +242,212 @@ ice_fdir_teardown(struct ice_pf *pf) pf->fdir.txq = NULL; ice_rx_queue_release(pf->fdir.rxq); pf->fdir.rxq = NULL; + ice_fdir_prof_rm_all(pf); + ice_fdir_prof_free(hw); ice_release_vsi(vsi); pf->fdir.fdir_vsi = NULL; - ice_fdir_prof_free(hw); +} + +static int +ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi, + struct ice_vsi *ctrl_vsi, + struct ice_flow_seg_info *seg, + enum ice_fltr_ptype ptype, + bool is_tunnel) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + enum ice_flow_dir dir = ICE_FLOW_RX; + struct ice_flow_seg_info *ori_seg; + struct ice_fd_hw_prof *hw_prof; + struct ice_flow_prof *prof; + uint64_t entry_1 = 0; + uint64_t entry_2 = 0; + uint16_t vsi_num; + int ret; + uint64_t prof_id; + + hw_prof = hw->fdir_prof[ptype]; + ori_seg = hw_prof->fdir_seg[is_tunnel]; + if (ori_seg) { + if (!memcmp(ori_seg, seg, sizeof(*seg))) + return -EAGAIN; + if (hw->fdir_fltr_cnt[ptype]) + return -EINVAL; + + ice_fdir_prof_rm(pf, ptype, is_tunnel); + } + + prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX; + ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg, + (is_tunnel) ? 
2 : 1, NULL, 0, &prof); + if (ret) + return ret; + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx, + vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry_1); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.", + ptype); + goto err_add_prof; + } + ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx, + ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL, + seg, NULL, 0, &entry_2); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.", + ptype); + goto err_add_entry; + } + + pf->hw_prof_cnt[ptype][is_tunnel] = 0; + hw_prof->cnt = 0; + hw_prof->fdir_seg[is_tunnel] = seg; + hw_prof->vsi_h[hw_prof->cnt] = vsi->idx; + hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1; + pf->hw_prof_cnt[ptype][is_tunnel]++; + hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx; + hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2; + pf->hw_prof_cnt[ptype][is_tunnel]++; + + return ret; + +err_add_entry: + vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); + ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id); + ice_flow_rem_entry(hw, entry_1); +err_add_prof: + ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id); + + return ret; +} + +static void +ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) +{ + uint32_t i, j; + + struct ice_inset_map { + uint64_t inset; + enum ice_flow_field fld; + }; + static const struct ice_inset_map ice_inset_map[] = { + {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA}, + {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, + {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, + {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP}, + {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL}, + {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT}, + {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA}, + {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA}, + {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP}, + {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT}, + {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL}, + {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT}, + {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT}, + {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT}, + {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, + {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, + {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + }; + + for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) { + if ((inset & ice_inset_map[i].inset) == + ice_inset_map[i].inset) + field[j++] = ice_inset_map[i].fld; + } +} + +static int __rte_unused +ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, + uint64_t input_set, bool is_tunnel) +{ + struct ice_flow_seg_info *seg, *seg_tun; + enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX]; + int i, ret; + + if (!input_set) + return -EINVAL; + + seg = (struct ice_flow_seg_info *) + ice_malloc(hw, sizeof(*seg)); + if (!seg) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + return -ENOMEM; + } + + for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++) + field[i] = ICE_FLOW_FIELD_IDX_MAX; + ice_fdir_input_set_parse(input_set, field); + + switch (flow) { + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + 
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6); + break; + case ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6); + break; + default: + PMD_DRV_LOG(ERR, "not supported filter type."); + break; + } + + for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) { + ice_flow_set_fld(seg, field[i], + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + if (!is_tunnel) { + ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, + seg, flow, false); + } else { + seg_tun = (struct ice_flow_seg_info *) + ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX); + if (!seg_tun) { + PMD_DRV_LOG(ERR, "No memory can be allocated"); + rte_free(seg); + return -ENOMEM; + } + rte_memcpy(&seg_tun[1], seg, sizeof(*seg)); + ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi, + seg_tun, flow, true); + } + + if (!ret) { + return ret; + } else if (ret < 0) { + rte_free(seg); + if (is_tunnel) + rte_free(seg_tun); + return (ret == -EAGAIN) ? 0 : ret; + } else { + return ret; + } } static int

From patchwork Fri Sep 27 17:04:19 2019
X-Patchwork-Submitter: "Cao, Yahui"
X-Patchwork-Id: 60000
X-Patchwork-Delegate: xiaolong.ye@intel.com
From: Yahui Cao
To: Qiming Yang, Wenzhuo Lu
Cc: dev@dpdk.org, Qi Zhang, Xiaolong Ye, Beilei Xing, Yahui Cao
Date: Sat, 28 Sep 2019 01:04:19 +0800
Message-Id: <20190927170424.71348-4-yahui.cao@intel.com>
In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com>
References: <20190924215243.81474-1-yahui.cao@intel.com>
 <20190927170424.71348-1-yahui.cao@intel.com>
Subject: [dpdk-dev] [PATCH v4 3/8] net/ice: add FDIR create and destroy

Add ice_fdir_create_filter to create a rule. If a flow is matched by the flow director filter, the filter rule will be programmed to HW. For now, common patterns and the queue/passthru/drop/mark actions are supported.
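
To make the action combinations concrete, here is a hedged sketch (not
part of the patch) of a rule using MARK plus PASSTHRU: matched packets
are tagged with an ID while being left on the regular receive path. The
TCP port and mark value are illustrative assumptions; note that in this
revision the driver parses PASSTHRU with a destination of queue index 0
(see ice_fdir_parse_action() in the diff below).

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch only: mark IPv4/TCP flows to destination port 80 with
 * ID 0x1234; all values are assumptions. */
static struct rte_flow *
fdir_mark_rule(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .dst_port = RTE_BE16(80) },
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xffff) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_mark mark = { .id = 0x1234 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_PASSTHRU },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}

On the Rx side, packets hitting such a rule carry PKT_RX_FDIR |
PKT_RX_FDIR_ID in ol_flags and the mark ID in mbuf->hash.fdir.hi, per
the flexible Rx descriptor change in patch 1/8.
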
Signed-off-by: Yahui Cao --- doc/guides/rel_notes/release_19_11.rst | 1 + drivers/net/ice/ice_ethdev.h | 6 + drivers/net/ice/ice_fdir_filter.c | 575 ++++++++++++++++++++++++- 3 files changed, 580 insertions(+), 2 deletions(-) diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst index bc77daff0..44a183fb1 100644 --- a/doc/guides/rel_notes/release_19_11.rst +++ b/doc/guides/rel_notes/release_19_11.rst @@ -63,6 +63,7 @@ New Features * Added support for device-specific DDP package loading. * Added support for handling Receive Flex Descriptor. * Added support for protocol extraction on per Rx queue. + * Added support for Flow Director filter based on generic filter framework. * Generic filter enhancement - Supported pipeline mode. - Supported new packet type like PPPoE for switch filter. diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index e85374946..20c0e2b44 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -247,6 +247,11 @@ TAILQ_HEAD(ice_flow_list, rte_flow); struct ice_flow_parser_node; TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); +struct ice_fdir_filter_conf { + struct ice_fdir_fltr input; + uint64_t input_set; +}; + /** * A structure used to define fields of a FDIR related info. */ @@ -256,6 +261,7 @@ struct ice_fdir_info { struct ice_rx_queue *rxq; void *prg_pkt; /* memory for fdir program packet */ uint64_t dma_addr; /* physic address of packet memory*/ + struct ice_fdir_filter_conf conf; }; struct ice_pf { diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 81a4f1a97..8d136a53a 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -7,6 +7,55 @@ #include "ice_rxtx.h" #include "ice_generic_flow.h" +#define ICE_FDIR_IPV6_TC_OFFSET 20 +#define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET) + +#define ICE_FDIR_INSET_ETH_IPV4 (\ + ICE_INSET_DMAC | \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \ + ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO) + +#define ICE_FDIR_INSET_ETH_IPV4_UDP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV4_TCP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\ + ICE_FDIR_INSET_ETH_IPV4 | \ + ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6 (\ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \ + ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR) + +#define ICE_FDIR_INSET_ETH_IPV6_UDP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6_TCP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT) + +#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\ + ICE_FDIR_INSET_ETH_IPV6 | \ + ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) + +static struct ice_pattern_match_item ice_fdir_pattern[] = { + {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_sctp, 
ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE}, +}; + +static struct ice_flow_parser ice_fdir_parser; + static const struct rte_memzone * ice_memzone_reserve(const char *name, uint32_t len, int socket_id) { @@ -357,7 +406,7 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) } } -static int __rte_unused +static int ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow, uint64_t input_set, bool is_tunnel) { @@ -454,8 +503,13 @@ static int ice_fdir_init(struct ice_adapter *ad) { struct ice_pf *pf = &ad->pf; + int ret; + + ret = ice_fdir_setup(pf); + if (ret) + return ret; - return ice_fdir_setup(pf); + return ice_register_parser(&ice_fdir_parser, ad); } static void @@ -463,15 +517,532 @@ ice_fdir_uninit(struct ice_adapter *ad) { struct ice_pf *pf = &ad->pf; + ice_unregister_parser(&ice_fdir_parser, ad); + ice_fdir_teardown(pf); } +static int +ice_fdir_add_del_filter(struct ice_pf *pf, + struct ice_fdir_filter_conf *filter, + bool add) +{ + struct ice_fltr_desc desc; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + int ret; + + filter->input.dest_vsi = pf->main_vsi->idx; + + memset(&desc, 0, sizeof(desc)); + ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add); + + memset(pkt, 0, ICE_FDIR_PKT_LEN); + ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false); + if (ret) { + PMD_DRV_LOG(ERR, "Generate dummy packet failed"); + return -EINVAL; + } + + return ice_fdir_programming(pf, &desc); +} + +static int +ice_fdir_create_filter(struct ice_adapter *ad, + struct rte_flow *flow, + void *meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_filter_conf *filter = meta; + struct ice_fdir_filter_conf *rule; + int ret; + + rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); + if (!rule) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return -rte_errno; + } + + ret = ice_fdir_input_set_conf(pf, filter->input.flow_type, + filter->input_set, false); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Profile configure failed."); + goto free_entry; + } + + ret = ice_fdir_add_del_filter(pf, filter, true); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Add filter rule failed."); + goto free_entry; + } + + rte_memcpy(rule, filter, sizeof(*rule)); + flow->rule = rule; + ice_fdir_update_cntrs(hw, filter->input.flow_type, true); + return 0; + +free_entry: + rte_free(rule); + return -rte_errno; +} + +static int +ice_fdir_destroy_filter(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_fdir_filter_conf *filter; + int ret; + + filter = (struct ice_fdir_filter_conf *)flow->rule; + + ret = ice_fdir_add_del_filter(pf, filter, false); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Del filter rule failed."); + return -rte_errno; + } + + rte_free(filter); + + return 0; +} + static struct ice_flow_engine ice_fdir_engine = { .init = ice_fdir_init, .uninit = ice_fdir_uninit, + .create = ice_fdir_create_filter, + .destroy = ice_fdir_destroy_filter, .type = ICE_FLOW_ENGINE_FDIR, }; +static int +ice_fdir_parse_action(struct ice_adapter *ad, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct ice_fdir_filter_conf *filter) +{ + struct ice_pf *pf = &ad->pf; + const struct rte_flow_action_queue 
*act_q; + const struct rte_flow_action_mark *mark_spec = NULL; + uint32_t dest_num = 0; + uint32_t mark_num = 0; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_VOID: + break; + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_num++; + + act_q = actions->conf; + filter->input.q_index = act_q->index; + if (filter->input.q_index >= + pf->dev_data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid queue for FDIR."); + return -rte_errno; + } + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + break; + case RTE_FLOW_ACTION_TYPE_DROP: + dest_num++; + + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DROP_PKT; + break; + case RTE_FLOW_ACTION_TYPE_PASSTHRU: + dest_num++; + + filter->input.dest_ctl = + ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; + filter->input.q_index = 0; + break; + case RTE_FLOW_ACTION_TYPE_MARK: + mark_num++; + + mark_spec = actions->conf; + filter->input.fltr_id = mark_spec->id; + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Invalid action."); + return -rte_errno; + } + } + + if (dest_num == 0 || dest_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Unsupported action combination"); + return -rte_errno; + } + + if (mark_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many mark actions"); + return -rte_errno; + } + + return 0; +} + +static int +ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct ice_fdir_filter_conf *filter) +{ + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + uint64_t input_set = ICE_INSET_NONE; + uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE; + uint8_t ipv6_addr_mask[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF + }; + uint32_t vtc_flow_cpu; + + + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + if (eth_spec && eth_mask) { + if (!rte_is_zero_ether_addr(ð_spec->src) || + !rte_is_zero_ether_addr(ð_mask->src)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Src mac not support"); + return -rte_errno; + } + + if (!rte_is_broadcast_ether_addr(ð_mask->dst)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid mac addr mask"); + return -rte_errno; + } + + input_set |= ICE_INSET_DMAC; + rte_memcpy(&filter->input.ext_data.dst_mac, + ð_spec->dst, + RTE_ETHER_ADDR_LEN); + } + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_spec = item->spec; + ipv4_mask = item->mask; + + if (ipv4_spec && ipv4_mask) { + /* Check IPv4 mask and update input set */ + 
if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + if (ipv4_mask->hdr.src_addr == UINT32_MAX) + input_set |= ICE_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) + input_set |= ICE_INSET_IPV4_DST; + if (ipv4_mask->hdr.type_of_service == UINT8_MAX) + input_set |= ICE_INSET_IPV4_TOS; + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) + input_set |= ICE_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) + input_set |= ICE_INSET_IPV4_PROTO; + + filter->input.ip.v4.dst_ip = + ipv4_spec->hdr.src_addr; + filter->input.ip.v4.src_ip = + ipv4_spec->hdr.dst_addr; + filter->input.ip.v4.tos = + ipv4_spec->hdr.type_of_service; + filter->input.ip.v4.ttl = + ipv4_spec->hdr.time_to_live; + filter->input.ip.v4.proto = + ipv4_spec->hdr.next_proto_id; + } + + flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = RTE_FLOW_ITEM_TYPE_IPV6; + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + if (ipv6_spec && ipv6_mask) { + /* Check IPv6 mask and update input set */ + if (ipv6_mask->hdr.payload_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask"); + return -rte_errno; + } + + if (!memcmp(ipv6_mask->hdr.src_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.src_addr))) + input_set |= ICE_INSET_IPV6_SRC; + if (!memcmp(ipv6_mask->hdr.dst_addr, + ipv6_addr_mask, + RTE_DIM(ipv6_mask->hdr.dst_addr))) + input_set |= ICE_INSET_IPV6_DST; + + if ((ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(ICE_IPV6_TC_MASK)) + == rte_cpu_to_be_32(ICE_IPV6_TC_MASK)) + input_set |= ICE_INSET_IPV6_TC; + if (ipv6_mask->hdr.proto == UINT8_MAX) + input_set |= ICE_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) + input_set |= ICE_INSET_IPV6_HOP_LIMIT; + + rte_memcpy(filter->input.ip.v6.dst_ip, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->input.ip.v6.src_ip, + ipv6_spec->hdr.dst_addr, 16); + + vtc_flow_cpu = + rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow); + filter->input.ip.v6.tc = + (uint8_t)(vtc_flow_cpu >> + ICE_FDIR_IPV6_TC_OFFSET); + filter->input.ip.v6.proto = + ipv6_spec->hdr.proto; + filter->input.ip.v6.hlim = + ipv6_spec->hdr.hop_limits; + } + + flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + if (tcp_spec && tcp_mask) { + /* Check TCP mask and update input set */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + if (tcp_mask->hdr.src_port == UINT16_MAX) + input_set |= ICE_INSET_TCP_SRC_PORT; + if (tcp_mask->hdr.dst_port == UINT16_MAX) + input_set |= ICE_INSET_TCP_DST_PORT; + + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.ip.v4.dst_port = + tcp_spec->hdr.src_port; + filter->input.ip.v4.src_port = + tcp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV4_TCP; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.ip.v6.dst_port = + tcp_spec->hdr.src_port; + filter->input.ip.v6.src_port = + tcp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV6_TCP; + } + } + 
break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (udp_spec && udp_mask) { + /* Check UDP mask and update input set*/ + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + if (udp_mask->hdr.src_port == UINT16_MAX) + input_set |= ICE_INSET_UDP_SRC_PORT; + if (udp_mask->hdr.dst_port == UINT16_MAX) + input_set |= ICE_INSET_UDP_DST_PORT; + + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.ip.v4.dst_port = + udp_spec->hdr.src_port; + filter->input.ip.v4.src_port = + udp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV4_UDP; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.ip.v6.src_port = + udp_spec->hdr.src_port; + filter->input.ip.v6.dst_port = + udp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV6_UDP; + } + } + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + sctp_spec = item->spec; + sctp_mask = item->mask; + + if (sctp_spec && sctp_mask) { + /* Check SCTP mask and update input set */ + if (sctp_mask->hdr.cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid SCTP mask"); + return -rte_errno; + } + + if (sctp_mask->hdr.src_port == UINT16_MAX) + input_set |= ICE_INSET_SCTP_SRC_PORT; + if (sctp_mask->hdr.dst_port == UINT16_MAX) + input_set |= ICE_INSET_SCTP_DST_PORT; + + /* Get filter info */ + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { + filter->input.ip.v4.dst_port = + sctp_spec->hdr.src_port; + filter->input.ip.v4.src_port = + sctp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV4_SCTP; + } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) { + filter->input.ip.v6.dst_port = + sctp_spec->hdr.src_port; + filter->input.ip.v6.src_port = + sctp_spec->hdr.dst_port; + flow_type = + ICE_FLTR_PTYPE_NONF_IPV6_SCTP; + } + } + break; + case RTE_FLOW_ITEM_TYPE_VOID: + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pattern item."); + return -rte_errno; + } + } + + filter->input.flow_type = flow_type; + filter->input_set = input_set; + + return 0; +} + +static int +ice_fdir_parse(struct ice_adapter *ad, + struct ice_pattern_match_item *array, + uint32_t array_len, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + void **meta, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_fdir_filter_conf *filter = &pf->fdir.conf; + struct ice_pattern_match_item *item = NULL; + uint64_t input_set; + int ret; + + memset(filter, 0, sizeof(*filter)); + item = ice_search_pattern_match_item(pattern, array, array_len, error); + if (!item) + return -rte_errno; + + ret = ice_fdir_parse_pattern(ad, pattern, error, filter); + if (ret) + return ret; + input_set = filter->input_set; + if (!input_set || input_set & ~item->input_set_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + pattern, + "Invalid input set"); + return -rte_errno; + } + + ret = ice_fdir_parse_action(ad, actions, error, filter); + if (ret) + return ret; + + *meta = filter; + + return 0; +} + +static struct ice_flow_parser ice_fdir_parser = { + .engine = &ice_fdir_engine, + .array = ice_fdir_pattern, + .array_len = RTE_DIM(ice_fdir_pattern), + .parse_pattern_action = ice_fdir_parse, + .stage = ICE_FLOW_STAGE_DISTRIBUTOR, +}; + RTE_INIT(ice_fdir_engine_register) { ice_register_flow_engine(&ice_fdir_engine); From patchwork Fri Sep 27 17:04:20 2019
Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Cao, Yahui" X-Patchwork-Id: 60001 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E56231B994; Fri, 27 Sep 2019 11:19:43 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id 60D8E37A2 for ; Fri, 27 Sep 2019 11:19:36 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 Sep 2019 02:19:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,554,1559545200"; d="scan'208";a="341742538" Received: from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16]) by orsmga004.jf.intel.com with ESMTP; 27 Sep 2019 02:19:34 -0700 From: Yahui Cao To: Qiming Yang , Wenzhuo Lu Cc: dev@dpdk.org, Qi Zhang , Xiaolong Ye , Beilei Xing , Yahui Cao Date: Sat, 28 Sep 2019 01:04:20 +0800 Message-Id: <20190927170424.71348-5-yahui.cao@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com> References: <20190924215243.81474-1-yahui.cao@intel.com> <20190927170424.71348-1-yahui.cao@intel.com> Subject: [dpdk-dev] [PATCH v4 4/8] net/ice: enable FDIR queue group X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" FDIR can send packets to a group of queues and distribute them by RSS. Signed-off-by: Yahui Cao --- drivers/net/ice/ice_fdir_filter.c | 68 +++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 8d136a53a..d6cf9313e 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -10,6 +10,8 @@ #define ICE_FDIR_IPV6_TC_OFFSET 20 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET) +#define ICE_FDIR_MAX_QREGION_SIZE 128 + #define ICE_FDIR_INSET_ETH_IPV4 (\ ICE_INSET_DMAC | \ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \ @@ -626,6 +628,63 @@ static struct ice_flow_engine ice_fdir_engine = { .type = ICE_FLOW_ENGINE_FDIR, }; +static int +ice_fdir_parse_action_qregion(struct ice_pf *pf, + struct rte_flow_error *error, + const struct rte_flow_action *act, + struct ice_fdir_filter_conf *filter) +{ + const struct rte_flow_action_rss *rss = act->conf; + uint32_t i; + + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + return -rte_errno; + } + + if (rss->queue_num <= 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Queue region size can't be 0 or 1."); + return -rte_errno; + } + + /* check if queue indexes for queue region are continuous */ + for (i = 0; i < rss->queue_num - 1; i++) { + if (rss->queue[i + 1] != rss->queue[i] + 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Discontinuous queue region"); + return -rte_errno; + } + } + + if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue region indexes."); + return -rte_errno; + } + + 
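+ /* The queue region size is programmed as a log2 value (rte_fls_u32(queue_num) - 1 below), hence the power-of-two requirement checked next. */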
if (!(rte_is_power_of_2(rss->queue_num) && + (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "The region size should be any of the following values: " + "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number " + "of queues does not exceed the VSI allocation."); + return -rte_errno; + } + + filter->input.q_index = rss->queue[0]; + filter->input.q_region = rte_fls_u32(rss->queue_num) - 1; + filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP; + + return 0; +} + static int ice_fdir_parse_action(struct ice_adapter *ad, const struct rte_flow_action actions[], @@ -637,6 +696,7 @@ ice_fdir_parse_action(struct ice_adapter *ad, const struct rte_flow_action_mark *mark_spec = NULL; uint32_t dest_num = 0; uint32_t mark_num = 0; + int ret; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -671,6 +731,14 @@ ice_fdir_parse_action(struct ice_adapter *ad, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX; filter->input.q_index = 0; break; + case RTE_FLOW_ACTION_TYPE_RSS: + dest_num++; + + ret = ice_fdir_parse_action_qregion(pf, + error, actions, filter); + if (ret) + return ret; + break; case RTE_FLOW_ACTION_TYPE_MARK: mark_num++; From patchwork Fri Sep 27 17:04:21 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Cao, Yahui" X-Patchwork-Id: 60002 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 464EC1B9BF; Fri, 27 Sep 2019 11:19:47 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id 4A3404C77 for ; Fri, 27 Sep 2019 11:19:37 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 Sep 2019 02:19:37 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,554,1559545200"; d="scan'208";a="341742546" Received: from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16]) by orsmga004.jf.intel.com with ESMTP; 27 Sep 2019 02:19:36 -0700 From: Yahui Cao To: Qiming Yang , Wenzhuo Lu Cc: dev@dpdk.org, Qi Zhang , Xiaolong Ye , Beilei Xing , Yahui Cao Date: Sat, 28 Sep 2019 01:04:21 +0800 Message-Id: <20190927170424.71348-6-yahui.cao@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com> References: <20190924215243.81474-1-yahui.cao@intel.com> <20190927170424.71348-1-yahui.cao@intel.com> Subject: [dpdk-dev] [PATCH v4 5/8] net/ice: add FDIR counter resource init/release X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" The patch integrates the counter resource init/release into FDIR's init/release scenario. Signed-off-by: Yahui Cao --- drivers/net/ice/ice_ethdev.h | 33 +++++++++++ drivers/net/ice/ice_fdir_filter.c | 92 +++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 20c0e2b44..4f8672543 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -252,6 +252,37 @@ struct ice_fdir_filter_conf { uint64_t input_set; }; +#define
ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 +#define ICE_FDIR_COUNTER_MAX_POOL_SIZE 32 +#define ICE_FDIR_COUNTERS_PER_BLOCK 256 +#define ICE_FDIR_COUNTER_INDEX(base_idx) \ + ((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK) +struct ice_fdir_counter { + TAILQ_ENTRY(ice_fdir_counter) next; + uint8_t shared; + uint32_t ref_cnt; + uint32_t id; + uint64_t hits; + uint64_t bytes; + uint32_t hw_index; +}; + +TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter); + +struct ice_fdir_counter_pool { + TAILQ_ENTRY(ice_fdir_counter_pool) next; + struct ice_fdir_counter_list counter_list; + struct ice_fdir_counter counters[0]; +}; + +TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool); + +struct ice_fdir_counter_pool_container { + struct ice_fdir_counter_pool_list pool_list; + struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE]; + uint8_t index_free; +}; + /** * A structure used to define fields of a FDIR related info. */ @@ -262,6 +293,8 @@ struct ice_fdir_info { void *prg_pkt; /* memory for fdir program packet */ uint64_t dma_addr; /* physic address of packet memory*/ struct ice_fdir_filter_conf conf; + + struct ice_fdir_counter_pool_container counter; }; struct ice_pf { diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index d6cf9313e..13a446fce 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -101,6 +101,88 @@ ice_fdir_prof_alloc(struct ice_hw *hw) return -ENOMEM; } +static int +ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter_pool_container *container, + uint32_t index_start, + uint32_t len) +{ + struct ice_fdir_counter_pool *pool; + uint32_t i; + int ret = 0; + + pool = rte_zmalloc("ice_fdir_counter_pool", + sizeof(*pool) + + sizeof(struct ice_fdir_counter) * len, + 0); + if (!pool) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir counter pool"); + return -ENOMEM; + } + + TAILQ_INIT(&pool->counter_list); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + + for (i = 0; i < len; i++) { + struct ice_fdir_counter *counter = &pool->counters[i]; + + counter->hw_index = index_start + i; + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } + + if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) { + PMD_INIT_LOG(ERR, "FDIR counter pool is full"); + ret = -EINVAL; + goto free_pool; + } + + container->pools[container->index_free++] = pool; + return 0; + +free_pool: + /* the pool was already linked into the container; unlink it before freeing */ + TAILQ_REMOVE(&container->pool_list, pool, next); + rte_free(pool); + return ret; +} + +static int +ice_fdir_counter_init(struct ice_pf *pf) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint32_t cnt_index, len; + int ret; + + TAILQ_INIT(&container->pool_list); + + cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base); + len = ICE_FDIR_COUNTERS_PER_BLOCK; + + ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to add fdir pool to container"); + return ret; + } + + return 0; +} + +static int +ice_fdir_counter_release(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + uint8_t i; + + for (i = 0; i < container->index_free; i++) + rte_free(container->pools[i]); + + return 0; +} + /* * ice_fdir_setup - reserve and initialize the Flow Director resources * @pf: board private structure @@ -138,6 +220,12 @@ ice_fdir_setup(struct ice_pf *pf) } pf->fdir.fdir_vsi = vsi; + err = 
ice_fdir_counter_init(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init FDIR counter."); + return -EINVAL; + } + /*Fdir tx queue setup*/ err = ice_fdir_setup_tx_resources(pf); if (err) { @@ -289,6 +377,10 @@ ice_fdir_teardown(struct ice_pf *pf) if (err) PMD_DRV_LOG(ERR, "Failed to stop RX queue."); + err = ice_fdir_counter_release(pf); + if (err) + PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource."); + ice_tx_queue_release(pf->fdir.txq); pf->fdir.txq = NULL; ice_rx_queue_release(pf->fdir.rxq); From patchwork Fri Sep 27 17:04:22 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Cao, Yahui" X-Patchwork-Id: 60003 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C0B901BE8E; Fri, 27 Sep 2019 11:19:51 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id DBBE7378E for ; Fri, 27 Sep 2019 11:19:39 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 Sep 2019 02:19:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,554,1559545200"; d="scan'208";a="341742556" Received: from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16]) by orsmga004.jf.intel.com with ESMTP; 27 Sep 2019 02:19:37 -0700 From: Yahui Cao To: Qiming Yang , Wenzhuo Lu Cc: dev@dpdk.org, Qi Zhang , Xiaolong Ye , Beilei Xing , Yahui Cao Date: Sat, 28 Sep 2019 01:04:22 +0800 Message-Id: <20190927170424.71348-7-yahui.cao@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com> References: <20190924215243.81474-1-yahui.cao@intel.com> <20190927170424.71348-1-yahui.cao@intel.com> Subject: [dpdk-dev] [PATCH v4 6/8] net/ice: add FDIR counter support X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch adds FDIR statistical counter support, including RTE_FLOW count action support and query support. The RTE_FLOW count action supports the id and shared fields. RTE_FLOW query reports packet hits by default. 
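From the application side, a count action is attached at rule creation and read back with rte_flow_query(). A minimal sketch, reusing the attr/pattern built in the IPv4/UDP sketch shown after patch 1/8; the counter id and queue index are illustrative:

/* Create a rule with a counter attached, then query its packet hits.
 * Assumes <stdio.h>, <inttypes.h> and <rte_flow.h> are included.
 */
static void
fdir_count_demo(uint16_t port_id, const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern)
{
	struct rte_flow_error err;
	struct rte_flow_action_queue queue = { .index = 4 };
	/* .shared = 1 with the same id would aggregate several rules
	 * into one counter (see ice_fdir_counter_shared_search()). */
	struct rte_flow_action_count count_conf = { .shared = 0, .id = 1 };
	struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count stats = { .reset = 0 };
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (flow == NULL)
		return;

	/* hits_set marks the hits field as valid; this PMD does not
	 * fill bytes. Setting .reset = 1 would also clear the HW
	 * counter after reading. */
	if (rte_flow_query(port_id, flow, &count_action, &stats, &err) == 0 &&
	    stats.hits_set)
		printf("FDIR rule hits: %" PRIu64 "\n", stats.hits);
}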
Signed-off-by: Yahui Cao --- drivers/net/ice/ice_ethdev.h | 7 ++ drivers/net/ice/ice_fdir_filter.c | 179 +++++++++++++++++++++++++++++- 2 files changed, 185 insertions(+), 1 deletion(-) diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 4f8672543..12df818c3 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -249,6 +249,10 @@ TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); struct ice_fdir_filter_conf { struct ice_fdir_fltr input; + + struct ice_fdir_counter *counter; /* flow specific counter context */ + struct rte_flow_action_count act_count; + uint64_t input_set; }; @@ -257,8 +261,11 @@ struct ice_fdir_filter_conf { #define ICE_FDIR_COUNTERS_PER_BLOCK 256 #define ICE_FDIR_COUNTER_INDEX(base_idx) \ ((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK) +struct ice_fdir_counter_pool; + struct ice_fdir_counter { TAILQ_ENTRY(ice_fdir_counter) next; + struct ice_fdir_counter_pool *pool; uint8_t shared; uint32_t ref_cnt; uint32_t id; diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 13a446fce..9c4294f63 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -183,6 +183,95 @@ ice_fdir_counter_release(struct ice_pf *pf) return 0; } +static struct ice_fdir_counter * +ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container + *container, + uint32_t id) +{ + struct ice_fdir_counter_pool *pool; + struct ice_fdir_counter *counter; + int i; + + TAILQ_FOREACH(pool, &container->pool_list, next) { + for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) { + counter = &pool->counters[i]; + + if (counter->shared && + counter->ref_cnt && + counter->id == id) + return counter; + } + } + + return NULL; +} + +static struct ice_fdir_counter * +ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_counter_pool_container *container = + &fdir_info->counter; + struct ice_fdir_counter_pool *pool = NULL; + struct ice_fdir_counter *counter_free = NULL; + + if (shared) { + counter_free = ice_fdir_counter_shared_search(container, id); + if (counter_free) { + if (counter_free->ref_cnt + 1 == 0) { + rte_errno = E2BIG; + return NULL; + } + counter_free->ref_cnt++; + return counter_free; + } + } + + TAILQ_FOREACH(pool, &container->pool_list, next) { + counter_free = TAILQ_FIRST(&pool->counter_list); + if (counter_free) + break; + counter_free = NULL; + } + + if (!counter_free) { + PMD_DRV_LOG(ERR, "No free counter found\n"); + return NULL; + } + + counter_free->shared = shared; + counter_free->id = id; + counter_free->ref_cnt = 1; + counter_free->pool = pool; + + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0); + + TAILQ_REMOVE(&pool->counter_list, counter_free, next); + if (TAILQ_EMPTY(&pool->counter_list)) { + TAILQ_REMOVE(&container->pool_list, pool, next); + TAILQ_INSERT_TAIL(&container->pool_list, pool, next); + } + + return counter_free; +} + +static void +ice_fdir_counter_free(__rte_unused struct ice_pf *pf, + struct ice_fdir_counter *counter) +{ + if (!counter) + return; + + if (--counter->ref_cnt == 0) { + struct ice_fdir_counter_pool *pool = counter->pool; + + TAILQ_INSERT_TAIL(&pool->counter_list, counter, next); + } +} + /* * ice_fdir_setup - reserve and initialize the Flow Director resources * @pf: board private structure @@ -670,12 +759,28 @@ 
ice_fdir_create_filter(struct ice_adapter *ad, goto free_entry; } + /* alloc counter for FDIR */ + if (filter->input.cnt_ena) { + struct rte_flow_action_count *act_count = &filter->act_count; + + filter->counter = ice_fdir_counter_alloc(pf, + act_count->shared, + act_count->id); + if (!filter->counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Failed to alloc FDIR counter."); + goto free_entry; + } + filter->input.cnt_index = filter->counter->hw_index; + } + ret = ice_fdir_add_del_filter(pf, filter, true); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Add filter rule failed."); - goto free_entry; + goto free_counter; } rte_memcpy(rule, filter, sizeof(*rule)); @@ -683,6 +788,12 @@ ice_fdir_create_filter(struct ice_adapter *ad, ice_fdir_update_cntrs(hw, filter->input.flow_type, true); return 0; +free_counter: + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + free_entry: rte_free(rule); return -rte_errno; @@ -699,6 +810,11 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, filter = (struct ice_fdir_filter_conf *)flow->rule; + if (filter->counter) { + ice_fdir_counter_free(pf, filter->counter); + filter->counter = NULL; + } + ret = ice_fdir_add_del_filter(pf, filter, false); if (ret) { rte_flow_error_set(error, -ret, @@ -712,11 +828,54 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, return 0; } +static int +ice_fdir_query_count(struct ice_adapter *ad, + struct rte_flow *flow, + struct rte_flow_query_count *flow_stats, + struct rte_flow_error *error) +{ + struct ice_pf *pf = &ad->pf; + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_fdir_filter_conf *filter = flow->rule; + struct ice_fdir_counter *counter = filter->counter; + uint64_t hits_lo, hits_hi; + + if (!counter) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "FDIR counters not available"); + return -rte_errno; + } + + /* + * Reading the low 32-bits latches the high 32-bits into a shadow + * register. Reading the high 32-bit returns the value in the + * shadow register. 
+ */ + hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index)); + hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index)); + + flow_stats->hits_set = 1; + flow_stats->hits = hits_lo | (hits_hi << 32); + flow_stats->bytes_set = 0; + flow_stats->bytes = 0; + + if (flow_stats->reset) { + /* reset statistic counter value */ + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0); + ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0); + } + + return 0; +} + static struct ice_flow_engine ice_fdir_engine = { .init = ice_fdir_init, .uninit = ice_fdir_uninit, .create = ice_fdir_create_filter, .destroy = ice_fdir_destroy_filter, + .query_count = ice_fdir_query_count, .type = ICE_FLOW_ENGINE_FDIR, }; @@ -786,8 +945,10 @@ ice_fdir_parse_action(struct ice_adapter *ad, struct ice_pf *pf = &ad->pf; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_mark *mark_spec = NULL; + const struct rte_flow_action_count *act_count; uint32_t dest_num = 0; uint32_t mark_num = 0; + uint32_t counter_num = 0; int ret; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -836,6 +997,15 @@ ice_fdir_parse_action(struct ice_adapter *ad, mark_spec = actions->conf; filter->input.fltr_id = mark_spec->id; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + counter_num++; + + act_count = actions->conf; + filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS; + rte_memcpy(&filter->act_count, act_count, + sizeof(filter->act_count)); + break; default: rte_flow_error_set(error, EINVAL, @@ -859,6 +1029,13 @@ ice_fdir_parse_action(struct ice_adapter *ad, return -rte_errno; } + if (counter_num >= 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "Too many count actions"); + return -rte_errno; + } + return 0; } From patchwork Fri Sep 27 17:04:23 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Cao, Yahui" X-Patchwork-Id: 60004 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id CF6A61BE93; Fri, 27 Sep 2019 11:19:54 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id BDE1E1B203 for ; Fri, 27 Sep 2019 11:19:41 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 Sep 2019 02:19:41 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,554,1559545200"; d="scan'208";a="341742560" Received: from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16]) by orsmga004.jf.intel.com with ESMTP; 27 Sep 2019 02:19:39 -0700 From: Yahui Cao To: Qiming Yang , Wenzhuo Lu Cc: dev@dpdk.org, Qi Zhang , Xiaolong Ye , Beilei Xing , Yahui Cao Date: Sat, 28 Sep 2019 01:04:23 +0800 Message-Id: <20190927170424.71348-8-yahui.cao@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com> References: <20190924215243.81474-1-yahui.cao@intel.com> <20190927170424.71348-1-yahui.cao@intel.com> Subject: [dpdk-dev] [PATCH v4 7/8] net/ice: reject duplicate flow for FDIR X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Enable duplication 
lookup for existing flow director rule entries. Signed-off-by: Yahui Cao --- drivers/net/ice/ice_ethdev.h | 17 +++ drivers/net/ice/ice_fdir_filter.c | 186 ++++++++++++++++++++++++++++-- 2 files changed, 196 insertions(+), 7 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 12df818c3..70f0f0bdf 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -256,6 +256,20 @@ struct ice_fdir_filter_conf { uint64_t input_set; }; +#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16) + +struct ice_fdir_fltr_pattern { + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; +}; + #define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 #define ICE_FDIR_COUNTER_MAX_POOL_SIZE 32 #define ICE_FDIR_COUNTERS_PER_BLOCK 256 @@ -301,6 +315,9 @@ struct ice_fdir_info { uint64_t dma_addr; /* physic address of packet memory*/ struct ice_fdir_filter_conf conf; + struct ice_fdir_filter_conf **hash_map; + struct rte_hash *hash_table; + struct ice_fdir_counter_pool_container counter; }; diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 9c4294f63..03983246f 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -1,5 +1,7 @@ #include <stdio.h> #include <rte_flow.h> +#include <rte_hash.h> +#include <rte_hash_crc.h> #include "base/ice_fdir.h" #include "base/ice_flow.h" #include "base/ice_type.h" @@ -272,6 +274,60 @@ ice_fdir_counter_free(__rte_unused struct ice_pf *pf, } } +static int +ice_fdir_init_filter_list(struct ice_pf *pf) +{ + struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct ice_fdir_info *fdir_info = &pf->fdir; + char fdir_hash_name[RTE_HASH_NAMESIZE]; + int ret; + + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = ICE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(struct ice_fdir_fltr_pattern), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + /* Initialize hash */ + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", dev->device->name); + fdir_info->hash_table = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_table) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map", + sizeof(*fdir_info->hash_map) * + ICE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + ret = -ENOMEM; + goto err_fdir_hash_map_alloc; + } + return 0; + +err_fdir_hash_map_alloc: + rte_hash_free(fdir_info->hash_table); + + return ret; +} + +static void +ice_fdir_release_filter_list(struct ice_pf *pf) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_table) + rte_hash_free(fdir_info->hash_table); +} + /* * ice_fdir_setup - reserve and initialize the Flow Director resources * @pf: board private structure @@ -309,6 +365,12 @@ ice_fdir_setup(struct ice_pf *pf) } pf->fdir.fdir_vsi = vsi; + err = ice_fdir_init_filter_list(pf); + if (err) { + PMD_DRV_LOG(ERR, "Failed to init FDIR filter list."); + return -EINVAL; + } + err = ice_fdir_counter_init(pf); if (err) { PMD_DRV_LOG(ERR, "Failed to init FDIR counter."); @@ -470,6 +532,8 @@ ice_fdir_teardown(struct ice_pf *pf) if (err) PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource."); + ice_fdir_release_filter_list(pf); + ice_tx_queue_release(pf->fdir.txq); pf->fdir.txq = 
NULL; ice_rx_queue_release(pf->fdir.rxq); @@ -730,6 +794,74 @@ ice_fdir_add_del_filter, return ice_fdir_programming(pf, &desc); } +static void +ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key, + struct ice_fdir_filter_conf *filter) +{ + struct ice_fdir_fltr *input = &filter->input; + memset(key, 0, sizeof(*key)); + + key->flow_type = input->flow_type; + rte_memcpy(&key->ip, &input->ip, sizeof(key->ip)); + rte_memcpy(&key->mask, &input->mask, sizeof(key->mask)); + rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data)); + rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask)); +} + +/* Check if the flow director filter exists */ +static struct ice_fdir_filter_conf * +ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info, + const struct ice_fdir_fltr_pattern *key) +{ + int ret; + + ret = rte_hash_lookup(fdir_info->hash_table, key); + if (ret < 0) + return NULL; + + return fdir_info->hash_map[ret]; +} + +/* Add a flow director entry into the SW list */ +static int +ice_fdir_entry_insert(struct ice_pf *pf, + struct ice_fdir_filter_conf *entry, + struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_add_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert fdir entry to hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = entry; + + return 0; +} + +/* Delete a flow director entry from the SW list */ +static int +ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key) +{ + struct ice_fdir_info *fdir_info = &pf->fdir; + int ret; + + ret = rte_hash_del_key(fdir_info->hash_table, key); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to delete fdir filter from hash table %d!", + ret); + return ret; + } + fdir_info->hash_map[ret] = NULL; + + return 0; +} + static int ice_fdir_create_filter(struct ice_adapter *ad, struct rte_flow *flow, @@ -739,11 +871,22 @@ ice_fdir_create_filter(struct ice_adapter *ad, struct ice_pf *pf = &ad->pf; struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_fdir_filter_conf *filter = meta; - struct ice_fdir_filter_conf *rule; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *entry, *node; + struct ice_fdir_fltr_pattern key; int ret; - rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0); - if (!rule) { + ice_fdir_extract_fltr_key(&key, filter); + node = ice_fdir_entry_lookup(fdir_info, &key); + if (node) { + rte_flow_error_set(error, EEXIST, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Rule already exists!"); + return -rte_errno; + } + + entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0); + if (!entry) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to allocate memory"); @@ -783,9 +926,18 @@ ice_fdir_create_filter(struct ice_adapter *ad, goto free_counter; } - rte_memcpy(rule, filter, sizeof(*rule)); - flow->rule = rule; + rte_memcpy(entry, filter, sizeof(*entry)); + ret = ice_fdir_entry_insert(pf, entry, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Insert entry to table failed."); + goto free_entry; + } + + flow->rule = entry; ice_fdir_update_cntrs(hw, filter->input.flow_type, true); + return 0; free_counter: @@ -795,7 +947,7 @@ ice_fdir_create_filter(struct ice_adapter *ad, } free_entry: - rte_free(rule); + rte_free(entry); return -rte_errno; } @@ -805,7 +957,9 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, struct rte_flow_error *error) { struct ice_pf *pf = &ad->pf; - struct 
ice_fdir_filter_conf *filter; + struct ice_fdir_info *fdir_info = &pf->fdir; + struct ice_fdir_filter_conf *filter, *entry; + struct ice_fdir_fltr_pattern key; int ret; filter = (struct ice_fdir_filter_conf *)flow->rule; @@ -815,6 +969,15 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, filter->counter = NULL; } + ice_fdir_extract_fltr_key(&key, filter); + entry = ice_fdir_entry_lookup(fdir_info, &key); + if (!entry) { + rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Can't find entry."); + return -rte_errno; + } + ret = ice_fdir_add_del_filter(pf, filter, false); if (ret) { rte_flow_error_set(error, -ret, @@ -823,7 +986,16 @@ ice_fdir_destroy_filter(struct ice_adapter *ad, return -rte_errno; } + ret = ice_fdir_entry_del(pf, &key); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Remove entry from table failed."); + return -rte_errno; + } + rte_free(filter); + flow->rule = NULL; return 0; } From patchwork Fri Sep 27 17:04:24 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Cao, Yahui" X-Patchwork-Id: 60005 X-Patchwork-Delegate: xiaolong.ye@intel.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5C5FE1BE9C; Fri, 27 Sep 2019 11:19:57 +0200 (CEST) Received: from mga12.intel.com (mga12.intel.com [192.55.52.136]) by dpdk.org (Postfix) with ESMTP id A45131B203 for ; Fri, 27 Sep 2019 11:19:43 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 27 Sep 2019 02:19:43 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,554,1559545200"; d="scan'208";a="341742569" Received: from dpdk-yahui-skylake.sh.intel.com ([10.67.119.16]) by orsmga004.jf.intel.com with ESMTP; 27 Sep 2019 02:19:41 -0700 From: Yahui Cao To: Qiming Yang , Wenzhuo Lu Cc: dev@dpdk.org, Qi Zhang , Xiaolong Ye , Beilei Xing , Yahui Cao Date: Sat, 28 Sep 2019 01:04:24 +0800 Message-Id: <20190927170424.71348-9-yahui.cao@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190927170424.71348-1-yahui.cao@intel.com> References: <20190924215243.81474-1-yahui.cao@intel.com> <20190927170424.71348-1-yahui.cao@intel.com> Subject: [dpdk-dev] [PATCH v4 8/8] net/ice: add FDIR vxlan tunnel support X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Enable FDIR vxlan tunnel matching for RTE_FLOW Signed-off-by: Yahui Cao --- drivers/net/ice/ice_ethdev.h | 8 +++ drivers/net/ice/ice_fdir_filter.c | 101 +++++++++++++++++++++++++++--- 2 files changed, 99 insertions(+), 10 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 70f0f0bdf..6ce2eac38 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -241,6 +241,11 @@ struct ice_vsi { bool offset_loaded; }; +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_VXLAN, +}; + struct rte_flow; TAILQ_HEAD(ice_flow_list, rte_flow); @@ -249,6 +254,7 @@ TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); struct ice_fdir_filter_conf { struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type tunnel_type; struct ice_fdir_counter *counter; 
/* flow specific counter context */ struct rte_flow_action_count act_count; @@ -268,6 +274,8 @@ struct ice_fdir_fltr_pattern { struct ice_fdir_extra ext_data; struct ice_fdir_extra ext_mask; + + enum ice_fdir_tunnel_type tunnel_type; }; #define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c index 03983246f..2fb48df51 100644 --- a/drivers/net/ice/ice_fdir_filter.c +++ b/drivers/net/ice/ice_fdir_filter.c @@ -47,6 +47,21 @@ ICE_FDIR_INSET_ETH_IPV6 | \ ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT) +#define ICE_FDIR_INSET_VXLAN_IPV4 (\ + ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST) + +#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT) + +#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT) + +#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\ + ICE_FDIR_INSET_VXLAN_IPV4 | \ + ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT) + static struct ice_pattern_match_item ice_fdir_pattern[] = { {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE}, @@ -56,6 +71,22 @@ static struct ice_pattern_match_item ice_fdir_pattern[] = { {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE}, {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE}, {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, + ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE}, }; static struct ice_flow_parser ice_fdir_parser; @@ -644,6 +675,14 @@ ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field) {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, + {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA}, + {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA}, + {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT}, + {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT}, + {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT}, + {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT}, + {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT}, + {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT}, }; for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) { @@ -777,6 +816,7 @@ ice_fdir_add_del_filter(struct ice_pf *pf, struct ice_fltr_desc desc; struct ice_hw *hw = ICE_PF_TO_HW(pf); unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; + bool is_tun; int ret; filter->input.dest_vsi = pf->main_vsi->idx; @@ -784,8 +824,10 @@ ice_fdir_add_del_filter(struct ice_pf *pf, memset(&desc, 0, sizeof(desc)); ice_fdir_get_prgm_desc(hw, &filter->input, 
&desc, add); + is_tun = filter->tunnel_type ? true : false; + memset(pkt, 0, ICE_FDIR_PKT_LEN); - ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false); + ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun); if (ret) { PMD_DRV_LOG(ERR, "Generate dummy packet failed"); return -EINVAL; @@ -806,6 +848,8 @@ ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key, rte_memcpy(&key->mask, &input->mask, sizeof(key->mask)); rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data)); rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask)); + + key->tunnel_type = filter->tunnel_type; } /* Check if there exists the flow director filter */ @@ -874,6 +918,7 @@ ice_fdir_create_filter(struct ice_adapter *ad, struct ice_fdir_info *fdir_info = &pf->fdir; struct ice_fdir_filter_conf *entry, *node; struct ice_fdir_fltr_pattern key; + bool is_tun; int ret; ice_fdir_extract_fltr_key(&key, filter); @@ -893,8 +938,10 @@ ice_fdir_create_filter(struct ice_adapter *ad, return -rte_errno; } + is_tun = filter->tunnel_type ? true : false; + ret = ice_fdir_input_set_conf(pf, filter->input.flow_type, - filter->input_set, false); + filter->input_set, is_tun); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -1220,12 +1267,14 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, const struct rte_flow_item *item = pattern; enum rte_flow_item_type item_type; enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; + enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE; const struct rte_flow_item_eth *eth_spec, *eth_mask; const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; const struct rte_flow_item_udp *udp_spec, *udp_mask; const struct rte_flow_item_sctp *sctp_spec, *sctp_mask; + const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask; uint64_t input_set = ICE_INSET_NONE; uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE; uint8_t ipv6_addr_mask[16] = { @@ -1293,9 +1342,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, return -rte_errno; } if (ipv4_mask->hdr.src_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_SRC; + input_set |= tunnel_type ? + ICE_INSET_TUN_IPV4_SRC : + ICE_INSET_IPV4_SRC; if (ipv4_mask->hdr.dst_addr == UINT32_MAX) - input_set |= ICE_INSET_IPV4_DST; + input_set |= tunnel_type ? + ICE_INSET_TUN_IPV4_DST : + ICE_INSET_IPV4_DST; if (ipv4_mask->hdr.type_of_service == UINT8_MAX) input_set |= ICE_INSET_IPV4_TOS; if (ipv4_mask->hdr.time_to_live == UINT8_MAX) @@ -1389,9 +1442,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (tcp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_TCP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_TCP_SRC_PORT : + ICE_INSET_TCP_SRC_PORT; if (tcp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_TCP_DST_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_TCP_DST_PORT : + ICE_INSET_TCP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { @@ -1427,9 +1484,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (udp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_UDP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_UDP_SRC_PORT : + ICE_INSET_UDP_SRC_PORT; if (udp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_UDP_DST_PORT; + input_set |= tunnel_type ? 
+ ICE_INSET_TUN_UDP_DST_PORT : + ICE_INSET_UDP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { @@ -1464,9 +1525,13 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } if (sctp_mask->hdr.src_port == UINT16_MAX) - input_set |= ICE_INSET_SCTP_SRC_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_SCTP_SRC_PORT : + ICE_INSET_SCTP_SRC_PORT; if (sctp_mask->hdr.dst_port == UINT16_MAX) - input_set |= ICE_INSET_SCTP_DST_PORT; + input_set |= tunnel_type ? + ICE_INSET_TUN_SCTP_DST_PORT : + ICE_INSET_SCTP_DST_PORT; /* Get filter info */ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) { @@ -1488,6 +1553,21 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, break; case RTE_FLOW_ITEM_TYPE_VOID: break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + l3 = RTE_FLOW_ITEM_TYPE_END; + vxlan_spec = item->spec; + vxlan_mask = item->mask; + + if (vxlan_spec || vxlan_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid vxlan field"); + return -rte_errno; + } + + tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN; + break; default: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1497,6 +1577,7 @@ ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad, } } + filter->tunnel_type = tunnel_type; filter->input.flow_type = flow_type; filter->input_set = input_set;
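To exercise the VXLAN path above from an application, note that ice_fdir_parse_pattern() insists the VXLAN item itself carries no spec/mask ("Invalid vxlan field" otherwise); the item only switches parsing to the ICE_INSET_TUN_* input-set bits, and the matched values come from the inner headers. A minimal pattern sketch mirroring pattern_eth_ipv4_udp_vxlan_eth_ipv4, with illustrative inner addresses:

/* VXLAN tunnel pattern accepted by the parser above. The outer
 * eth/ipv4/udp items and the vxlan item are left empty; only the
 * inner IPv4 item carries a spec/mask.
 */
struct rte_flow_item_ipv4 inner_ip_spec = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
	.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
};
struct rte_flow_item_ipv4 inner_ip_mask = {
	.hdr.src_addr = RTE_BE32(UINT32_MAX),
	.hdr.dst_addr = RTE_BE32(UINT32_MAX),
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer, unmatched */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer, unmatched */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* outer, unmatched */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* no spec/mask allowed */
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &inner_ip_spec, .mask = &inner_ip_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};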