From patchwork Thu Jun 28 03:19:35 2018
X-Patchwork-Submitter: "John Daley (johndale)" <johndale@cisco.com>
X-Patchwork-Id: 41779
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: John Daley <johndale@cisco.com>
To: ferruh.yigit@intel.com
Cc: dev@dpdk.org, Hyong Youb Kim
Date: Wed, 27 Jun 2018 20:19:35 -0700
Message-Id: <20180628031940.17397-9-johndale@cisco.com>
X-Mailer: git-send-email 2.16.2
In-Reply-To: <20180628031940.17397-1-johndale@cisco.com>
References: <20180628031940.17397-1-johndale@cisco.com>
Subject: [dpdk-dev] [PATCH 09/14] net/enic: support mbuf fast free offload

From: Hyong Youb Kim

Add support for the DEV_TX_OFFLOAD_MBUF_FAST_FREE Tx offload. When it
is enabled on a queue, the Tx cleanup handler returns completed mbufs
to their mempool in bulk via rte_mempool_put_bulk() instead of going
through the regular per-mbuf free path.

Signed-off-by: Hyong Youb Kim
Reviewed-by: John Daley
---
 drivers/net/enic/base/vnic_wq.h |  1 +
 drivers/net/enic/enic.h         |  1 +
 drivers/net/enic/enic_ethdev.c  | 11 ++++++++---
 drivers/net/enic/enic_res.c     |  2 ++
 drivers/net/enic/enic_rxtx.c    | 30 +++++++++++++++++++++++++++++-
 5 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 86ac10e28..6622a8a2d 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -48,6 +48,7 @@ struct vnic_wq {
 	unsigned int socket_id;
 	const struct rte_memzone *cqmsg_rz;
 	uint16_t last_completed_index;
+	uint64_t offloads;
 };
 
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index b611f0a24..af790fc2e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -183,6 +183,7 @@ struct enic {
 	uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
 	uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+	uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
 	uint64_t tx_offload_mask; /* PKT_TX flags accepted */
 };
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 117b362de..ef18f8802 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -185,17 +185,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	uint16_t queue_idx,
 	uint16_t nb_desc,
 	unsigned int socket_id,
-	__rte_unused const struct rte_eth_txconf *tx_conf)
+	const struct rte_eth_txconf *tx_conf)
 {
 	int ret;
 	struct enic *enic = pmd_priv(eth_dev);
+	struct vnic_wq *wq;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -E_RTE_SECONDARY;
 
 	ENICPMD_FUNC_TRACE();
 	RTE_ASSERT(queue_idx < enic->conf_wq_count);
-	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+	wq = &enic->wq[queue_idx];
+	wq->offloads = tx_conf->offloads |
+		eth_dev->data->dev_conf.txmode.offloads;
+	eth_dev->data->tx_queues[queue_idx] = (void *)wq;
 
 	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
 	if (ret) {
@@ -477,6 +481,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
 	device_info->rx_offload_capa = enic->rx_offload_capa;
 	device_info->tx_offload_capa = enic->tx_offload_capa;
+	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
 	device_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
 	};
@@ -765,7 +770,7 @@ static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
 	ENICPMD_FUNC_TRACE();
 	qinfo->nb_desc = wq->ring.desc_count;
 	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
-	qinfo->conf.offloads = enic->tx_offload_capa;
+	qinfo->conf.offloads = wq->offloads;
 	/* tx_thresh, and all the other fields are not applicable for enic */
 }
 
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index d1113b2f1..11d66a626 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -183,7 +183,9 @@ int enic_get_vnic_config(struct enic *enic)
 	 * Default hardware capabilities. enic_dev_init() may add additional
 	 * flags if it enables overlay offloads.
 	 */
+	enic->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	enic->tx_offload_capa =
+		enic->tx_queue_offload_capa |
 		DEV_TX_OFFLOAD_MULTI_SEGS |
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 549288c20..89a1e66fe 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -471,6 +471,31 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
+static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+	unsigned int desc_count, n, nb_to_free, tail_idx;
+	struct rte_mempool *pool;
+	struct rte_mbuf **m;
+
+	desc_count = wq->ring.desc_count;
+	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+		     + 1;
+	tail_idx = wq->tail_idx;
+	wq->tail_idx += nb_to_free;
+	wq->ring.desc_avail += nb_to_free;
+	if (wq->tail_idx >= desc_count)
+		wq->tail_idx -= desc_count;
+	/* First, free at most until the end of ring */
+	m = &wq->bufs[tail_idx];
+	pool = (*m)->pool;
+	n = RTE_MIN(nb_to_free, desc_count - tail_idx);
+	rte_mempool_put_bulk(pool, (void **)m, n);
+	n = nb_to_free - n;
+	/* Then wrap and free the rest */
+	if (unlikely(n))
+		rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
+}
+
 static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 {
 	struct rte_mbuf *buf;
@@ -518,7 +543,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
 
 	if (wq->last_completed_index != completed_index) {
-		enic_free_wq_bufs(wq, completed_index);
+		if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+			enic_fast_free_wq_bufs(wq, completed_index);
+		else
+			enic_free_wq_bufs(wq, completed_index);
 		wq->last_completed_index = completed_index;
 	}
 	return 0;
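
For context: DEV_TX_OFFLOAD_MBUF_FAST_FREE is the offload under which the
application guarantees that all mbufs transmitted on a queue come from a
single mempool and have a reference count of 1. That guarantee is what lets
enic_fast_free_wq_bufs() above return completed buffers with at most two
rte_mempool_put_bulk() calls instead of freeing them one at a time. Below is
a minimal sketch (not part of this patch) of how an application might
request the offload on an enic port; setup_tx_fast_free, port_id, nb_txd,
and the single-queue configuration are illustrative assumptions.

#include <rte_ethdev.h>

static int
setup_tx_fast_free(uint16_t port_id, uint16_t nb_txd, unsigned int socket_id)
{
	struct rte_eth_conf port_conf = { 0 };
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Request fast free only if the PMD reports the capability,
	 * as enic now does via tx_offload_capa/tx_queue_offload_capa.
	 * The application must ensure all mbufs on this queue come
	 * from one mempool with refcnt == 1.
	 */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* Pass the offload per queue as well; the PMD ORs tx_conf->offloads
	 * with the port-level txmode.offloads, as enicpmd_dev_tx_queue_setup()
	 * does above.
	 */
	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, &txconf);
}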