From patchwork Mon Sep  5 13:32:09 2022
X-Patchwork-Submitter: Nithin Dabilpuram <ndabilpuram@marvell.com>
X-Patchwork-Id: 115916
X-Patchwork-Delegate: jerinj@marvell.com
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram, Kiran Kumar K,
 Sunil Kumar Kori, Satha Rao
CC: jerinj@marvell.com, dev@dpdk.org
Subject: [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta
Date: Mon, 5 Sep 2022 19:02:09 +0530
Message-ID: <20220905133228.818616-12-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220905133228.818616-1-ndabilpuram@marvell.com>
References: <20220809184908.24030-1-ndabilpuram@marvell.com>
 <20220905133228.818616-1-ndabilpuram@marvell.com>
List-Id: DPDK patches and discussions <dev.dpdk.org>

Add support for zero aura for inline meta packets and register a
callback with ROC to create the meta pool via the mempool API. Also
add devargs to override the meta buffer count and size.
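As an illustration only (editor's sketch, not part of the patch): the new
devargs are consumed by the inline device, so an invocation could look
like the line below, where the PCI address 0002:1d:00.0 and the two
values are placeholders:

    dpdk-testpmd -a 0002:1d:00.0,nb_meta_bufs=8192,meta_buf_sz=2048 -- -i

Left unset, both values stay zero and the driver keeps its computed
defaults.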
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c      |  8 +-
 drivers/event/cnxk/cn10k_worker.h        | 32 ++++----
 drivers/event/cnxk/cnxk_eventdev.h       |  1 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c          |  8 +-
 drivers/net/cnxk/cn10k_ethdev.h          |  2 +-
 drivers/net/cnxk/cn10k_rx.h              | 35 +++++----
 drivers/net/cnxk/cnxk_ethdev.c           |  3 +
 drivers/net/cnxk/cnxk_ethdev.h           |  2 +
 drivers/net/cnxk/cnxk_ethdev_sec.c       | 97 +++++++++++++++++++++++-
 10 files changed, 154 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index fee01713b4..1774455b4c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,7 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,6 +703,8 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
 		ws->tstamp = dev->tstamp;
+		if (meta_aura)
+			ws->meta_aura = meta_aura;
 	}
 }
 
@@ -713,6 +715,7 @@ cn10k_sso_rx_adapter_queue_add(
 	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct cn10k_eth_rxq *rxq;
+	uint64_t meta_aura;
 	void *lookup_mem;
 	int rc;
 
@@ -726,7 +729,8 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+	meta_aura = rxq->meta_aura;
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index db56d96404..47ce423da2 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -127,12 +127,14 @@ cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
 }
 
 static __rte_always_inline void
-cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
-		   void *lookup_mem, void *tstamp, uintptr_t lbase)
+cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn10k_sso_hws *ws)
 {
 	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+	struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+	void *lookup_mem = ws->lookup_mem;
+	uintptr_t lbase = ws->lmt_base;
 	struct rte_event_vector *vec;
-	uint64_t aura_handle, laddr;
+	uint64_t meta_aura, laddr;
 	uint16_t nb_mbufs, non_vec;
 	uint16_t lmt_id, d_off;
 	struct rte_mbuf **wqe;
@@ -153,25 +155,31 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
 		mbuf_init |= 8;
 
+	meta_aura = ws->meta_aura;
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
-					      flags | NIX_RX_VWQE_F, lookup_mem,
-					      tstamp, lbase);
+					      flags | NIX_RX_VWQE_F,
+					      lookup_mem, tstamp,
+					      lbase, meta_aura);
 	wqe += nb_mbufs;
 	non_vec = vec->nb_elem - nb_mbufs;
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+		uint64_t sg_w1;
+
 		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
 					   sizeof(struct rte_mbuf));
 
 		/* Pick first mbuf's aura handle assuming all
 		 * mbufs are from a vec and are from same RQ.
 		 */
-		aura_handle = mbuf->pool->pool_id;
+		meta_aura = ws->meta_aura;
+		if (!meta_aura)
+			meta_aura = mbuf->pool->pool_id;
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 		laddr = lbase;
 		laddr += 8;
-		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
-		d_off += (mbuf_init & 0xFFFF);
+		sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+		d_off = sg_w1 - (uintptr_t)mbuf;
 		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	}
@@ -208,7 +216,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	/* Free remaining meta buffers if any */
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
-		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+		nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
 		plt_io_wmb();
 	}
 }
@@ -241,8 +249,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		uint64_t cq_w5;
 
 		m = (struct rte_mbuf *)mbuf;
-		d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-		d_off += RTE_PKTMBUF_HEADROOM;
+		d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
 
 		cq_w1 = *(uint64_t *)(u64[1] + 8);
 		cq_w5 = *(uint64_t *)(u64[1] + 40);
@@ -273,8 +280,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
-		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp[port], ws->lmt_base);
+		cn10k_process_vwqe(u64[1], port, flags, ws);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index fae4484758..d61e60dd2d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -148,6 +148,7 @@ struct cn10k_sso_hws {
 	uint8_t hws_id;
 	/* PTP timestamp */
 	struct cnxk_timesync_info **tstamp;
+	uint64_t meta_aura;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 7937cadd25..5f51c504b5 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -194,7 +194,7 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 
 	rq->vwqe_ena = 1;
 	rq->vwqe_first_skip = 0;
-	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+	rq->vwqe_aura_handle = vmp->pool_id;
 	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
 	rq->vwqe_wait_tmo =
 		tmo_ns /
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 80c5c0e962..e8faeebe1f 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -282,9 +282,13 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rxq->lmt_base = dev->nix.lmt_base;
 		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
 							   dev->inb.inl_dev);
+		rxq->meta_aura = rq->meta_aura_handle;
+		rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+		/* Assume meta packet from normal aura if meta aura is not setup
+		 */
+		if (!rxq->meta_aura)
+			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
 	}
-	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
-	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
 
 	/* Lookup mem */
 	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index acfdbb66aa..d0a5b136e3 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -39,7 +39,7 @@ struct cn10k_eth_rxq {
 	uint16_t data_off;
 	uint64_t sa_base;
 	uint64_t lmt_base;
-	uint64_t aura_handle;
+	uint64_t meta_aura;
 	uint16_t rq;
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 0f8790b8c7..2cd297eb82 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -877,7 +877,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		aura_handle = rxq->aura_handle;
+		aura_handle = rxq->meta_aura;
 		sa_base = rxq->sa_base;
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -984,7 +984,7 @@ static __rte_always_inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	struct cn10k_eth_rxq *rxq = args;
 	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -1003,10 +1003,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint64_t aura_handle, lbase, laddr;
 	uint8_t loff = 0, lnum = 0, shft = 0;
 	uint8x16_t f0, f1, f2, f3;
 	uint16_t lmt_id, d_off;
+	uint64_t lbase, laddr;
 	uint16_t packets = 0;
 	uint16_t pkts_left;
 	uintptr_t sa_base;
@@ -1035,6 +1035,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
 		if (flags & NIX_RX_VWQE_F) {
+			uint64_t sg_w1;
 			uint16_t port;
 
 			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
@@ -1042,10 +1043,15 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Pick first mbuf's aura handle assuming all
 			 * mbufs are from a vec and are from same RQ.
 			 */
-			aura_handle = mbuf0->pool->pool_id;
+			if (!meta_aura)
+				meta_aura = mbuf0->pool->pool_id;
 			/* Calculate offset from mbuf to actual data area */
-			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
-			d_off += (mbuf_initializer & 0xFFFF);
+			/* Zero aura's first skip i.e mbuf setup might not match the actual
+			 * offset as first skip is taken from second pass RQ. So compute
+			 * using diff b/w first SG pointer and mbuf addr.
+			 */
+			sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+			d_off = (sg_w1 - (uint64_t)mbuf0);
 
 			/* Get SA Base from lookup tbl using port_id */
 			port = mbuf_initializer >> 48;
@@ -1053,7 +1059,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 			lbase = lmt_base;
 		} else {
-			aura_handle = rxq->aura_handle;
+			meta_aura = rxq->meta_aura;
 			d_off = rxq->data_off;
 			sa_base = rxq->sa_base;
 			lbase = rxq->lmt_base;
@@ -1721,7 +1727,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Update aura handle */
 			*(uint64_t *)(laddr - 8) =
 				(((uint64_t)(15 & 0x1) << 32) |
-				 roc_npa_aura_handle_to_aura(aura_handle));
+				 roc_npa_aura_handle_to_aura(meta_aura));
 
 			loff = loff - 15;
 			shft += 3;
@@ -1744,14 +1750,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Update aura handle */
 			*(uint64_t *)(laddr - 8) =
 				(((uint64_t)(loff & 0x1) << 32) |
-				 roc_npa_aura_handle_to_aura(aura_handle));
+				 roc_npa_aura_handle_to_aura(meta_aura));
 
 			data = (data & ~(0x7UL << shft)) |
 			       (((uint64_t)loff >> 1) << shft);
 
 			/* Send up to 16 lmt lines of pointers */
 			nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
-						 aura_handle);
+						 meta_aura);
 			rte_io_wmb();
 			lnum = 0;
 			loff = 0;
@@ -1769,13 +1775,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		/* Update aura handle */
 		*(uint64_t *)(laddr - 8) =
 			(((uint64_t)(loff & 0x1) << 32) |
-			 roc_npa_aura_handle_to_aura(aura_handle));
+			 roc_npa_aura_handle_to_aura(meta_aura));
 
 		data = (data & ~(0x7UL << shft)) |
 		       (((uint64_t)loff >> 1) << shft);
 
 		/* Send up to 16 lmt lines of pointers */
-		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
+		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
 		if (flags & NIX_RX_VWQE_F)
 			plt_io_wmb();
 	}
@@ -1803,7 +1809,7 @@ static inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	RTE_SET_USED(args);
 	RTE_SET_USED(mbufs);
@@ -1812,6 +1818,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	RTE_SET_USED(lookup_mem);
 	RTE_SET_USED(tstamp);
 	RTE_SET_USED(lmt_base);
+	RTE_SET_USED(meta_aura);
 
 	return 0;
 }
@@ -2038,7 +2045,7 @@ NIX_RX_FASTPATH_MODES
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
-						  (flags), NULL, NULL, 0);     \
+						  (flags), NULL, NULL, 0, 0);  \
 	}
 
 #define NIX_RX_RECV_VEC_MSEG(fn, flags)                                        \
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 80ab3cfedd..85ad70e50b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1732,6 +1732,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	roc_nix_mac_link_info_get_cb_register(nix,
 					      cnxk_eth_dev_link_status_get_cb);
 
+	/* Register callback for inline meta pool create */
+	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index f11a9a0b63..a4178cfeff 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -642,6 +642,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
 struct cnxk_eth_sec_sess *
 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+			      bool destroy);
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 1de3454398..9304b1465d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -4,10 +4,14 @@
 
 #include <cnxk_ethdev.h>
 
+#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
+#define CNXK_NIX_INL_NB_META_BUFS     "nb_meta_bufs"
+#define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 
 struct inl_cpt_channel {
 	bool is_multi_channel;
@@ -28,6 +32,85 @@ bitmap_ctzll(uint64_t slab)
 	return __builtin_ctzll(slab);
 }
 
+int
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+{
+	const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+	struct rte_pktmbuf_pool_private mbp_priv;
+	struct npa_aura_s *aura;
+	struct rte_mempool *mp;
+	uint16_t first_skip;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		mp = rte_mempool_lookup(mp_name);
+		if (!mp)
+			return -ENOENT;
+
+		if (mp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		plt_free(mp->pool_config);
+		rte_mempool_free(mp);
+
+		*aura_handle = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	mp = rte_mempool_create_empty(mp_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!mp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	/* Indicate to allocate zero aura */
+	aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
+	if (!aura) {
+		rc = -ENOMEM;
+		goto free_mp;
+	}
+	aura->ena = 1;
+	aura->pool_addr = 0x0;
+
+	rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+					aura);
+	if (rc) {
+		plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
+		goto free_aura;
+	}
+
+	/* Init mempool private area */
+	first_skip = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+	memset(&mbp_priv, 0, sizeof(mbp_priv));
+	mbp_priv.mbuf_data_room_size = (buf_sz - first_skip +
+					RTE_PKTMBUF_HEADROOM);
+	rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(mp);
+	if (rc < 0) {
+		plt_err("Failed to create inline meta pool, rc=%d", rc);
+		goto free_aura;
+	}
+
+	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+	*aura_handle = mp->pool_id;
+	return 0;
+free_aura:
+	plt_free(aura);
+free_mp:
+	rte_mempool_free(mp);
+	return rc;
+}
+
 int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
@@ -128,7 +211,7 @@ struct rte_security_ops cnxk_eth_sec_ops = {
 };
 
 static int
-parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
+parse_val_u32(const char *key, const char *value, void *extra_args)
 {
 	RTE_SET_USED(key);
 	uint32_t val;
@@ -184,6 +267,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	uint32_t ipsec_in_min_spi = 0;
 	struct inl_cpt_channel cpt_channel;
 	struct rte_kvargs *kvlist;
+	uint32_t nb_meta_bufs = 0;
+	uint32_t meta_buf_sz = 0;
 	uint8_t selftest = 0;
 
 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -198,11 +283,15 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
 			   &selftest);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
+			   &parse_val_u32, &ipsec_in_min_spi);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
+			   &parse_val_u32, &ipsec_in_max_spi);
 	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
 			   &cpt_channel);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_NB_META_BUFS, &parse_val_u32,
+			   &nb_meta_bufs);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_META_BUF_SZ, &parse_val_u32,
+			   &meta_buf_sz);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -212,6 +301,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->channel = cpt_channel.channel;
 	inl_dev->chan_mask = cpt_channel.mask;
 	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
+	inl_dev->nb_meta_bufs = nb_meta_bufs;
+	inl_dev->meta_buf_sz = meta_buf_sz;
 	return 0;
 exit:
 	return -EINVAL;
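
Editor's note: a minimal sketch (not part of the patch) of the create/destroy
contract of the callback added above. In the patch the caller is ROC
(roc_nix_inl), not application code; the wrapper function name and the
buf_sz/nb_bufs values below are hypothetical placeholders.

#include <stdint.h>
#include <stdbool.h>

/* Prototype as declared in cnxk_ethdev.h by this patch */
int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz,
			      uint32_t nb_bufs, bool destroy);

static int
meta_pool_lifecycle_sketch(void)
{
	uint64_t aura_handle = 0;
	int rc;

	/* Create: builds the "NIX_INL_META_POOL" mempool backed by a zero
	 * aura and returns the pool's aura handle (mp->pool_id) via
	 * aura_handle.
	 */
	rc = cnxk_nix_inl_meta_pool_cb(&aura_handle, 2048, 8192, false);
	if (rc)
		return rc;

	/* ... RQs pick this up as rq->meta_aura_handle, and the Rx/event
	 * fast paths read it as rxq->meta_aura / ws->meta_aura ...
	 */

	/* Destroy: looks the mempool up by name, verifies the aura handle
	 * matches, frees the pool and clears aura_handle to 0.
	 */
	return cnxk_nix_inl_meta_pool_cb(&aura_handle, 0, 0, true);
}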