From patchwork Mon Jun 4 12:09:41 2018 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Michal Krawczyk X-Patchwork-Id: 40615 X-Patchwork-Delegate: ferruh.yigit@amd.com Return-Path: X-Original-To: patchwork@dpdk.org Delivered-To: patchwork@dpdk.org Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E59AB6CD0; Mon, 4 Jun 2018 14:10:07 +0200 (CEST) Received: from mail-lf0-f65.google.com (mail-lf0-f65.google.com [209.85.215.65]) by dpdk.org (Postfix) with ESMTP id CA10C6CA2 for ; Mon, 4 Jun 2018 14:10:04 +0200 (CEST) Received: by mail-lf0-f65.google.com with SMTP id y20-v6so24825592lfy.0 for ; Mon, 04 Jun 2018 05:10:04 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=semihalf-com.20150623.gappssmtp.com; s=20150623; h=from:to:cc:subject:date:message-id:in-reply-to:references; bh=tShdAhP28z+cxDmKYjl0UNguuS2iPwYKXXrVX1IXoA8=; b=QadyQiNjRmKVURV/M1+DVuwERFM+TpicHqdKySOYHz32INi3LnN0UFbEuDfT4F2w5q EkTxj842hkzNnGOIVfVtP7OzJ+ZwgyDTp/X2crou3651fKoXo8lbjx1ZTe0R51cGVvbI rxm6ikIUNoO9OUBTqLNE0tszB6d+nDU/HYHLnyL18RSwo3hbpC7ur6tssjR0EuIVr4iN JPZMx1P/6/o7k1Hwa57Zes3tyohR2cPCcP2Tjey1/z/zR2O78e1twyJ0pIo6n0D+t5WZ j3J22XsXpRt361N7uQAqrYOO4tgiBexS129G0BxuUTmrJhOBfx0K5YmG6w2Ogsr5HhP4 iK7w== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20161025; h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to :references; bh=tShdAhP28z+cxDmKYjl0UNguuS2iPwYKXXrVX1IXoA8=; b=dk52yl13OFxll7rWbdqdfepDmDgWVieb2AqDwTpyGLtTMbOniL0JrB4AHg0sNL4O6J E3LH88ZBSu6zW/Y2yzKiIyZIEvmP2+TZH6H5Y3LsOblvYlPs4i7C4cAANOl0sUUxfxJV Jp0xsqE3HaRDZArJYa3E+UFTABUHJ/PB2tmpTNY0P1o13i+BDIGn2WLzHTc1zXCngIme eLsXwXB4gWTnjgvNcirLtSv2XrSA/2LnoApiop1hGY84yetnhoUBPzNitN6ZkMbGCwQT 8NC5yqVa1wY3pJ54ASC4cRU/qpq7SsH+t66pGIzZqnqx1p0CTqEzgeiz9LSPutaaErPt 981A== X-Gm-Message-State: ALKqPwcidY+h0RZO9p1cDRQUwIi0F5NO2ODFA098r2ZcbhPr2F8TddtU jTB4T1VyHjLuQwhmclSG0eSseQ== 
X-Google-Smtp-Source: ADUXVKKifarB0fL76lbrnjF2DX6taJpxMjeU7KsOzoW7TkZJiYcKRUiwzk3H5zgrO/KClJidMj8Vjw== X-Received: by 2002:a19:e544:: with SMTP id c65-v6mr13188002lfh.134.1528114204442; Mon, 04 Jun 2018 05:10:04 -0700 (PDT) Received: from mkPC.semihalf.local (31-172-191-173.noc.fibertech.net.pl. [31.172.191.173]) by smtp.gmail.com with ESMTPSA id g23-v6sm3817415lfi.49.2018.06.04.05.10.03 (version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128); Mon, 04 Jun 2018 05:10:03 -0700 (PDT) From: Michal Krawczyk To: Marcin Wojtas , Michal Krawczyk , Guy Tzalik , Evgeny Schemeilin Cc: dev@dpdk.org, matua@amazon.com Date: Mon, 4 Jun 2018 14:09:41 +0200 Message-Id: <20180604120955.17319-4-mk@semihalf.com> X-Mailer: git-send-email 2.14.1 In-Reply-To: <20180604120955.17319-1-mk@semihalf.com> References: <20180604120955.17319-1-mk@semihalf.com> Subject: [dpdk-dev] [PATCH v2 13/26] net/ena: add RX out of order completion X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This feature allows RX packets to be cleaned up out of order. 
Signed-off-by: Michal Krawczyk --- drivers/net/ena/ena_ethdev.c | 48 ++++++++++++++++++++++++++++++++++++++++---- drivers/net/ena/ena_ethdev.h | 8 ++++++-- 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 3f72272eb..598c698a3 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -368,6 +368,19 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, } } +static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id) +{ + if (likely(req_id < rx_ring->ring_size)) + return 0; + + RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id); + + rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; + rx_ring->adapter->trigger_reset = true; + + return -EFAULT; +} + static void ena_config_host_info(struct ena_com_dev *ena_dev) { struct ena_admin_host_info *host_info; @@ -724,6 +737,10 @@ static void ena_rx_queue_release(void *queue) rte_free(ring->rx_buffer_info); ring->rx_buffer_info = NULL; + if (ring->empty_rx_reqs) + rte_free(ring->empty_rx_reqs); + ring->empty_rx_reqs = NULL; + ring->configured = 0; RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n", @@ -1176,7 +1193,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, (struct ena_adapter *)(dev->data->dev_private); struct ena_ring *rxq = NULL; uint16_t ena_qid = 0; - int rc = 0; + int i, rc = 0; struct ena_com_dev *ena_dev = &adapter->ena_dev; rxq = &adapter->rx_ring[queue_idx]; @@ -1242,6 +1259,19 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } + rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs", + sizeof(uint16_t) * nb_desc, + RTE_CACHE_LINE_SIZE); + if (!rxq->empty_rx_reqs) { + RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); + rte_free(rxq->rx_buffer_info); + rxq->rx_buffer_info = NULL; + return -ENOMEM; + } + + for (i = 0; i < nb_desc; i++) + rxq->empty_rx_reqs[i] = i; + /* Store pointer to this queue in upper layer */ 
rxq->configured = 1; dev->data->rx_queues[queue_idx] = rxq; @@ -1256,7 +1286,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) uint16_t ring_size = rxq->ring_size; uint16_t ring_mask = ring_size - 1; uint16_t next_to_use = rxq->next_to_use; - uint16_t in_use; + uint16_t in_use, req_id; struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0]; if (unlikely(!count)) @@ -1284,12 +1314,14 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) struct ena_com_buf ebuf; rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); + + req_id = rxq->empty_rx_reqs[next_to_use_masked]; /* prepare physical address for DMA transaction */ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; /* pass resource to device */ rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq, - &ebuf, next_to_use_masked); + &ebuf, req_id); if (unlikely(rc)) { rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf), count - i); @@ -1710,6 +1742,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, unsigned int ring_mask = ring_size - 1; uint16_t next_to_clean = rx_ring->next_to_clean; uint16_t desc_in_use = 0; + uint16_t req_id; unsigned int recv_idx = 0; struct rte_mbuf *mbuf = NULL; struct rte_mbuf *mbuf_head = NULL; @@ -1750,7 +1783,12 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, break; while (segments < ena_rx_ctx.descs) { - mbuf = rx_buff_info[next_to_clean & ring_mask]; + req_id = ena_rx_ctx.ena_bufs[segments].req_id; + rc = validate_rx_req_id(rx_ring, req_id); + if (unlikely(rc)) + break; + + mbuf = rx_buff_info[req_id]; mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len; mbuf->data_off = RTE_PKTMBUF_HEADROOM; mbuf->refcnt = 1; @@ -1767,6 +1805,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, mbuf_head->pkt_len += mbuf->data_len; mbuf_prev = mbuf; + rx_ring->empty_rx_reqs[next_to_clean & ring_mask] = + req_id; 
segments++; next_to_clean++; } diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index 594e643e2..bba5ad53a 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -75,8 +75,12 @@ struct ena_ring { enum ena_ring_type type; enum ena_admin_placement_policy_type tx_mem_queue_type; - /* Holds the empty requests for TX OOO completions */ - uint16_t *empty_tx_reqs; + /* Holds the empty requests for TX/RX OOO completions */ + union { + uint16_t *empty_tx_reqs; + uint16_t *empty_rx_reqs; + }; + union { struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */ struct rte_mbuf **rx_buffer_info; /* contex of rx packet */