get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Fully update a patch; all writable fields are replaced.
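
For example, a patch can be read and partially updated through this endpoint. Below is a minimal sketch using Python's requests library; the token value is hypothetical, and writable fields such as "state" and "archived" assume maintainer rights on the project.

import requests

BASE = "http://patches.dpdk.org/api/patches/29704/"
TOKEN = "0123456789abcdef"  # hypothetical API token with maintainer rights

# GET: show the patch; read access needs no authentication.
patch = requests.get(BASE).json()
print(patch["name"], patch["state"])

# PATCH: partial update; only the supplied fields are changed.
resp = requests.patch(
    BASE,
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()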

GET /api/patches/29704/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 29704,
    "url": "http://patches.dpdk.org/api/patches/29704/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/dd4cdf6d733af345f044d1fcdcf4afc550f7eb52.1507207731.git.nelio.laranjeiro@6wind.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<dd4cdf6d733af345f044d1fcdcf4afc550f7eb52.1507207731.git.nelio.laranjeiro@6wind.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/dd4cdf6d733af345f044d1fcdcf4afc550f7eb52.1507207731.git.nelio.laranjeiro@6wind.com",
    "date": "2017-10-05T12:49:45",
    "name": "[dpdk-dev,v2,13/30] net/mlx5: add reference counter on DPDK Rx queues",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "5dbc007c32bfd7ae20648600304eb3f454ef384d",
    "submitter": {
        "id": 243,
        "url": "http://patches.dpdk.org/api/people/243/?format=api",
        "name": "Nélio Laranjeiro",
        "email": "nelio.laranjeiro@6wind.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/dd4cdf6d733af345f044d1fcdcf4afc550f7eb52.1507207731.git.nelio.laranjeiro@6wind.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/29704/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/29704/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id C94E31B22C;\n\tThu,  5 Oct 2017 14:50:46 +0200 (CEST)",
            "from mail-wm0-f45.google.com (mail-wm0-f45.google.com\n\t[74.125.82.45]) by dpdk.org (Postfix) with ESMTP id 0C1FF1B1EF\n\tfor <dev@dpdk.org>; Thu,  5 Oct 2017 14:50:37 +0200 (CEST)",
            "by mail-wm0-f45.google.com with SMTP id q132so1995208wmd.2\n\tfor <dev@dpdk.org>; Thu, 05 Oct 2017 05:50:37 -0700 (PDT)",
            "from ping.dev.6wind.com (host.78.145.23.62.rev.coltfrance.com.\n\t[62.23.145.78]) by smtp.gmail.com with ESMTPSA id\n\tk9sm26405823wrk.27.2017.10.05.05.50.35\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-SHA bits=128/128);\n\tThu, 05 Oct 2017 05:50:35 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=6wind-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references\n\t:in-reply-to:references;\n\tbh=cJJzo84cOZfODOOCA0jysmLR0wugylmK6kbN06cJ/gU=;\n\tb=blXNLn2mQHsPKK6L6kU1Y+EzAQjT8HYTThHylHZ7/qo5/pW0hrmxYHAyQQ1dX8/QCb\n\tCAzTTat8G7FAisBg9JKiJsI54Okhh8XRq7pAveFEPAaTueZVcWDKcMi0SStXeaa0WXqA\n\tSTjSa93+csD3/O/XNp5evvjuPMQh/iv9yVjfTLtzI74AI0TeldMLKKtBo5f6XEwc8uub\n\tLx+vtE4eY7msErjdSlTmnMei+FLTsQGuh6prG119EQmo6hGCk4sEzvlb2uBlEXMwCFWa\n\t+aILXvxQ0XeK/CBjS0AEZQMCS5SAPjox3v/o+fT+WwiwEHEI48nmj5bkt2nFyUdvY4nd\n\t/BjA==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references:in-reply-to:references;\n\tbh=cJJzo84cOZfODOOCA0jysmLR0wugylmK6kbN06cJ/gU=;\n\tb=RVUJTWG/dsrHGyMteUUahwe/CFCiN6p+K4CvuMEjnMLjWq+FuTd0hQplBgBZHXVDbs\n\tLPhWwVzEafzo17wBtzDc6uhXplynNy8VPxBLct1J28fHJ+dLUugRUqYHNRvIOtLRwANs\n\t6W9hDagCNvnwI4VzgqsWhI5rz0n7JhJF3xP/WcmnEctLyZUhQltjVBHt4UwuEMXWtkcG\n\tIYv5OQGP+vong6xvomX9F3pwaPLCcRQAnWRyhrG3bh3pE4Ox3kNL75DB7jEc4PB2xUbI\n\tz4dMT9DKHhp9ci99bTRVe/Tv5ipMZ7FObtY+3SS5zl8rl0yyfEcymlIoB32TvGRzbW/E\n\tRXYg==",
        "X-Gm-Message-State": "AMCzsaWoiev++gAJJcLzyVu72MqsSum5myPZ63qK/f7RTDt8w9xHzqMm\n\tYS452V7daN3O/Sp7ZNg2NpgzDM7h7w==",
        "X-Google-Smtp-Source": "AOwi7QAhqK7i6AnDp8w/3ScmETGHQn8RNB4EHtdtDZo9Gq+PS8Fn4tJkHG8pvS4XxhZDjwd/bArq3g==",
        "X-Received": "by 10.28.30.196 with SMTP id e187mr7085341wme.36.1507207836051; \n\tThu, 05 Oct 2017 05:50:36 -0700 (PDT)",
        "From": "Nelio Laranjeiro <nelio.laranjeiro@6wind.com>",
        "To": "dev@dpdk.org",
        "Cc": "adrien.mazarguil@6wind.com, yskoh@mellanox.com, ferruh.yigit@intel.com",
        "Date": "Thu,  5 Oct 2017 14:49:45 +0200",
        "Message-Id": "<dd4cdf6d733af345f044d1fcdcf4afc550f7eb52.1507207731.git.nelio.laranjeiro@6wind.com>",
        "X-Mailer": "git-send-email 2.1.4",
        "In-Reply-To": [
            "<cover.1507205686.git.nelio.laranjeiro@6wind.com>",
            "<cover.1507207731.git.nelio.laranjeiro@6wind.com>"
        ],
        "References": [
            "<cover.1507205686.git.nelio.laranjeiro@6wind.com>",
            "<cover.1501681927.git.nelio.laranjeiro@6wind.com>\n\t<cover.1507207731.git.nelio.laranjeiro@6wind.com>"
        ],
        "Subject": "[dpdk-dev] [PATCH v2 13/30] net/mlx5: add reference counter on DPDK\n\tRx queues",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Use the same design for DPDK queue as for Verbs queue for symmetry, this\nalso helps in fixing some issues like the DPDK release queue API which is\nnot expected to fail.  With such design, the queue is released when the\nreference counters reaches 0.\n\nSigned-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>\n---\n drivers/net/mlx5/mlx5.c         |  16 +-\n drivers/net/mlx5/mlx5.h         |   1 +\n drivers/net/mlx5/mlx5_rxq.c     | 488 +++++++++++++++++++++-------------------\n drivers/net/mlx5/mlx5_rxtx.h    |  10 +\n drivers/net/mlx5/mlx5_trigger.c |  47 +++-\n 5 files changed, 321 insertions(+), 241 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex cbf22eb..22fd5e4 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -208,17 +208,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \tif (priv->rxqs != NULL) {\n \t\t/* XXX race condition if mlx5_rx_burst() is still running. */\n \t\tusleep(1000);\n-\t\tfor (i = 0; (i != priv->rxqs_n); ++i) {\n-\t\t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[i];\n-\t\t\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n-\n-\t\t\tif (rxq == NULL)\n-\t\t\t\tcontinue;\n-\t\t\trxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);\n-\t\t\t(*priv->rxqs)[i] = NULL;\n-\t\t\tmlx5_rxq_cleanup(rxq_ctrl);\n-\t\t\trte_free(rxq_ctrl);\n-\t\t}\n+\t\tfor (i = 0; (i != priv->rxqs_n); ++i)\n+\t\t\tmlx5_priv_rxq_release(priv, i);\n \t\tpriv->rxqs_n = 0;\n \t\tpriv->rxqs = NULL;\n \t}\n@@ -247,6 +238,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \tret = mlx5_priv_rxq_ibv_verify(priv);\n \tif (ret)\n \t\tWARN(\"%p: some Verbs Rx queue still remain\", (void *)priv);\n+\tret = mlx5_priv_rxq_verify(priv);\n+\tif (ret)\n+\t\tWARN(\"%p: some Rx Queues still remain\", (void *)priv);\n \tret = mlx5_priv_txq_ibv_verify(priv);\n \tif (ret)\n \t\tWARN(\"%p: some Verbs Tx queue still remain\", (void *)priv);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex b20c39c..d0ef21a 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -147,6 +147,7 @@ struct priv {\n \tstruct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */\n \tTAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */\n \tLIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */\n+\tLIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */\n \tLIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */\n \tLIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */\n \tLIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex 89c2cdb..87efeed 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -37,6 +37,7 @@\n #include <string.h>\n #include <stdint.h>\n #include <fcntl.h>\n+#include <sys/queue.h>\n \n /* Verbs header. */\n /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */\n@@ -629,16 +630,15 @@ priv_rehash_flows(struct priv *priv)\n  *\n  * @param rxq_ctrl\n  *   Pointer to RX queue structure.\n- * @param elts_n\n- *   Number of elements to allocate.\n  *\n  * @return\n  *   0 on success, errno value on failure.\n  */\n-static int\n-rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)\n+int\n+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)\n {\n \tconst unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;\n+\tunsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;\n \tunsigned int i;\n \tint ret = 0;\n \n@@ -667,9 +667,11 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)\n \t\tNB_SEGS(buf) = 1;\n \t\t(*rxq_ctrl->rxq.elts)[i] = buf;\n \t}\n+\t/* If Rx vector is activated. */\n \tif (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {\n \t\tstruct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;\n \t\tstruct rte_mbuf *mbuf_init = &rxq->fake_mbuf;\n+\t\tint j;\n \n \t\t/* Initialize default rearm_data for vPMD. 
*/\n \t\tmbuf_init->data_off = RTE_PKTMBUF_HEADROOM;\n@@ -681,10 +683,11 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl, unsigned int elts_n)\n \t\t * rearm_data covers previous fields.\n \t\t */\n \t\trte_compiler_barrier();\n-\t\trxq->mbuf_initializer = *(uint64_t *)&mbuf_init->rearm_data;\n+\t\trxq->mbuf_initializer =\n+\t\t\t*(uint64_t *)&mbuf_init->rearm_data;\n \t\t/* Padding with a fake mbuf for vectorized Rx. */\n-\t\tfor (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)\n-\t\t\t(*rxq->elts)[elts_n + i] = &rxq->fake_mbuf;\n+\t\tfor (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)\n+\t\t\t(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;\n \t}\n \tDEBUG(\"%p: allocated and configured %u segments (max %u packets)\",\n \t      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));\n@@ -754,170 +757,6 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)\n }\n \n /**\n- * Configure a RX queue.\n- *\n- * @param dev\n- *   Pointer to Ethernet device structure.\n- * @param rxq_ctrl\n- *   Pointer to RX queue structure.\n- * @param desc\n- *   Number of descriptors to configure in queue.\n- * @param socket\n- *   NUMA socket on which memory must be allocated.\n- * @param[in] conf\n- *   Thresholds parameters.\n- * @param mp\n- *   Memory pool for buffer allocations.\n- *\n- * @return\n- *   0 on success, errno value on failure.\n- */\n-static int\n-rxq_ctrl_setup(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,\n-\t       uint16_t desc, unsigned int socket,\n-\t       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)\n-{\n-\tstruct priv *priv = dev->data->dev_private;\n-\tconst uint16_t desc_n =\n-\t\tdesc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;\n-\tstruct mlx5_rxq_ctrl tmpl = {\n-\t\t.priv = priv,\n-\t\t.socket = socket,\n-\t\t.rxq = {\n-\t\t\t.elts = rte_calloc_socket(\"RXQ\", 1,\n-\t\t\t\t\t\t  desc_n *\n-\t\t\t\t\t\t  sizeof(struct rte_mbuf *), 0,\n-\t\t\t\t\t\t  socket),\n-\t\t\t.elts_n = log2above(desc),\n-\t\t\t.mp = mp,\n-\t\t\t.rss_hash = priv->rxqs_n > 1,\n-\t\t},\n-\t};\n-\tunsigned int mb_len = rte_pktmbuf_data_room_size(mp);\n-\tstruct rte_mbuf *(*elts)[desc_n] = NULL;\n-\tint ret = 0;\n-\n-\t(void)conf; /* Thresholds configuration (ignored). */\n-\tif (dev->data->dev_conf.intr_conf.rxq)\n-\t\ttmpl.irq = 1;\n-\t/* Enable scattered packets support for this queue if necessary. */\n-\tassert(mb_len >= RTE_PKTMBUF_HEADROOM);\n-\tif (dev->data->dev_conf.rxmode.max_rx_pkt_len <=\n-\t    (mb_len - RTE_PKTMBUF_HEADROOM)) {\n-\t\ttmpl.rxq.sges_n = 0;\n-\t} else if (dev->data->dev_conf.rxmode.enable_scatter) {\n-\t\tunsigned int size =\n-\t\t\tRTE_PKTMBUF_HEADROOM +\n-\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len;\n-\t\tunsigned int sges_n;\n-\n-\t\t/*\n-\t\t * Determine the number of SGEs needed for a full packet\n-\t\t * and round it to the next power of two.\n-\t\t */\n-\t\tsges_n = log2above((size / mb_len) + !!(size % mb_len));\n-\t\ttmpl.rxq.sges_n = sges_n;\n-\t\t/* Make sure rxq.sges_n did not overflow. 
*/\n-\t\tsize = mb_len * (1 << tmpl.rxq.sges_n);\n-\t\tsize -= RTE_PKTMBUF_HEADROOM;\n-\t\tif (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {\n-\t\t\tERROR(\"%p: too many SGEs (%u) needed to handle\"\n-\t\t\t      \" requested maximum packet size %u\",\n-\t\t\t      (void *)dev,\n-\t\t\t      1 << sges_n,\n-\t\t\t      dev->data->dev_conf.rxmode.max_rx_pkt_len);\n-\t\t\treturn EOVERFLOW;\n-\t\t}\n-\t} else {\n-\t\tWARN(\"%p: the requested maximum Rx packet size (%u) is\"\n-\t\t     \" larger than a single mbuf (%u) and scattered\"\n-\t\t     \" mode has not been requested\",\n-\t\t     (void *)dev,\n-\t\t     dev->data->dev_conf.rxmode.max_rx_pkt_len,\n-\t\t     mb_len - RTE_PKTMBUF_HEADROOM);\n-\t}\n-\tDEBUG(\"%p: maximum number of segments per packet: %u\",\n-\t      (void *)dev, 1 << tmpl.rxq.sges_n);\n-\tif (desc % (1 << tmpl.rxq.sges_n)) {\n-\t\tERROR(\"%p: number of RX queue descriptors (%u) is not a\"\n-\t\t      \" multiple of SGEs per packet (%u)\",\n-\t\t      (void *)dev,\n-\t\t      desc,\n-\t\t      1 << tmpl.rxq.sges_n);\n-\t\treturn EINVAL;\n-\t}\n-\t/* Toggle RX checksum offload if hardware supports it. */\n-\tif (priv->hw_csum)\n-\t\ttmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;\n-\tif (priv->hw_csum_l2tun)\n-\t\ttmpl.rxq.csum_l2tun =\n-\t\t\t!!dev->data->dev_conf.rxmode.hw_ip_checksum;\n-\t/* Configure VLAN stripping. */\n-\ttmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&\n-\t\t\t       !!dev->data->dev_conf.rxmode.hw_vlan_strip);\n-\t/* By default, FCS (CRC) is stripped by hardware. */\n-\tif (dev->data->dev_conf.rxmode.hw_strip_crc) {\n-\t\ttmpl.rxq.crc_present = 0;\n-\t} else if (priv->hw_fcs_strip) {\n-\t\ttmpl.rxq.crc_present = 1;\n-\t} else {\n-\t\tWARN(\"%p: CRC stripping has been disabled but will still\"\n-\t\t     \" be performed by hardware, make sure MLNX_OFED and\"\n-\t\t     \" firmware are up to date\",\n-\t\t     (void *)dev);\n-\t\ttmpl.rxq.crc_present = 0;\n-\t}\n-\tDEBUG(\"%p: CRC stripping is %s, %u bytes will be subtracted from\"\n-\t      \" incoming frames to hide it\",\n-\t      (void *)dev,\n-\t      tmpl.rxq.crc_present ? \"disabled\" : \"enabled\",\n-\t      tmpl.rxq.crc_present << 2);\n-#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING\n-\tif (!mlx5_getenv_int(\"MLX5_PMD_ENABLE_PADDING\")) {\n-\t\t; /* Nothing else to do. */\n-\t} else if (priv->hw_padding) {\n-\t\tINFO(\"%p: enabling packet padding on queue %p\",\n-\t\t     (void *)dev, (void *)rxq_ctrl);\n-\t} else {\n-\t\tWARN(\"%p: packet padding has been requested but is not\"\n-\t\t     \" supported, make sure MLNX_OFED and firmware are\"\n-\t\t     \" up to date\",\n-\t\t     (void *)dev);\n-\t}\n-#endif\n-\t/* Save port ID. */\n-\ttmpl.rxq.port_id = dev->data->port_id;\n-\tDEBUG(\"%p: RTE port ID: %u\", (void *)rxq_ctrl, tmpl.rxq.port_id);\n-\tret = rxq_alloc_elts(&tmpl, desc);\n-\tif (ret) {\n-\t\tERROR(\"%p: RXQ allocation failed: %s\",\n-\t\t      (void *)dev, strerror(ret));\n-\t\tgoto error;\n-\t}\n-\t/* Clean up rxq in case we're reinitializing it. */\n-\tDEBUG(\"%p: cleaning-up old rxq just in case\", (void *)rxq_ctrl);\n-\tmlx5_rxq_cleanup(rxq_ctrl);\n-\t/* Move mbuf pointers to dedicated storage area in RX queue. 
*/\n-\telts = (void *)(rxq_ctrl + 1);\n-\trte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts));\n-#ifndef NDEBUG\n-\tmemset(tmpl.rxq.elts, 0x55, sizeof(*elts));\n-#endif\n-\trte_free(tmpl.rxq.elts);\n-\ttmpl.rxq.elts = elts;\n-\t*rxq_ctrl = tmpl;\n-\tDEBUG(\"%p: rxq updated with %p\", (void *)rxq_ctrl, (void *)&tmpl);\n-\tassert(ret == 0);\n-\treturn 0;\n-error:\n-\trte_free(tmpl.rxq.elts);\n-\tmlx5_rxq_cleanup(&tmpl);\n-\tassert(ret > 0);\n-\treturn ret;\n-}\n-\n-/**\n- * DPDK callback to configure a RX queue.\n  *\n  * @param dev\n  *   Pointer to Ethernet device structure.\n@@ -944,13 +783,11 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];\n \tstruct mlx5_rxq_ctrl *rxq_ctrl =\n \t\tcontainer_of(rxq, struct mlx5_rxq_ctrl, rxq);\n-\tconst uint16_t desc_n =\n-\t\tdesc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;\n-\tint ret;\n+\tint ret = 0;\n \n+\t(void)conf;\n \tif (mlx5_is_secondary())\n \t\treturn -E_RTE_SECONDARY;\n-\n \tpriv_lock(priv);\n \tif (!rte_is_power_of_2(desc)) {\n \t\tdesc = 1 << log2above(desc);\n@@ -966,54 +803,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,\n \t\tpriv_unlock(priv);\n \t\treturn -EOVERFLOW;\n \t}\n-\tif (rxq != NULL) {\n-\t\tDEBUG(\"%p: reusing already allocated queue index %u (%p)\",\n-\t\t      (void *)dev, idx, (void *)rxq);\n-\t\tif (dev->data->dev_started) {\n-\t\t\tpriv_unlock(priv);\n-\t\t\treturn -EEXIST;\n-\t\t}\n-\t\t(*priv->rxqs)[idx] = NULL;\n-\t\tmlx5_rxq_cleanup(rxq_ctrl);\n-\t\t/* Resize if rxq size is changed. */\n-\t\tif (rxq_ctrl->rxq.elts_n != log2above(desc)) {\n-\t\t\trxq_ctrl = rte_realloc(rxq_ctrl,\n-\t\t\t\t\t       sizeof(*rxq_ctrl) + desc_n *\n-\t\t\t\t\t       sizeof(struct rte_mbuf *),\n-\t\t\t\t\t       RTE_CACHE_LINE_SIZE);\n-\t\t\tif (!rxq_ctrl) {\n-\t\t\t\tERROR(\"%p: unable to reallocate queue index %u\",\n-\t\t\t\t\t(void *)dev, idx);\n-\t\t\t\tpriv_unlock(priv);\n-\t\t\t\treturn -ENOMEM;\n-\t\t\t}\n-\t\t}\n-\t} else {\n-\t\trxq_ctrl = rte_calloc_socket(\"RXQ\", 1, sizeof(*rxq_ctrl) +\n-\t\t\t\t\t     desc_n *\n-\t\t\t\t\t     sizeof(struct rte_mbuf *),\n-\t\t\t\t\t     0, socket);\n-\t\tif (rxq_ctrl == NULL) {\n-\t\t\tERROR(\"%p: unable to allocate queue index %u\",\n-\t\t\t      (void *)dev, idx);\n-\t\t\tpriv_unlock(priv);\n-\t\t\treturn -ENOMEM;\n-\t\t}\n+\tif (!mlx5_priv_rxq_releasable(priv, idx)) {\n+\t\tret = EBUSY;\n+\t\tERROR(\"%p: unable to release queue index %u\",\n+\t\t      (void *)dev, idx);\n+\t\tgoto out;\n \t}\n-\tret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp);\n-\tif (ret) {\n-\t\trte_free(rxq_ctrl);\n+\tmlx5_priv_rxq_release(priv, idx);\n+\trxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);\n+\tif (!rxq_ctrl) {\n+\t\tERROR(\"%p: unable to allocate queue index %u\",\n+\t\t      (void *)dev, idx);\n+\t\tret = ENOMEM;\n \t\tgoto out;\n \t}\n-\trxq_ctrl->rxq.stats.idx = idx;\n \tDEBUG(\"%p: adding RX queue %p to list\",\n \t      (void *)dev, (void *)rxq_ctrl);\n \t(*priv->rxqs)[idx] = &rxq_ctrl->rxq;\n-\trxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, idx);\n-\tif (!rxq_ctrl->ibv) {\n-\t\tret = EAGAIN;\n-\t\tgoto out;\n-\t}\n out:\n \tpriv_unlock(priv);\n \treturn -ret;\n@@ -1031,7 +837,6 @@ mlx5_rx_queue_release(void *dpdk_rxq)\n \tstruct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;\n \tstruct mlx5_rxq_ctrl *rxq_ctrl;\n \tstruct priv *priv;\n-\tunsigned int i;\n \n \tif (mlx5_is_secondary())\n \t\treturn;\n@@ -1041,18 +846,10 @@ mlx5_rx_queue_release(void *dpdk_rxq)\n 
\trxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);\n \tpriv = rxq_ctrl->priv;\n \tpriv_lock(priv);\n-\tif (!mlx5_priv_rxq_ibv_releasable(priv, rxq_ctrl->ibv))\n+\tif (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))\n \t\trte_panic(\"Rx queue %p is still used by a flow and cannot be\"\n \t\t\t  \" removed\\n\", (void *)rxq_ctrl);\n-\tfor (i = 0; (i != priv->rxqs_n); ++i)\n-\t\tif ((*priv->rxqs)[i] == rxq) {\n-\t\t\tDEBUG(\"%p: removing RX queue %p from list\",\n-\t\t\t      (void *)priv->dev, (void *)rxq_ctrl);\n-\t\t\t(*priv->rxqs)[i] = NULL;\n-\t\t\tbreak;\n-\t\t}\n-\tmlx5_rxq_cleanup(rxq_ctrl);\n-\trte_free(rxq_ctrl);\n+\tmlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);\n \tpriv_unlock(priv);\n }\n \n@@ -1590,3 +1387,238 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)\n \tassert(rxq_ibv);\n \treturn (rte_atomic32_read(&rxq_ibv->refcnt) == 1);\n }\n+\n+/**\n+ * Create a DPDK Rx queue.\n+ *\n+ * @param priv\n+ *   Pointer to private structure.\n+ * @param idx\n+ *   TX queue index.\n+ * @param desc\n+ *   Number of descriptors to configure in queue.\n+ * @param socket\n+ *   NUMA socket on which memory must be allocated.\n+ *\n+ * @return\n+ *   A DPDK queue object on success.\n+ */\n+struct mlx5_rxq_ctrl*\n+mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,\n+\t\t  unsigned int socket, struct rte_mempool *mp)\n+{\n+\tstruct rte_eth_dev *dev = priv->dev;\n+\tstruct mlx5_rxq_ctrl *tmpl;\n+\tconst uint16_t desc_n =\n+\t\tdesc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;\n+\tunsigned int mb_len = rte_pktmbuf_data_room_size(mp);\n+\n+\ttmpl = rte_calloc_socket(\"RXQ\", 1,\n+\t\t\t\t sizeof(*tmpl) +\n+\t\t\t\t desc_n * sizeof(struct rte_mbuf *),\n+\t\t\t\t 0, socket);\n+\tif (!tmpl)\n+\t\treturn NULL;\n+\tif (priv->dev->data->dev_conf.intr_conf.rxq)\n+\t\ttmpl->irq = 1;\n+\t/* Enable scattered packets support for this queue if necessary. */\n+\tassert(mb_len >= RTE_PKTMBUF_HEADROOM);\n+\tif (dev->data->dev_conf.rxmode.max_rx_pkt_len <=\n+\t    (mb_len - RTE_PKTMBUF_HEADROOM)) {\n+\t\ttmpl->rxq.sges_n = 0;\n+\t} else if (dev->data->dev_conf.rxmode.enable_scatter) {\n+\t\tunsigned int size =\n+\t\t\tRTE_PKTMBUF_HEADROOM +\n+\t\t\tdev->data->dev_conf.rxmode.max_rx_pkt_len;\n+\t\tunsigned int sges_n;\n+\n+\t\t/*\n+\t\t * Determine the number of SGEs needed for a full packet\n+\t\t * and round it to the next power of two.\n+\t\t */\n+\t\tsges_n = log2above((size / mb_len) + !!(size % mb_len));\n+\t\ttmpl->rxq.sges_n = sges_n;\n+\t\t/* Make sure rxq.sges_n did not overflow. 
*/\n+\t\tsize = mb_len * (1 << tmpl->rxq.sges_n);\n+\t\tsize -= RTE_PKTMBUF_HEADROOM;\n+\t\tif (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {\n+\t\t\tERROR(\"%p: too many SGEs (%u) needed to handle\"\n+\t\t\t      \" requested maximum packet size %u\",\n+\t\t\t      (void *)dev,\n+\t\t\t      1 << sges_n,\n+\t\t\t      dev->data->dev_conf.rxmode.max_rx_pkt_len);\n+\t\t\tgoto error;\n+\t\t}\n+\t} else {\n+\t\tWARN(\"%p: the requested maximum Rx packet size (%u) is\"\n+\t\t     \" larger than a single mbuf (%u) and scattered\"\n+\t\t     \" mode has not been requested\",\n+\t\t     (void *)dev,\n+\t\t     dev->data->dev_conf.rxmode.max_rx_pkt_len,\n+\t\t     mb_len - RTE_PKTMBUF_HEADROOM);\n+\t}\n+\tDEBUG(\"%p: maximum number of segments per packet: %u\",\n+\t      (void *)dev, 1 << tmpl->rxq.sges_n);\n+\tif (desc % (1 << tmpl->rxq.sges_n)) {\n+\t\tERROR(\"%p: number of RX queue descriptors (%u) is not a\"\n+\t\t      \" multiple of SGEs per packet (%u)\",\n+\t\t      (void *)dev,\n+\t\t      desc,\n+\t\t      1 << tmpl->rxq.sges_n);\n+\t\tgoto error;\n+\t}\n+\t/* Toggle RX checksum offload if hardware supports it. */\n+\tif (priv->hw_csum)\n+\t\ttmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;\n+\tif (priv->hw_csum_l2tun)\n+\t\ttmpl->rxq.csum_l2tun =\n+\t\t\t!!dev->data->dev_conf.rxmode.hw_ip_checksum;\n+\t/* Configure VLAN stripping. */\n+\ttmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&\n+\t\t\t       !!dev->data->dev_conf.rxmode.hw_vlan_strip);\n+\t/* By default, FCS (CRC) is stripped by hardware. */\n+\tif (dev->data->dev_conf.rxmode.hw_strip_crc) {\n+\t\ttmpl->rxq.crc_present = 0;\n+\t} else if (priv->hw_fcs_strip) {\n+\t\ttmpl->rxq.crc_present = 1;\n+\t} else {\n+\t\tWARN(\"%p: CRC stripping has been disabled but will still\"\n+\t\t     \" be performed by hardware, make sure MLNX_OFED and\"\n+\t\t     \" firmware are up to date\",\n+\t\t     (void *)dev);\n+\t\ttmpl->rxq.crc_present = 0;\n+\t}\n+\tDEBUG(\"%p: CRC stripping is %s, %u bytes will be subtracted from\"\n+\t      \" incoming frames to hide it\",\n+\t      (void *)dev,\n+\t      tmpl->rxq.crc_present ? \"disabled\" : \"enabled\",\n+\t      tmpl->rxq.crc_present << 2);\n+\t/* Save port ID. 
*/\n+\ttmpl->rxq.rss_hash = priv->rxqs_n > 1;\n+\ttmpl->rxq.port_id = dev->data->port_id;\n+\ttmpl->priv = priv;\n+\ttmpl->rxq.mp = mp;\n+\ttmpl->rxq.stats.idx = idx;\n+\ttmpl->rxq.elts_n = log2above(desc);\n+\ttmpl->rxq.elts =\n+\t\t(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);\n+\trte_atomic32_inc(&tmpl->refcnt);\n+\tDEBUG(\"%p: Rx queue %p: refcnt %d\", (void *)priv,\n+\t      (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));\n+\tLIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);\n+\treturn tmpl;\n+error:\n+\trte_free(tmpl);\n+\treturn NULL;\n+}\n+\n+/**\n+ * Get a Rx queue.\n+ *\n+ * @param priv\n+ *   Pointer to private structure.\n+ * @param idx\n+ *   TX queue index.\n+ *\n+ * @return\n+ *   A pointer to the queue if it exists.\n+ */\n+struct mlx5_rxq_ctrl*\n+mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl = NULL;\n+\n+\tif ((*priv->rxqs)[idx]) {\n+\t\trxq_ctrl = container_of((*priv->rxqs)[idx],\n+\t\t\t\t\tstruct mlx5_rxq_ctrl,\n+\t\t\t\t\trxq);\n+\n+\t\tmlx5_priv_rxq_ibv_get(priv, idx);\n+\t\trte_atomic32_inc(&rxq_ctrl->refcnt);\n+\t\tDEBUG(\"%p: Rx queue %p: refcnt %d\", (void *)priv,\n+\t\t      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));\n+\t}\n+\treturn rxq_ctrl;\n+}\n+\n+/**\n+ * Release a Rx queue.\n+ *\n+ * @param priv\n+ *   Pointer to private structure.\n+ * @param idx\n+ *   TX queue index.\n+ *\n+ * @return\n+ *   0 on success, errno value on failure.\n+ */\n+int\n+mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\n+\tif (!(*priv->rxqs)[idx])\n+\t\treturn 0;\n+\trxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);\n+\tassert(rxq_ctrl->priv);\n+\tif (rxq_ctrl->ibv) {\n+\t\tint ret;\n+\n+\t\tret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);\n+\t\tif (!ret)\n+\t\t\trxq_ctrl->ibv = NULL;\n+\t}\n+\tDEBUG(\"%p: Rx queue %p: refcnt %d\", (void *)priv,\n+\t      (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));\n+\tif (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {\n+\t\tLIST_REMOVE(rxq_ctrl, next);\n+\t\trte_free(rxq_ctrl);\n+\t\t(*priv->rxqs)[idx] = NULL;\n+\t\treturn 0;\n+\t}\n+\treturn EBUSY;\n+}\n+\n+/**\n+ * Verify if the queue can be released.\n+ *\n+ * @param priv\n+ *   Pointer to private structure.\n+ * @param idx\n+ *   TX queue index.\n+ *\n+ * @return\n+ *   1 if the queue can be released.\n+ */\n+int\n+mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\n+\tif (!(*priv->rxqs)[idx])\n+\t\treturn -1;\n+\trxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);\n+\treturn (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);\n+}\n+\n+/**\n+ * Verify the Rx Queue list is empty\n+ *\n+ * @param priv\n+ *  Pointer to private structure.\n+ *\n+ * @return the number of object not released.\n+ */\n+int\n+mlx5_priv_rxq_verify(struct priv *priv)\n+{\n+\tstruct mlx5_rxq_ctrl *rxq_ctrl;\n+\tint ret = 0;\n+\n+\tLIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {\n+\t\tDEBUG(\"%p: Rx Queue %p still referenced\", (void *)priv,\n+\t\t      (void *)rxq_ctrl);\n+\t\t++ret;\n+\t}\n+\treturn ret;\n+}\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex f3a2f41..57935cb 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -147,6 +147,8 @@ struct mlx5_rxq_ibv {\n \n /* RX queue control descriptor. */\n struct mlx5_rxq_ctrl {\n+\tLIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. 
*/\n+\trte_atomic32_t refcnt; /* Reference counter. */\n \tstruct priv *priv; /* Back pointer to private data. */\n \tstruct mlx5_rxq_ibv *ibv; /* Verbs elements. */\n \tstruct mlx5_rxq_data rxq; /* Data path structure. */\n@@ -335,6 +337,14 @@ struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);\n int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);\n int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);\n int mlx5_priv_rxq_ibv_verify(struct priv *);\n+struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,\n+\t\t\t\t\tuint16_t, unsigned int,\n+\t\t\t\t\tstruct rte_mempool *);\n+struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);\n+int mlx5_priv_rxq_release(struct priv *, uint16_t);\n+int mlx5_priv_rxq_releasable(struct priv *, uint16_t);\n+int mlx5_priv_rxq_verify(struct priv *);\n+int rxq_alloc_elts(struct mlx5_rxq_ctrl *);\n \n /* mlx5_txq.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 7a12768..a311499 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -79,6 +79,41 @@ priv_txq_start(struct priv *priv)\n \treturn -ret;\n }\n \n+static void\n+priv_rxq_stop(struct priv *priv)\n+{\n+\tunsigned int i;\n+\n+\tfor (i = 0; i != priv->rxqs_n; ++i)\n+\t\tmlx5_priv_rxq_release(priv, i);\n+}\n+\n+static int\n+priv_rxq_start(struct priv *priv)\n+{\n+\tunsigned int i;\n+\tint ret = 0;\n+\n+\tfor (i = 0; i != priv->rxqs_n; ++i) {\n+\t\tstruct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);\n+\n+\t\tif (!rxq_ctrl)\n+\t\t\tcontinue;\n+\t\tret = rxq_alloc_elts(rxq_ctrl);\n+\t\tif (ret)\n+\t\t\tgoto error;\n+\t\trxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);\n+\t\tif (!rxq_ctrl->ibv) {\n+\t\t\tret = ENOMEM;\n+\t\t\tgoto error;\n+\t\t}\n+\t}\n+\treturn -ret;\n+error:\n+\tpriv_rxq_stop(priv);\n+\treturn -ret;\n+}\n+\n /**\n  * DPDK callback to start the device.\n  *\n@@ -101,8 +136,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \t\treturn -E_RTE_SECONDARY;\n \n \tpriv_lock(priv);\n-\t/* Update Rx/Tx callback. */\n-\tpriv_dev_select_rx_function(priv, dev);\n \tDEBUG(\"%p: allocating and configuring hash RX queues\", (void *)dev);\n \trte_mempool_walk(mlx5_mp2mr_iter, priv);\n \terr = priv_txq_start(priv);\n@@ -113,6 +146,14 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \t}\n \t/* Update send callback. */\n \tpriv_dev_select_tx_function(priv, dev);\n+\terr = priv_rxq_start(priv);\n+\tif (err) {\n+\t\tERROR(\"%p: RXQ allocation failed: %s\",\n+\t\t      (void *)dev, strerror(err));\n+\t\tgoto error;\n+\t}\n+\t/* Update receive callback. */\n+\tpriv_dev_select_rx_function(priv, dev);\n \terr = priv_create_hash_rxqs(priv);\n \tif (!err)\n \t\terr = priv_rehash_flows(priv);\n@@ -147,6 +188,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)\n \tpriv_mac_addrs_disable(priv);\n \tpriv_destroy_hash_rxqs(priv);\n \tpriv_flow_stop(priv);\n+\tpriv_rxq_stop(priv);\n \tpriv_txq_stop(priv);\n \tpriv_unlock(priv);\n \treturn -err;\n@@ -183,6 +225,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)\n \tpriv_flow_stop(priv);\n \tpriv_rx_intr_vec_disable(priv);\n \tpriv_txq_stop(priv);\n+\tpriv_rxq_stop(priv);\n \tLIST_FOREACH(mr, &priv->mr, next) {\n \t\tpriv_mr_release(priv, mr);\n \t}\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "13/30"
    ]
}
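
The URLs embedded in the payload are directly dereferenceable. As a sketch under the same assumptions as above (field names taken from this response; the per-check "context" and "state" keys assume Patchwork's check schema), the "mbox" link returns the raw patch for use with git am, and the "checks" link expands the summary "check" field ("fail" here) into per-context CI results:

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/29704/").json()

# Save the raw mbox so the patch can be applied locally with `git am`.
with open("29704.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# List the per-context CI results behind the summary "check" field.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])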