get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.

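Before the raw request/response below, here is a minimal client-side sketch of reading this endpoint with Python's requests library. The base URL and patch id match the example that follows; the helper name fetch_patch and the timeout value are illustrative, not part of the Patchwork API.

# Minimal sketch: fetch a patch resource from the Patchwork REST API.
# Assumes the requests library is installed; URL and id match the example below.
import requests

BASE_URL = "http://patches.dpdk.org/api"

def fetch_patch(patch_id):
    """Return the decoded JSON for /api/patches/<id>/ (illustrative helper)."""
    resp = requests.get(f"{BASE_URL}/patches/{patch_id}/", timeout=30)
    resp.raise_for_status()
    return resp.json()

patch = fetch_patch(53893)
print(patch["name"], patch["state"])  # "[v2,2/3] net/af_xdp: add multi-queue support" accepted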
GET /api/patches/53893/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53893,
    "url": "http://patches.dpdk.org/api/patches/53893/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190530090707.36290-3-xiaolong.ye@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190530090707.36290-3-xiaolong.ye@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190530090707.36290-3-xiaolong.ye@intel.com",
    "date": "2019-05-30T09:07:06",
    "name": "[v2,2/3] net/af_xdp: add multi-queue support",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "417640fd1f1e74fdc786aab25a9cec35e42dcab5",
    "submitter": {
        "id": 1120,
        "url": "http://patches.dpdk.org/api/people/1120/?format=api",
        "name": "Xiaolong Ye",
        "email": "xiaolong.ye@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190530090707.36290-3-xiaolong.ye@intel.com/mbox/",
    "series": [
        {
            "id": 4822,
            "url": "http://patches.dpdk.org/api/series/4822/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4822",
            "date": "2019-05-30T09:07:04",
            "name": "add more features for AF_XDP pmd",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/4822/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53893/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/53893/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2B57C1B959;\n\tThu, 30 May 2019 11:16:03 +0200 (CEST)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n\tby dpdk.org (Postfix) with ESMTP id AFDEF1B951\n\tfor <dev@dpdk.org>; Thu, 30 May 2019 11:15:57 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n\tby fmsmga105.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t30 May 2019 02:15:57 -0700",
            "from yexl-server.sh.intel.com (HELO\n\tNPG-DPDK-XDP-yexl-server.sh.intel.com) ([10.67.110.206])\n\tby fmsmga004.fm.intel.com with ESMTP; 30 May 2019 02:15:56 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Xiaolong Ye <xiaolong.ye@intel.com>",
        "To": "Xiaolong Ye <xiaolong.ye@intel.com>, Qi Zhang <qi.z.zhang@intel.com>,\n\tJohn McNamara <john.mcnamara@intel.com>,\n\tMarko Kovacevic <marko.kovacevic@intel.com>",
        "Cc": "Karlsson Magnus <magnus.karlsson@intel.com>,\n\tTopel Bjorn <bjorn.topel@intel.com>, dev@dpdk.org",
        "Date": "Thu, 30 May 2019 17:07:06 +0800",
        "Message-Id": "<20190530090707.36290-3-xiaolong.ye@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190530090707.36290-1-xiaolong.ye@intel.com>",
        "References": "<20190515083842.15116-1-xiaolong.ye@intel.com>\n\t<20190530090707.36290-1-xiaolong.ye@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 2/3] net/af_xdp: add multi-queue support",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds two parameters `start_queue` and `queue_count` to\nspecify the range of netdev queues used by AF_XDP pmd.\n\nSigned-off-by: Xiaolong Ye <xiaolong.ye@intel.com>\n---\n doc/guides/nics/af_xdp.rst          |   3 +-\n drivers/net/af_xdp/rte_eth_af_xdp.c | 194 ++++++++++++++++++++--------\n 2 files changed, 141 insertions(+), 56 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/af_xdp.rst b/doc/guides/nics/af_xdp.rst\nindex 0bd4239fe..18defcda3 100644\n--- a/doc/guides/nics/af_xdp.rst\n+++ b/doc/guides/nics/af_xdp.rst\n@@ -27,7 +27,8 @@ Options\n The following options can be provided to set up an af_xdp port in DPDK.\n \n *   ``iface`` - name of the Kernel interface to attach to (required);\n-*   ``queue`` - netdev queue id (optional, default 0);\n+*   ``start_queue`` - starting netdev queue id (optional, default 0);\n+*   ``queue_count`` - total netdev queue number (optional, default 1);\n *   ``pmd_zero_copy`` - enable zero copy or not (optional, default 0);\n \n Prerequisites\ndiff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex 014cd5691..f56aabcae 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -12,6 +12,8 @@\n #include <linux/if_ether.h>\n #include <linux/if_xdp.h>\n #include <linux/if_link.h>\n+#include <linux/ethtool.h>\n+#include <linux/sockios.h>\n #include \"af_xdp_deps.h\"\n #include <bpf/xsk.h>\n \n@@ -57,12 +59,12 @@ static int af_xdp_logtype;\n #define ETH_AF_XDP_NUM_BUFFERS\t\t4096\n #define ETH_AF_XDP_DATA_HEADROOM\t0\n #define ETH_AF_XDP_DFLT_NUM_DESCS\tXSK_RING_CONS__DEFAULT_NUM_DESCS\n-#define ETH_AF_XDP_DFLT_QUEUE_IDX\t0\n+#define ETH_AF_XDP_DFLT_START_QUEUE_IDX\t0\n+#define ETH_AF_XDP_DFLT_QUEUE_COUNT\t1\n \n #define ETH_AF_XDP_RX_BATCH_SIZE\t32\n #define ETH_AF_XDP_TX_BATCH_SIZE\t32\n \n-#define ETH_AF_XDP_MAX_QUEUE_PAIRS     16\n \n struct xsk_umem_info {\n \tstruct xsk_ring_prod fq;\n@@ -88,7 +90,7 @@ struct pkt_rx_queue {\n \tstruct rx_stats stats;\n \n \tstruct pkt_tx_queue *pair;\n-\tuint16_t queue_idx;\n+\tint xsk_queue_idx;\n };\n \n struct tx_stats {\n@@ -103,28 +105,34 @@ struct pkt_tx_queue {\n \tstruct tx_stats stats;\n \n \tstruct pkt_rx_queue *pair;\n-\tuint16_t queue_idx;\n+\tint xsk_queue_idx;\n };\n \n struct pmd_internals {\n \tint if_index;\n \tchar if_name[IFNAMSIZ];\n-\tuint16_t queue_idx;\n+\tint start_queue_idx;\n+\tint queue_cnt;\n+\tint max_queue_cnt;\n+\tint combined_queue_cnt;\n+\n \tint pmd_zc;\n \tstruct ether_addr eth_addr;\n \tstruct rte_mempool *mb_pool_share;\n \n-\tstruct pkt_rx_queue rx_queues[ETH_AF_XDP_MAX_QUEUE_PAIRS];\n-\tstruct pkt_tx_queue tx_queues[ETH_AF_XDP_MAX_QUEUE_PAIRS];\n+\tstruct pkt_rx_queue *rx_queues;\n+\tstruct pkt_tx_queue *tx_queues;\n };\n \n #define ETH_AF_XDP_IFACE_ARG\t\t\t\"iface\"\n-#define ETH_AF_XDP_QUEUE_IDX_ARG\t\t\"queue\"\n+#define ETH_AF_XDP_START_QUEUE_ARG\t\t\"start_queue\"\n+#define ETH_AF_XDP_QUEUE_COUNT_ARG\t\t\"queue_count\"\n #define ETH_AF_XDP_PMD_ZC_ARG\t\t\t\"pmd_zero_copy\"\n \n static const char * const valid_arguments[] = {\n \tETH_AF_XDP_IFACE_ARG,\n-\tETH_AF_XDP_QUEUE_IDX_ARG,\n+\tETH_AF_XDP_START_QUEUE_ARG,\n+\tETH_AF_XDP_QUEUE_COUNT_ARG,\n \tETH_AF_XDP_PMD_ZC_ARG,\n \tNULL\n };\n@@ -394,8 +402,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n \tdev_info->if_index = internals->if_index;\n \tdev_info->max_mac_addrs = 1;\n \tdev_info->max_rx_pktlen = ETH_FRAME_LEN;\n-\tdev_info->max_rx_queues = 1;\n-\tdev_info->max_tx_queues = 1;\n+\tdev_info->max_rx_queues = internals->queue_cnt;\n+\tdev_info->max_tx_queues = internals->queue_cnt;\n \n \tdev_info->min_mtu = ETHER_MIN_MTU;\n \tdev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;\n@@ -412,21 +420,23 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n \tstruct pmd_internals *internals = dev->data->dev_private;\n 
\tstruct xdp_statistics xdp_stats;\n \tstruct pkt_rx_queue *rxq;\n+\tstruct pkt_tx_queue *txq;\n \tsocklen_t optlen;\n \tint i, ret;\n \n \tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n \t\toptlen = sizeof(struct xdp_statistics);\n \t\trxq = &internals->rx_queues[i];\n-\t\tstats->q_ipackets[i] = internals->rx_queues[i].stats.rx_pkts;\n-\t\tstats->q_ibytes[i] = internals->rx_queues[i].stats.rx_bytes;\n+\t\ttxq = rxq->pair;\n+\t\tstats->q_ipackets[i] = rxq->stats.rx_pkts;\n+\t\tstats->q_ibytes[i] = rxq->stats.rx_bytes;\n \n-\t\tstats->q_opackets[i] = internals->tx_queues[i].stats.tx_pkts;\n-\t\tstats->q_obytes[i] = internals->tx_queues[i].stats.tx_bytes;\n+\t\tstats->q_opackets[i] = txq->stats.tx_pkts;\n+\t\tstats->q_obytes[i] = txq->stats.tx_bytes;\n \n \t\tstats->ipackets += stats->q_ipackets[i];\n \t\tstats->ibytes += stats->q_ibytes[i];\n-\t\tstats->imissed += internals->rx_queues[i].stats.rx_dropped;\n+\t\tstats->imissed += rxq->stats.rx_dropped;\n \t\tret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,\n \t\t\t\tXDP_STATISTICS, &xdp_stats, &optlen);\n \t\tif (ret != 0) {\n@@ -436,7 +446,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)\n \t\tstats->imissed += xdp_stats.rx_dropped;\n \n \t\tstats->opackets += stats->q_opackets[i];\n-\t\tstats->oerrors += internals->tx_queues[i].stats.err_pkts;\n+\t\tstats->oerrors += txq->stats.err_pkts;\n \t\tstats->obytes += stats->q_obytes[i];\n \t}\n \n@@ -449,7 +459,7 @@ eth_stats_reset(struct rte_eth_dev *dev)\n \tstruct pmd_internals *internals = dev->data->dev_private;\n \tint i;\n \n-\tfor (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {\n+\tfor (i = 0; i < internals->queue_cnt; i++) {\n \t\tmemset(&internals->rx_queues[i].stats, 0,\n \t\t\t\t\tsizeof(struct rx_stats));\n \t\tmemset(&internals->tx_queues[i].stats, 0,\n@@ -494,13 +504,17 @@ eth_dev_close(struct rte_eth_dev *dev)\n \tAF_XDP_LOG(INFO, \"Closing AF_XDP ethdev on numa socket %u\\n\",\n \t\trte_socket_id());\n \n-\tfor (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {\n+\tfor (i = 0; i < internals->queue_cnt; i++) {\n \t\trxq = &internals->rx_queues[i];\n \t\tif (rxq->umem == NULL)\n \t\t\tbreak;\n \t\txsk_socket__delete(rxq->xsk);\n \t\t(void)xsk_umem__delete(rxq->umem->umem);\n \t\txdp_umem_destroy(rxq->umem);\n+\n+\t\t/* free pkt_tx_queue */\n+\t\trte_free(rxq->pair);\n+\t\trte_free(rxq);\n \t}\n \n \t/*\n@@ -525,7 +539,8 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,\n }\n \n static struct\n-xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)\n+xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,\n+\t\t\t\t  struct pkt_rx_queue *rxq)\n {\n \tstruct xsk_umem_info *umem;\n \tconst struct rte_memzone *mz;\n@@ -546,7 +561,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)\n \t}\n \n \tsnprintf(ring_name, sizeof(ring_name), \"af_xdp_ring_%s_%u\",\n-\t\t       internals->if_name, internals->queue_idx);\n+\t\t       internals->if_name, rxq->xsk_queue_idx);\n \tumem->buf_ring = rte_ring_create(ring_name,\n \t\t\t\t\t ETH_AF_XDP_NUM_BUFFERS,\n \t\t\t\t\t rte_socket_id(),\n@@ -562,7 +577,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)\n \t\t\t\t\t  ETH_AF_XDP_DATA_HEADROOM));\n \n \tsnprintf(mz_name, sizeof(mz_name), \"af_xdp_umem_%s_%u\",\n-\t\t       internals->if_name, internals->queue_idx);\n+\t\t       internals->if_name, rxq->xsk_queue_idx);\n \tmz = rte_memzone_reserve_aligned(mz_name,\n \t\t\tETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,\n \t\t\trte_socket_id(), 
RTE_MEMZONE_IOVA_CONTIG,\n@@ -599,7 +614,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \tint ret = 0;\n \tint reserve_size;\n \n-\trxq->umem = xdp_umem_configure(internals);\n+\trxq->umem = xdp_umem_configure(internals, rxq);\n \tif (rxq->umem == NULL)\n \t\treturn -ENOMEM;\n \n@@ -609,7 +624,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \tcfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;\n \tcfg.bind_flags = 0;\n \tret = xsk_socket__create(&rxq->xsk, internals->if_name,\n-\t\t\tinternals->queue_idx, rxq->umem->umem, &rxq->rx,\n+\t\t\trxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,\n \t\t\t&txq->tx, &cfg);\n \tif (ret) {\n \t\tAF_XDP_LOG(ERR, \"Failed to create xsk socket.\\n\");\n@@ -632,20 +647,6 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \treturn ret;\n }\n \n-static void\n-queue_reset(struct pmd_internals *internals, uint16_t queue_idx)\n-{\n-\tstruct pkt_rx_queue *rxq = &internals->rx_queues[queue_idx];\n-\tstruct pkt_tx_queue *txq = rxq->pair;\n-\n-\tmemset(rxq, 0, sizeof(*rxq));\n-\tmemset(txq, 0, sizeof(*txq));\n-\trxq->pair = txq;\n-\ttxq->pair = rxq;\n-\trxq->queue_idx = queue_idx;\n-\ttxq->queue_idx = queue_idx;\n-}\n-\n static int\n eth_rx_queue_setup(struct rte_eth_dev *dev,\n \t\t   uint16_t rx_queue_id,\n@@ -660,8 +661,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \tint ret;\n \n \trxq = &internals->rx_queues[rx_queue_id];\n-\tqueue_reset(internals, rx_queue_id);\n \n+\tAF_XDP_LOG(INFO, \"Set up rx queue, rx queue id: %d, xsk queue id: %d\\n\",\n+\t\t   rx_queue_id, rxq->xsk_queue_idx);\n \t/* Now get the space available for data in the mbuf */\n \tbuf_size = rte_pktmbuf_data_room_size(mb_pool) -\n \t\tRTE_PKTMBUF_HEADROOM;\n@@ -688,7 +690,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \treturn 0;\n \n err:\n-\tqueue_reset(internals, rx_queue_id);\n \treturn ret;\n }\n \n@@ -818,8 +819,45 @@ parse_name_arg(const char *key __rte_unused,\n }\n \n static int\n-parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,\n-\t\t\t\tint *pmd_zc)\n+xdp_get_channels_info(const char *if_name, int *max_queues,\n+\t\t\t\tint *combined_queues)\n+{\n+\tstruct ethtool_channels channels;\n+\tstruct ifreq ifr;\n+\tint fd, ret;\n+\n+\tfd = socket(AF_INET, SOCK_DGRAM, 0);\n+\tif (fd < 0)\n+\t\treturn -1;\n+\n+\tchannels.cmd = ETHTOOL_GCHANNELS;\n+\tifr.ifr_data = (void *)&channels;\n+\tstrncpy(ifr.ifr_name, if_name, IFNAMSIZ);\n+\tret = ioctl(fd, SIOCETHTOOL, &ifr);\n+\tif (ret && errno != EOPNOTSUPP) {\n+\t\tret = -errno;\n+\t\tgoto out;\n+\t}\n+\n+\tif (channels.max_combined == 0 || errno == EOPNOTSUPP) {\n+\t\t/* If the device says it has no channels, then all traffic\n+\t\t * is sent to a single stream, so max queues = 1.\n+\t\t */\n+\t\t*max_queues = 1;\n+\t\t*combined_queues = 1;\n+\t} else {\n+\t\t*max_queues = channels.max_combined;\n+\t\t*combined_queues = channels.combined_count;\n+\t}\n+\n+ out:\n+\tclose(fd);\n+\treturn ret;\n+}\n+\n+static int\n+parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,\n+\t\t\tint *queue_cnt, int *pmd_zc)\n {\n \tint ret;\n \n@@ -828,11 +866,18 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,\n \tif (ret < 0)\n \t\tgoto free_kvlist;\n \n-\tret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_IDX_ARG,\n-\t\t\t\t &parse_integer_arg, queue_idx);\n+\tret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,\n+\t\t\t\t &parse_integer_arg, start_queue);\n \tif (ret < 0)\n \t\tgoto 
free_kvlist;\n \n+\tret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,\n+\t\t\t\t &parse_integer_arg, queue_cnt);\n+\tif (ret < 0 || *queue_cnt <= 0) {\n+\t\tret = -EINVAL;\n+\t\tgoto free_kvlist;\n+\t}\n+\n \tret = rte_kvargs_process(kvlist, ETH_AF_XDP_PMD_ZC_ARG,\n \t\t\t\t &parse_integer_arg, pmd_zc);\n \tif (ret < 0)\n@@ -874,8 +919,8 @@ get_iface_info(const char *if_name,\n }\n \n static struct rte_eth_dev *\n-init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,\n-\t\t\t\t\tint pmd_zc)\n+init_internals(struct rte_vdev_device *dev, const char *if_name,\n+\t\t\tint start_queue_idx, int queue_cnt, int pmd_zc)\n {\n \tconst char *name = rte_vdev_device_name(dev);\n \tconst unsigned int numa_node = dev->device.numa_node;\n@@ -888,23 +933,54 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,\n \tif (internals == NULL)\n \t\treturn NULL;\n \n-\tinternals->queue_idx = queue_idx;\n+\tinternals->start_queue_idx = start_queue_idx;\n+\tinternals->queue_cnt = queue_cnt;\n \tinternals->pmd_zc = pmd_zc;\n \tstrlcpy(internals->if_name, if_name, IFNAMSIZ);\n \n-\tfor (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {\n+\tif (xdp_get_channels_info(if_name, &internals->max_queue_cnt,\n+\t\t\t\t  &internals->combined_queue_cnt)) {\n+\t\tAF_XDP_LOG(ERR, \"Failed to get channel info of interface: %s\\n\",\n+\t\t\t\tif_name);\n+\t\tgoto err_free_internals;\n+\t}\n+\n+\tif (queue_cnt > internals->combined_queue_cnt) {\n+\t\tAF_XDP_LOG(ERR, \"Specified queue count %d is larger than combined queue count %d.\\n\",\n+\t\t\t\tqueue_cnt, internals->combined_queue_cnt);\n+\t\tgoto err_free_internals;\n+\t}\n+\n+\tinternals->rx_queues = rte_zmalloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct pkt_rx_queue) * queue_cnt,\n+\t\t\t\t\t0, numa_node);\n+\tif (internals->rx_queues == NULL) {\n+\t\tAF_XDP_LOG(ERR, \"Failed to allocate memory for rx queues.\\n\");\n+\t\tgoto err_free_internals;\n+\t}\n+\n+\tinternals->tx_queues = rte_zmalloc_socket(NULL,\n+\t\t\t\t\tsizeof(struct pkt_tx_queue) * queue_cnt,\n+\t\t\t\t\t0, numa_node);\n+\tif (internals->tx_queues == NULL) {\n+\t\tAF_XDP_LOG(ERR, \"Failed to allocate memory for tx queues.\\n\");\n+\t\tgoto err_free_rx;\n+\t}\n+\tfor (i = 0; i < queue_cnt; i++) {\n \t\tinternals->tx_queues[i].pair = &internals->rx_queues[i];\n \t\tinternals->rx_queues[i].pair = &internals->tx_queues[i];\n+\t\tinternals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;\n+\t\tinternals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;\n \t}\n \n \tret = get_iface_info(if_name, &internals->eth_addr,\n \t\t\t     &internals->if_index);\n \tif (ret)\n-\t\tgoto err;\n+\t\tgoto err_free_tx;\n \n \teth_dev = rte_eth_vdev_allocate(dev, 0);\n \tif (eth_dev == NULL)\n-\t\tgoto err;\n+\t\tgoto err_free_tx;\n \n \teth_dev->data->dev_private = internals;\n \teth_dev->data->dev_link = pmd_link;\n@@ -920,7 +996,11 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,\n \n \treturn eth_dev;\n \n-err:\n+err_free_tx:\n+\trte_free(internals->tx_queues);\n+err_free_rx:\n+\trte_free(internals->rx_queues);\n+err_free_internals:\n \trte_free(internals);\n \treturn NULL;\n }\n@@ -930,7 +1010,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n {\n \tstruct rte_kvargs *kvlist;\n \tchar if_name[IFNAMSIZ] = {'\\0'};\n-\tint xsk_queue_idx = ETH_AF_XDP_DFLT_QUEUE_IDX;\n+\tint xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;\n+\tint xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;\n \tstruct rte_eth_dev *eth_dev = NULL;\n \tconst 
char *name;\n \tint pmd_zc = 0;\n@@ -960,7 +1041,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \tif (dev->device.numa_node == SOCKET_ID_ANY)\n \t\tdev->device.numa_node = rte_socket_id();\n \n-\tif (parse_parameters(kvlist, if_name, &xsk_queue_idx, &pmd_zc) < 0) {\n+\tif (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,\n+\t\t\t     &xsk_queue_cnt, &pmd_zc) < 0) {\n \t\tAF_XDP_LOG(ERR, \"Invalid kvargs value\\n\");\n \t\treturn -EINVAL;\n \t}\n@@ -970,7 +1052,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \t\treturn -EINVAL;\n \t}\n \n-\teth_dev = init_internals(dev, if_name, xsk_queue_idx, pmd_zc);\n+\teth_dev = init_internals(dev, if_name, xsk_start_queue_idx,\n+\t\t\t\t\txsk_queue_cnt, pmd_zc);\n \tif (eth_dev == NULL) {\n \t\tAF_XDP_LOG(ERR, \"Failed to init internals\\n\");\n \t\treturn -1;\n@@ -1012,7 +1095,8 @@ static struct rte_vdev_driver pmd_af_xdp_drv = {\n RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);\n RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,\n \t\t\t      \"iface=<string> \"\n-\t\t\t      \"queue=<int> \"\n+\t\t\t      \"start_queue=<int> \"\n+\t\t\t      \"queue_count=<int> \"\n \t\t\t      \"pmd_zero_copy=<0|1>\");\n \n RTE_INIT(af_xdp_init_log)\n",
    "prefixes": [
        "v2",
        "2/3"
    ]
}
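As a usage sketch, not part of the response above: URLs embedded in the payload (such as "mbox") can be fetched directly, and the PATCH/PUT methods listed at the top require an authenticated account. The "Authorization: Token ..." header below is an assumption based on standard Django REST Framework token auth, and the state value is only an example; check the instance's API documentation and your account permissions before relying on them.

# Sketch: reuse fields from the JSON payload above and perform a partial update.
import requests

patch = requests.get("http://patches.dpdk.org/api/patches/53893/", timeout=30).json()

# The "mbox" field is a plain URL; downloading it yields the patch as an mbox file.
mbox_text = requests.get(patch["mbox"], timeout=30).text

# Hypothetical partial update (PATCH changes only the supplied fields). Token auth
# is an assumption; write access also needs maintainer rights on the project.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/53893/",
    json={"state": "accepted"},
    headers={"Authorization": "Token <api-token>"},  # placeholder, not a real token
    timeout=30,
)
resp.raise_for_status()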