get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/13744/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 13744,
    "url": "http://patches.dpdk.org/api/patches/13744/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1465948557-14753-1-git-send-email-neescoba@cisco.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1465948557-14753-1-git-send-email-neescoba@cisco.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1465948557-14753-1-git-send-email-neescoba@cisco.com",
    "date": "2016-06-14T23:55:57",
    "name": "[dpdk-dev] enic: scattered Rx",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "fbc820bf93e5f946500ae16c12c96fe4709ff41a",
    "submitter": {
        "id": 453,
        "url": "http://patches.dpdk.org/api/people/453/?format=api",
        "name": "Nelson Escobar",
        "email": "neescoba@cisco.com"
    },
    "delegate": {
        "id": 10,
        "url": "http://patches.dpdk.org/api/users/10/?format=api",
        "username": "bruce",
        "first_name": "Bruce",
        "last_name": "Richardson",
        "email": "bruce.richardson@intel.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1465948557-14753-1-git-send-email-neescoba@cisco.com/mbox/",
    "series": [],
    "comments": "http://patches.dpdk.org/api/patches/13744/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/13744/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [IPv6:::1])\n\tby dpdk.org (Postfix) with ESMTP id F18F9ADAA;\n\tWed, 15 Jun 2016 01:56:07 +0200 (CEST)",
            "from rcdn-iport-1.cisco.com (rcdn-iport-1.cisco.com [173.37.86.72])\n\tby dpdk.org (Postfix) with ESMTP id B82B6ADAA\n\tfor <dev@dpdk.org>; Wed, 15 Jun 2016 01:56:05 +0200 (CEST)",
            "from alln-core-7.cisco.com ([173.36.13.140])\n\tby rcdn-iport-1.cisco.com with ESMTP/TLS/DHE-RSA-AES256-SHA;\n\t14 Jun 2016 23:56:05 +0000",
            "from cisco.com (savbu-usnic-a.cisco.com [10.193.184.48])\n\tby alln-core-7.cisco.com (8.14.5/8.14.5) with ESMTP id u5ENu467008533;\n\tTue, 14 Jun 2016 23:56:04 GMT",
            "by cisco.com (Postfix, from userid 412739)\n\tid AF7083FAAE36; Tue, 14 Jun 2016 16:56:04 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n\td=cisco.com; i=@cisco.com; l=27227; q=dns/txt;\n\ts=iport; t=1465948565; x=1467158165;\n\th=from:to:cc:subject:date:message-id;\n\tbh=VV247lw2HkvS1Zm29CWso1vc2hUroeHQD56ty+QHhLU=;\n\tb=Th/X2iJYiyvUMKJEIgBYJmpfNKH7vjxvn+C0Oml2rSQTpcmS73wlZSEL\n\tLX5SSjdZZ1sTQT5DkeDdNRv4RIYkEUAEIhh26xAPdPNF+DuQocKqQa1tQ\n\tPm2Dw8Ej8hgK98koFyek+8cZsXER+mkvQwUv50YkJxnf0QWdUcdhTKrst U=;",
        "X-IronPort-AV": "E=Sophos;i=\"5.26,473,1459814400\"; d=\"scan'208\";a=\"118810485\"",
        "From": "Nelson Escobar <neescoba@cisco.com>",
        "To": "dev@dpdk.org",
        "Cc": "bruce.richardson@intel.com, johndale@cisco.com,\n\tNelson Escobar <neescoba@cisco.com>",
        "Date": "Tue, 14 Jun 2016 16:55:57 -0700",
        "Message-Id": "<1465948557-14753-1-git-send-email-neescoba@cisco.com>",
        "X-Mailer": "git-send-email 2.7.0",
        "Subject": "[dpdk-dev] [PATCH] enic: scattered Rx",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "patches and discussions about DPDK <dev.dpdk.org>",
        "List-Unsubscribe": "<http://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<http://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "For performance reasons, this patch uses 2 VIC RQs per RQ presented to\nDPDK.\n\nThe VIC requires that each descriptor be marked as either a start of\npacket (SOP) descriptor or a non-SOP descriptor.  A one RQ solution\nrequires skipping descriptors when receiving small packets and results\nin bad performance when receiving many small packets.\n\nThe 2 RQ solution makes use of the VIC feature that allows a receive\non primary queue to 'spill over' into another queue if the receive is\ntoo large to fit in the buffer assigned to the descriptor on the\nprimary queue.  This means that there is no skipping of descriptors\nwhen receiving small packets and results in much better performance.\n\nSigned-off-by: Nelson Escobar <neescoba@cisco.com>\nReviewed-by: John Daley <johndale@cisco.com>\n---\n doc/guides/nics/overview.rst         |   2 +-\n drivers/net/enic/base/rq_enet_desc.h |   2 +-\n drivers/net/enic/base/vnic_rq.c      |   8 +-\n drivers/net/enic/base/vnic_rq.h      |  18 ++-\n drivers/net/enic/enic.h              |  22 ++-\n drivers/net/enic/enic_ethdev.c       |  10 +-\n drivers/net/enic/enic_main.c         | 277 +++++++++++++++++++++++++++--------\n drivers/net/enic/enic_res.c          |   5 +-\n drivers/net/enic/enic_rxtx.c         | 137 +++++++++++------\n 9 files changed, 358 insertions(+), 123 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst\nindex 2200171..d0ae847 100644\n--- a/doc/guides/nics/overview.rst\n+++ b/doc/guides/nics/overview.rst\n@@ -94,7 +94,7 @@ Most of these differences are summarized below.\n    Queue start/stop             Y   Y Y Y Y Y Y     Y Y     Y Y Y Y Y Y               Y   Y Y\n    MTU update                   Y Y Y           Y   Y Y Y Y         Y Y\n    Jumbo frame                  Y Y Y Y Y Y Y Y Y   Y Y Y Y Y Y Y Y Y Y       Y Y Y\n-   Scattered Rx                 Y Y Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y               Y   Y\n+   Scattered Rx                 Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y               Y   Y\n    LRO                                              Y Y Y Y\n    TSO                          Y   Y   Y Y Y Y Y Y Y Y Y Y Y Y Y Y\n    Promiscuous mode       Y Y   Y Y   Y Y Y Y Y Y Y Y Y     Y Y     Y Y         Y Y   Y   Y Y\ndiff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h\nindex 7292d9d..13e24b4 100644\n--- a/drivers/net/enic/base/rq_enet_desc.h\n+++ b/drivers/net/enic/base/rq_enet_desc.h\n@@ -55,7 +55,7 @@ enum rq_enet_type_types {\n #define RQ_ENET_TYPE_BITS\t\t2\n #define RQ_ENET_TYPE_MASK\t\t((1 << RQ_ENET_TYPE_BITS) - 1)\n \n-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,\n+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,\n \tu64 address, u8 type, u16 length)\n {\n \tdesc->address = cpu_to_le64(address);\ndiff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c\nindex cb62c5e..0e700a1 100644\n--- a/drivers/net/enic/base/vnic_rq.c\n+++ b/drivers/net/enic/base/vnic_rq.c\n@@ -84,11 +84,12 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,\n \tiowrite32(cq_index, &rq->ctrl->cq_index);\n \tiowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);\n \tiowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);\n-\tiowrite32(0, 
&rq->ctrl->dropped_packet_count);\n \tiowrite32(0, &rq->ctrl->error_status);\n \tiowrite32(fetch_index, &rq->ctrl->fetch_index);\n \tiowrite32(posted_index, &rq->ctrl->posted_index);\n-\n+\tif (rq->is_sop)\n+\t\tiowrite32(((rq->is_sop << 10) | rq->data_queue_idx),\n+\t\t\t  &rq->ctrl->data_ring);\n }\n \n void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,\n@@ -96,6 +97,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,\n \tunsigned int error_interrupt_offset)\n {\n \tu32 fetch_index = 0;\n+\n \t/* Use current fetch_index as the ring starting point */\n \tfetch_index = ioread32(&rq->ctrl->fetch_index);\n \n@@ -110,6 +112,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,\n \t\terror_interrupt_offset);\n \trq->rxst_idx = 0;\n \trq->tot_pkts = 0;\n+\trq->pkt_first_seg = NULL;\n+\trq->pkt_last_seg = NULL;\n }\n \n void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)\ndiff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h\nindex e083ccc..fd9e170 100644\n--- a/drivers/net/enic/base/vnic_rq.h\n+++ b/drivers/net/enic/base/vnic_rq.h\n@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {\n \tu32 pad7;\n \tu32 error_status;\t\t/* 0x48 */\n \tu32 pad8;\n-\tu32 dropped_packet_count;\t/* 0x50 */\n+\tu32 tcp_sn;\t\t\t/* 0x50 */\n \tu32 pad9;\n-\tu32 dropped_packet_count_rc;\t/* 0x58 */\n+\tu32 unused;\t\t\t/* 0x58 */\n \tu32 pad10;\n+\tu32 dca_select;\t\t\t/* 0x60 */\n+\tu32 pad11;\n+\tu32 dca_value;\t\t\t/* 0x68 */\n+\tu32 pad12;\n+\tu32 data_ring;\t\t\t/* 0x70 */\n+\tu32 pad13;\n+\tu32 header_split;\t\t/* 0x78 */\n+\tu32 pad14;\n };\n \n struct vnic_rq {\n@@ -82,6 +90,12 @@ struct vnic_rq {\n \tstruct rte_mempool *mp;\n \tuint16_t rxst_idx;\n \tuint32_t tot_pkts;\n+\tuint16_t data_queue_idx;\n+\tuint8_t is_sop;\n+\tuint8_t in_use;\n+\tstruct rte_mbuf *pkt_first_seg;\n+\tstruct rte_mbuf *pkt_last_seg;\n+\tunsigned int max_mbufs_per_pkt;\n };\n \n static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)\ndiff 
--git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h\nindex 9f94afb..fce75af 100644\n--- a/drivers/net/enic/enic.h\n+++ b/drivers/net/enic/enic.h\n@@ -55,8 +55,11 @@\n #define DRV_COPYRIGHT\t\t\"Copyright 2008-2015 Cisco Systems, Inc\"\n \n #define ENIC_WQ_MAX\t\t8\n-#define ENIC_RQ_MAX\t\t8\n-#define ENIC_CQ_MAX\t\t(ENIC_WQ_MAX + ENIC_RQ_MAX)\n+/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both\n+ * RQs use the same CQ.\n+ */\n+#define ENIC_RQ_MAX\t\t16\n+#define ENIC_CQ_MAX\t\t(ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))\n #define ENIC_INTR_MAX\t\t(ENIC_CQ_MAX + 2)\n \n #define VLAN_ETH_HLEN           18\n@@ -154,6 +157,21 @@ struct enic {\n \n };\n \n+static inline unsigned int enic_sop_rq(unsigned int rq)\n+{\n+\treturn rq * 2;\n+}\n+\n+static inline unsigned int enic_data_rq(unsigned int rq)\n+{\n+\treturn rq * 2 + 1;\n+}\n+\n+static inline unsigned int enic_vnic_rq_count(struct enic *enic)\n+{\n+\treturn (enic->rq_count * 2);\n+}\n+\n static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)\n {\n \treturn rq;\ndiff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c\nindex 003dec0..83048d8 100644\n--- a/drivers/net/enic/enic_ethdev.c\n+++ b/drivers/net/enic/enic_ethdev.c\n@@ -269,14 +269,18 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,\n \tstruct enic *enic = pmd_priv(eth_dev);\n \n \tENICPMD_FUNC_TRACE();\n-\tif (queue_idx >= ENIC_RQ_MAX) {\n+\t/* With Rx scatter support, two RQs are now used on VIC per RQ used\n+\t * by the application.\n+\t */\n+\tif (queue_idx * 2 >= ENIC_RQ_MAX) {\n \t\tdev_err(enic,\n-\t\t\t\"Max number of RX queues exceeded.  Max is %d\\n\",\n+\t\t\t\"Max number of RX queues exceeded.  Max is %d. 
This PMD uses 2 RQs on VIC per RQ used by DPDK.\\n\",\n \t\t\tENIC_RQ_MAX);\n \t\treturn -EINVAL;\n \t}\n \n-\teth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];\n+\teth_dev->data->rx_queues[queue_idx] =\n+\t\t(void *)&enic->rq[enic_sop_rq(queue_idx)];\n \n \tret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);\n \tif (ret) {\ndiff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c\nindex 9b6fe36..8b1d7ff 100644\n--- a/drivers/net/enic/enic_main.c\n+++ b/drivers/net/enic/enic_main.c\n@@ -122,7 +122,7 @@ static void enic_log_q_error(struct enic *enic)\n \t\t\t\terror_status);\n \t}\n \n-\tfor (i = 0; i < enic->rq_count; i++) {\n+\tfor (i = 0; i < enic_vnic_rq_count(enic); i++) {\n \t\terror_status = vnic_rq_error_status(&enic->rq[i]);\n \t\tif (error_status)\n \t\t\tdev_err(enic, \"RQ[%d] error_status %d\\n\", i,\n@@ -235,13 +235,21 @@ void enic_init_vnic_resources(struct enic *enic)\n \tunsigned int error_interrupt_offset = 0;\n \tunsigned int index = 0;\n \tunsigned int cq_idx;\n+\tstruct vnic_rq *data_rq;\n \n \tfor (index = 0; index < enic->rq_count; index++) {\n-\t\tvnic_rq_init(&enic->rq[index],\n+\t\tvnic_rq_init(&enic->rq[enic_sop_rq(index)],\n \t\t\tenic_cq_rq(enic, index),\n \t\t\terror_interrupt_enable,\n \t\t\terror_interrupt_offset);\n \n+\t\tdata_rq = &enic->rq[enic_data_rq(index)];\n+\t\tif (data_rq->in_use)\n+\t\t\tvnic_rq_init(data_rq,\n+\t\t\t\t     enic_cq_rq(enic, index),\n+\t\t\t\t     error_interrupt_enable,\n+\t\t\t\t     error_interrupt_offset);\n+\n \t\tcq_idx = enic_cq_rq(enic, index);\n \t\tvnic_cq_init(&enic->cq[cq_idx],\n \t\t\t0 /* flow_control_enable */,\n@@ -291,6 +299,9 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)\n \tunsigned i;\n \tdma_addr_t dma_addr;\n \n+\tif (!rq->in_use)\n+\t\treturn 0;\n+\n \tdev_debug(enic, \"queue %u, allocating %u rx queue mbufs\\n\", rq->index,\n \t\t  rq->ring.desc_count);\n \n@@ -304,18 +315,19 @@ enic_alloc_rx_queue_mbufs(struct enic 
*enic, struct vnic_rq *rq)\n \n \t\tdma_addr = (dma_addr_t)(mb->buf_physaddr\n \t\t\t   + RTE_PKTMBUF_HEADROOM);\n-\n-\t\trq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,\n-\t\t\t\t mb->buf_len - RTE_PKTMBUF_HEADROOM);\n+\t\trq_enet_desc_enc(rqd, dma_addr,\n+\t\t\t\t(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP\n+\t\t\t\t: RQ_ENET_TYPE_NOT_SOP),\n+\t\t\t\tmb->buf_len - RTE_PKTMBUF_HEADROOM);\n \t\trq->mbuf_ring[i] = mb;\n \t}\n \n \t/* make sure all prior writes are complete before doing the PIO write */\n \trte_rmb();\n \n-\t/* Post all but the last 2 cache lines' worth of descriptors */\n-\trq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE\n-\t\t\t/ sizeof(struct rq_enet_desc));\n+\t/* Post all but the last buffer to VIC. */\n+\trq->posted_index = rq->ring.desc_count - 1;\n+\n \trq->rx_nb_hold = 0;\n \n \tdev_debug(enic, \"port=%u, qidx=%u, Write %u posted idx, %u sw held\\n\",\n@@ -419,17 +431,28 @@ int enic_enable(struct enic *enic)\n \t\t\t\"Flow director feature will not work\\n\");\n \n \tfor (index = 0; index < enic->rq_count; index++) {\n-\t\terr = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);\n+\t\terr = enic_alloc_rx_queue_mbufs(enic,\n+\t\t\t&enic->rq[enic_sop_rq(index)]);\n \t\tif (err) {\n-\t\t\tdev_err(enic, \"Failed to alloc RX queue mbufs\\n\");\n+\t\t\tdev_err(enic, \"Failed to alloc sop RX queue mbufs\\n\");\n+\t\t\treturn err;\n+\t\t}\n+\t\terr = enic_alloc_rx_queue_mbufs(enic,\n+\t\t\t&enic->rq[enic_data_rq(index)]);\n+\t\tif (err) {\n+\t\t\t/* release the allocated mbufs for the sop rq*/\n+\t\t\tenic_rxmbuf_queue_release(enic,\n+\t\t\t\t&enic->rq[enic_sop_rq(index)]);\n+\n+\t\t\tdev_err(enic, \"Failed to alloc data RX queue mbufs\\n\");\n \t\t\treturn err;\n \t\t}\n \t}\n \n \tfor (index = 0; index < enic->wq_count; index++)\n-\t\tvnic_wq_enable(&enic->wq[index]);\n+\t\tenic_start_wq(enic, index);\n \tfor (index = 0; index < enic->rq_count; index++)\n-\t\tvnic_rq_enable(&enic->rq[index]);\n+\t\tenic_start_rq(enic, index);\n \n 
\tvnic_dev_enable_wait(enic->vdev);\n \n@@ -449,7 +472,7 @@ int enic_alloc_intr_resources(struct enic *enic)\n \n \tdev_info(enic, \"vNIC resources used:  \"\\\n \t\t\"wq %d rq %d cq %d intr %d\\n\",\n-\t\tenic->wq_count, enic->rq_count,\n+\t\tenic->wq_count, enic_vnic_rq_count(enic),\n \t\tenic->cq_count, enic->intr_count);\n \n \terr = vnic_intr_alloc(enic->vdev, &enic->intr, 0);\n@@ -461,19 +484,32 @@ int enic_alloc_intr_resources(struct enic *enic)\n \n void enic_free_rq(void *rxq)\n {\n-\tstruct vnic_rq *rq;\n+\tstruct vnic_rq *rq_sop, *rq_data;\n \tstruct enic *enic;\n \n \tif (rxq == NULL)\n \t\treturn;\n \n-\trq = (struct vnic_rq *)rxq;\n-\tenic = vnic_dev_priv(rq->vdev);\n-\tenic_rxmbuf_queue_release(enic, rq);\n-\trte_free(rq->mbuf_ring);\n-\trq->mbuf_ring = NULL;\n-\tvnic_rq_free(rq);\n-\tvnic_cq_free(&enic->cq[rq->index]);\n+\trq_sop = (struct vnic_rq *)rxq;\n+\tenic = vnic_dev_priv(rq_sop->vdev);\n+\trq_data = &enic->rq[rq_sop->data_queue_idx];\n+\n+\tenic_rxmbuf_queue_release(enic, rq_sop);\n+\tif (rq_data->in_use)\n+\t\tenic_rxmbuf_queue_release(enic, rq_data);\n+\n+\trte_free(rq_sop->mbuf_ring);\n+\tif (rq_data->in_use)\n+\t\trte_free(rq_data->mbuf_ring);\n+\n+\trq_sop->mbuf_ring = NULL;\n+\trq_data->mbuf_ring = NULL;\n+\n+\tvnic_rq_free(rq_sop);\n+\tif (rq_data->in_use)\n+\t\tvnic_rq_free(rq_data);\n+\n+\tvnic_cq_free(&enic->cq[rq_sop->index]);\n }\n \n void enic_start_wq(struct enic *enic, uint16_t queue_idx)\n@@ -488,12 +524,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)\n \n void enic_start_rq(struct enic *enic, uint16_t queue_idx)\n {\n-\tvnic_rq_enable(&enic->rq[queue_idx]);\n+\tstruct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];\n+\tstruct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];\n+\n+\tif (rq_data->in_use)\n+\t\tvnic_rq_enable(rq_data);\n+\trte_mb();\n+\tvnic_rq_enable(rq_sop);\n+\n }\n \n int enic_stop_rq(struct enic *enic, uint16_t queue_idx)\n {\n-\treturn vnic_rq_disable(&enic->rq[queue_idx]);\n+\tint 
ret1 = 0, ret2 = 0;\n+\n+\tstruct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];\n+\tstruct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];\n+\n+\tret2 = vnic_rq_disable(rq_sop);\n+\trte_mb();\n+\tif (rq_data->in_use)\n+\t\tret1 = vnic_rq_disable(rq_data);\n+\n+\tif (ret2)\n+\t\treturn ret2;\n+\telse\n+\t\treturn ret1;\n }\n \n int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,\n@@ -501,53 +557,141 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,\n \tuint16_t nb_desc)\n {\n \tint rc;\n-\tstruct vnic_rq *rq = &enic->rq[queue_idx];\n+\tuint16_t sop_queue_idx = enic_sop_rq(queue_idx);\n+\tuint16_t data_queue_idx = enic_data_rq(queue_idx);\n+\tstruct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];\n+\tstruct vnic_rq *rq_data = &enic->rq[data_queue_idx];\n+\tunsigned int mbuf_size, mbufs_per_pkt;\n+\tunsigned int nb_sop_desc, nb_data_desc;\n+\tuint16_t min_sop, max_sop, min_data, max_data;\n+\n+\trq_sop->is_sop = 1;\n+\trq_sop->data_queue_idx = data_queue_idx;\n+\trq_data->is_sop = 0;\n+\trq_data->data_queue_idx = 0;\n+\trq_sop->socket_id = socket_id;\n+\trq_sop->mp = mp;\n+\trq_data->socket_id = socket_id;\n+\trq_data->mp = mp;\n+\trq_sop->in_use = 1;\n+\n+\tmbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -\n+\t\t\t       RTE_PKTMBUF_HEADROOM);\n+\n+\tif (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {\n+\t\tdev_info(enic, \"Scatter rx mode enabled\\n\");\n+\t\t/* ceil(mtu/mbuf_size) */\n+\t\tmbufs_per_pkt = (enic->config.mtu +\n+\t\t\t\t (mbuf_size - 1)) / mbuf_size;\n+\t} else {\n+\t\tdev_info(enic, \"Scatter rx mode disabled\\n\");\n+\t\tmbufs_per_pkt = 1;\n+\t}\n+\n+\tif (mbufs_per_pkt > 1) {\n+\t\tdev_info(enic, \"Scatter rx mode in use\\n\");\n+\t\trq_data->in_use = 1;\n+\t} else {\n+\t\tdev_info(enic, \"Scatter rx mode not being used\\n\");\n+\t\trq_data->in_use = 0;\n+\t}\n \n-\trq->socket_id = socket_id;\n-\trq->mp = mp;\n+\t/* number of descriptors have to be a multiple of 32 */\n+\tnb_sop_desc = (nb_desc / mbufs_per_pkt) 
& ~0x1F;\n+\tnb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;\n+\n+\trq_sop->max_mbufs_per_pkt = mbufs_per_pkt;\n+\trq_data->max_mbufs_per_pkt = mbufs_per_pkt;\n+\n+\tif (mbufs_per_pkt > 1) {\n+\t\tmin_sop = 64;\n+\t\tmax_sop = ((enic->config.rq_desc_count /\n+\t\t\t    (mbufs_per_pkt - 1)) & ~0x1F);\n+\t\tmin_data = min_sop * (mbufs_per_pkt - 1);\n+\t\tmax_data = enic->config.rq_desc_count;\n+\t} else {\n+\t\tmin_sop = 64;\n+\t\tmax_sop = enic->config.rq_desc_count;\n+\t\tmin_data = 0;\n+\t\tmax_data = 0;\n+\t}\n \n-\tif (nb_desc) {\n-\t\tif (nb_desc > enic->config.rq_desc_count) {\n-\t\t\tdev_warning(enic,\n-\t\t\t\t\"RQ %d - number of rx desc in cmd line (%d)\"\\\n-\t\t\t\t\"is greater than that in the UCSM/CIMC adapter\"\\\n-\t\t\t\t\"policy.  Applying the value in the adapter \"\\\n-\t\t\t\t\"policy (%d).\\n\",\n-\t\t\t\tqueue_idx, nb_desc, enic->config.rq_desc_count);\n-\t\t\tnb_desc = enic->config.rq_desc_count;\n-\t\t}\n-\t\tdev_info(enic, \"RX Queues - effective number of descs:%d\\n\",\n-\t\t\t nb_desc);\n+\tif (nb_desc < (min_sop + min_data)) {\n+\t\tdev_warning(enic,\n+\t\t\t    \"Number of rx descs too low, adjusting to minimum\\n\");\n+\t\tnb_sop_desc = min_sop;\n+\t\tnb_data_desc = min_data;\n+\t} else if (nb_desc > (max_sop + max_data)) {\n+\t\tdev_warning(enic,\n+\t\t\t    \"Number of rx_descs too high, adjusting to maximum\\n\");\n+\t\tnb_sop_desc = max_sop;\n+\t\tnb_data_desc = max_data;\n \t}\n+\tif (mbufs_per_pkt > 1) {\n+\t\tdev_info(enic, \"For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\\n\",\n+\t\t\t enic->config.mtu, mbuf_size, min_sop + min_data,\n+\t\t\t max_sop + max_data);\n+\t}\n+\tdev_info(enic, \"Using %d rx descriptors (sop %d, data %d)\\n\",\n+\t\t nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);\n \n-\t/* Allocate queue resources */\n-\trc = vnic_rq_alloc(enic->vdev, rq, queue_idx,\n-\t\tnb_desc, sizeof(struct rq_enet_desc));\n+\t/* Allocate sop queue resources */\n+\trc = vnic_rq_alloc(enic->vdev, rq_sop, 
sop_queue_idx,\n+\t\tnb_sop_desc, sizeof(struct rq_enet_desc));\n \tif (rc) {\n-\t\tdev_err(enic, \"error in allocation of rq\\n\");\n+\t\tdev_err(enic, \"error in allocation of sop rq\\n\");\n \t\tgoto err_exit;\n \t}\n-\n+\tnb_sop_desc = rq_sop->ring.desc_count;\n+\n+\tif (rq_data->in_use) {\n+\t\t/* Allocate data queue resources */\n+\t\trc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,\n+\t\t\t\t   nb_data_desc,\n+\t\t\t\t   sizeof(struct rq_enet_desc));\n+\t\tif (rc) {\n+\t\t\tdev_err(enic, \"error in allocation of data rq\\n\");\n+\t\t\tgoto err_free_rq_sop;\n+\t\t}\n+\t\tnb_data_desc = rq_data->ring.desc_count;\n+\t}\n \trc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,\n-\t\tsocket_id, nb_desc,\n-\t\tsizeof(struct cq_enet_rq_desc));\n+\t\t\t   socket_id, nb_sop_desc + nb_data_desc,\n+\t\t\t   sizeof(struct cq_enet_rq_desc));\n \tif (rc) {\n \t\tdev_err(enic, \"error in allocation of cq for rq\\n\");\n-\t\tgoto err_free_rq_exit;\n+\t\tgoto err_free_rq_data;\n \t}\n \n-\t/* Allocate the mbuf ring */\n-\trq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket(\"rq->mbuf_ring\",\n-\t\t\tsizeof(struct rte_mbuf *) * nb_desc,\n-\t\t\tRTE_CACHE_LINE_SIZE, rq->socket_id);\n+\t/* Allocate the mbuf rings */\n+\trq_sop->mbuf_ring = (struct rte_mbuf **)\n+\t\trte_zmalloc_socket(\"rq->mbuf_ring\",\n+\t\t\t\t   sizeof(struct rte_mbuf *) * nb_sop_desc,\n+\t\t\t\t   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);\n+\tif (rq_sop->mbuf_ring == NULL)\n+\t\tgoto err_free_cq;\n+\n+\tif (rq_data->in_use) {\n+\t\trq_data->mbuf_ring = (struct rte_mbuf **)\n+\t\t\trte_zmalloc_socket(\"rq->mbuf_ring\",\n+\t\t\t\tsizeof(struct rte_mbuf *) * nb_data_desc,\n+\t\t\t\tRTE_CACHE_LINE_SIZE, rq_sop->socket_id);\n+\t\tif (rq_data->mbuf_ring == NULL)\n+\t\t\tgoto err_free_sop_mbuf;\n+\t}\n \n-\tif (rq->mbuf_ring != NULL)\n-\t\treturn 0;\n+\treturn 0;\n \n+err_free_sop_mbuf:\n+\trte_free(rq_sop->mbuf_ring);\n+err_free_cq:\n \t/* cleanup on error */\n 
\tvnic_cq_free(&enic->cq[queue_idx]);\n-err_free_rq_exit:\n-\tvnic_rq_free(rq);\n+err_free_rq_data:\n+\tif (rq_data->in_use)\n+\t\tvnic_rq_free(rq_data);\n+err_free_rq_sop:\n+\tvnic_rq_free(rq_sop);\n err_exit:\n \treturn -ENOMEM;\n }\n@@ -645,10 +789,12 @@ int enic_disable(struct enic *enic)\n \t\tif (err)\n \t\t\treturn err;\n \t}\n-\tfor (i = 0; i < enic->rq_count; i++) {\n-\t\terr = vnic_rq_disable(&enic->rq[i]);\n-\t\tif (err)\n-\t\t\treturn err;\n+\tfor (i = 0; i < enic_vnic_rq_count(enic); i++) {\n+\t\tif (enic->rq[i].in_use) {\n+\t\t\terr = vnic_rq_disable(&enic->rq[i]);\n+\t\t\tif (err)\n+\t\t\t\treturn err;\n+\t\t}\n \t}\n \n \tvnic_dev_set_reset_flag(enic->vdev, 1);\n@@ -657,8 +803,9 @@ int enic_disable(struct enic *enic)\n \tfor (i = 0; i < enic->wq_count; i++)\n \t\tvnic_wq_clean(&enic->wq[i], enic_free_wq_buf);\n \n-\tfor (i = 0; i < enic->rq_count; i++)\n-\t\tvnic_rq_clean(&enic->rq[i], enic_free_rq_buf);\n+\tfor (i = 0; i < enic_vnic_rq_count(enic); i++)\n+\t\tif (enic->rq[i].in_use)\n+\t\t\tvnic_rq_clean(&enic->rq[i], enic_free_rq_buf);\n \tfor (i = 0; i < enic->cq_count; i++)\n \t\tvnic_cq_clean(&enic->cq[i]);\n \tvnic_intr_clean(&enic->intr);\n@@ -863,9 +1010,13 @@ int enic_set_vnic_res(struct enic *enic)\n \tstruct rte_eth_dev *eth_dev = enic->rte_dev;\n \tint rc = 0;\n \n-\tif (enic->rq_count < eth_dev->data->nb_rx_queues) {\n-\t\tdev_err(dev, \"Not enough Receive queues. Requested:%u, Configured:%u\\n\",\n-\t\t\teth_dev->data->nb_rx_queues, enic->rq_count);\n+\t/* With Rx scatter support, two RQs are now used per RQ used by\n+\t * the application.\n+\t */\n+\tif (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) {\n+\t\tdev_err(dev, \"Not enough Receive queues. 
Requested:%u which uses %d RQs on VIC, Configured:%u\\n\",\n+\t\t\teth_dev->data->nb_rx_queues,\n+\t\t\teth_dev->data->nb_rx_queues * 2, enic->rq_count);\n \t\trc = -EINVAL;\n \t}\n \tif (enic->wq_count < eth_dev->data->nb_tx_queues) {\ndiff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c\nindex ebe379d..42edd84 100644\n--- a/drivers/net/enic/enic_res.c\n+++ b/drivers/net/enic/enic_res.c\n@@ -196,8 +196,9 @@ void enic_free_vnic_resources(struct enic *enic)\n \n \tfor (i = 0; i < enic->wq_count; i++)\n \t\tvnic_wq_free(&enic->wq[i]);\n-\tfor (i = 0; i < enic->rq_count; i++)\n-\t\tvnic_rq_free(&enic->rq[i]);\n+\tfor (i = 0; i < enic_vnic_rq_count(enic); i++)\n+\t\tif (enic->rq[i].in_use)\n+\t\t\tvnic_rq_free(&enic->rq[i]);\n \tfor (i = 0; i < enic->cq_count; i++)\n \t\tvnic_cq_free(&enic->cq[i]);\n \tvnic_intr_free(&enic->intr);\ndiff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c\nindex 972eae2..6ca4ac3 100644\n--- a/drivers/net/enic/enic_rxtx.c\n+++ b/drivers/net/enic/enic_rxtx.c\n@@ -228,27 +228,34 @@ uint16_t\n enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t       uint16_t nb_pkts)\n {\n-\tstruct vnic_rq *rq = rx_queue;\n-\tstruct enic *enic = vnic_dev_priv(rq->vdev);\n-\tunsigned int rx_id;\n+\tstruct vnic_rq *sop_rq = rx_queue;\n+\tstruct vnic_rq *data_rq;\n+\tstruct vnic_rq *rq;\n+\tstruct enic *enic = vnic_dev_priv(sop_rq->vdev);\n+\tuint16_t cq_idx;\n+\tuint16_t rq_idx;\n+\tuint16_t rq_num;\n \tstruct rte_mbuf *nmb, *rxmb;\n-\tuint16_t nb_rx = 0, nb_err = 0;\n-\tuint16_t nb_hold;\n+\tuint16_t nb_rx = 0;\n \tstruct vnic_cq *cq;\n \tvolatile struct cq_desc *cqd_ptr;\n \tuint8_t color;\n+\tuint16_t seg_length;\n+\tstruct rte_mbuf *first_seg = sop_rq->pkt_first_seg;\n+\tstruct rte_mbuf *last_seg = sop_rq->pkt_last_seg;\n \n-\tcq = &enic->cq[enic_cq_rq(enic, rq->index)];\n-\trx_id = cq->to_clean;\t\t/* index of cqd, rqd, mbuf_table */\n-\tcqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;\n+\tcq = 
&enic->cq[enic_cq_rq(enic, sop_rq->index)];\n+\tcq_idx = cq->to_clean;\t\t/* index of cqd, rqd, mbuf_table */\n+\tcqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;\n \n-\tnb_hold = rq->rx_nb_hold;\t/* mbufs held by software */\n+\tdata_rq = &enic->rq[sop_rq->data_queue_idx];\n \n \twhile (nb_rx < nb_pkts) {\n \t\tvolatile struct rq_enet_desc *rqd_ptr;\n \t\tdma_addr_t dma_addr;\n \t\tstruct cq_desc cqd;\n \t\tuint8_t packet_error;\n+\t\tuint16_t ciflags;\n \n \t\t/* Check for pkts available */\n \t\tcolor = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)\n@@ -256,9 +263,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tif (color == cq->last_color)\n \t\t\tbreak;\n \n-\t\t/* Get the cq descriptor and rq pointer */\n+\t\t/* Get the cq descriptor and extract rq info from it */\n \t\tcqd = *cqd_ptr;\n-\t\trqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;\n+\t\trq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;\n+\t\trq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;\n+\n+\t\trq = &enic->rq[rq_num];\n+\t\trqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;\n \n \t\t/* allocate a new mbuf */\n \t\tnmb = rte_mbuf_raw_alloc(rq->mp);\n@@ -271,67 +282,99 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,\n \t\tpacket_error = enic_cq_rx_check_err(&cqd);\n \n \t\t/* Get the mbuf to return and replace with one just allocated */\n-\t\trxmb = rq->mbuf_ring[rx_id];\n-\t\trq->mbuf_ring[rx_id] = nmb;\n+\t\trxmb = rq->mbuf_ring[rq_idx];\n+\t\trq->mbuf_ring[rq_idx] = nmb;\n \n \t\t/* Increment cqd, rqd, mbuf_table index */\n-\t\trx_id++;\n-\t\tif (unlikely(rx_id == rq->ring.desc_count)) {\n-\t\t\trx_id = 0;\n+\t\tcq_idx++;\n+\t\tif (unlikely(cq_idx == cq->ring.desc_count)) {\n+\t\t\tcq_idx = 0;\n \t\t\tcq->last_color = cq->last_color ? 
0 : 1;\n \t\t}\n \n \t\t/* Prefetch next mbuf & desc while processing current one */\n-\t\tcqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;\n+\t\tcqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;\n \t\trte_enic_prefetch(cqd_ptr);\n-\t\trte_enic_prefetch(rq->mbuf_ring[rx_id]);\n-\t\trte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)\n-\t\t\t\t + rx_id);\n+\n+\t\tciflags = enic_cq_rx_desc_ciflags(\n+\t\t\t(struct cq_enet_rq_desc *)&cqd);\n \n \t\t/* Push descriptor for newly allocated mbuf */\n-\t\tdma_addr = (dma_addr_t)(nmb->buf_physaddr\n-\t\t\t   + RTE_PKTMBUF_HEADROOM);\n-\t\trqd_ptr->address = rte_cpu_to_le_64(dma_addr);\n-\t\trqd_ptr->length_type = cpu_to_le16(nmb->buf_len\n-\t\t\t\t       - RTE_PKTMBUF_HEADROOM);\n+\t\tdma_addr = (dma_addr_t)(nmb->buf_physaddr +\n+\t\t\t\t\tRTE_PKTMBUF_HEADROOM);\n+\t\trq_enet_desc_enc(rqd_ptr, dma_addr,\n+\t\t\t\t(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP\n+\t\t\t\t: RQ_ENET_TYPE_NOT_SOP),\n+\t\t\t\tnmb->buf_len - RTE_PKTMBUF_HEADROOM);\n \n-\t\t/* Drop incoming bad packet */\n-\t\tif (unlikely(packet_error)) {\n-\t\t\trte_pktmbuf_free(rxmb);\n-\t\t\tnb_err++;\n-\t\t\tcontinue;\n+\t\t/* Fill in the rest of the mbuf */\n+\t\tseg_length = enic_cq_rx_desc_n_bytes(&cqd);\n+\t\trxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);\n+\t\tenic_cq_rx_to_pkt_flags(&cqd, rxmb);\n+\t\tif (rq->is_sop) {\n+\t\t\tfirst_seg = rxmb;\n+\t\t\tfirst_seg->nb_segs = 1;\n+\t\t\tfirst_seg->pkt_len = seg_length;\n+\t\t} else {\n+\t\t\tfirst_seg->pkt_len = (uint16_t)(first_seg->pkt_len\n+\t\t\t\t\t\t\t+ seg_length);\n+\t\t\tfirst_seg->nb_segs++;\n+\t\t\tlast_seg->next = rxmb;\n \t\t}\n \n-\t\t/* Fill in the rest of the mbuf */\n-\t\trxmb->data_off = RTE_PKTMBUF_HEADROOM;\n-\t\trxmb->nb_segs = 1;\n \t\trxmb->next = NULL;\n \t\trxmb->port = enic->port_id;\n-\t\trxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);\n-\t\trxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);\n-\t\tenic_cq_rx_to_pkt_flags(&cqd, rxmb);\n-\t\trxmb->data_len = 
rxmb->pkt_len;\n+\t\trxmb->data_len = seg_length;\n+\n+\t\trq->rx_nb_hold++;\n+\n+\t\tif (!(enic_cq_rx_desc_eop(ciflags))) {\n+\t\t\tlast_seg = rxmb;\n+\t\t\tcontinue;\n+\t\t}\n+\n+\t\tif (unlikely(packet_error)) {\n+\t\t\trte_pktmbuf_free(first_seg);\n+\t\t\trte_atomic64_inc(&enic->soft_stats.rx_packet_errors);\n+\t\t\tcontinue;\n+\t\t}\n+\n \n \t\t/* prefetch mbuf data for caller */\n-\t\trte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,\n+\t\trte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,\n \t\t\t\t    RTE_PKTMBUF_HEADROOM));\n \n \t\t/* store the mbuf address into the next entry of the array */\n-\t\trx_pkts[nb_rx++] = rxmb;\n+\t\trx_pkts[nb_rx++] = first_seg;\n \t}\n \n-\tnb_hold += nb_rx + nb_err;\n-\tcq->to_clean = rx_id;\n+\tsop_rq->pkt_first_seg = first_seg;\n+\tsop_rq->pkt_last_seg = last_seg;\n+\n+\tcq->to_clean = cq_idx;\n+\n+\tif ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >\n+\t    sop_rq->rx_free_thresh) {\n+\t\tif (data_rq->in_use) {\n+\t\t\tdata_rq->posted_index =\n+\t\t\t\tenic_ring_add(data_rq->ring.desc_count,\n+\t\t\t\t\t      data_rq->posted_index,\n+\t\t\t\t\t      data_rq->rx_nb_hold);\n+\t\t\tdata_rq->rx_nb_hold = 0;\n+\t\t}\n+\t\tsop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,\n+\t\t\t\t\t\t     sop_rq->posted_index,\n+\t\t\t\t\t\t     sop_rq->rx_nb_hold);\n+\t\tsop_rq->rx_nb_hold = 0;\n \n-\tif (nb_hold > rq->rx_free_thresh) {\n-\t\trq->posted_index = enic_ring_add(rq->ring.desc_count,\n-\t\t\t\trq->posted_index, nb_hold);\n-\t\tnb_hold = 0;\n \t\trte_mb();\n-\t\tiowrite32(rq->posted_index, &rq->ctrl->posted_index);\n+\t\tif (data_rq->in_use)\n+\t\t\tiowrite32(data_rq->posted_index,\n+\t\t\t\t  &data_rq->ctrl->posted_index);\n+\t\trte_compiler_barrier();\n+\t\tiowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);\n \t}\n \n-\trq->rx_nb_hold = nb_hold;\n \n \treturn nb_rx;\n }\n",
    "prefixes": [
        "dpdk-dev"
    ]
}