get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch (full update).
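A minimal Python sketch of how these methods might be called with the requests library is shown below, using the patch from the example response that follows. The API token and the new state value are hypothetical placeholders, and write access (PUT/PATCH) is assumed to require token authentication.

import requests

BASE = "http://patches.dpdk.org/api"
PATCH_ID = 53424            # the patch shown in the example response below
TOKEN = "0123456789abcdef"  # hypothetical API token; writes are assumed to need one

# get: fetch the patch as JSON (reads are assumed to need no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# patch: partially update the patch, e.g. change its state (hypothetical value).
resp = requests.patch(
    f"{BASE}/patches/{PATCH_ID}/",
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted"},
)
resp.raise_for_status()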

GET /api/patches/53424/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53424,
    "url": "http://patches.dpdk.org/api/patches/53424/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190515083842.15116-2-xiaolong.ye@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190515083842.15116-2-xiaolong.ye@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190515083842.15116-2-xiaolong.ye@intel.com",
    "date": "2019-05-15T08:38:40",
    "name": "[v1,1/3] net/af_xdp: enable zero copy by extbuf",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "68c6468d4c26c2ae621b9fa36dad4328ffe7617d",
    "submitter": {
        "id": 1120,
        "url": "http://patches.dpdk.org/api/people/1120/?format=api",
        "name": "Xiaolong Ye",
        "email": "xiaolong.ye@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190515083842.15116-2-xiaolong.ye@intel.com/mbox/",
    "series": [
        {
            "id": 4663,
            "url": "http://patches.dpdk.org/api/series/4663/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4663",
            "date": "2019-05-15T08:38:39",
            "name": "add more features for AF_XDP pmd",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4663/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53424/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/53424/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id E85385F13;\n\tWed, 15 May 2019 10:49:48 +0200 (CEST)",
            "from mga09.intel.com (mga09.intel.com [134.134.136.24])\n\tby dpdk.org (Postfix) with ESMTP id C225CA3\n\tfor <dev@dpdk.org>; Wed, 15 May 2019 10:49:46 +0200 (CEST)",
            "from fmsmga008.fm.intel.com ([10.253.24.58])\n\tby orsmga102.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t15 May 2019 01:49:45 -0700",
            "from yexl-server.sh.intel.com (HELO\n\tNPG-DPDK-XDP-yexl-server.sh.intel.com) ([10.67.110.206])\n\tby fmsmga008.fm.intel.com with ESMTP; 15 May 2019 01:49:43 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "Xiaolong Ye <xiaolong.ye@intel.com>",
        "To": "Xiaolong Ye <xiaolong.ye@intel.com>, Qi Zhang <qi.z.zhang@intel.com>,\n\tJohn McNamara <john.mcnamara@intel.com>,\n\tMarko Kovacevic <marko.kovacevic@intel.com>",
        "Cc": "Karlsson Magnus <magnus.karlsson@intel.com>,\n\tTopel Bjorn <bjorn.topel@intel.com>, dev@dpdk.org",
        "Date": "Wed, 15 May 2019 16:38:40 +0800",
        "Message-Id": "<20190515083842.15116-2-xiaolong.ye@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190515083842.15116-1-xiaolong.ye@intel.com>",
        "References": "<20190515083842.15116-1-xiaolong.ye@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v1 1/3] net/af_xdp: enable zero copy by extbuf",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Implement zero copy of af_xdp pmd through mbuf's external memory\nmechanism to achieve high performance.\n\nThis patch also provides a new parameter \"pmd_zero_copy\" for user, so they\ncan choose to enable zero copy of af_xdp pmd or not.\n\nTo be clear, \"zero copy\" here is different from the \"zero copy mode\" of\nAF_XDP, it is about zero copy between af_xdp umem and mbuf used in dpdk\napplication.\n\nSuggested-by: Varghese Vipin <vipin.varghese@intel.com>\nSuggested-by: Olivier Matz <olivier.matz@6wind.com>\nSigned-off-by: Xiaolong Ye <xiaolong.ye@intel.com>\n---\n doc/guides/nics/af_xdp.rst          |  1 +\n drivers/net/af_xdp/rte_eth_af_xdp.c | 96 +++++++++++++++++++++++------\n 2 files changed, 77 insertions(+), 20 deletions(-)",
    "diff": "diff --git a/doc/guides/nics/af_xdp.rst b/doc/guides/nics/af_xdp.rst\nindex 175038153..0bd4239fe 100644\n--- a/doc/guides/nics/af_xdp.rst\n+++ b/doc/guides/nics/af_xdp.rst\n@@ -28,6 +28,7 @@ The following options can be provided to set up an af_xdp port in DPDK.\n \n *   ``iface`` - name of the Kernel interface to attach to (required);\n *   ``queue`` - netdev queue id (optional, default 0);\n+*   ``pmd_zero_copy`` - enable zero copy or not (optional, default 0);\n \n Prerequisites\n -------------\ndiff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex 35c72272c..ebef7bf34 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -70,6 +70,7 @@ struct xsk_umem_info {\n \tstruct xsk_umem *umem;\n \tstruct rte_ring *buf_ring;\n \tconst struct rte_memzone *mz;\n+\tint pmd_zc;\n };\n \n struct rx_stats {\n@@ -109,6 +110,7 @@ struct pmd_internals {\n \tint if_index;\n \tchar if_name[IFNAMSIZ];\n \tuint16_t queue_idx;\n+\tint pmd_zc;\n \tstruct ether_addr eth_addr;\n \tstruct xsk_umem_info *umem;\n \tstruct rte_mempool *mb_pool_share;\n@@ -119,10 +121,12 @@ struct pmd_internals {\n \n #define ETH_AF_XDP_IFACE_ARG\t\t\t\"iface\"\n #define ETH_AF_XDP_QUEUE_IDX_ARG\t\t\"queue\"\n+#define ETH_AF_XDP_PMD_ZC_ARG\t\t\t\"pmd_zero_copy\"\n \n static const char * const valid_arguments[] = {\n \tETH_AF_XDP_IFACE_ARG,\n \tETH_AF_XDP_QUEUE_IDX_ARG,\n+\tETH_AF_XDP_PMD_ZC_ARG,\n \tNULL\n };\n \n@@ -166,6 +170,15 @@ reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size)\n \treturn 0;\n }\n \n+static void\n+umem_buf_release_to_fq(void *addr, void *opaque)\n+{\n+\tstruct xsk_umem_info *umem = (struct xsk_umem_info *)opaque;\n+\tuint64_t umem_addr = (uint64_t)addr - umem->mz->addr_64;\n+\n+\trte_ring_enqueue(umem->buf_ring, (void *)umem_addr);\n+}\n+\n static uint16_t\n eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n {\n@@ -175,6 +188,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \tstruct xsk_ring_prod *fq = &umem->fq;\n \tuint32_t idx_rx = 0;\n \tuint32_t free_thresh = fq->size >> 1;\n+\tint pmd_zc = umem->pmd_zc;\n \tstruct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];\n \tunsigned long dropped = 0;\n \tunsigned long rx_bytes = 0;\n@@ -197,19 +211,29 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\tuint64_t addr;\n \t\tuint32_t len;\n \t\tvoid *pkt;\n+\t\tuint16_t buf_len = ETH_AF_XDP_FRAME_SIZE;\n+\t\tstruct rte_mbuf_ext_shared_info *shinfo;\n \n \t\tdesc = xsk_ring_cons__rx_desc(rx, idx_rx++);\n \t\taddr = desc->addr;\n \t\tlen = desc->len;\n \t\tpkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);\n \n-\t\trte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);\n+\t\tif (pmd_zc) {\n+\t\t\tshinfo = rte_pktmbuf_ext_shinfo_init_helper(pkt,\n+\t\t\t\t\t&buf_len, umem_buf_release_to_fq, umem);\n+\n+\t\t\trte_pktmbuf_attach_extbuf(mbufs[i], pkt, 0, buf_len,\n+\t\t\t\t\t\t  shinfo);\n+\t\t} else {\n+\t\t\trte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *),\n+\t\t\t\t\t\t\tpkt, len);\n+\t\t\trte_ring_enqueue(umem->buf_ring, (void *)addr);\n+\t\t}\n \t\trte_pktmbuf_pkt_len(mbufs[i]) = len;\n \t\trte_pktmbuf_data_len(mbufs[i]) = len;\n \t\trx_bytes += len;\n \t\tbufs[i] = mbufs[i];\n-\n-\t\trte_ring_enqueue(umem->buf_ring, (void *)addr);\n \t}\n \n \txsk_ring_cons__release(rx, rcvd);\n@@ -262,12 +286,21 @@ kick_tx(struct pkt_tx_queue *txq)\n \tpull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);\n }\n \n+static inline bool\n+in_umem_range(struct 
xsk_umem_info *umem, uint64_t addr)\n+{\n+\tuint64_t mz_base_addr = umem->mz->addr_64;\n+\n+\treturn addr >= mz_base_addr && addr < mz_base_addr + umem->mz->len;\n+}\n+\n static uint16_t\n eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n {\n \tstruct pkt_tx_queue *txq = queue;\n \tstruct xsk_umem_info *umem = txq->pair->umem;\n \tstruct rte_mbuf *mbuf;\n+\tint pmd_zc = umem->pmd_zc;\n \tvoid *addrs[ETH_AF_XDP_TX_BATCH_SIZE];\n \tunsigned long tx_bytes = 0;\n \tint i;\n@@ -294,16 +327,26 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \n \t\tdesc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);\n \t\tmbuf = bufs[i];\n-\n-\t\tdesc->addr = (uint64_t)addrs[i];\n \t\tdesc->len = mbuf->pkt_len;\n-\t\tpkt = xsk_umem__get_data(umem->mz->addr,\n-\t\t\t\t\t desc->addr);\n-\t\trte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),\n-\t\t\t   desc->len);\n-\t\ttx_bytes += mbuf->pkt_len;\n \n-\t\trte_pktmbuf_free(mbuf);\n+\t\t/*\n+\t\t * We need to make sure the external mbuf address is within\n+\t\t * current port's umem memzone range\n+\t\t */\n+\t\tif (pmd_zc && RTE_MBUF_HAS_EXTBUF(mbuf) &&\n+\t\t\t\tin_umem_range(umem, (uint64_t)mbuf->buf_addr)) {\n+\t\t\tdesc->addr = (uint64_t)mbuf->buf_addr -\n+\t\t\t\tumem->mz->addr_64;\n+\t\t\tmbuf->buf_addr = xsk_umem__get_data(umem->mz->addr,\n+\t\t\t\t\t(uint64_t)addrs[i]);\n+\t\t} else {\n+\t\t\tdesc->addr = (uint64_t)addrs[i];\n+\t\t\tpkt = xsk_umem__get_data(umem->mz->addr,\n+\t\t\t\t\tdesc->addr);\n+\t\t\trte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),\n+\t\t\t\t\tdesc->len);\n+\t\t}\n+\t\ttx_bytes += mbuf->pkt_len;\n \t}\n \n \txsk_ring_prod__submit(&txq->tx, nb_pkts);\n@@ -313,6 +356,9 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \ttxq->stats.tx_pkts += nb_pkts;\n \ttxq->stats.tx_bytes += tx_bytes;\n \n+\tfor (i = 0; i < nb_pkts; i++)\n+\t\trte_pktmbuf_free(bufs[i]);\n+\n \treturn nb_pkts;\n }\n \n@@ -640,6 +686,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,\n \t}\n \n \tinternals->umem = rxq->umem;\n+\tinternals->umem->pmd_zc = internals->pmd_zc;\n+\n+\tif (internals->umem->pmd_zc)\n+\t\tAF_XDP_LOG(INFO, \"Zero copy between umem and mbuf enabled.\\n\");\n \n \tdev->data->rx_queues[rx_queue_id] = rxq;\n \treturn 0;\n@@ -775,9 +825,8 @@ parse_name_arg(const char *key __rte_unused,\n }\n \n static int\n-parse_parameters(struct rte_kvargs *kvlist,\n-\t\t char *if_name,\n-\t\t int *queue_idx)\n+parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,\n+\t\t\t\tint *pmd_zc)\n {\n \tint ret;\n \n@@ -791,6 +840,11 @@ parse_parameters(struct rte_kvargs *kvlist,\n \tif (ret < 0)\n \t\tgoto free_kvlist;\n \n+\tret = rte_kvargs_process(kvlist, ETH_AF_XDP_PMD_ZC_ARG,\n+\t\t\t\t &parse_integer_arg, pmd_zc);\n+\tif (ret < 0)\n+\t\tgoto free_kvlist;\n+\n free_kvlist:\n \trte_kvargs_free(kvlist);\n \treturn ret;\n@@ -827,9 +881,8 @@ get_iface_info(const char *if_name,\n }\n \n static struct rte_eth_dev *\n-init_internals(struct rte_vdev_device *dev,\n-\t       const char *if_name,\n-\t       int queue_idx)\n+init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,\n+\t\t\t\t\tint pmd_zc)\n {\n \tconst char *name = rte_vdev_device_name(dev);\n \tconst unsigned int numa_node = dev->device.numa_node;\n@@ -843,6 +896,7 @@ init_internals(struct rte_vdev_device *dev,\n \t\treturn NULL;\n \n \tinternals->queue_idx = queue_idx;\n+\tinternals->pmd_zc = pmd_zc;\n \tstrlcpy(internals->if_name, if_name, IFNAMSIZ);\n \n \tfor (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) 
{\n@@ -883,6 +937,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \tint xsk_queue_idx = ETH_AF_XDP_DFLT_QUEUE_IDX;\n \tstruct rte_eth_dev *eth_dev = NULL;\n \tconst char *name;\n+\tint pmd_zc = 0;\n \n \tAF_XDP_LOG(INFO, \"Initializing pmd_af_xdp for %s\\n\",\n \t\trte_vdev_device_name(dev));\n@@ -909,7 +964,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \tif (dev->device.numa_node == SOCKET_ID_ANY)\n \t\tdev->device.numa_node = rte_socket_id();\n \n-\tif (parse_parameters(kvlist, if_name, &xsk_queue_idx) < 0) {\n+\tif (parse_parameters(kvlist, if_name, &xsk_queue_idx, &pmd_zc) < 0) {\n \t\tAF_XDP_LOG(ERR, \"Invalid kvargs value\\n\");\n \t\treturn -EINVAL;\n \t}\n@@ -919,7 +974,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \t\treturn -EINVAL;\n \t}\n \n-\teth_dev = init_internals(dev, if_name, xsk_queue_idx);\n+\teth_dev = init_internals(dev, if_name, xsk_queue_idx, pmd_zc);\n \tif (eth_dev == NULL) {\n \t\tAF_XDP_LOG(ERR, \"Failed to init internals\\n\");\n \t\treturn -1;\n@@ -961,7 +1016,8 @@ static struct rte_vdev_driver pmd_af_xdp_drv = {\n RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);\n RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,\n \t\t\t      \"iface=<string> \"\n-\t\t\t      \"queue=<int> \");\n+\t\t\t      \"queue=<int> \"\n+\t\t\t      \"pmd_zero_copy=<0|1>\");\n \n RTE_INIT(af_xdp_init_log)\n {\n",
    "prefixes": [
        "v1",
        "1/3"
    ]
}
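The hyperlinked fields in the response (mbox, comments, checks, series) can be followed with further GET requests. A short sketch, again assuming the Python requests library; the series field names are taken from the response above, and the check entries are assumed to carry "context" and "state" fields.

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/53424/").json()

# "mbox" is the raw patch in mbox form, suitable for "git am".
mbox = requests.get(patch["mbox"])
mbox.raise_for_status()
with open("patch-53424.mbox", "wb") as f:
    f.write(mbox.content)

# "checks" lists individual CI results; "check" holds the combined verdict ("success" above).
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])  # assumed per-check fields

# "series" links the patch to series 4663 ("add more features for AF_XDP pmd").
for series in patch["series"]:
    print(series["version"], series["name"], series["mbox"])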