get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update: all writable fields are replaced).
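
Reading a patch needs no authentication on this public instance. A minimal sketch of fetching this patch with Python's requests package (using requests is an assumption; any HTTP client works), printing a few of the fields shown in the sample response below:

    import requests

    # GET the patch detail; the URL matches the "url" field in the sample body.
    resp = requests.get("https://patches.dpdk.org/api/patches/51332/")
    resp.raise_for_status()

    patch = resp.json()
    print(patch["name"])   # "[v2,4/6] net/af_xdp: use mbuf mempool ..."
    print(patch["state"])  # "superseded"
    print(patch["mbox"])   # URL of the raw patch in mbox format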

GET /api/patches/51332/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 51332,
    "url": "https://patches.dpdk.org/api/patches/51332/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20190319071256.26302-5-xiaolong.ye@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190319071256.26302-5-xiaolong.ye@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190319071256.26302-5-xiaolong.ye@intel.com",
    "date": "2019-03-19T07:12:54",
    "name": "[v2,4/6] net/af_xdp: use mbuf mempool for buffer management",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "8c67dc6df42237e72bd96214e9cc66e65b708a65",
    "submitter": {
        "id": 1120,
        "url": "https://patches.dpdk.org/api/people/1120/?format=api",
        "name": "Xiaolong Ye",
        "email": "xiaolong.ye@intel.com"
    },
    "delegate": {
        "id": 319,
        "url": "https://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20190319071256.26302-5-xiaolong.ye@intel.com/mbox/",
    "series": [
        {
            "id": 3796,
            "url": "https://patches.dpdk.org/api/series/3796/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=3796",
            "date": "2019-03-19T07:12:50",
            "name": "Introduce AF_XDP PMD",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/3796/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/51332/comments/",
    "check": "fail",
    "checks": "https://patches.dpdk.org/api/patches/51332/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id BC8C04CB5;\n\tTue, 19 Mar 2019 08:17:11 +0100 (CET)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n\tby dpdk.org (Postfix) with ESMTP id 26D99493D\n\tfor <dev@dpdk.org>; Tue, 19 Mar 2019 08:17:00 +0100 (CET)",
            "from fmsmga005.fm.intel.com ([10.253.24.32])\n\tby orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t19 Mar 2019 00:16:59 -0700",
            "from yexl-server.sh.intel.com ([10.67.110.206])\n\tby fmsmga005.fm.intel.com with ESMTP; 19 Mar 2019 00:16:58 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.58,496,1544515200\"; d=\"scan'208\";a=\"329889964\"",
        "From": "Xiaolong Ye <xiaolong.ye@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Qi Zhang <qi.z.zhang@intel.com>,\n\tKarlsson Magnus <magnus.karlsson@intel.com>,\n\tTopel Bjorn <bjorn.topel@intel.com>, Xiaolong Ye <xiaolong.ye@intel.com>",
        "Date": "Tue, 19 Mar 2019 15:12:54 +0800",
        "Message-Id": "<20190319071256.26302-5-xiaolong.ye@intel.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20190319071256.26302-1-xiaolong.ye@intel.com>",
        "References": "<20190301080947.91086-1-xiaolong.ye@intel.com>\n\t<20190319071256.26302-1-xiaolong.ye@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 4/6] net/af_xdp: use mbuf mempool for buffer\n\tmanagement",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Now, af_xdp registered memory buffer is managed by rte_mempool. mbuf be\nallocated from rte_mempool can be convert to xdp_desc's address and vice\nversa.\n\nSigned-off-by: Xiaolong Ye <xiaolong.ye@intel.com>\n---\n drivers/net/af_xdp/rte_eth_af_xdp.c | 127 +++++++++++++++++-----------\n 1 file changed, 78 insertions(+), 49 deletions(-)",
    "diff": "diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex 96dedc0c4..fc60cb5c5 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -43,7 +43,11 @@\n \n #define ETH_AF_XDP_FRAME_SIZE\t\tXSK_UMEM__DEFAULT_FRAME_SIZE\n #define ETH_AF_XDP_NUM_BUFFERS\t\t4096\n-#define ETH_AF_XDP_DATA_HEADROOM\t0\n+/* mempool hdrobj size (64 bytes) + sizeof(struct rte_mbuf) (128 bytes) */\n+#define ETH_AF_XDP_MBUF_OVERHEAD\t192\n+/* data start from offset 320 (192 + 128) bytes */\n+#define ETH_AF_XDP_DATA_HEADROOM\t\t\t\t\\\n+\t(ETH_AF_XDP_MBUF_OVERHEAD + RTE_PKTMBUF_HEADROOM)\n #define ETH_AF_XDP_DFLT_NUM_DESCS\tXSK_RING_CONS__DEFAULT_NUM_DESCS\n #define ETH_AF_XDP_DFLT_QUEUE_IDX\t0\n \n@@ -56,7 +60,7 @@ struct xsk_umem_info {\n \tstruct xsk_ring_prod fq;\n \tstruct xsk_ring_cons cq;\n \tstruct xsk_umem *umem;\n-\tstruct rte_ring *buf_ring;\n+\tstruct rte_mempool *mb_pool;\n \tvoid *buffer;\n };\n \n@@ -110,12 +114,32 @@ static struct rte_eth_link pmd_link = {\n \t.link_autoneg = ETH_LINK_AUTONEG\n };\n \n+static inline struct rte_mbuf *\n+addr_to_mbuf(struct xsk_umem_info *umem, uint64_t addr)\n+{\n+\tuint64_t offset = (addr / ETH_AF_XDP_FRAME_SIZE *\n+\t\t\tETH_AF_XDP_FRAME_SIZE);\n+\tstruct rte_mbuf *mbuf = (struct rte_mbuf *)((uint64_t)umem->buffer +\n+\t\t\t\t    offset + ETH_AF_XDP_MBUF_OVERHEAD -\n+\t\t\t\t    sizeof(struct rte_mbuf));\n+\tmbuf->data_off = addr - offset - ETH_AF_XDP_MBUF_OVERHEAD;\n+\treturn mbuf;\n+}\n+\n+static inline uint64_t\n+mbuf_to_addr(struct xsk_umem_info *umem, struct rte_mbuf *mbuf)\n+{\n+\treturn (uint64_t)mbuf->buf_addr + mbuf->data_off -\n+\t\t(uint64_t)umem->buffer;\n+}\n+\n static inline int\n reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)\n {\n \tstruct xsk_ring_prod *fq = &umem->fq;\n+\tstruct rte_mbuf *mbuf;\n \tuint32_t idx;\n-\tvoid *addr = NULL;\n+\tuint64_t addr;\n \tint i, ret = 0;\n \n \tret = xsk_ring_prod__reserve(fq, reserve_size, &idx);\n@@ -125,11 +149,14 @@ reserve_fill_queue(struct xsk_umem_info *umem, int reserve_size)\n \t}\n \n \tfor (i = 0; i < reserve_size; i++) {\n-\t\trte_ring_dequeue(umem->buf_ring, &addr);\n-\t\t*xsk_ring_prod__fill_addr(fq, idx++) = (uint64_t)addr;\n+\t\tmbuf = rte_pktmbuf_alloc(umem->mb_pool);\n+\t\tif (!mbuf)\n+\t\t\tbreak;\n+\t\taddr = mbuf_to_addr(umem, mbuf);\n+\t\t*xsk_ring_prod__fill_addr(fq, idx++) = addr;\n \t}\n \n-\txsk_ring_prod__submit(fq, reserve_size);\n+\txsk_ring_prod__submit(fq, i);\n \n \treturn 0;\n }\n@@ -174,7 +201,7 @@ eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\t} else {\n \t\t\tdropped++;\n \t\t}\n-\t\trte_ring_enqueue(umem->buf_ring, (void *)addr);\n+\t\trte_pktmbuf_free(addr_to_mbuf(umem, addr));\n \t}\n \n \txsk_ring_cons__release(rx, rcvd);\n@@ -197,9 +224,8 @@ static void pull_umem_cq(struct xsk_umem_info *umem, int size)\n \tn = xsk_ring_cons__peek(cq, size, &idx_cq);\n \tif (n > 0) {\n \t\tfor (i = 0; i < n; i++) {\n-\t\t\taddr = *xsk_ring_cons__comp_addr(cq,\n-\t\t\t\t\t\t\t idx_cq++);\n-\t\t\trte_ring_enqueue(umem->buf_ring, (void *)addr);\n+\t\t\taddr = *xsk_ring_cons__comp_addr(cq, idx_cq++);\n+\t\t\trte_pktmbuf_free(addr_to_mbuf(umem, addr));\n \t\t}\n \n \t\txsk_ring_cons__release(cq, n);\n@@ -236,7 +262,7 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \tstruct pkt_tx_queue *txq = queue;\n \tstruct xsk_umem_info *umem = txq->pair->umem;\n \tstruct rte_mbuf *mbuf;\n-\tvoid *addrs[ETH_AF_XDP_TX_BATCH_SIZE];\n+\tstruct rte_mbuf 
*mbuf_to_tx;\n \tunsigned long tx_bytes = 0;\n \tint i, valid = 0;\n \tuint32_t idx_tx;\n@@ -246,10 +272,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \n \tpull_umem_cq(umem, nb_pkts);\n \n-\tnb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,\n-\t\t\t\t\tnb_pkts, NULL);\n-\tif (!nb_pkts)\n-\t\treturn 0;\n \n \tif (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {\n \t\tkick_tx(txq);\n@@ -264,7 +286,12 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \t\tdesc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);\n \t\tmbuf = bufs[i];\n \t\tif (mbuf->pkt_len <= buf_len) {\n-\t\t\tdesc->addr = (uint64_t)addrs[valid];\n+\t\t\tmbuf_to_tx = rte_pktmbuf_alloc(umem->mb_pool);\n+\t\t\tif (!mbuf_to_tx) {\n+\t\t\t\trte_pktmbuf_free(mbuf);\n+\t\t\t\tcontinue;\n+\t\t\t}\n+\t\t\tdesc->addr = mbuf_to_addr(umem, mbuf_to_tx);\n \t\t\tdesc->len = mbuf->pkt_len;\n \t\t\tpkt = xsk_umem__get_data(umem->buffer,\n \t\t\t\t\t\t desc->addr);\n@@ -280,10 +307,6 @@ eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)\n \n \tkick_tx(txq);\n \n-\tif (valid < nb_pkts)\n-\t\trte_ring_enqueue_bulk(umem->buf_ring, &addrs[valid],\n-\t\t\t\t nb_pkts - valid, NULL);\n-\n \ttxq->err_pkts += nb_pkts - valid;\n \ttxq->tx_pkts += valid;\n \ttxq->tx_bytes += tx_bytes;\n@@ -433,16 +456,29 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,\n \n static void xdp_umem_destroy(struct xsk_umem_info *umem)\n {\n-\tfree(umem->buffer);\n-\tumem->buffer = NULL;\n-\n-\trte_ring_free(umem->buf_ring);\n-\tumem->buf_ring = NULL;\n+\trte_mempool_free(umem->mb_pool);\n+\tumem->mb_pool = NULL;\n \n \tfree(umem);\n \tumem = NULL;\n }\n \n+static inline uint64_t get_base_addr(struct rte_mempool *mp)\n+{\n+\tstruct rte_mempool_memhdr *memhdr;\n+\n+\tmemhdr = STAILQ_FIRST(&mp->mem_list);\n+\treturn (uint64_t)(memhdr->addr);\n+}\n+\n+static inline uint64_t get_len(struct rte_mempool *mp)\n+{\n+\tstruct rte_mempool_memhdr *memhdr;\n+\n+\tmemhdr = STAILQ_FIRST(&mp->mem_list);\n+\treturn (uint64_t)(memhdr->len);\n+}\n+\n static struct xsk_umem_info *xdp_umem_configure(void)\n {\n \tstruct xsk_umem_info *umem;\n@@ -451,10 +487,9 @@ static struct xsk_umem_info *xdp_umem_configure(void)\n \t\t.comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,\n \t\t.frame_size = ETH_AF_XDP_FRAME_SIZE,\n \t\t.frame_headroom = ETH_AF_XDP_DATA_HEADROOM };\n-\tvoid *bufs = NULL;\n-\tchar ring_name[0x100];\n+\tvoid *base_addr = NULL;\n+\tchar pool_name[0x100];\n \tint ret;\n-\tuint64_t i;\n \n \tumem = calloc(1, sizeof(*umem));\n \tif (!umem) {\n@@ -462,28 +497,23 @@ static struct xsk_umem_info *xdp_umem_configure(void)\n \t\treturn NULL;\n \t}\n \n-\tsnprintf(ring_name, 0x100, \"af_xdp_ring\");\n-\tumem->buf_ring = rte_ring_create(ring_name,\n-\t\t\t\t\t ETH_AF_XDP_NUM_BUFFERS,\n-\t\t\t\t\t SOCKET_ID_ANY,\n-\t\t\t\t\t 0x0);\n-\tif (!umem->buf_ring) {\n+\tsnprintf(pool_name, 0x100, \"af_xdp_ring\");\n+\tumem->mb_pool = rte_pktmbuf_pool_create_with_flags(pool_name,\n+\t\t\tETH_AF_XDP_NUM_BUFFERS,\n+\t\t\t250, 0,\n+\t\t\tETH_AF_XDP_FRAME_SIZE -\n+\t\t\tETH_AF_XDP_MBUF_OVERHEAD,\n+\t\t\tMEMPOOL_F_NO_SPREAD | MEMPOOL_F_PAGE_ALIGN,\n+\t\t\tSOCKET_ID_ANY);\n+\n+\tif (!umem->mb_pool || umem->mb_pool->nb_mem_chunks != 1) {\n \t\tRTE_LOG(ERR, AF_XDP,\n-\t\t\t\"Failed to create rte_ring\\n\");\n+\t\t\t\"Failed to create rte_mempool\\n\");\n \t\tgoto err;\n \t}\n+\tbase_addr = (void *)get_base_addr(umem->mb_pool);\n \n-\tfor (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; 
i++)\n-\t\trte_ring_enqueue(umem->buf_ring,\n-\t\t\t\t (void *)(i * ETH_AF_XDP_FRAME_SIZE +\n-\t\t\t\t\t  ETH_AF_XDP_DATA_HEADROOM));\n-\n-\tif (posix_memalign(&bufs, getpagesize(),\n-\t\t\t   ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE)) {\n-\t\tRTE_LOG(ERR, AF_XDP, \"Failed to allocate memory pool.\\n\");\n-\t\tgoto err;\n-\t}\n-\tret = xsk_umem__create(&umem->umem, bufs,\n+\tret = xsk_umem__create(&umem->umem, base_addr,\n \t\t\t       ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,\n \t\t\t       &umem->fq, &umem->cq,\n \t\t\t       &usr_config);\n@@ -492,7 +522,7 @@ static struct xsk_umem_info *xdp_umem_configure(void)\n \t\tRTE_LOG(ERR, AF_XDP, \"Failed to create umem\");\n \t\tgoto err;\n \t}\n-\tumem->buffer = bufs;\n+\tumem->buffer = base_addr;\n \n \treturn umem;\n \n@@ -909,8 +939,7 @@ rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)\n \n \tinternals = eth_dev->data->dev_private;\n \n-\trte_ring_free(internals->umem->buf_ring);\n-\trte_free(internals->umem->buffer);\n+\trte_mempool_free(internals->umem->mb_pool);\n \trte_free(internals->umem);\n \n \trte_eth_dev_release_port(eth_dev);\n",
    "prefixes": [
        "v2",
        "4/6"
    ]
}
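
Updates require an authenticated account with maintainer rights on the project. A sketch of a partial update via PATCH, assuming Patchwork's token authentication scheme (the token value below is hypothetical; generate a real one from your Patchwork user profile):

    import requests

    # Hypothetical API token; PATCH/PUT are rejected for anonymous clients.
    headers = {"Authorization": "Token 0123456789abcdef"}

    # PATCH changes only the fields supplied in the body. Valid state
    # names are defined per instance; "superseded" is the value shown
    # in the sample response above.
    resp = requests.patch(
        "https://patches.dpdk.org/api/patches/51332/",
        headers=headers,
        json={"state": "accepted"},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

A PUT to the same URL performs a full update and expects every writable field in the request body.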