get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch (full replacement).
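
A minimal sketch (not part of the Patchwork documentation) of driving the GET endpoint above from C with libcurl; the URL is the one reported in the response below, and PUT/PATCH requests would additionally need an authenticated API token:

#include <stdio.h>
#include <curl/curl.h>

/* Fetch the JSON for patch 84986 and print it to stdout.
 * Build with: cc fetch_patch.c -lcurl */
int main(void)
{
    CURL *curl;
    CURLcode res = CURLE_FAILED_INIT;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if (curl) {
        curl_easy_setopt(curl, CURLOPT_URL,
            "https://patches.dpdk.org/api/patches/84986/");
        /* With no write callback set, libcurl writes the
         * response body to stdout. */
        res = curl_easy_perform(curl);
        if (res != CURLE_OK)
            fprintf(stderr, "request failed: %s\n",
                curl_easy_strerror(res));
        curl_easy_cleanup(curl);
    }
    curl_global_cleanup();
    return res == CURLE_OK ? 0 : 1;
}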

GET /api/patches/84986/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 84986,
    "url": "https://patches.dpdk.org/api/patches/84986/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1607478454-23218-1-git-send-email-dheemanthm@vmware.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1607478454-23218-1-git-send-email-dheemanthm@vmware.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1607478454-23218-1-git-send-email-dheemanthm@vmware.com",
    "date": "2020-12-09T01:47:34",
    "name": "[v1,2/2] linux/kni: Added support for KNI multiple fifos",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "16a4eff802d6ced58a0de4e5bacef65a54ab218e",
    "submitter": {
        "id": 2062,
        "url": "https://patches.dpdk.org/api/people/2062/?format=api",
        "name": "Dheemanth Mallikarjun",
        "email": "dheemanthm@vmware.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1607478454-23218-1-git-send-email-dheemanthm@vmware.com/mbox/",
    "series": [
        {
            "id": 14242,
            "url": "https://patches.dpdk.org/api/series/14242/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=14242",
            "date": "2020-12-09T01:47:34",
            "name": null,
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/14242/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/84986/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/84986/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id F01AEA04DB;\n\tThu, 10 Dec 2020 17:20:47 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7B310C98C;\n\tThu, 10 Dec 2020 17:20:46 +0100 (CET)",
            "from EX13-EDG-OU-001.vmware.com (ex13-edg-ou-001.vmware.com\n [208.91.0.189]) by dpdk.org (Postfix) with ESMTP id E06C22B8B\n for <dev@dpdk.org>; Wed,  9 Dec 2020 02:47:45 +0100 (CET)",
            "from sc9-mailhost1.vmware.com (10.113.161.71) by\n EX13-EDG-OU-001.vmware.com (10.113.208.155) with Microsoft SMTP Server id\n 15.0.1156.6; Tue, 8 Dec 2020 17:47:39 -0800",
            "from nsx.eng.vmware.com. (unknown [10.173.12.65])\n by sc9-mailhost1.vmware.com (Postfix) with ESMTP id DAC752018F\n for <dev@dpdk.org>; Tue,  8 Dec 2020 17:47:43 -0800 (PST)"
        ],
        "From": "dheemanth <dheemanthm@vmware.com>",
        "To": "<dev@dpdk.org>",
        "Date": "Wed, 9 Dec 2020 01:47:34 +0000",
        "Message-ID": "<1607478454-23218-1-git-send-email-dheemanthm@vmware.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "Received-SPF": "None (EX13-EDG-OU-001.vmware.com: dheemanthm@vmware.com does not\n designate permitted sender hosts)",
        "X-Mailman-Approved-At": "Thu, 10 Dec 2020 17:20:44 +0100",
        "Subject": "[dpdk-dev] [PATCH v1 2/2] linux/kni: Added support for KNI multiple\n\tfifos",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "In order to improve performance, the KNI is made to\nsupport multiple fifos, So that multiple threads pinned\nto multiple cores can process packets in parallel.\n\nSigned-off-by: dheemanth <dheemanthm@vmware.com>\n---\n app/test/test_kni.c             |   4 +-\n drivers/net/kni/rte_eth_kni.c   |   5 +-\n examples/kni/main.c             |   4 +-\n kernel/linux/kni/kni_dev.h      |  11 +-\n kernel/linux/kni/kni_fifo.h     | 192 ++++++++++++++++++++++++++++++-----\n kernel/linux/kni/kni_misc.c     | 189 +++++++++++++++++++++--------------\n kernel/linux/kni/kni_net.c      |  88 ++++++++++------\n lib/librte_kni/rte_kni.c        | 216 ++++++++++++++++++++++++++--------------\n lib/librte_kni/rte_kni.h        |  11 +-\n lib/librte_kni/rte_kni_common.h |  10 +-\n lib/librte_port/rte_port_kni.c  |  12 +--\n 11 files changed, 515 insertions(+), 227 deletions(-)",
    "diff": "diff --git a/app/test/test_kni.c b/app/test/test_kni.c\nindex f53a53e..9bbceab 100644\n--- a/app/test/test_kni.c\n+++ b/app/test/test_kni.c\n@@ -245,7 +245,7 @@ test_kni_loop(__rte_unused void *arg)\n \t\t\t}\n \n \t\t\tnum = rte_kni_tx_burst(test_kni_ctx, pkts_burst,\n-\t\t\t\t\t\t\t\tnb_rx);\n+\t\t\t\t\t\tnb_rx, 0);\n \t\t\tstats.ingress += num;\n \t\t\trte_kni_handle_request(test_kni_ctx);\n \t\t\tif (num < nb_rx) {\n@@ -260,7 +260,7 @@ test_kni_loop(__rte_unused void *arg)\n \t\t\tif (test_kni_processing_flag)\n \t\t\t\tbreak;\n \t\t\tnum = rte_kni_rx_burst(test_kni_ctx, pkts_burst,\n-\t\t\t\t\t\t\tPKT_BURST_SZ);\n+\t\t\t\t\t\tPKT_BURST_SZ, 0);\n \t\t\tstats.egress += num;\n \t\t\tfor (nb_tx = 0; nb_tx < num; nb_tx++)\n \t\t\t\trte_pktmbuf_free(pkts_burst[nb_tx]);\ndiff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c\nindex 1696787..55711c5 100644\n--- a/drivers/net/kni/rte_eth_kni.c\n+++ b/drivers/net/kni/rte_eth_kni.c\n@@ -81,7 +81,7 @@ eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n \tuint16_t nb_pkts;\n \tint i;\n \n-\tnb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);\n+\tnb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs, 0);\n \tfor (i = 0; i < nb_pkts; i++)\n \t\tbufs[i]->port = kni_q->internals->port_id;\n \n@@ -97,7 +97,7 @@ eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)\n \tstruct rte_kni *kni = kni_q->internals->kni;\n \tuint16_t nb_pkts;\n \n-\tnb_pkts =  rte_kni_tx_burst(kni, bufs, nb_bufs);\n+\tnb_pkts =  rte_kni_tx_burst(kni, bufs, nb_bufs, 0);\n \n \tkni_q->tx.pkts += nb_pkts;\n \n@@ -129,6 +129,7 @@ eth_kni_start(struct rte_eth_dev *dev)\n \n \tmb_pool = internals->rx_queues[0].mb_pool;\n \tstrlcpy(conf.name, name, RTE_KNI_NAMESIZE);\n+\tmemset(&conf, 0, sizeof(conf));\n \tconf.force_bind = 0;\n \tconf.group_id = port_id;\n \tconf.mbuf_size =\ndiff --git a/examples/kni/main.c b/examples/kni/main.c\nindex fe93b86..a34bf1a 100644\n--- a/examples/kni/main.c\n+++ b/examples/kni/main.c\n@@ -229,7 +229,7 @@ kni_ingress(struct kni_port_params *p)\n \t\t\treturn;\n \t\t}\n \t\t/* Burst tx to kni */\n-\t\tnum = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);\n+\t\tnum = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx, 0);\n \t\tif (num)\n \t\t\tkni_stats[port_id].rx_packets += num;\n \n@@ -261,7 +261,7 @@ kni_egress(struct kni_port_params *p)\n \tport_id = p->port_id;\n \tfor (i = 0; i < nb_kni; i++) {\n \t\t/* Burst rx from kni */\n-\t\tnum = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);\n+\t\tnum = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ, 0);\n \t\tif (unlikely(num > PKT_BURST_SZ)) {\n \t\t\tRTE_LOG(ERR, APP, \"Error receiving from KNI\\n\");\n \t\t\treturn;\ndiff --git a/kernel/linux/kni/kni_dev.h b/kernel/linux/kni/kni_dev.h\nindex c15da311..f782ec1 100644\n--- a/kernel/linux/kni/kni_dev.h\n+++ b/kernel/linux/kni/kni_dev.h\n@@ -55,16 +55,16 @@ struct kni_dev {\n \tstruct net_device *net_dev;\n \n \t/* queue for packets to be sent out */\n-\tstruct rte_kni_fifo *tx_q;\n+\tstruct rte_kni_fifo *tx_q[RTE_MAX_LCORE];\n \n \t/* queue for the packets received */\n-\tstruct rte_kni_fifo *rx_q;\n+\tstruct rte_kni_fifo *rx_q[RTE_MAX_LCORE];\n \n \t/* queue for the allocated mbufs those can be used to save sk buffs */\n-\tstruct rte_kni_fifo *alloc_q;\n+\tstruct rte_kni_fifo *alloc_q[RTE_MAX_LCORE];\n \n \t/* free queue for the mbufs to be freed */\n-\tstruct rte_kni_fifo *free_q;\n+\tstruct rte_kni_fifo *free_q[RTE_MAX_LCORE];\n \n \t/* request queue */\n \tstruct rte_kni_fifo *req_q;\n@@ -87,6 +87,9 @@ 
struct kni_dev {\n \tvoid *alloc_pa[MBUF_BURST_SZ];\n \tvoid *alloc_va[MBUF_BURST_SZ];\n \n+\tunsigned int queues_num;\n+\tunsigned int fifos_num;\n+\n \tstruct task_struct *usr_tsk;\n };\n \ndiff --git a/kernel/linux/kni/kni_fifo.h b/kernel/linux/kni/kni_fifo.h\nindex 5c91b55..f62374a 100644\n--- a/kernel/linux/kni/kni_fifo.h\n+++ b/kernel/linux/kni/kni_fifo.h\n@@ -18,48 +18,186 @@\n \n /**\n  * Adds num elements into the fifo. Return the number actually written\n+ * Multiple-producer safe based on  __rte_ring_mp_do_enqueue().\n  */\n-static inline uint32_t\n-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)\n+\n+static inline unsigned\n+kni_mp_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned int n)\n {\n-\tuint32_t i = 0;\n-\tuint32_t fifo_write = fifo->write;\n-\tuint32_t fifo_read = smp_load_acquire(&fifo->read);\n-\tuint32_t new_write = fifo_write;\n+\tunsigned int fifo_write, new_write;\n+\tunsigned int fifo_read, free_entries;\n+\tconst unsigned int max = n;\n+\tint success = 0;\n+\tunsigned int i;\n+\tconst unsigned int mask = (fifo->len) - 1;\n+\tunsigned int idx;\n+\n+\tif (unlikely(n == 0))\n+\t\treturn 0;\n+\n+\t/* Move fifo->write.head atomically. */\n+\tdo {\n+\t\t/* Reset n to the initial burst count. */\n+\t\tn = max;\n+\n+\t\tfifo_write = fifo->write;\n+\t\tfifo_read = fifo->read;\n+\n+\t\t/* The subtraction is done between two unsigned 32bits value\n+\t\t * (the result is always modulo 32 bits even if we have\n+\t\t * fifo_write > fifo_read). So 'free_entries' is always\n+\t\t * between 0 and fifo->len-1. \n+\t\t */\n+\t\tfree_entries = mask + fifo_read - fifo_write;\n \n-\tfor (i = 0; i < num; i++) {\n-\t\tnew_write = (new_write + 1) & (fifo->len - 1);\n+\t\t/* Check that we have enough room in fifo. */\n+\t\tif (unlikely(n > free_entries)) {\n+\t\t\tif (unlikely(free_entries == 0)) \n+\t\t\t\treturn 0;\n+\t\t\tn = free_entries;\n+\t\t}\n \n-\t\tif (new_write == fifo_read)\n-\t\t\tbreak;\n-\t\tfifo->buffer[fifo_write] = data[i];\n-\t\tfifo_write = new_write;\n+\t\tnew_write = fifo_write + n;\n+\t\tif (cmpxchg(&fifo->write, fifo_write, new_write) == fifo_write)\n+\t\t\tsuccess = 1;\n+\n+\t } while (unlikely(success == 0));\n+        \n+\t/* Write entries in fifo. */\n+\tidx = fifo_write & mask;\n+\tif (likely(idx + n < fifo->len)) {\n+\t\tfor (i = 0; i < (n & ((~(unsigned int)0x3))); i += 4, idx += 4) {\n+\t\t\tfifo->buffer[idx] = data[i];\n+\t\t\tfifo->buffer[idx + 1] = data[i + 1];\n+\t\t\tfifo->buffer[idx + 2] = data[i + 2]; \n+\t\t\tfifo->buffer[idx + 3] = data[i + 3]; \n+\t\t}\n+\t\tswitch (n & 0x3) {\n+\t\t\tcase 3:\n+\t\t\t\tfifo->buffer[idx++] = data[i++];\n+\t\t\tcase 2:\n+\t\t\t\tfifo->buffer[idx++] = data[i++];\n+\t\t\tcase 1:\n+\t\t\t\tfifo->buffer[idx++] = data[i++];\n+\t\t }       \n+\t} else {\n+\t\tfor (i = 0; i < n; i++)\n+\t\t\tfifo->buffer[(fifo_write + i) & mask] = data[i];\n \t}\n-\tsmp_store_release(&fifo->write, fifo_write);\n+        \n+\t/* barrier required to have ordered value for fifo write and read */\n+\tmb();\n \n-\treturn i;\n+\t/* If there are other enqueues in progress that preceded us,\n+\t * we need to wait for them to complete.\n+\t */\n+\twhile (unlikely(fifo->write != fifo_write))\n+\t\tcpu_relax();\n+                \n+\tfifo->write = new_write;\n+\treturn n;\n }\n \n /**\n- * Get up to num elements from the fifo. Return the number actully read\n+ * Adds num elements into the fifo. 
Return the number actually written\n  */\n static inline uint32_t\n-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)\n+kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)\n {\n-\tuint32_t i = 0;\n-\tuint32_t new_read = fifo->read;\n-\tuint32_t fifo_write = smp_load_acquire(&fifo->write);\n+\treturn kni_mp_fifo_put(fifo, data, num);\n+}\n+\n+/**\n+ * Get up to num elements from the fifo. Return the number actually read.\n+ *\n+ * Multiple-consumer safe based on __rte_ring_mc_do_dequeue().\n+ */\n+static inline uint32_t\n+kni_mc_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned int n)\n+{       \n+\tunsigned int fifo_read, fifo_write;\n+\tunsigned int new_read, entries;\n+\tconst unsigned int max = n;\n+\tint success = 0;\n+\tunsigned int i;\n+\tunsigned int mask = (fifo->len) - 1;\n+\tunsigned int idx;\n+       \n+\tif (unlikely(n == 0))\n+\t\treturn 0;\n+        \n+\t/* Move fifo->read.head atomically. */\n+\tdo {    \n+\t\t/* Restore n as it may change every loop. */\n+\t\tn = max;\n+                \n+\t\tfifo_read = fifo->read;\n+\t\tfifo_write = fifo->write;\n+                \n+\t\t/* The subtraction is done between two unsigned 32bits value\n+\t\t * (the result is always modulo 32 bits even if we have\n+\t\t * fifo_read > fifo_write). So 'entries' is always between 0\n+\t\t * and fifo->len-1.\n+\t\t */ \n+\t\tentries = fifo_write - fifo_read;\n \n-\tfor (i = 0; i < num; i++) {\n-\t\tif (new_read == fifo_write)\n-\t\t\tbreak;\n+\t\t/* Set the actual entries for dequeue. */\n+\t\tif (n > entries) {\n+\t\t\tif (unlikely(entries == 0))\n+\t\t\t\treturn 0;\n+\t\t\tn = entries;\n+\t\t}\n \n-\t\tdata[i] = fifo->buffer[new_read];\n-\t\tnew_read = (new_read + 1) & (fifo->len - 1);\n-\t}\n-\tsmp_store_release(&fifo->read, new_read);\n+\t\tnew_read = fifo_read + n;\n+\t\tif (cmpxchg(&fifo->read, fifo_read, new_read) == fifo_read)\n+\t\t\tsuccess = 1;\n \n-\treturn i;\n+\t} while (unlikely(success == 0));\n+\n+\t/* Copy entries from fifo. */\n+\tidx = fifo_read & mask;\n+\tif (likely(idx + n < fifo->len)) {\n+\t\tfor (i = 0; i < (n & ((~(unsigned int)0x3))); i += 4, idx += 4) {\n+\t\t\tdata[i] = fifo->buffer[idx];\n+\t\t\tdata[i + 1] = fifo->buffer[idx + 1];\n+\t\t\tdata[i + 2] = fifo->buffer[idx + 2];\n+\t\t\tdata[i + 3] = fifo->buffer[idx + 3];\n+\t\t}\n+\t\tswitch (n & 0x3) {\n+\t\t\tcase 3:\n+\t\t\t\tdata[i++] = fifo->buffer[idx++];\n+\t\t\tcase 2:\n+\t\t\t\tdata[i++] = fifo->buffer[idx++];\n+\t\t\tcase 1:\n+\t\t\t\tdata[i++] = fifo->buffer[idx++];\n+\t\t}\n+\t } else {\n+\t\tfor (i = 0; i < n; i++)\n+\t\t\tdata[i] = fifo->buffer[(fifo_read + i) & mask];\n+        }\n+\n+\t/* barrier required to have ordered value for fifo write and read */\n+        mb();\n+\n+\t/*\n+\t * If there are other dequeues in progress that preceded us,\n+\t * we need to wait for them to complete.\n+\t */\n+\twhile (unlikely(fifo->read != fifo_read))\n+\t\tcpu_relax();\n+                \n+\tfifo->read = new_read;\n+\treturn n;\n+}\n+\n+\n+/**\n+ * Get up to num elements from the fifo. 
Return the number actually read\n+ */\n+static inline uint32_t\n+kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)\n+{\n+\treturn kni_mc_fifo_get(fifo, data, num);\n }\n \n /**\ndiff --git a/kernel/linux/kni/kni_misc.c b/kernel/linux/kni/kni_misc.c\nindex 2b464c4..7080b2c 100644\n--- a/kernel/linux/kni/kni_misc.c\n+++ b/kernel/linux/kni/kni_misc.c\n@@ -292,51 +292,69 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,\n {\n \tstruct kni_net *knet = net_generic(net, kni_net_id);\n \tint ret;\n-\tstruct rte_kni_device_info dev_info;\n+\tunsigned int i, tx_queues_num;\n+\tstruct rte_kni_device_info *dev_info;\n \tstruct net_device *net_dev = NULL;\n \tstruct kni_dev *kni, *dev, *n;\n \n \tpr_info(\"Creating kni...\\n\");\n+\n+\t/* allocate dev_info from stack to avoid Wframe-larger-than=1024\n+\t * compile error.\n+\t */\n+\tdev_info = kzalloc(sizeof(struct rte_kni_device_info), GFP_KERNEL);\n+\tif (!dev_info) {\n+\t\treturn -ENOMEM;\n+\t}\n \t/* Check the buffer size, to avoid warning */\n-\tif (_IOC_SIZE(ioctl_num) > sizeof(dev_info))\n-\t\treturn -EINVAL;\n+\tif (_IOC_SIZE(ioctl_num) > sizeof(*dev_info)) {\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n+\t}\n \n \t/* Copy kni info from user space */\n-\tif (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))\n-\t\treturn -EFAULT;\n+\tif (copy_from_user(dev_info, (void *)ioctl_param, sizeof(*dev_info))) {\n+\t\tret = -EFAULT;\n+\t\tgoto out;\n+\t}\n \n \t/* Check if name is zero-ended */\n-\tif (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {\n+\tif (strnlen(dev_info->name, sizeof(dev_info->name)) == sizeof(dev_info->name)) {\n \t\tpr_err(\"kni.name not zero-terminated\");\n-\t\treturn -EINVAL;\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n \t}\n \n \t/**\n \t * Check if the cpu core id is valid for binding.\n \t */\n-\tif (dev_info.force_bind && !cpu_online(dev_info.core_id)) {\n-\t\tpr_err(\"cpu %u is not online\\n\", dev_info.core_id);\n-\t\treturn -EINVAL;\n+\tif (dev_info->force_bind && !cpu_online(dev_info->core_id)) {\n+\t\tpr_err(\"cpu %u is not online\\n\", dev_info->core_id);\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n \t}\n \n \t/* Check if it has been created */\n \tdown_read(&knet->kni_list_lock);\n \tlist_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {\n-\t\tif (kni_check_param(dev, &dev_info) < 0) {\n+\t\tif (kni_check_param(dev, dev_info) < 0) {\n \t\t\tup_read(&knet->kni_list_lock);\n-\t\t\treturn -EINVAL;\n+\t\t\tret = -EINVAL;\n+\t\t\tgoto out;\n \t\t}\n \t}\n+\ttx_queues_num = dev_info->queues_num;\n \tup_read(&knet->kni_list_lock);\n \n-\tnet_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,\n+\tnet_dev = alloc_netdev_mqs(sizeof(struct kni_dev), dev_info->name,\n #ifdef NET_NAME_USER\n-\t\t\t\t\t\t\tNET_NAME_USER,\n+\t\t\t\t\tNET_NAME_USER,\n #endif\n-\t\t\t\t\t\t\tkni_net_init);\n+\t\t\t\t\tkni_net_init, tx_queues_num, 1);\n \tif (net_dev == NULL) {\n-\t\tpr_err(\"error allocating device \\\"%s\\\"\\n\", dev_info.name);\n-\t\treturn -EBUSY;\n+\t\tpr_err(\"error allocating device \\\"%s\\\"\\n\", dev_info->name);\n+\t\tret = -EBUSY;\n+\t\tgoto out;\n \t}\n \n \tdev_net_set(net_dev, net);\n@@ -344,60 +362,68 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,\n \tkni = netdev_priv(net_dev);\n \n \tkni->net_dev = net_dev;\n-\tkni->core_id = dev_info.core_id;\n-\tstrncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);\n-\n+\tkni->core_id = dev_info->core_id;\n+\tstrncpy(kni->name, dev_info->name, RTE_KNI_NAMESIZE);\n+\tkni->name[RTE_KNI_NAMESIZE - 1] = 
'\\0';\n+\tkni->queues_num = tx_queues_num;\n+\tkni->fifos_num = dev_info->fifos_num;\n \t/* Translate user space info into kernel space info */\n-\tif (dev_info.iova_mode) {\n+\tif (dev_info->iova_mode) {\n #ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT\n-\t\tkni->tx_q = iova_to_kva(current, dev_info.tx_phys);\n-\t\tkni->rx_q = iova_to_kva(current, dev_info.rx_phys);\n-\t\tkni->alloc_q = iova_to_kva(current, dev_info.alloc_phys);\n-\t\tkni->free_q = iova_to_kva(current, dev_info.free_phys);\n-\n-\t\tkni->req_q = iova_to_kva(current, dev_info.req_phys);\n-\t\tkni->resp_q = iova_to_kva(current, dev_info.resp_phys);\n-\t\tkni->sync_va = dev_info.sync_va;\n-\t\tkni->sync_kva = iova_to_kva(current, dev_info.sync_phys);\n+\t\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\t\tkni->tx_q[i] = iova_to_kva(current, dev_info->tx_phys[i]);\n+\t\t\tkni->rx_q[i] = iova_to_kva(current, dev_info->rx_phys[i]);\n+\t\t\tkni->alloc_q[i] = iova_to_kva(current, dev_info->alloc_phys[i]);\n+\t\t\tkni->free_q[i] = iova_to_kva(current, dev_info->free_phys[i]);\n+\t\t}\n+\n+\t\tkni->req_q = iova_to_kva(current, dev_info->req_phys);\n+\t\tkni->resp_q = iova_to_kva(current, dev_info->resp_phys);\n+\t\tkni->sync_va = dev_info->sync_va;\n+\t\tkni->sync_kva = iova_to_kva(current, dev_info->sync_phys);\n \t\tkni->usr_tsk = current;\n \t\tkni->iova_mode = 1;\n #else\n \t\tpr_err(\"KNI module does not support IOVA to VA translation\\n\");\n-\t\treturn -EINVAL;\n+\t\tret = -EINVAL;\n+\t\tgoto out;\n #endif\n \t} else {\n+\t\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\t\tkni->tx_q[i] = phys_to_virt(dev_info->tx_phys[i]);\n+\t\t\tkni->rx_q[i] = phys_to_virt(dev_info->rx_phys[i]);\n+\t\t\tkni->alloc_q[i] = phys_to_virt(dev_info->alloc_phys[i]);\n+\t\t\tkni->free_q[i] = phys_to_virt(dev_info->free_phys[i]);\n+\t\t}\n \n-\t\tkni->tx_q = phys_to_virt(dev_info.tx_phys);\n-\t\tkni->rx_q = phys_to_virt(dev_info.rx_phys);\n-\t\tkni->alloc_q = phys_to_virt(dev_info.alloc_phys);\n-\t\tkni->free_q = phys_to_virt(dev_info.free_phys);\n-\n-\t\tkni->req_q = phys_to_virt(dev_info.req_phys);\n-\t\tkni->resp_q = phys_to_virt(dev_info.resp_phys);\n-\t\tkni->sync_va = dev_info.sync_va;\n-\t\tkni->sync_kva = phys_to_virt(dev_info.sync_phys);\n+\t\tkni->req_q = phys_to_virt(dev_info->req_phys);\n+\t\tkni->resp_q = phys_to_virt(dev_info->resp_phys);\n+\t\tkni->sync_va = dev_info->sync_va;\n+\t\tkni->sync_kva = phys_to_virt(dev_info->sync_phys);\n \t\tkni->iova_mode = 0;\n \t}\n \n-\tkni->mbuf_size = dev_info.mbuf_size;\n-\n-\tpr_debug(\"tx_phys:      0x%016llx, tx_q addr:      0x%p\\n\",\n-\t\t(unsigned long long) dev_info.tx_phys, kni->tx_q);\n-\tpr_debug(\"rx_phys:      0x%016llx, rx_q addr:      0x%p\\n\",\n-\t\t(unsigned long long) dev_info.rx_phys, kni->rx_q);\n-\tpr_debug(\"alloc_phys:   0x%016llx, alloc_q addr:   0x%p\\n\",\n-\t\t(unsigned long long) dev_info.alloc_phys, kni->alloc_q);\n-\tpr_debug(\"free_phys:    0x%016llx, free_q addr:    0x%p\\n\",\n-\t\t(unsigned long long) dev_info.free_phys, kni->free_q);\n+\tkni->mbuf_size = dev_info->mbuf_size;\n+\t\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\tpr_debug(\"tx_phys[%d]:      0x%016llx, tx_q[%d] addr:      0x%p\\n\",\n+\t\t\t i, (unsigned long long) dev_info->tx_phys[i], i, kni->tx_q[i]);\n+\t\tpr_debug(\"rx_phys[%d]:      0x%016llx, rx_q[%d] addr:      0x%p\\n\",\n+\t\t\t i, (unsigned long long) dev_info->rx_phys[i], i, kni->rx_q[i]);\n+\t\tpr_debug(\"alloc_phys[%d]:   0x%016llx, alloc_q[%d] addr:   0x%p\\n\",\n+\t\t\t i, (unsigned long long) dev_info->alloc_phys[i], i, 
kni->alloc_q[i]);\n+\t\tpr_debug(\"free_phys[%d]:    0x%016llx, free_q[%d] addr:    0x%p\\n\",\n+\t\t\t i, (unsigned long long) dev_info->free_phys[i], i, kni->free_q[i]);\n+\t}\n \tpr_debug(\"req_phys:     0x%016llx, req_q addr:     0x%p\\n\",\n-\t\t(unsigned long long) dev_info.req_phys, kni->req_q);\n+\t\t(unsigned long long) dev_info->req_phys, kni->req_q);\n \tpr_debug(\"resp_phys:    0x%016llx, resp_q addr:    0x%p\\n\",\n-\t\t(unsigned long long) dev_info.resp_phys, kni->resp_q);\n+\t\t(unsigned long long) dev_info->resp_phys, kni->resp_q);\n \tpr_debug(\"mbuf_size:    %u\\n\", kni->mbuf_size);\n \n \t/* if user has provided a valid mac address */\n-\tif (is_valid_ether_addr(dev_info.mac_addr))\n-\t\tmemcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);\n+\tif (is_valid_ether_addr(dev_info->mac_addr))\n+\t\tmemcpy(net_dev->dev_addr, dev_info->mac_addr, ETH_ALEN);\n \telse\n \t\t/*\n \t\t * Generate random mac address. eth_random_addr() is the\n@@ -405,39 +431,43 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,\n \t\t */\n \t\trandom_ether_addr(net_dev->dev_addr);\n \n-\tif (dev_info.mtu)\n-\t\tnet_dev->mtu = dev_info.mtu;\n+\tif (dev_info->mtu)\n+\t\tnet_dev->mtu = dev_info->mtu;\n #ifdef HAVE_MAX_MTU_PARAM\n \tnet_dev->max_mtu = net_dev->mtu;\n \n-\tif (dev_info.min_mtu)\n-\t\tnet_dev->min_mtu = dev_info.min_mtu;\n+\tif (dev_info->min_mtu)\n+\t\tnet_dev->min_mtu = dev_info->min_mtu;\n \n-\tif (dev_info.max_mtu)\n-\t\tnet_dev->max_mtu = dev_info.max_mtu;\n+\tif (dev_info->max_mtu)\n+\t\tnet_dev->max_mtu = dev_info->max_mtu;\n #endif\n \n \tret = register_netdev(net_dev);\n \tif (ret) {\n \t\tpr_err(\"error %i registering device \\\"%s\\\"\\n\",\n-\t\t\t\t\tret, dev_info.name);\n+\t\t\t\t\tret, dev_info->name);\n \t\tkni->net_dev = NULL;\n \t\tkni_dev_remove(kni);\n \t\tfree_netdev(net_dev);\n-\t\treturn -ENODEV;\n+\t\tret = -ENODEV;\n \t}\n \n \tnetif_carrier_off(net_dev);\n \n-\tret = kni_run_thread(knet, kni, dev_info.force_bind);\n+\tret = kni_run_thread(knet, kni, dev_info->force_bind);\n \tif (ret != 0)\n-\t\treturn ret;\n+\t\tgoto out;\n \n \tdown_write(&knet->kni_list_lock);\n \tlist_add(&kni->list, &knet->kni_list_head);\n \tup_write(&knet->kni_list_lock);\n \n-\treturn 0;\n+\tret = 0;\n+\n+out:\n+\tkfree(dev_info);\n+\treturn ret;\n }\n \n static int\n@@ -447,21 +477,36 @@ kni_ioctl_release(struct net *net, uint32_t ioctl_num,\n \tstruct kni_net *knet = net_generic(net, kni_net_id);\n \tint ret = -EINVAL;\n \tstruct kni_dev *dev, *n;\n-\tstruct rte_kni_device_info dev_info;\n+\tstruct rte_kni_device_info *dev_info;\n+\n+\t/* allocate dev_info from heap to avoid Wframe-larger-than=1024\n+\t * compile error.\n+\t */\n+\n+\tdev_info = kzalloc(sizeof(struct rte_kni_device_info), GFP_KERNEL);\n+\tif (!dev_info) {\n+\t\treturn -ENOMEM;\n+\t}\n \n-\tif (_IOC_SIZE(ioctl_num) > sizeof(dev_info))\n+\tif (_IOC_SIZE(ioctl_num) > sizeof(*dev_info)) {\n+\t\tkfree(dev_info);\n \t\treturn -EINVAL;\n+\t}\n \n-\tif (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))\n+\tif (copy_from_user(dev_info, (void *)ioctl_param, sizeof(*dev_info))) {\n+\t\tkfree(dev_info);\n \t\treturn -EFAULT;\n+\t}\n \n \t/* Release the network device according to its name */\n-\tif (strlen(dev_info.name) == 0)\n+\tif (strlen(dev_info->name) == 0) {\n+\t\tkfree(dev_info);\n \t\treturn -EINVAL;\n+\t}\n \n \tdown_write(&knet->kni_list_lock);\n \tlist_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {\n-\t\tif (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)\n+\t\tif 
(strncmp(dev->name, dev_info->name, RTE_KNI_NAMESIZE) != 0)\n \t\t\tcontinue;\n \n \t\tif (multiple_kthread_on && dev->pthread != NULL) {\n@@ -476,8 +521,8 @@ kni_ioctl_release(struct net *net, uint32_t ioctl_num,\n \t}\n \tup_write(&knet->kni_list_lock);\n \tpr_info(\"%s release kni named %s\\n\",\n-\t\t(ret == 0 ? \"Successfully\" : \"Unsuccessfully\"), dev_info.name);\n-\n+\t\t(ret == 0 ? \"Successfully\" : \"Unsuccessfully\"), dev_info->name);\n+\tkfree(dev_info);\n \treturn ret;\n }\n \ndiff --git a/kernel/linux/kni/kni_net.c b/kernel/linux/kni/kni_net.c\nindex 4b75208..6dbd22c 100644\n--- a/kernel/linux/kni/kni_net.c\n+++ b/kernel/linux/kni/kni_net.c\n@@ -29,9 +29,9 @@\n #define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */\n \n /* typedef for rx function */\n-typedef void (*kni_net_rx_t)(struct kni_dev *kni);\n+typedef void (*kni_net_rx_t)(struct kni_dev *kni, int index);\n \n-static void kni_net_rx_normal(struct kni_dev *kni);\n+static void kni_net_rx_normal(struct kni_dev *kni, int index);\n \n /* kni rx function pointer, with default to normal rx */\n static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;\n@@ -241,10 +241,17 @@ kni_fifo_trans_pa2va(struct kni_dev *kni,\n /* Try to release mbufs when kni release */\n void kni_net_release_fifo_phy(struct kni_dev *kni)\n {\n-\t/* release rx_q first, because it can't release in userspace */\n-\tkni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);\n-\t/* release alloc_q for speeding up kni release in userspace */\n-\tkni_fifo_trans_pa2va(kni, kni->alloc_q, kni->free_q);\n+\tunsigned int i;\n+\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\t/* release rx_q first, because it can't release in userspace */\n+\t\tkni_fifo_trans_pa2va(kni, kni->rx_q[i], kni->free_q[i]);\n+\t}\n+\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\t/* release alloc_q for speeding up kni release in userspace */\n+\t\tkni_fifo_trans_pa2va(kni, kni->alloc_q[i], kni->free_q[i]);\n+\t}\n }\n \n /*\n@@ -261,6 +268,24 @@ kni_net_config(struct net_device *dev, struct ifmap *map)\n }\n \n /*\n+ * Select a tx fifo to enqueue the packets\n+ */\n+static unsigned\n+kni_net_select_fifo(struct sk_buff *skb, struct kni_dev *kni)\n+{\n+\tu32 hash;\n+\tunsigned int fifo_idx, fifos_num = kni->fifos_num;\n+\n+\tif (unlikely(fifos_num == 1))\n+\t\treturn 0;\n+\n+\thash = skb_get_hash(skb);\n+\tfifo_idx = hash % fifos_num;\n+\n+\treturn fifo_idx;\n+}\n+\n+/*\n  * Transmit a packet (called by the kernel)\n  */\n static int\n@@ -272,6 +297,7 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)\n \tstruct rte_kni_mbuf *pkt_kva = NULL;\n \tvoid *pkt_pa = NULL;\n \tvoid *pkt_va = NULL;\n+\tunsigned int fifo_idx;\n \n \t/* save the timestamp */\n #ifdef HAVE_TRANS_START_HELPER\n@@ -284,12 +310,14 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)\n \tif (skb->len > kni->mbuf_size)\n \t\tgoto drop;\n \n+\tfifo_idx = kni_net_select_fifo(skb, kni);\n+\n \t/**\n \t * Check if it has at least one free entry in tx_q and\n \t * one entry in alloc_q.\n \t */\n-\tif (kni_fifo_free_count(kni->tx_q) == 0 ||\n-\t\t\tkni_fifo_count(kni->alloc_q) == 0) {\n+\tif (kni_fifo_free_count(kni->tx_q[fifo_idx]) == 0 ||\n+\t\t\tkni_fifo_count(kni->alloc_q[fifo_idx]) == 0) {\n \t\t/**\n \t\t * If no free entry in tx_q or no entry in alloc_q,\n \t\t * drops skb and goes out.\n@@ -298,7 +326,7 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)\n \t}\n \n \t/* dequeue a mbuf from alloc_q */\n-\tret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);\n+\tret = kni_fifo_get(kni->alloc_q[fifo_idx], &pkt_pa, 
1);\n \tif (likely(ret == 1)) {\n \t\tvoid *data_kva;\n \n@@ -316,7 +344,7 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)\n \t\tpkt_kva->data_len = len;\n \n \t\t/* enqueue mbuf into tx_q */\n-\t\tret = kni_fifo_put(kni->tx_q, &pkt_va, 1);\n+\t\tret = kni_fifo_put(kni->tx_q[fifo_idx], &pkt_va, 1);\n \t\tif (unlikely(ret != 1)) {\n \t\t\t/* Failing should not happen */\n \t\t\tpr_err(\"Fail to enqueue mbuf into tx_q\\n\");\n@@ -347,7 +375,7 @@ kni_net_tx(struct sk_buff *skb, struct net_device *dev)\n  * RX: normal working mode\n  */\n static void\n-kni_net_rx_normal(struct kni_dev *kni)\n+kni_net_rx_normal(struct kni_dev *kni, int index)\n {\n \tuint32_t ret;\n \tuint32_t len;\n@@ -358,7 +386,7 @@ kni_net_rx_normal(struct kni_dev *kni)\n \tstruct net_device *dev = kni->net_dev;\n \n \t/* Get the number of free entries in free_q */\n-\tnum_fq = kni_fifo_free_count(kni->free_q);\n+\tnum_fq = kni_fifo_free_count(kni->free_q[index]);\n \tif (num_fq == 0) {\n \t\t/* No room on the free_q, bail out */\n \t\treturn;\n@@ -368,7 +396,7 @@ kni_net_rx_normal(struct kni_dev *kni)\n \tnum_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);\n \n \t/* Burst dequeue from rx_q */\n-\tnum_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);\n+\tnum_rx = kni_fifo_get(kni->rx_q[index], kni->pa, num_rx);\n \tif (num_rx == 0)\n \t\treturn;\n \n@@ -419,7 +447,7 @@ kni_net_rx_normal(struct kni_dev *kni)\n \t}\n \n \t/* Burst enqueue mbufs into free_q */\n-\tret = kni_fifo_put(kni->free_q, kni->va, num_rx);\n+\tret = kni_fifo_put(kni->free_q[index], kni->va, num_rx);\n \tif (ret != num_rx)\n \t\t/* Failing should not happen */\n \t\tpr_err(\"Fail to enqueue entries into free_q\\n\");\n@@ -429,7 +457,7 @@ kni_net_rx_normal(struct kni_dev *kni)\n  * RX: loopback with enqueue/dequeue fifos.\n  */\n static void\n-kni_net_rx_lo_fifo(struct kni_dev *kni)\n+kni_net_rx_lo_fifo(struct kni_dev *kni, int index)\n {\n \tuint32_t ret;\n \tuint32_t len;\n@@ -441,16 +469,16 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)\n \tstruct net_device *dev = kni->net_dev;\n \n \t/* Get the number of entries in rx_q */\n-\tnum_rq = kni_fifo_count(kni->rx_q);\n+\tnum_rq = kni_fifo_count(kni->rx_q[index]);\n \n \t/* Get the number of free entries in tx_q */\n-\tnum_tq = kni_fifo_free_count(kni->tx_q);\n+\tnum_tq = kni_fifo_free_count(kni->tx_q[index]);\n \n \t/* Get the number of entries in alloc_q */\n-\tnum_aq = kni_fifo_count(kni->alloc_q);\n+\tnum_aq = kni_fifo_count(kni->alloc_q[index]);\n \n \t/* Get the number of free entries in free_q */\n-\tnum_fq = kni_fifo_free_count(kni->free_q);\n+\tnum_fq = kni_fifo_free_count(kni->free_q[index]);\n \n \t/* Calculate the number of entries to be dequeued from rx_q */\n \tnum = min(num_rq, num_tq);\n@@ -463,12 +491,12 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)\n \t\treturn;\n \n \t/* Burst dequeue from rx_q */\n-\tret = kni_fifo_get(kni->rx_q, kni->pa, num);\n+\tret = kni_fifo_get(kni->rx_q[index], kni->pa, num);\n \tif (ret == 0)\n \t\treturn; /* Failing should not happen */\n \n \t/* Dequeue entries from alloc_q */\n-\tret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num);\n+\tret = kni_fifo_get(kni->alloc_q[index], kni->alloc_pa, num);\n \tif (ret) {\n \t\tnum = ret;\n \t\t/* Copy mbufs */\n@@ -498,14 +526,14 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)\n \t\t}\n \n \t\t/* Burst enqueue mbufs into tx_q */\n-\t\tret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);\n+\t\tret = kni_fifo_put(kni->tx_q[index], kni->alloc_va, num);\n \t\tif (ret != num)\n \t\t\t/* Failing should not happen */\n 
\t\t\tpr_err(\"Fail to enqueue mbufs into tx_q\\n\");\n \t}\n \n \t/* Burst enqueue mbufs into free_q */\n-\tret = kni_fifo_put(kni->free_q, kni->va, num);\n+\tret = kni_fifo_put(kni->free_q[index], kni->va, num);\n \tif (ret != num)\n \t\t/* Failing should not happen */\n \t\tpr_err(\"Fail to enqueue mbufs into free_q\\n\");\n@@ -522,7 +550,7 @@ kni_net_rx_lo_fifo(struct kni_dev *kni)\n  * RX: loopback with enqueue/dequeue fifos and sk buffer copies.\n  */\n static void\n-kni_net_rx_lo_fifo_skb(struct kni_dev *kni)\n+kni_net_rx_lo_fifo_skb(struct kni_dev *kni, int index)\n {\n \tuint32_t ret;\n \tuint32_t len;\n@@ -533,10 +561,10 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)\n \tstruct net_device *dev = kni->net_dev;\n \n \t/* Get the number of entries in rx_q */\n-\tnum_rq = kni_fifo_count(kni->rx_q);\n+\tnum_rq = kni_fifo_count(kni->rx_q[index]);\n \n \t/* Get the number of free entries in free_q */\n-\tnum_fq = kni_fifo_free_count(kni->free_q);\n+\tnum_fq = kni_fifo_free_count(kni->free_q[index]);\n \n \t/* Calculate the number of entries to dequeue from rx_q */\n \tnum = min(num_rq, num_fq);\n@@ -547,7 +575,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)\n \t\treturn;\n \n \t/* Burst dequeue mbufs from rx_q */\n-\tret = kni_fifo_get(kni->rx_q, kni->pa, num);\n+\tret = kni_fifo_get(kni->rx_q[index], kni->pa, num);\n \tif (ret == 0)\n \t\treturn;\n \n@@ -603,7 +631,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)\n \t}\n \n \t/* enqueue all the mbufs from rx_q into free_q */\n-\tret = kni_fifo_put(kni->free_q, kni->va, num);\n+\tret = kni_fifo_put(kni->free_q[index], kni->va, num);\n \tif (ret != num)\n \t\t/* Failing should not happen */\n \t\tpr_err(\"Fail to enqueue mbufs into free_q\\n\");\n@@ -613,11 +641,13 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)\n void\n kni_net_rx(struct kni_dev *kni)\n {\n+\tint i;\n \t/**\n \t * It doesn't need to check if it is NULL pointer,\n \t * as it has a default value\n \t */\n-\t(*kni_net_rx_func)(kni);\n+\tfor (i = 0; i < kni->fifos_num; i++)\n+\t\t(*kni_net_rx_func)(kni, i);\n }\n \n /*\ndiff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c\nindex 837d021..9457076 100644\n--- a/lib/librte_kni/rte_kni.c\n+++ b/lib/librte_kni/rte_kni.c\n@@ -37,10 +37,10 @@\n #define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)\n \n #define KNI_MZ_NAME_FMT\t\t\t\"kni_info_%s\"\n-#define KNI_TX_Q_MZ_NAME_FMT\t\t\"kni_tx_%s\"\n-#define KNI_RX_Q_MZ_NAME_FMT\t\t\"kni_rx_%s\"\n-#define KNI_ALLOC_Q_MZ_NAME_FMT\t\t\"kni_alloc_%s\"\n-#define KNI_FREE_Q_MZ_NAME_FMT\t\t\"kni_free_%s\"\n+#define KNI_TX_Q_MZ_NAME_FMT\t\t\"kni_tx_%s.%d\"\n+#define KNI_RX_Q_MZ_NAME_FMT\t\t\"kni_rx_%s.%d\"\n+#define KNI_ALLOC_Q_MZ_NAME_FMT\t\t\"kni_alloc_%s.%d\"\n+#define KNI_FREE_Q_MZ_NAME_FMT\t\t\"kni_free_%s.%d\"\n #define KNI_REQ_Q_MZ_NAME_FMT\t\t\"kni_req_%s\"\n #define KNI_RESP_Q_MZ_NAME_FMT\t\t\"kni_resp_%s\"\n #define KNI_SYNC_ADDR_MZ_NAME_FMT\t\"kni_sync_%s\"\n@@ -62,15 +62,15 @@ struct rte_kni {\n \tstruct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */\n \tunsigned int mbuf_size;                 /**< mbuf size */\n \n-\tconst struct rte_memzone *m_tx_q;   /**< TX queue memzone */\n-\tconst struct rte_memzone *m_rx_q;   /**< RX queue memzone */\n-\tconst struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */\n-\tconst struct rte_memzone *m_free_q; /**< Free queue memzone */\n+\tconst struct rte_memzone *m_tx_q[RTE_MAX_LCORE];   /**< TX queue memzone */\n+\tconst struct rte_memzone *m_rx_q[RTE_MAX_LCORE];   /**< RX queue memzone */\n+\tconst 
struct rte_memzone *m_alloc_q[RTE_MAX_LCORE];/**< Alloc queue memzone */\n+\tconst struct rte_memzone *m_free_q[RTE_MAX_LCORE]; /**< Free queue memzone */\n \n-\tstruct rte_kni_fifo *tx_q;          /**< TX queue */\n-\tstruct rte_kni_fifo *rx_q;          /**< RX queue */\n-\tstruct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */\n-\tstruct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */\n+\tstruct rte_kni_fifo *tx_q[RTE_MAX_LCORE];          /**< TX queue */\n+\tstruct rte_kni_fifo *rx_q[RTE_MAX_LCORE];          /**< RX queue */\n+\tstruct rte_kni_fifo *alloc_q[RTE_MAX_LCORE];       /**< Allocated mbufs queue */\n+\tstruct rte_kni_fifo *free_q[RTE_MAX_LCORE];        /**< To be freed mbufs queue */\n \n \tconst struct rte_memzone *m_req_q;  /**< Request queue memzone */\n \tconst struct rte_memzone *m_resp_q; /**< Response queue memzone */\n@@ -82,6 +82,8 @@ struct rte_kni {\n \tvoid *sync_addr;                   /**< Req/Resp Mem address */\n \n \tstruct rte_kni_ops ops;             /**< operations for request */\n+\tunsigned int queues_num;\t\t    /**< Num of tx queues of KNI vnic */\n+\tunsigned int fifos_num;                 /**< TX/RX/Alloc/Free fifos number */\n };\n \n enum kni_ops_status {\n@@ -89,8 +91,8 @@ enum kni_ops_status {\n \tKNI_REQ_REGISTERED,\n };\n \n-static void kni_free_mbufs(struct rte_kni *kni);\n-static void kni_allocate_mbufs(struct rte_kni *kni);\n+static void kni_free_mbufs(struct rte_kni *kni, unsigned int index);\n+static void kni_allocate_mbufs(struct rte_kni *kni, unsigned int index);\n \n static volatile int kni_fd = -1;\n \n@@ -140,29 +142,38 @@ __rte_kni_get(const char *name)\n }\n \n static int\n-kni_reserve_mz(struct rte_kni *kni)\n+kni_reserve_mz(struct rte_kni *kni, unsigned fifos_num)\n {\n+\tunsigned int i, j;\n \tchar mz_name[RTE_MEMZONE_NAMESIZE];\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name, i);\n+\t\tkni->m_tx_q[i] = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n+\t\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n+\t\tKNI_MEM_CHECK(kni->m_tx_q[i] == NULL, tx_q_fail);\n+\t}\n \n-\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);\n-\tkni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n-\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n-\tKNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name, i);\n+\t\tkni->m_rx_q[i] = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n+\t\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n+\t\tKNI_MEM_CHECK(kni->m_rx_q[i] == NULL, rx_q_fail);\n+\t}\n \n-\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);\n-\tkni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n-\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n-\tKNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);\n \n-\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);\n-\tkni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n-\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n-\tKNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name, i);\n+\t\tkni->m_alloc_q[i] = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n+\t\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n+\t\tKNI_MEM_CHECK(kni->m_alloc_q[i] == NULL, alloc_q_fail);\n+\t}\n \n-\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, 
KNI_FREE_Q_MZ_NAME_FMT, kni->name);\n-\tkni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n-\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n-\tKNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);\n+\t for (i = 0; i < fifos_num; i++) {\n+\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name, i);\n+\t\tkni->m_free_q[i] = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n+\t\t\t\tRTE_MEMZONE_IOVA_CONTIG);\n+\t\tKNI_MEM_CHECK(kni->m_free_q[i] == NULL, free_q_fail);\n+\t}\n \n \tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);\n \tkni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,\n@@ -186,24 +197,51 @@ kni_reserve_mz(struct rte_kni *kni)\n resp_q_fail:\n \trte_memzone_free(kni->m_req_q);\n req_q_fail:\n-\trte_memzone_free(kni->m_free_q);\n+\tfor (j = 0; j < fifos_num; j++) {\n+\t\trte_memzone_free(kni->m_alloc_q[j]);\n+\t\trte_memzone_free(kni->m_rx_q[j]);\n+\t\trte_memzone_free(kni->m_tx_q[j]);\n+\t\trte_memzone_free(kni->m_free_q[j]);\n+\t}\n+\treturn -1;\n free_q_fail:\n-\trte_memzone_free(kni->m_alloc_q);\n+\tfor (j = 0; j < fifos_num; j++) {\n+\t\trte_memzone_free(kni->m_alloc_q[j]);\n+\t\trte_memzone_free(kni->m_rx_q[j]);\n+\t\trte_memzone_free(kni->m_tx_q[j]);\n+\t}\n+\tfor (j = 0; j < i; j++)\n+\t\trte_memzone_free(kni->m_free_q[j]);\n+\treturn -1;\n alloc_q_fail:\n-\trte_memzone_free(kni->m_rx_q);\n+\tfor (j = 0; j < fifos_num; j++) {\n+\t\trte_memzone_free(kni->m_rx_q[j]);\n+\t\trte_memzone_free(kni->m_tx_q[j]);\n+\t}\n+\tfor (j = 0; j < i; j++)\n+\t\trte_memzone_free(kni->m_alloc_q[j]);\n+\treturn -1;\n rx_q_fail:\n-\trte_memzone_free(kni->m_tx_q);\n+\tfor (j = 0; j < fifos_num; j++)\n+\t\trte_memzone_free(kni->m_tx_q[j]);\n+\tfor (j = 0; j < i; j++)\n+\t\trte_memzone_free(kni->m_rx_q[j]);\n+\treturn -1;\n tx_q_fail:\n+\tfor (j = 0; j < i; j++)\n+\t\trte_memzone_free(kni->m_tx_q[j]);\n \treturn -1;\n }\n \n static void\n-kni_release_mz(struct rte_kni *kni)\n+kni_release_mz(struct rte_kni *kni, unsigned int fifos_num)\n {\n-\trte_memzone_free(kni->m_tx_q);\n-\trte_memzone_free(kni->m_rx_q);\n-\trte_memzone_free(kni->m_alloc_q);\n-\trte_memzone_free(kni->m_free_q);\n+\tfor (unsigned int i = 0; i < fifos_num; i++) {\n+\t\trte_memzone_free(kni->m_tx_q[i]);\n+\t\trte_memzone_free(kni->m_rx_q[i]);\n+\t\trte_memzone_free(kni->m_alloc_q[i]);\n+\t\trte_memzone_free(kni->m_free_q[i]);\n+\t}\n \trte_memzone_free(kni->m_req_q);\n \trte_memzone_free(kni->m_resp_q);\n \trte_memzone_free(kni->m_sync_addr);\n@@ -215,6 +253,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \t      struct rte_kni_ops *ops)\n {\n \tint ret;\n+\tunsigned int i, fifos_num;\n \tstruct rte_kni_device_info dev_info;\n \tstruct rte_kni *kni;\n \tstruct rte_tailq_entry *te;\n@@ -264,34 +303,47 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \tdev_info.mtu = conf->mtu;\n \tdev_info.min_mtu = conf->min_mtu;\n \tdev_info.max_mtu = conf->max_mtu;\n-\n+\tdev_info.queues_num = conf->queues_num ? conf->queues_num : 1;\n+\tdev_info.fifos_num = conf->fifos_num ? 
conf->fifos_num : 1;\n \tmemcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);\n \n \tstrlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);\n \n-\tret = kni_reserve_mz(kni);\n+\tret = kni_reserve_mz(kni, dev_info.fifos_num);\n \tif (ret < 0)\n \t\tgoto mz_fail;\n \n+\tfifos_num = dev_info.fifos_num;\n+\tkni->fifos_num = fifos_num;\n+\tkni->queues_num = dev_info.queues_num;\n+\n \t/* TX RING */\n-\tkni->tx_q = kni->m_tx_q->addr;\n-\tkni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.tx_phys = kni->m_tx_q->iova;\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tkni->tx_q[i] = kni->m_tx_q[i]->addr;\n+\t\tkni_fifo_init(kni->tx_q[i], KNI_FIFO_COUNT_MAX);\n+\t\tdev_info.tx_phys[i] = kni->m_tx_q[i]->iova;\n+\t}\n \n \t/* RX RING */\n-\tkni->rx_q = kni->m_rx_q->addr;\n-\tkni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.rx_phys = kni->m_rx_q->iova;\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tkni->rx_q[i] = kni->m_rx_q[i]->addr;\n+\t\tkni_fifo_init(kni->rx_q[i], KNI_FIFO_COUNT_MAX);\n+\t\tdev_info.rx_phys[i] = kni->m_rx_q[i]->iova;\n+\t}\n \n \t/* ALLOC RING */\n-\tkni->alloc_q = kni->m_alloc_q->addr;\n-\tkni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.alloc_phys = kni->m_alloc_q->iova;\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tkni->alloc_q[i] = kni->m_alloc_q[i]->addr;\n+\t\tkni_fifo_init(kni->alloc_q[i], KNI_FIFO_COUNT_MAX);\n+\t\tdev_info.alloc_phys[i] = kni->m_alloc_q[i]->iova;\n+\t}\n \n \t/* FREE RING */\n-\tkni->free_q = kni->m_free_q->addr;\n-\tkni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.free_phys = kni->m_free_q->iova;\n+\tfor (i = 0; i < fifos_num; i++) {\n+\t\tkni->free_q[i] = kni->m_free_q[i]->addr;\n+\t\tkni_fifo_init(kni->free_q[i], KNI_FIFO_COUNT_MAX);\n+\t\tdev_info.free_phys[i] = kni->m_free_q[i]->iova;\n+\t}\n \n \t/* Request RING */\n \tkni->req_q = kni->m_req_q->addr;\n@@ -326,12 +378,13 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \trte_mcfg_tailq_write_unlock();\n \n \t/* Allocate mbufs and then put them into alloc_q */\n-\tkni_allocate_mbufs(kni);\n+\tfor (i = 0; i < fifos_num; i++)\n+\t\tkni_allocate_mbufs(kni, i);\n \n \treturn kni;\n \n ioctl_fail:\n-\tkni_release_mz(kni);\n+\tkni_release_mz(kni, fifos_num);\n mz_fail:\n \trte_free(kni);\n kni_fail:\n@@ -407,7 +460,7 @@ rte_kni_release(struct rte_kni *kni)\n \tstruct rte_kni_list *kni_list;\n \tstruct rte_kni_device_info dev_info;\n \tuint32_t retry = 5;\n-\n+\tunsigned int i;\n \tif (!kni)\n \t\treturn -1;\n \n@@ -436,17 +489,24 @@ rte_kni_release(struct rte_kni *kni)\n \t/* mbufs in all fifo should be released, except request/response */\n \n \t/* wait until all rxq packets processed by kernel */\n-\twhile (kni_fifo_count(kni->rx_q) && retry--)\n-\t\tusleep(1000);\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\twhile (kni_fifo_count(kni->rx_q[i]) && retry--)\n+\t\t\tusleep(1000);\n+\t\tretry = 5;\n+\t}\n \n-\tif (kni_fifo_count(kni->rx_q))\n-\t\tRTE_LOG(ERR, KNI, \"Fail to free all Rx-q items\\n\");\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\tif (kni_fifo_count(kni->rx_q[i]))\n+\t\t\tRTE_LOG(ERR, KNI, \"Fail to free all Rx-q items for queue: %d\\n\", i);\n+\t}\n \n-\tkni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);\n-\tkni_free_fifo(kni->tx_q);\n-\tkni_free_fifo(kni->free_q);\n+\tfor (i = 0; i < kni->fifos_num; i++) {\n+\t\tkni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q[i]);\n+\t\tkni_free_fifo(kni->tx_q[i]);\n+\t\tkni_free_fifo(kni->free_q[i]);\n+\t}\n \n-\tkni_release_mz(kni);\n+\tkni_release_mz(kni, kni->fifos_num);\n \n \trte_free(kni);\n \n@@ 
-602,9 +662,10 @@ rte_kni_handle_request(struct rte_kni *kni)\n }\n \n unsigned\n-rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)\n+rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,\n+\t\tunsigned int num, unsigned int index)\n {\n-\tnum = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);\n+\tnum = RTE_MIN(kni_fifo_free_count(kni->rx_q[index]), num);\n \tvoid *phy_mbufs[num];\n \tunsigned int ret;\n \tunsigned int i;\n@@ -612,33 +673,34 @@ rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)\n \tfor (i = 0; i < num; i++)\n \t\tphy_mbufs[i] = va2pa_all(mbufs[i]);\n \n-\tret = kni_fifo_put(kni->rx_q, phy_mbufs, num);\n+\tret = kni_fifo_put(kni->rx_q[index], phy_mbufs, num);\n \n \t/* Get mbufs from free_q and then free them */\n-\tkni_free_mbufs(kni);\n+\tkni_free_mbufs(kni, index);\n \n \treturn ret;\n }\n \n unsigned\n-rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)\n+rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,\n+\t\tunsigned int num, unsigned int index)\n {\n-\tunsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);\n+\tunsigned int ret = kni_fifo_get(kni->tx_q[index], (void **)mbufs, num);\n \n \t/* If buffers removed, allocate mbufs and then put them into alloc_q */\n \tif (ret)\n-\t\tkni_allocate_mbufs(kni);\n+\t\tkni_allocate_mbufs(kni, index);\n \n \treturn ret;\n }\n \n static void\n-kni_free_mbufs(struct rte_kni *kni)\n+kni_free_mbufs(struct rte_kni *kni, unsigned int index)\n {\n \tint i, ret;\n \tstruct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];\n \n-\tret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);\n+\tret = kni_fifo_get(kni->free_q[index], (void **)pkts, MAX_MBUF_BURST_NUM);\n \tif (likely(ret > 0)) {\n \t\tfor (i = 0; i < ret; i++)\n \t\t\trte_pktmbuf_free(pkts[i]);\n@@ -646,7 +708,7 @@ kni_free_mbufs(struct rte_kni *kni)\n }\n \n static void\n-kni_allocate_mbufs(struct rte_kni *kni)\n+kni_allocate_mbufs(struct rte_kni *kni, unsigned int index)\n {\n \tint i, ret;\n \tstruct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];\n@@ -674,7 +736,7 @@ kni_allocate_mbufs(struct rte_kni *kni)\n \t\treturn;\n \t}\n \n-\tallocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1)\n+\tallocq_free = (kni->alloc_q[index]->read - kni->alloc_q[index]->write - 1)\n \t\t\t& (MAX_MBUF_BURST_NUM - 1);\n \tfor (i = 0; i < allocq_free; i++) {\n \t\tpkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);\n@@ -690,7 +752,7 @@ kni_allocate_mbufs(struct rte_kni *kni)\n \tif (i <= 0)\n \t\treturn;\n \n-\tret = kni_fifo_put(kni->alloc_q, phys, i);\n+\tret = kni_fifo_put(kni->alloc_q[index], phys, i);\n \n \t/* Check if any mbufs not put into alloc_q, and then free them */\n \tif (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {\ndiff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h\nindex b0eaf46..70cec3c 100644\n--- a/lib/librte_kni/rte_kni.h\n+++ b/lib/librte_kni/rte_kni.h\n@@ -75,6 +75,9 @@ struct rte_kni_conf {\n \tuint16_t mtu;\n \tuint16_t min_mtu;\n \tuint16_t max_mtu;\n+\n+\tunsigned int fifos_num; \n+\tunsigned int queues_num;\n };\n \n /**\n@@ -162,12 +165,14 @@ int rte_kni_handle_request(struct rte_kni *kni);\n  *  The array to store the pointers of mbufs.\n  * @param num\n  *  The maximum number per burst.\n+ * @param index\n+ *  The rx_q fifo's index of the KNI interface.\n  *\n  * @return\n  *  The actual number of packets retrieved.\n  */\n unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,\n-\t\tunsigned num);\n+\t\tunsigned int num, 
unsigned int index);\n \n /**\n  * Send a burst of packets to a KNI interface. The packets to be sent out are\n@@ -181,12 +186,14 @@ unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,\n  *  The array to store the pointers of mbufs.\n  * @param num\n  *  The maximum number per burst.\n+ * @param index\n+ *  The tx_q fifo's index of the KNI interface.\n  *\n  * @return\n  *  The actual number of packets sent.\n  */\n unsigned rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,\n-\t\tunsigned num);\n+\t\tunsigned int num, unsigned int index);\n \n /**\n  * Get the KNI context of its name.\ndiff --git a/lib/librte_kni/rte_kni_common.h b/lib/librte_kni/rte_kni_common.h\nindex ffb3182..35afebf 100644\n--- a/lib/librte_kni/rte_kni_common.h\n+++ b/lib/librte_kni/rte_kni_common.h\n@@ -99,10 +99,10 @@ struct rte_kni_mbuf {\n struct rte_kni_device_info {\n \tchar name[RTE_KNI_NAMESIZE];  /**< Network device name for KNI */\n \n-\tphys_addr_t tx_phys;\n-\tphys_addr_t rx_phys;\n-\tphys_addr_t alloc_phys;\n-\tphys_addr_t free_phys;\n+\tphys_addr_t tx_phys[RTE_MAX_LCORE];\n+\tphys_addr_t rx_phys[RTE_MAX_LCORE];\n+\tphys_addr_t alloc_phys[RTE_MAX_LCORE];\n+\tphys_addr_t free_phys[RTE_MAX_LCORE];\n \n \t/* Used by Ethtool */\n \tphys_addr_t req_phys;\n@@ -127,6 +127,8 @@ struct rte_kni_device_info {\n \tunsigned int max_mtu;\n \tuint8_t mac_addr[6];\n \tuint8_t iova_mode;\n+\tunsigned int fifos_num;\n+\tunsigned int queues_num;\n };\n \n #define KNI_DEVICE \"kni\"\ndiff --git a/lib/librte_port/rte_port_kni.c b/lib/librte_port/rte_port_kni.c\nindex 7b370f9..648b832 100644\n--- a/lib/librte_port/rte_port_kni.c\n+++ b/lib/librte_port/rte_port_kni.c\n@@ -67,7 +67,7 @@ rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)\n \t\t\tport;\n \tuint16_t rx_pkt_cnt;\n \n-\trx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts);\n+\trx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts, 0);\n \tRTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);\n \treturn rx_pkt_cnt;\n }\n@@ -165,7 +165,7 @@ send_burst(struct rte_port_kni_writer *p)\n {\n \tuint32_t nb_tx;\n \n-\tnb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);\n+\tnb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count, 0);\n \n \tRTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);\n \tfor (; nb_tx < p->tx_buf_count; nb_tx++)\n@@ -208,7 +208,7 @@ rte_port_kni_writer_tx_bulk(void *port,\n \t\t\tsend_burst(p);\n \n \t\tRTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);\n-\t\tn_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);\n+\t\tn_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts, 0);\n \n \t\tRTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);\n \t\tfor (; n_pkts_ok < n_pkts; n_pkts_ok++) {\n@@ -349,7 +349,7 @@ send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)\n {\n \tuint32_t nb_tx = 0, i;\n \n-\tnb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);\n+\tnb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count, 0);\n \n \t/* We sent all the packets in a first try */\n \tif (nb_tx >= p->tx_buf_count) {\n@@ -360,7 +360,7 @@ send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)\n \tfor (i = 0; i < p->n_retries; i++) {\n \t\tnb_tx += rte_kni_tx_burst(p->kni,\n \t\t\tp->tx_buf + nb_tx,\n-\t\t\tp->tx_buf_count - nb_tx);\n+\t\t\tp->tx_buf_count - nb_tx, 0);\n \n \t\t/* We sent all the packets in more than one try */\n \t\tif (nb_tx >= p->tx_buf_count) {\n@@ -412,7 +412,7 @@ rte_port_kni_writer_nodrop_tx_bulk(void *port,\n \t\t\tsend_burst_nodrop(p);\n \n 
\t\tRTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);\n-\t\tn_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);\n+\t\tn_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts, 0);\n \n \t\tif (n_pkts_ok >= n_pkts)\n \t\t\treturn 0;\n",
    "prefixes": [
        "v1",
        "2/2"
    ]
}
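
The diff above extends the KNI burst API with a fifo index: rte_kni_rx_burst() and rte_kni_tx_burst() gain an index argument, and struct rte_kni_conf gains fifos_num/queues_num so a device can be created with one fifo per worker core. A minimal sketch (not part of the patch) of a per-thread poll loop under those assumptions, where each worker owns one fifo index and echoes packets back; PKT_BURST_SZ and the echo behaviour are illustrative:

#include <rte_kni.h>
#include <rte_mbuf.h>

#define PKT_BURST_SZ 32

/* One instance of this loop runs per worker thread; fifo_idx must be
 * less than the fifos_num the KNI device was created with. */
static void
kni_fifo_worker(struct rte_kni *kni, unsigned int fifo_idx)
{
    struct rte_mbuf *pkts[PKT_BURST_SZ];
    unsigned int nb_rx, nb_tx;

    for (;;) {
        /* Poll only this thread's fifo; other indexes belong
         * to other workers. */
        nb_rx = rte_kni_rx_burst(kni, pkts, PKT_BURST_SZ, fifo_idx);
        if (nb_rx == 0) {
            rte_kni_handle_request(kni);
            continue;
        }

        /* Echo the burst back through the same fifo index. */
        nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx, fifo_idx);

        /* Free whatever the tx fifo could not absorb. */
        while (nb_tx < nb_rx)
            rte_pktmbuf_free(pkts[nb_tx++]);
    }
}

Because each fifo remains multi-producer/multi-consumer safe (the kni_mp_fifo_put()/kni_mc_fifo_get() helpers in the diff synchronize with cmpxchg()), sharing one index between threads would also work, at the cost of CAS contention; the per-thread index is what delivers the parallelism the commit message describes.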