get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (a full replacement of its writable fields).

GET /api/patches/45180/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 45180,
    "url": "https://patches.dpdk.org/api/patches/45180/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20180923191202.64896-1-iryzhov@nfware.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180923191202.64896-1-iryzhov@nfware.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180923191202.64896-1-iryzhov@nfware.com",
    "date": "2018-09-23T19:12:02",
    "name": "[v2] kni: dynamically allocate memory for each KNI",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "aae700238aba0c6eb19399e0bf2174aa34bdf3a0",
    "submitter": {
        "id": 175,
        "url": "https://patches.dpdk.org/api/people/175/?format=api",
        "name": "Igor Ryzhov",
        "email": "iryzhov@nfware.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20180923191202.64896-1-iryzhov@nfware.com/mbox/",
    "series": [
        {
            "id": 1456,
            "url": "https://patches.dpdk.org/api/series/1456/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=1456",
            "date": "2018-09-23T19:12:02",
            "name": "[v2] kni: dynamically allocate memory for each KNI",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/1456/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/45180/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/45180/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7AB9E2B8C;\n\tSun, 23 Sep 2018 21:12:15 +0200 (CEST)",
            "from mail-lj1-f196.google.com (mail-lj1-f196.google.com\n\t[209.85.208.196]) by dpdk.org (Postfix) with ESMTP id 03E21201\n\tfor <dev@dpdk.org>; Sun, 23 Sep 2018 21:12:13 +0200 (CEST)",
            "by mail-lj1-f196.google.com with SMTP id j19-v6so16326309ljc.7\n\tfor <dev@dpdk.org>; Sun, 23 Sep 2018 12:12:13 -0700 (PDT)",
            "from localhost.localdomain ([77.105.176.214])\n\tby smtp.gmail.com with ESMTPSA id\n\t22-v6sm6130278ljv.83.2018.09.23.12.12.10\n\t(version=TLS1_2 cipher=ECDHE-RSA-AES128-GCM-SHA256 bits=128/128);\n\tSun, 23 Sep 2018 12:12:11 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=nfware-com.20150623.gappssmtp.com; s=20150623;\n\th=from:to:cc:subject:date:message-id:in-reply-to:references;\n\tbh=EfnN8TkukYp0vwV3p+4FdG9TK8COpUEnL0VacafR7w8=;\n\tb=aosIIIxY4u4eCFU/qIGUusoCV2DGI4sOA9NCtx+lPw8XV/wdgWzUthtY/fAR2CEjXk\n\tVC9MBtSPwJjm3BgGCaV69KNmifNbH38qIW6d0qWoguk9HoTZKqB90YiK9mNs+UtQcvud\n\tNzv5MwXM04QYdI9+TpuDCazwdLLebhM/+PNCs0mFX1t1W01gPbjt9MgBakdXWGXIlEzv\n\ttNKv31rgu005/aBDOjXJ+cAhyS6xOSCb3Tb00DHbHhciB7PHicYvyZ6UXLZSzvf+FfCb\n\tVukAAaNQWvXEA5Jl6cYa8boLGpIvmbyUfe0nun3pZ7m+YbejDcJTWJ1Gq7XGhhWydjTZ\n\tJFSg==",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n\td=1e100.net; s=20161025;\n\th=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n\t:references;\n\tbh=EfnN8TkukYp0vwV3p+4FdG9TK8COpUEnL0VacafR7w8=;\n\tb=VusK5hXsno3NAqVFQIjx9P+dmU3xvjdpewV/YgufxfNICzD7JMNQW/G20ry8+he2MD\n\tMnAgumQRYj0jP2czBqH9PxGWrHo9TnzUhao62AvbsNvEl6xxA1Ra3G9Gv7ytHqyW8Oli\n\tQHo+U2VBgM19wljn8TZgAlaAj5t6rxI12EW1KKlJXuyOcNldt/ZGvVTcdO+ZEJJK+EIJ\n\tRjiwc082lqSQjXW5RnTEDOTzff1USkHmn97U8oDiYlgbU6p+eiuOrnyfZ2ttIRzkkE5B\n\tASj0QsXUvLAaQ+KmMTxuXLJvpNhKCVZkyJNCqBnY+gzU4H2G3RTRRF0r9ZvtCe24I2vg\n\t4M5g==",
        "X-Gm-Message-State": "ABuFfohJw14238bPb/jPs5K8aO60u3aft4SfGOka2kssB1hIPWd4EpN8\n\tM0eBjvwjL2gEpikOBOxT6RRZKDxqBmRyuw==",
        "X-Google-Smtp-Source": "ANB0VdYjx1MrXr+auJ4hnPebW0ngQoOKhk09x1AE2xQAkzW+k0J+a+wU+Nl2COAPMloi+8dW/ngwEA==",
        "X-Received": "by 2002:a2e:85d5:: with SMTP id\n\th21-v6mr8303962ljj.103.1537729932449; \n\tSun, 23 Sep 2018 12:12:12 -0700 (PDT)",
        "From": "Igor Ryzhov <iryzhov@nfware.com>",
        "To": "dev@dpdk.org",
        "Cc": "ferruh.yigit@intel.com",
        "Date": "Sun, 23 Sep 2018 22:12:02 +0300",
        "Message-Id": "<20180923191202.64896-1-iryzhov@nfware.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<20180802142522.57900-1-iryzhov@nfware.com>",
        "References": "<20180802142522.57900-1-iryzhov@nfware.com>",
        "Subject": "[dpdk-dev] [PATCH v2] kni: dynamically allocate memory for each KNI",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Long time ago preallocation of memory for KNI was introduced in commit\n0c6bc8e. It was done because of lack of ability to free previously\nallocated memzones, which led to memzone exhaustion. Currently memzones\ncan be freed and this patch uses this ability for dynamic KNI memory\nallocation.\n\nSigned-off-by: Igor Ryzhov <iryzhov@nfware.com>\n---\nv2:\n * allocate KNI using rte_zmalloc\n * swap reserve/release functions\n * use \"kni\" as a variable name\n * use macros for memzone names\n\n lib/librte_kni/rte_kni.c | 502 +++++++++++++++++----------------------\n lib/librte_kni/rte_kni.h |   6 +-\n test/test/test_kni.c     |   6 -\n 3 files changed, 218 insertions(+), 296 deletions(-)",
    "diff": "diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c\nindex 8a8f6c1cc..6af6e7efe 100644\n--- a/lib/librte_kni/rte_kni.c\n+++ b/lib/librte_kni/rte_kni.c\n@@ -18,6 +18,9 @@\n #include <rte_log.h>\n #include <rte_kni.h>\n #include <rte_memzone.h>\n+#include <rte_tailq.h>\n+#include <rte_rwlock.h>\n+#include <rte_eal_memconfig.h>\n #include <exec-env/rte_kni_common.h>\n #include \"rte_kni_fifo.h\"\n \n@@ -30,7 +33,23 @@\n \n #define KNI_REQUEST_MBUF_NUM_MAX      32\n \n-#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)\n+#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)\n+\n+#define KNI_MZ_NAME_FMT\t\t\t\"kni_info_%s\"\n+#define KNI_TX_Q_MZ_NAME_FMT\t\t\"kni_tx_%s\"\n+#define KNI_RX_Q_MZ_NAME_FMT\t\t\"kni_rx_%s\"\n+#define KNI_ALLOC_Q_MZ_NAME_FMT\t\t\"kni_alloc_%s\"\n+#define KNI_FREE_Q_MZ_NAME_FMT\t\t\"kni_free_%s\"\n+#define KNI_REQ_Q_MZ_NAME_FMT\t\t\"kni_req_%s\"\n+#define KNI_RESP_Q_MZ_NAME_FMT\t\t\"kni_resp_%s\"\n+#define KNI_SYNC_ADDR_MZ_NAME_FMT\t\"kni_sync_%s\"\n+\n+TAILQ_HEAD(rte_kni_list, rte_tailq_entry);\n+\n+static struct rte_tailq_elem rte_kni_tailq = {\n+\t.name = \"RTE_KNI\",\n+};\n+EAL_REGISTER_TAILQ(rte_kni_tailq)\n \n /**\n  * KNI context\n@@ -42,18 +61,26 @@ struct rte_kni {\n \tstruct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */\n \tunsigned mbuf_size;                 /**< mbuf size */\n \n+\tconst struct rte_memzone *m_tx_q;   /**< TX queue memzone */\n+\tconst struct rte_memzone *m_rx_q;   /**< RX queue memzone */\n+\tconst struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */\n+\tconst struct rte_memzone *m_free_q; /**< Free queue memzone */\n+\n \tstruct rte_kni_fifo *tx_q;          /**< TX queue */\n \tstruct rte_kni_fifo *rx_q;          /**< RX queue */\n \tstruct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */\n \tstruct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */\n \n+\tconst struct rte_memzone *m_req_q;  /**< Request queue memzone 
*/\n+\tconst struct rte_memzone *m_resp_q; /**< Response queue memzone */\n+\tconst struct rte_memzone *m_sync_addr;/**< Sync addr memzone */\n+\n \t/* For request & response */\n \tstruct rte_kni_fifo *req_q;         /**< Request queue */\n \tstruct rte_kni_fifo *resp_q;        /**< Response queue */\n \tvoid * sync_addr;                   /**< Req/Resp Mem address */\n \n \tstruct rte_kni_ops ops;             /**< operations for request */\n-\tuint8_t in_use : 1;                 /**< kni in use */\n };\n \n enum kni_ops_status {\n@@ -61,232 +88,91 @@ enum kni_ops_status {\n \tKNI_REQ_REGISTERED,\n };\n \n-/**\n- * KNI memzone pool slot\n- */\n-struct rte_kni_memzone_slot {\n-\tuint32_t id;\n-\tuint8_t in_use : 1;                    /**< slot in use */\n-\n-\t/* Memzones */\n-\tconst struct rte_memzone *m_ctx;       /**< KNI ctx */\n-\tconst struct rte_memzone *m_tx_q;      /**< TX queue */\n-\tconst struct rte_memzone *m_rx_q;      /**< RX queue */\n-\tconst struct rte_memzone *m_alloc_q;   /**< Allocated mbufs queue */\n-\tconst struct rte_memzone *m_free_q;    /**< To be freed mbufs queue */\n-\tconst struct rte_memzone *m_req_q;     /**< Request queue */\n-\tconst struct rte_memzone *m_resp_q;    /**< Response queue */\n-\tconst struct rte_memzone *m_sync_addr;\n-\n-\t/* Free linked list */\n-\tstruct rte_kni_memzone_slot *next;     /**< Next slot link.list */\n-};\n-\n-/**\n- * KNI memzone pool\n- */\n-struct rte_kni_memzone_pool {\n-\tuint8_t initialized : 1;            /**< Global KNI pool init flag */\n-\n-\tuint32_t max_ifaces;                /**< Max. 
num of KNI ifaces */\n-\tstruct rte_kni_memzone_slot *slots;        /**< Pool slots */\n-\trte_spinlock_t mutex;               /**< alloc/release mutex */\n-\n-\t/* Free memzone slots linked-list */\n-\tstruct rte_kni_memzone_slot *free;         /**< First empty slot */\n-\tstruct rte_kni_memzone_slot *free_tail;    /**< Last empty slot */\n-};\n-\n-\n static void kni_free_mbufs(struct rte_kni *kni);\n static void kni_allocate_mbufs(struct rte_kni *kni);\n \n static volatile int kni_fd = -1;\n-static struct rte_kni_memzone_pool kni_memzone_pool = {\n-\t.initialized = 0,\n-};\n \n-static const struct rte_memzone *\n-kni_memzone_reserve(const char *name, size_t len, int socket_id,\n-\t\t\t\t\t\tunsigned flags)\n+/* Shall be called before any allocation happens */\n+int\n+rte_kni_init(unsigned int max_kni_ifaces __rte_unused)\n {\n-\tconst struct rte_memzone *mz = rte_memzone_lookup(name);\n-\n-\tif (mz == NULL)\n-\t\tmz = rte_memzone_reserve(name, len, socket_id, flags);\n+\t/* Check FD and open */\n+\tif (kni_fd < 0) {\n+\t\tkni_fd = open(\"/dev/\" KNI_DEVICE, O_RDWR);\n+\t\tif (kni_fd < 0) {\n+\t\t\tRTE_LOG(ERR, KNI,\n+\t\t\t\t\"Can not open /dev/%s\\n\", KNI_DEVICE);\n+\t\t\treturn -1;\n+\t\t}\n+\t}\n \n-\treturn mz;\n+\treturn 0;\n }\n \n-/* Pool mgmt */\n-static struct rte_kni_memzone_slot*\n-kni_memzone_pool_alloc(void)\n+static int\n+kni_reserve_mz(struct rte_kni *kni)\n {\n-\tstruct rte_kni_memzone_slot *slot;\n+\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n \n-\trte_spinlock_lock(&kni_memzone_pool.mutex);\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);\n \n-\tif (!kni_memzone_pool.free) {\n-\t\trte_spinlock_unlock(&kni_memzone_pool.mutex);\n-\t\treturn NULL;\n-\t}\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, 
SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);\n \n-\tslot = kni_memzone_pool.free;\n-\tkni_memzone_pool.free = slot->next;\n-\tslot->in_use = 1;\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);\n \n-\tif (!kni_memzone_pool.free)\n-\t\tkni_memzone_pool.free_tail = NULL;\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);\n \n-\trte_spinlock_unlock(&kni_memzone_pool.mutex);\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);\n \n-\treturn slot;\n-}\n-\n-static void\n-kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)\n-{\n-\trte_spinlock_lock(&kni_memzone_pool.mutex);\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);\n+\tkni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);\n \n-\tif (kni_memzone_pool.free)\n-\t\tkni_memzone_pool.free_tail->next = slot;\n-\telse\n-\t\tkni_memzone_pool.free = slot;\n+\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);\n+\tkni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);\n+\tKNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);\n \n-\tkni_memzone_pool.free_tail = slot;\n-\tslot->next = NULL;\n-\tslot->in_use = 0;\n+\treturn 0;\n 
\n-\trte_spinlock_unlock(&kni_memzone_pool.mutex);\n+sync_addr_fail:\n+\trte_memzone_free(kni->m_resp_q);\n+resp_q_fail:\n+\trte_memzone_free(kni->m_req_q);\n+req_q_fail:\n+\trte_memzone_free(kni->m_free_q);\n+free_q_fail:\n+\trte_memzone_free(kni->m_alloc_q);\n+alloc_q_fail:\n+\trte_memzone_free(kni->m_rx_q);\n+rx_q_fail:\n+\trte_memzone_free(kni->m_tx_q);\n+tx_q_fail:\n+\treturn -1;\n }\n \n-\n-/* Shall be called before any allocation happens */\n-void\n-rte_kni_init(unsigned int max_kni_ifaces)\n+static void\n+kni_release_mz(struct rte_kni *kni)\n {\n-\tuint32_t i;\n-\tstruct rte_kni_memzone_slot *it;\n-\tconst struct rte_memzone *mz;\n-#define OBJNAMSIZ 32\n-\tchar obj_name[OBJNAMSIZ];\n-\tchar mz_name[RTE_MEMZONE_NAMESIZE];\n-\n-\t/* Immediately return if KNI is already initialized */\n-\tif (kni_memzone_pool.initialized) {\n-\t\tRTE_LOG(WARNING, KNI, \"Double call to rte_kni_init()\");\n-\t\treturn;\n-\t}\n-\n-\tif (max_kni_ifaces == 0) {\n-\t\tRTE_LOG(ERR, KNI, \"Invalid number of max_kni_ifaces %d\\n\",\n-\t\t\t\t\t\t\tmax_kni_ifaces);\n-\t\tRTE_LOG(ERR, KNI, \"Unable to initialize KNI\\n\");\n-\t\treturn;\n-\t}\n-\n-\t/* Check FD and open */\n-\tif (kni_fd < 0) {\n-\t\tkni_fd = open(\"/dev/\" KNI_DEVICE, O_RDWR);\n-\t\tif (kni_fd < 0) {\n-\t\t\tRTE_LOG(ERR, KNI,\n-\t\t\t\t\"Can not open /dev/%s\\n\", KNI_DEVICE);\n-\t\t\treturn;\n-\t\t}\n-\t}\n-\n-\t/* Allocate slot objects */\n-\tkni_memzone_pool.slots = (struct rte_kni_memzone_slot *)\n-\t\t\t\t\trte_malloc(NULL,\n-\t\t\t\t\tsizeof(struct rte_kni_memzone_slot) *\n-\t\t\t\t\tmax_kni_ifaces,\n-\t\t\t\t\t0);\n-\tKNI_MEM_CHECK(kni_memzone_pool.slots == NULL);\n-\n-\t/* Initialize general pool variables */\n-\tkni_memzone_pool.initialized = 1;\n-\tkni_memzone_pool.max_ifaces = max_kni_ifaces;\n-\tkni_memzone_pool.free = &kni_memzone_pool.slots[0];\n-\trte_spinlock_init(&kni_memzone_pool.mutex);\n-\n-\t/* Pre-allocate all memzones of all the slots; panic on error */\n-\tfor (i = 0; i < max_kni_ifaces; i++) 
{\n-\n-\t\t/* Recover current slot */\n-\t\tit = &kni_memzone_pool.slots[i];\n-\t\tit->id = i;\n-\n-\t\t/* Allocate KNI context */\n-\t\tsnprintf(mz_name, RTE_MEMZONE_NAMESIZE, \"KNI_INFO_%d\", i);\n-\t\tmz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),\n-\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_ctx = mz;\n-\n-\t\t/* TX RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_tx_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_tx_q = mz;\n-\n-\t\t/* RX RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_rx_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_rx_q = mz;\n-\n-\t\t/* ALLOC RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_alloc_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_alloc_q = mz;\n-\n-\t\t/* FREE RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_free_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_free_q = mz;\n-\n-\t\t/* Request RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_req_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_req_q = mz;\n-\n-\t\t/* Response RING */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_resp_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_resp_q = mz;\n-\n-\t\t/* Req/Resp sync mem area */\n-\t\tsnprintf(obj_name, OBJNAMSIZ, \"kni_sync_%d\", i);\n-\t\tmz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,\n-\t\t\t\t\t\t\tSOCKET_ID_ANY, 0);\n-\t\tKNI_MEM_CHECK(mz == NULL);\n-\t\tit->m_sync_addr = mz;\n-\n-\t\tif ((i+1) == 
max_kni_ifaces) {\n-\t\t\tit->next = NULL;\n-\t\t\tkni_memzone_pool.free_tail = it;\n-\t\t} else\n-\t\t\tit->next = &kni_memzone_pool.slots[i+1];\n-\t}\n-\n-\treturn;\n-\n-kni_fail:\n-\tRTE_LOG(ERR, KNI, \"Unable to allocate memory for max_kni_ifaces:%d.\"\n-\t\t\"Increase the amount of hugepages memory\\n\", max_kni_ifaces);\n+\trte_memzone_free(kni->m_tx_q);\n+\trte_memzone_free(kni->m_rx_q);\n+\trte_memzone_free(kni->m_alloc_q);\n+\trte_memzone_free(kni->m_free_q);\n+\trte_memzone_free(kni->m_req_q);\n+\trte_memzone_free(kni->m_resp_q);\n+\trte_memzone_free(kni->m_sync_addr);\n }\n \n-\n struct rte_kni *\n rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \t      const struct rte_kni_conf *conf,\n@@ -294,41 +180,52 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n {\n \tint ret;\n \tstruct rte_kni_device_info dev_info;\n-\tstruct rte_kni *ctx;\n-\tchar intf_name[RTE_KNI_NAMESIZE];\n-\tconst struct rte_memzone *mz;\n-\tstruct rte_kni_memzone_slot *slot = NULL;\n+\tstruct rte_kni *kni;\n+\tstruct rte_tailq_entry *te = NULL;\n+\tstruct rte_kni_list *kni_list;\n+\n+\tkni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);\n \n \tif (!pktmbuf_pool || !conf || !conf->name[0])\n \t\treturn NULL;\n \n \t/* Check if KNI subsystem has been initialized */\n-\tif (kni_memzone_pool.initialized != 1) {\n+\tif (kni_fd < 0) {\n \t\tRTE_LOG(ERR, KNI, \"KNI subsystem has not been initialized. 
Invoke rte_kni_init() first\\n\");\n \t\treturn NULL;\n \t}\n \n-\t/* Get an available slot from the pool */\n-\tslot = kni_memzone_pool_alloc();\n-\tif (!slot) {\n-\t\tRTE_LOG(ERR, KNI, \"Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\\n\",\n-\t\t\tkni_memzone_pool.max_ifaces);\n-\t\treturn NULL;\n+\trte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);\n+\n+\tTAILQ_FOREACH(te, kni_list, next) {\n+\t\tkni = (struct rte_kni *) te->data;\n+\t\tif (strncmp(conf->name, kni->name, RTE_KNI_NAMESIZE) == 0)\n+\t\t\tbreak;\n \t}\n \n-\t/* Recover ctx */\n-\tctx = slot->m_ctx->addr;\n-\tsnprintf(intf_name, RTE_KNI_NAMESIZE, \"%s\", conf->name);\n+\tif (te != NULL) {\n+\t\tRTE_LOG(ERR, KNI, \"KNI already exists\\n\");\n+\t\tgoto unlock;\n+\t}\n \n-\tif (ctx->in_use) {\n-\t\tRTE_LOG(ERR, KNI, \"KNI %s is in use\\n\", ctx->name);\n-\t\treturn NULL;\n+\tte = rte_zmalloc(\"KNI_TAILQ_ENTRY\", sizeof(*te), 0);\n+\tif (te == NULL) {\n+\t\tRTE_LOG(ERR, KNI, \"Failed to allocate tailq entry\\n\");\n+\t\tgoto unlock;\n+\t}\n+\n+\tkni = rte_zmalloc(\"KNI\", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);\n+\tif (kni == NULL) {\n+\t\tRTE_LOG(ERR, KNI, \"KNI memory allocation failed\\n\");\n+\t\tgoto kni_fail;\n \t}\n-\tmemset(ctx, 0, sizeof(struct rte_kni));\n+\n+\tsnprintf(kni->name, RTE_KNI_NAMESIZE, \"%s\", conf->name);\n+\n \tif (ops)\n-\t\tmemcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));\n+\t\tmemcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));\n \telse\n-\t\tctx->ops.port_id = UINT16_MAX;\n+\t\tkni->ops.port_id = UINT16_MAX;\n \n \tmemset(&dev_info, 0, sizeof(dev_info));\n \tdev_info.bus = conf->addr.bus;\n@@ -344,72 +241,76 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,\n \n \tmemcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);\n \n-\tsnprintf(ctx->name, RTE_KNI_NAMESIZE, \"%s\", intf_name);\n-\tsnprintf(dev_info.name, RTE_KNI_NAMESIZE, \"%s\", intf_name);\n+\tsnprintf(dev_info.name, RTE_KNI_NAMESIZE, \"%s\", 
conf->name);\n \n \tRTE_LOG(INFO, KNI, \"pci: %02x:%02x:%02x \\t %02x:%02x\\n\",\n \t\tdev_info.bus, dev_info.devid, dev_info.function,\n \t\t\tdev_info.vendor_id, dev_info.device_id);\n+\n+\tret = kni_reserve_mz(kni);\n+\tif (ret < 0)\n+\t\tgoto mz_fail;\n+\n \t/* TX RING */\n-\tmz = slot->m_tx_q;\n-\tctx->tx_q = mz->addr;\n-\tkni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.tx_phys = mz->phys_addr;\n+\tkni->tx_q = kni->m_tx_q->addr;\n+\tkni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.tx_phys = kni->m_tx_q->phys_addr;\n \n \t/* RX RING */\n-\tmz = slot->m_rx_q;\n-\tctx->rx_q = mz->addr;\n-\tkni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.rx_phys = mz->phys_addr;\n+\tkni->rx_q = kni->m_rx_q->addr;\n+\tkni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.rx_phys = kni->m_rx_q->phys_addr;\n \n \t/* ALLOC RING */\n-\tmz = slot->m_alloc_q;\n-\tctx->alloc_q = mz->addr;\n-\tkni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.alloc_phys = mz->phys_addr;\n+\tkni->alloc_q = kni->m_alloc_q->addr;\n+\tkni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.alloc_phys = kni->m_alloc_q->phys_addr;\n \n \t/* FREE RING */\n-\tmz = slot->m_free_q;\n-\tctx->free_q = mz->addr;\n-\tkni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.free_phys = mz->phys_addr;\n+\tkni->free_q = kni->m_free_q->addr;\n+\tkni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.free_phys = kni->m_free_q->phys_addr;\n \n \t/* Request RING */\n-\tmz = slot->m_req_q;\n-\tctx->req_q = mz->addr;\n-\tkni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.req_phys = mz->phys_addr;\n+\tkni->req_q = kni->m_req_q->addr;\n+\tkni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.req_phys = kni->m_req_q->phys_addr;\n \n \t/* Response RING */\n-\tmz = slot->m_resp_q;\n-\tctx->resp_q = mz->addr;\n-\tkni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);\n-\tdev_info.resp_phys = mz->phys_addr;\n+\tkni->resp_q = 
kni->m_resp_q->addr;\n+\tkni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);\n+\tdev_info.resp_phys = kni->m_resp_q->phys_addr;\n \n \t/* Req/Resp sync mem area */\n-\tmz = slot->m_sync_addr;\n-\tctx->sync_addr = mz->addr;\n-\tdev_info.sync_va = mz->addr;\n-\tdev_info.sync_phys = mz->phys_addr;\n+\tkni->sync_addr = kni->m_sync_addr->addr;\n+\tdev_info.sync_va = kni->m_sync_addr->addr;\n+\tdev_info.sync_phys = kni->m_sync_addr->phys_addr;\n \n-\tctx->pktmbuf_pool = pktmbuf_pool;\n-\tctx->group_id = conf->group_id;\n-\tctx->slot_id = slot->id;\n-\tctx->mbuf_size = conf->mbuf_size;\n+\tkni->pktmbuf_pool = pktmbuf_pool;\n+\tkni->group_id = conf->group_id;\n+\tkni->mbuf_size = conf->mbuf_size;\n \n \tret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);\n-\tKNI_MEM_CHECK(ret < 0);\n+\tif (ret < 0)\n+\t\tgoto ioctl_fail;\n \n-\tctx->in_use = 1;\n+\tte->data = kni;\n+\tTAILQ_INSERT_TAIL(kni_list, te, next);\n+\trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n \n \t/* Allocate mbufs and then put them into alloc_q */\n-\tkni_allocate_mbufs(ctx);\n+\tkni_allocate_mbufs(kni);\n \n-\treturn ctx;\n+\treturn kni;\n \n+ioctl_fail:\n+\tkni_release_mz(kni);\n+mz_fail:\n+\trte_free(kni);\n kni_fail:\n-\tif (slot)\n-\t\tkni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);\n+\trte_free(te);\n+unlock:\n+\trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n \n \treturn NULL;\n }\n@@ -462,19 +363,37 @@ kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)\n int\n rte_kni_release(struct rte_kni *kni)\n {\n+\tstruct rte_tailq_entry *te;\n+\tstruct rte_kni_list *kni_list;\n \tstruct rte_kni_device_info dev_info;\n-\tuint32_t slot_id;\n \tuint32_t retry = 5;\n \n-\tif (!kni || !kni->in_use)\n+\tif (!kni)\n \t\treturn -1;\n \n+\tkni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);\n+\n+\trte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);\n+\n+\tTAILQ_FOREACH(te, kni_list, next) {\n+\t\tif (te->data == kni)\n+\t\t\tbreak;\n+\t}\n+\n+\tif (te == NULL) {\n+\t\tgoto 
unlock;\n+\t}\n+\n \tsnprintf(dev_info.name, sizeof(dev_info.name), \"%s\", kni->name);\n \tif (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {\n \t\tRTE_LOG(ERR, KNI, \"Fail to release kni device\\n\");\n-\t\treturn -1;\n+\t\tgoto unlock;\n \t}\n \n+\tTAILQ_REMOVE(kni_list, te, next);\n+\n+\trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n+\n \t/* mbufs in all fifo should be released, except request/response */\n \n \t/* wait until all rxq packets processed by kernel */\n@@ -488,20 +407,18 @@ rte_kni_release(struct rte_kni *kni)\n \tkni_free_fifo(kni->tx_q);\n \tkni_free_fifo(kni->free_q);\n \n-\tslot_id = kni->slot_id;\n+\tkni_release_mz(kni);\n \n-\t/* Memset the KNI struct */\n-\tmemset(kni, 0, sizeof(struct rte_kni));\n+\trte_free(kni);\n \n-\t/* Release memzone */\n-\tif (slot_id > kni_memzone_pool.max_ifaces) {\n-\t\tRTE_LOG(ERR, KNI, \"KNI pool: corrupted slot ID: %d, max: %d\\n\",\n-\t\t\tslot_id, kni_memzone_pool.max_ifaces);\n-\t\treturn -1;\n-\t}\n-\tkni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);\n+\trte_free(te);\n \n \treturn 0;\n+\n+unlock:\n+\trte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);\n+\n+\treturn -1;\n }\n \n /* default callback for request of configuring device mac address */\n@@ -711,21 +628,28 @@ kni_allocate_mbufs(struct rte_kni *kni)\n struct rte_kni *\n rte_kni_get(const char *name)\n {\n-\tuint32_t i;\n-\tstruct rte_kni_memzone_slot *it;\n-\tstruct rte_kni *kni;\n+\tstruct rte_kni *kni = NULL;\n+\tstruct rte_tailq_entry *te;\n+\tstruct rte_kni_list *kni_list;\n+\n+\tif (!name || !name[0])\n+\t\treturn NULL;\n \n-\t/* Note: could be improved perf-wise if necessary */\n-\tfor (i = 0; i < kni_memzone_pool.max_ifaces; i++) {\n-\t\tit = &kni_memzone_pool.slots[i];\n-\t\tif (it->in_use == 0)\n-\t\t\tcontinue;\n-\t\tkni = it->m_ctx->addr;\n-\t\tif (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)\n-\t\t\treturn kni;\n+\tkni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, 
rte_kni_list);\n+\n+\trte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);\n+\tTAILQ_FOREACH(te, kni_list, next) {\n+\t\tkni = te->data;\n+\t\tif (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)\n+\t\t\tbreak;\n \t}\n+\trte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);\n \n-\treturn NULL;\n+\tif (te == NULL) {\n+\t\treturn NULL;\n+\t}\n+\n+\treturn kni;\n }\n \n const char *\ndiff --git a/lib/librte_kni/rte_kni.h b/lib/librte_kni/rte_kni.h\nindex 99055e2c2..601abdfc6 100644\n--- a/lib/librte_kni/rte_kni.h\n+++ b/lib/librte_kni/rte_kni.h\n@@ -81,8 +81,12 @@ struct rte_kni_conf {\n  *\n  * @param max_kni_ifaces\n  *  The maximum number of KNI interfaces that can coexist concurrently\n+ *\n+ * @return\n+ *  - 0 indicates success.\n+ *  - negative value indicates failure.\n  */\n-void rte_kni_init(unsigned int max_kni_ifaces);\n+int rte_kni_init(unsigned int max_kni_ifaces);\n \n \n /**\ndiff --git a/test/test/test_kni.c b/test/test/test_kni.c\nindex 1b876719a..56c98513a 100644\n--- a/test/test/test_kni.c\n+++ b/test/test/test_kni.c\n@@ -429,12 +429,6 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)\n \t}\n \ttest_kni_ctx = NULL;\n \n-\t/* test of releasing a released kni device */\n-\tif (rte_kni_release(kni) == 0) {\n-\t\tprintf(\"should not release a released kni device\\n\");\n-\t\treturn -1;\n-\t}\n-\n \t/* test of reusing memzone */\n \tkni = rte_kni_alloc(mp, &conf, &ops);\n \tif (!kni) {\n",
    "prefixes": [
        "v2"
    ]
}