get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/120889/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 120889,
    "url": "https://patches.dpdk.org/api/patches/120889/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20221214153426.1518587-1-shibin.koikkara.reeny@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20221214153426.1518587-1-shibin.koikkara.reeny@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20221214153426.1518587-1-shibin.koikkara.reeny@intel.com",
    "date": "2022-12-14T15:34:26",
    "name": "[v2] net/af_xdp: AF_XDP PMD CNI Integration",
    "commit_ref": null,
    "pull_url": null,
    "state": "rejected",
    "archived": true,
    "hash": "ccd135e47544541db39339161a959d27f0aa8395",
    "submitter": {
        "id": 2540,
        "url": "https://patches.dpdk.org/api/people/2540/?format=api",
        "name": "Koikkara Reeny, Shibin",
        "email": "shibin.koikkara.reeny@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "https://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20221214153426.1518587-1-shibin.koikkara.reeny@intel.com/mbox/",
    "series": [
        {
            "id": 26125,
            "url": "https://patches.dpdk.org/api/series/26125/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=26125",
            "date": "2022-12-14T15:34:26",
            "name": "[v2] net/af_xdp: AF_XDP PMD CNI Integration",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/26125/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/120889/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/120889/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 00669A0543;\n\tWed, 14 Dec 2022 16:34:38 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 989424021D;\n\tWed, 14 Dec 2022 16:34:38 +0100 (CET)",
            "from mga05.intel.com (mga05.intel.com [192.55.52.43])\n by mails.dpdk.org (Postfix) with ESMTP id 6AB4B400D6\n for <dev@dpdk.org>; Wed, 14 Dec 2022 16:34:36 +0100 (CET)",
            "from orsmga007.jf.intel.com ([10.7.209.58])\n by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 14 Dec 2022 07:34:34 -0800",
            "from silpixa00400899.ir.intel.com ([10.243.22.107])\n by orsmga007.jf.intel.com with ESMTP; 14 Dec 2022 07:34:32 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1671032076; x=1702568076;\n h=from:to:cc:subject:date:message-id:mime-version:\n content-transfer-encoding;\n bh=tE9OM4cyAJJeLNCyExN7SiQHTtBeSnejxqQqjU2tCJ4=;\n b=Z3gy9I06RKZQEG94g0sAV1Z9fDoh4uKVnzrMlxDo17NFciF4AAy72ZEv\n W0+i6T2ryV69W75VPBElqzbFm+eEqPIKl9nPahFdiuGm/Nm+qaZPel6pG\n Q60k7aXxkDdu8KxAyvY/psXWmAo2K3fFCI5U4JitL+yCAVUSW1nLGlNRB\n MvtwbZ4QuyFRnAR7fDKLK56iFlxAWRHvIAVzA6VHM8AIogUiLDHiWceB2\n E9QBw64w5RktT0kyXevqs6g75SwE2AfqK2A+kF+bwcvqLMn1qfEeTNEyE\n sCcjM0BisXVuHwR9Ml/KYOqlltX/mszVhn9GG6+QfukB2R6nSPx3ur7+S Q==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10561\"; a=\"404701432\"",
            "E=Sophos;i=\"5.96,244,1665471600\"; d=\"scan'208\";a=\"404701432\"",
            "E=McAfee;i=\"6500,9779,10561\"; a=\"642541199\"",
            "E=Sophos;i=\"5.96,244,1665471600\"; d=\"scan'208\";a=\"642541199\""
        ],
        "X-ExtLoop1": "1",
        "From": "Shibin Koikkara Reeny <shibin.koikkara.reeny@intel.com>",
        "To": "dev@dpdk.org,\n\tanatoly.burakov@intel.com,\n\tbruce.richardson@intel.com",
        "Cc": "ciara.loftus@intel.com, qi.z.zhang@intel.com,\n Shibin Koikkara Reeny <shibin.koikkara.reeny@intel.com>",
        "Subject": "[PATCH v2] net/af_xdp: AF_XDP PMD CNI Integration",
        "Date": "Wed, 14 Dec 2022 15:34:26 +0000",
        "Message-Id": "<20221214153426.1518587-1-shibin.koikkara.reeny@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Integrate support for the AF_XDP CNI and device plugin [1] so that the\nDPDK AF_XDP PMD can work in an unprivileged container environment.\nPart of the AF_XDP PMD initialization process involves loading\nan eBPF program onto the given netdev. This operation requires\nprivileges, which prevents the PMD from being able to work in an\nunprivileged container (without root access). The plugin CNI handles\nthe program loading. CNI open Unix Domain Socket (UDS) and waits\nlistening for a client to make requests over that UDS. The client(DPDK)\nconnects and a \"handshake\" occurs, then the File Descriptor which points\nto the XSKMAP associated with the loaded eBPF program is handed over\nto the client. The client can then proceed with creating an AF_XDP\nsocket and inserting the socket into the XSKMAP pointed to by the\nFD received on the UDS.\n\nA new vdev arg \"use_cni\" is created to indicate user wishes to run\nthe PMD in unprivileged mode and to receive the XSKMAP FD from the CNI.\nWhen this flag is set, the XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD libbpf flag\nshould be used when creating the socket, which tells libbpf not to load the\ndefault libbpf program on the netdev. We tell libbpf not to do this because\nthe loading is handled by the CNI in this scenario.\n\n[1]: https://github.com/intel/afxdp-plugins-for-kubernetes\n\nSigned-off-by: Shibin Koikkara Reeny <shibin.koikkara.reeny@intel.com>\n---\n drivers/net/af_xdp/rte_eth_af_xdp.c | 337 +++++++++++++++++++++++++++-\n 1 file changed, 325 insertions(+), 12 deletions(-)",
    "diff": "diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c\nindex b6ec9bf490..196d98ad97 100644\n--- a/drivers/net/af_xdp/rte_eth_af_xdp.c\n+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c\n@@ -7,6 +7,7 @@\n #include <string.h>\n #include <netinet/in.h>\n #include <net/if.h>\n+#include <sys/un.h>\n #include <sys/socket.h>\n #include <sys/ioctl.h>\n #include <linux/if_ether.h>\n@@ -81,6 +82,24 @@ RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);\n \n #define ETH_AF_XDP_MP_KEY \"afxdp_mp_send_fds\"\n \n+#define MAX_LONG_OPT_SZ\t\t\t64\n+#define UDS_MAX_FD_NUM\t\t\t2\n+#define UDS_MAX_CMD_LEN\t\t\t64\n+#define UDS_MAX_CMD_RESP\t\t128\n+#define UDS_XSK_MAP_FD_MSG\t\t\"/xsk_map_fd\"\n+#define UDS_SOCK\t\t\t\"/tmp/afxdp.sock\"\n+#define UDS_CONNECT_MSG\t\t\t\"/connect\"\n+#define UDS_HOST_OK_MSG\t\t\t\"/host_ok\"\n+#define UDS_HOST_NAK_MSG\t\t\"/host_nak\"\n+#define UDS_VERSION_MSG\t\t\t\"/version\"\n+#define UDS_XSK_MAP_FD_MSG\t\t\"/xsk_map_fd\"\n+#define UDS_XSK_SOCKET_MSG\t\t\"/xsk_socket\"\n+#define UDS_FD_ACK_MSG\t\t\t\"/fd_ack\"\n+#define UDS_FD_NAK_MSG\t\t\t\"/fd_nak\"\n+#define UDS_FIN_MSG\t\t\t\"/fin\"\n+#define UDS_FIN_ACK_MSG\t\t\t\"/fin_ack\"\n+\n+\n static int afxdp_dev_count;\n \n /* Message header to synchronize fds via IPC */\n@@ -151,6 +170,7 @@ struct pmd_internals {\n \tchar prog_path[PATH_MAX];\n \tbool custom_prog_configured;\n \tbool force_copy;\n+\tbool use_cni;\n \tstruct bpf_map *map;\n \n \tstruct rte_ether_addr eth_addr;\n@@ -170,6 +190,7 @@ struct pmd_process_private {\n #define ETH_AF_XDP_PROG_ARG\t\t\t\"xdp_prog\"\n #define ETH_AF_XDP_BUDGET_ARG\t\t\t\"busy_budget\"\n #define ETH_AF_XDP_FORCE_COPY_ARG\t\t\"force_copy\"\n+#define ETH_AF_XDP_USE_CNI_ARG\t\t\t\"use_cni\"\n \n static const char * const valid_arguments[] = {\n \tETH_AF_XDP_IFACE_ARG,\n@@ -179,8 +200,8 @@ static const char * const valid_arguments[] = {\n \tETH_AF_XDP_PROG_ARG,\n \tETH_AF_XDP_BUDGET_ARG,\n \tETH_AF_XDP_FORCE_COPY_ARG,\n-\tNULL\n-};\n+\tETH_AF_XDP_USE_CNI_ARG,\n+\tNULL};\n \n static const struct rte_eth_link pmd_link = {\n \t.link_speed = RTE_ETH_SPEED_NUM_10G,\n@@ -1129,7 +1150,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,\n \t\tret = xsk_umem__create(&umem->umem, base_addr, umem_size,\n \t\t\t\t&rxq->fq, &rxq->cq, &usr_config);\n \t\tif (ret) {\n-\t\t\tAF_XDP_LOG(ERR, \"Failed to create umem\\n\");\n+\t\t\tAF_XDP_LOG(ERR, \"Failed to create umem [%d]: [%s]\\n\",\n+\t\t\t\t   errno, strerror(errno));\n \t\t\tgoto err;\n \t\t}\n \t\tumem->buffer = base_addr;\n@@ -1314,6 +1336,245 @@ configure_preferred_busy_poll(struct pkt_rx_queue *rxq)\n \treturn 0;\n }\n \n+static int\n+init_uds_sock(struct sockaddr_un *server)\n+{\n+\tint sock;\n+\n+\tsock = socket(AF_UNIX, SOCK_SEQPACKET, 0);\n+\tif (sock < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Failed to opening stream socket\\n\");\n+\t\treturn -1;\n+\t}\n+\n+\tserver->sun_family = AF_UNIX;\n+\tstrlcpy(server->sun_path, UDS_SOCK, sizeof(server->sun_path));\n+\n+\tif (connect(sock, (struct sockaddr *)server, sizeof(struct sockaddr_un)) < 0) {\n+\t\tclose(sock);\n+\t\tAF_XDP_LOG(ERR, \"Error connecting stream socket errno = [%d]: [%s]\\n\",\n+\t\t\t   errno, strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\treturn sock;\n+}\n+\n+struct msg_internal {\n+\tchar response[UDS_MAX_CMD_RESP];\n+\tint len_param;\n+\tint num_fds;\n+\tint fds[UDS_MAX_FD_NUM];\n+};\n+\n+static int\n+send_msg(int sock, char *request, int *fd)\n+{\n+\tint snd;\n+\tstruct iovec iov;\n+\tstruct msghdr msgh;\n+\tstruct cmsghdr 
*cmsg;\n+\tstruct sockaddr_un dst;\n+\tchar control[CMSG_SPACE(sizeof(*fd))];\n+\n+\tmemset(&dst, 0, sizeof(dst));\n+\tdst.sun_family = AF_UNIX;\n+\tstrlcpy(dst.sun_path, UDS_SOCK, sizeof(dst.sun_path));\n+\n+\t/* Initialize message header structure */\n+\tmemset(&msgh, 0, sizeof(msgh));\n+\tmemset(control, 0, sizeof(control));\n+\tiov.iov_base = request;\n+\tiov.iov_len = strlen(request);\n+\n+\tmsgh.msg_name = &dst;\n+\tmsgh.msg_namelen = sizeof(dst);\n+\tmsgh.msg_iov = &iov;\n+\tmsgh.msg_iovlen = 1;\n+\tmsgh.msg_control = control;\n+\tmsgh.msg_controllen = sizeof(control);\n+\n+\t/* Translate the FD. */\n+\tcmsg = CMSG_FIRSTHDR(&msgh);\n+\tcmsg->cmsg_len = CMSG_LEN(sizeof(*fd));\n+\tcmsg->cmsg_level = SOL_SOCKET;\n+\tcmsg->cmsg_type = SCM_RIGHTS;\n+\tmemcpy(CMSG_DATA(cmsg), fd, sizeof(*fd));\n+\n+\t/* Send the request message. */\n+\tdo {\n+\t\tsnd = sendmsg(sock, &msgh, 0);\n+\t} while (snd < 0 && errno == EINTR);\n+\n+\treturn snd;\n+}\n+\n+static int\n+read_msg(int sock, char *response, struct sockaddr_un *s, int *fd)\n+{\n+\tint msglen;\n+\tstruct msghdr msgh;\n+\tstruct iovec iov;\n+\tchar control[CMSG_SPACE(sizeof(*fd))];\n+\tstruct cmsghdr *cmsg;\n+\n+\t/* Initialize message header structure */\n+\tmemset(&msgh, 0, sizeof(msgh));\n+\tiov.iov_base = response;\n+\tiov.iov_len = UDS_MAX_CMD_RESP;\n+\n+\tmsgh.msg_name = s;\n+\tmsgh.msg_namelen = sizeof(*s);\n+\tmsgh.msg_iov = &iov;\n+\tmsgh.msg_iovlen = 1;\n+\tmsgh.msg_control = control;\n+\tmsgh.msg_controllen = sizeof(control);\n+\n+\tmsglen = recvmsg(sock, &msgh, 0);\n+\n+\t/* zero length message means socket was closed */\n+\tif (msglen == 0)\n+\t\treturn 0;\n+\n+\tif (msglen < 0) {\n+\t\tAF_XDP_LOG(ERR, \"recvmsg failed, %s\\n\", strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\t/* read auxiliary FDs if any */\n+\tfor (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;\n+\t\t\tcmsg = CMSG_NXTHDR(&msgh, cmsg)) {\n+\t\tif (cmsg->cmsg_level == SOL_SOCKET &&\n+\t\t\t\tcmsg->cmsg_type == SCM_RIGHTS) {\n+\t\t\tmemcpy(fd, CMSG_DATA(cmsg), sizeof(*fd));\n+\t\t\tbreak;\n+\t\t}\n+\t}\n+\n+\tresponse[msglen] = '\\0';\n+\treturn msglen;\n+}\n+\n+static int\n+make_request_cni(int sock, struct sockaddr_un *server, char *request,\n+\t\t int *req_fd, char *response, int *out_fd)\n+{\n+\tint rval;\n+\n+\tAF_XDP_LOG(INFO, \"Request: [%s]\\n\", request);\n+\n+\t/* if no file descriptor to send then directly write to socket.\n+\t * else use sendmsg() to send the file descriptor.\n+\t */\n+\tif (req_fd == NULL)\n+\t\trval = write(sock, request, strlen(request));\n+\telse\n+\t\trval = send_msg(sock, request, req_fd);\n+\n+\tif (rval < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Write error %s\\n\", strerror(errno));\n+\t\treturn -1;\n+\t}\n+\n+\trval = read_msg(sock, response, server, out_fd);\n+\tif (rval <= 0) {\n+\t\tAF_XDP_LOG(ERR, \"Read error %d\\n\", rval);\n+\t\treturn -1;\n+\t}\n+\tAF_XDP_LOG(INFO, \"Response: [%s]\\n\", request);\n+\n+\treturn 0;\n+}\n+\n+static int\n+check_response(char *response, char *exp_resp, long size)\n+{\n+\treturn strncmp(response, exp_resp, size);\n+}\n+\n+static int\n+get_cni_fd(char *if_name)\n+{\n+\tchar request[UDS_MAX_CMD_LEN], response[UDS_MAX_CMD_RESP];\n+\tchar hostname[MAX_LONG_OPT_SZ], exp_resp[UDS_MAX_CMD_RESP];\n+\tstruct sockaddr_un server;\n+\tint xsk_map_fd = -1, out_fd = 0;\n+\tint sock, err;\n+\n+\terr = gethostname(hostname, MAX_LONG_OPT_SZ - 1);\n+\tif (err)\n+\t\treturn -1;\n+\n+\tmemset(&server, 0, sizeof(server));\n+\tsock = init_uds_sock(&server);\n+\n+\t/* Initiates handshake to CNI send: /connect,hostname 
*/\n+\tsnprintf(request, sizeof(request), \"%s,%s\", UDS_CONNECT_MSG, hostname);\n+\tmemset(response, 0, sizeof(response));\n+\tif (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Error in processing cmd [%s]\\n\", request);\n+\t\tgoto err_close;\n+\t}\n+\n+\t/* Expect /host_ok */\n+\tstrlcpy(exp_resp, UDS_HOST_OK_MSG, UDS_MAX_CMD_LEN);\n+\tif (check_response(response, exp_resp, strlen(exp_resp)) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Unexpected response [%s]\\n\", response);\n+\t\tgoto err_close;\n+\t}\n+\t/* Request for \"/version\" */\n+\tstrlcpy(request, UDS_VERSION_MSG, UDS_MAX_CMD_LEN);\n+\tmemset(response, 0, sizeof(response));\n+\tif (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Error in processing cmd [%s]\\n\", request);\n+\t\tgoto err_close;\n+\t}\n+\n+\t/* Request for file descriptor for netdev name*/\n+\tsnprintf(request, sizeof(request), \"%s,%s\", UDS_XSK_MAP_FD_MSG, if_name);\n+\tmemset(response, 0, sizeof(response));\n+\tif (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Error in processing cmd [%s]\\n\", request);\n+\t\tgoto err_close;\n+\t}\n+\n+\tif (out_fd < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Error in processing cmd [%s]\\n\", request);\n+\t\tgoto err_close;\n+\t}\n+\n+\txsk_map_fd = out_fd;\n+\n+\t/* Expect fd_ack with file descriptor */\n+\tstrlcpy(exp_resp, UDS_FD_ACK_MSG, UDS_MAX_CMD_LEN);\n+\tif (check_response(response, exp_resp, strlen(exp_resp)) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Unexpected response [%s]\\n\", response);\n+\t\tgoto err_close;\n+\t}\n+\n+\t/* Initiate close connection */\n+\tstrlcpy(request, UDS_FIN_MSG, UDS_MAX_CMD_LEN);\n+\tmemset(response, 0, sizeof(response));\n+\tif (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Error in processing cmd [%s]\\n\", request);\n+\t\tgoto err_close;\n+\t}\n+\n+\t/* Connection close */\n+\tstrlcpy(exp_resp, UDS_FIN_ACK_MSG, UDS_MAX_CMD_LEN);\n+\tif (check_response(response, exp_resp, strlen(exp_resp)) < 0) {\n+\t\tAF_XDP_LOG(ERR, \"Unexpected response [%s]\\n\", response);\n+\t\tgoto err_close;\n+\t}\n+\tclose(sock);\n+\n+\treturn xsk_map_fd;\n+\n+err_close:\n+\tclose(sock);\n+\treturn -1;\n+}\n+\n static int\n xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \t      int ring_size)\n@@ -1362,6 +1623,10 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \tcfg.bind_flags |= XDP_USE_NEED_WAKEUP;\n #endif\n \n+\t/* Disable libbpf from loading XDP program */\n+\tif (internals->use_cni)\n+\t\tcfg.libbpf_flags |= XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;\n+\n \tif (strnlen(internals->prog_path, PATH_MAX)) {\n \t\tif (!internals->custom_prog_configured) {\n \t\t\tret = load_custom_xdp_prog(internals->prog_path,\n@@ -1413,7 +1678,23 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,\n \t\t}\n \t}\n \n-\tif (rxq->busy_budget) {\n+\tif (internals->use_cni) {\n+\t\tint err, fd, map_fd;\n+\n+\t\t/* get socket fd from CNI plugin */\n+\t\tmap_fd = get_cni_fd(internals->if_name);\n+\t\tif (map_fd < 0) {\n+\t\t\tAF_XDP_LOG(ERR, \"Failed to receive CNI plugin fd\\n\");\n+\t\t\tgoto out_xsk;\n+\t\t}\n+\t\t/* get socket fd */\n+\t\tfd = xsk_socket__fd(rxq->xsk);\n+\t\terr = bpf_map_update_elem(map_fd, &rxq->xsk_queue_idx, &fd, 0);\n+\t\tif (err) {\n+\t\t\tAF_XDP_LOG(ERR, \"Failed to insert unprivileged xsk in map.\\n\");\n+\t\t\tgoto out_xsk;\n+\t\t}\n+\t} else if 
(rxq->busy_budget) {\n \t\tret = configure_preferred_busy_poll(rxq);\n \t\tif (ret) {\n \t\t\tAF_XDP_LOG(ERR, \"Failed configure busy polling.\\n\");\n@@ -1584,6 +1865,26 @@ static const struct eth_dev_ops ops = {\n \t.get_monitor_addr = eth_get_monitor_addr,\n };\n \n+/* CNI option works in unprivileged container environment\n+ * and ethernet device functionality will be reduced. So\n+ * additional customiszed eth_dev_ops struct is needed\n+ * for cni.\n+ **/\n+static const struct eth_dev_ops ops_cni = {\n+\t.dev_start = eth_dev_start,\n+\t.dev_stop = eth_dev_stop,\n+\t.dev_close = eth_dev_close,\n+\t.dev_configure = eth_dev_configure,\n+\t.dev_infos_get = eth_dev_info,\n+\t.mtu_set = eth_dev_mtu_set,\n+\t.rx_queue_setup = eth_rx_queue_setup,\n+\t.tx_queue_setup = eth_tx_queue_setup,\n+\t.link_update = eth_link_update,\n+\t.stats_get = eth_stats_get,\n+\t.stats_reset = eth_stats_reset,\n+\t.get_monitor_addr = eth_get_monitor_addr,\n+};\n+\n /** parse busy_budget argument */\n static int\n parse_budget_arg(const char *key __rte_unused,\n@@ -1704,8 +2005,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues,\n \n static int\n parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,\n-\t\t\tint *queue_cnt, int *shared_umem, char *prog_path,\n-\t\t\tint *busy_budget, int *force_copy)\n+\t\t int *queue_cnt, int *shared_umem, char *prog_path,\n+\t\t int *busy_budget, int *force_copy, int *use_cni)\n {\n \tint ret;\n \n@@ -1746,6 +2047,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,\n \tif (ret < 0)\n \t\tgoto free_kvlist;\n \n+\tret = rte_kvargs_process(kvlist, ETH_AF_XDP_USE_CNI_ARG,\n+\t\t\t\t &parse_integer_arg, use_cni);\n+\tif (ret < 0)\n+\t\tgoto free_kvlist;\n+\n free_kvlist:\n \trte_kvargs_free(kvlist);\n \treturn ret;\n@@ -1783,8 +2089,9 @@ get_iface_info(const char *if_name,\n \n static struct rte_eth_dev *\n init_internals(struct rte_vdev_device *dev, const char *if_name,\n-\t\tint start_queue_idx, int queue_cnt, int shared_umem,\n-\t\tconst char *prog_path, int busy_budget, int force_copy)\n+\t       int start_queue_idx, int queue_cnt, int shared_umem,\n+\t       const char *prog_path, int busy_budget, int force_copy,\n+\t       int use_cni)\n {\n \tconst char *name = rte_vdev_device_name(dev);\n \tconst unsigned int numa_node = dev->device.numa_node;\n@@ -1813,6 +2120,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,\n #endif\n \tinternals->shared_umem = shared_umem;\n \tinternals->force_copy = force_copy;\n+\tinternals->use_cni = use_cni;\n \n \tif (xdp_get_channels_info(if_name, &internals->max_queue_cnt,\n \t\t\t\t  &internals->combined_queue_cnt)) {\n@@ -1871,7 +2179,11 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,\n \teth_dev->data->dev_link = pmd_link;\n \teth_dev->data->mac_addrs = &internals->eth_addr;\n \teth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;\n-\teth_dev->dev_ops = &ops;\n+\tif (!internals->use_cni)\n+\t\teth_dev->dev_ops = &ops;\n+\telse\n+\t\teth_dev->dev_ops = &ops_cni;\n+\n \teth_dev->rx_pkt_burst = eth_af_xdp_rx;\n \teth_dev->tx_pkt_burst = eth_af_xdp_tx;\n \teth_dev->process_private = process_private;\n@@ -1998,6 +2310,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \tchar prog_path[PATH_MAX] = {'\\0'};\n \tint busy_budget = -1, ret;\n \tint force_copy = 0;\n+\tint use_cni = 0;\n \tstruct rte_eth_dev *eth_dev = NULL;\n \tconst char *name = rte_vdev_device_name(dev);\n \n@@ -2043,7 +2356,7 @@ rte_pmd_af_xdp_probe(struct 
rte_vdev_device *dev)\n \n \tif (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,\n \t\t\t     &xsk_queue_cnt, &shared_umem, prog_path,\n-\t\t\t     &busy_budget, &force_copy) < 0) {\n+\t\t\t     &busy_budget, &force_copy, &use_cni) < 0) {\n \t\tAF_XDP_LOG(ERR, \"Invalid kvargs value\\n\");\n \t\treturn -EINVAL;\n \t}\n@@ -2057,8 +2370,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)\n \t\t\t\t\tbusy_budget;\n \n \teth_dev = init_internals(dev, if_name, xsk_start_queue_idx,\n-\t\t\t\t\txsk_queue_cnt, shared_umem, prog_path,\n-\t\t\t\t\tbusy_budget, force_copy);\n+\t\t\t\t xsk_queue_cnt, shared_umem, prog_path,\n+\t\t\t\t busy_budget, force_copy, use_cni);\n \tif (eth_dev == NULL) {\n \t\tAF_XDP_LOG(ERR, \"Failed to init internals\\n\");\n \t\treturn -1;\n",
    "prefixes": [
        "v2"
    ]
}
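
The endpoint above supports the three operations listed at the top of the page: GET to show a patch, and PUT/PATCH to update it. Below is a minimal sketch of driving those operations with Python's requests library; the PATCHWORK_TOKEN variable, the target state value, and the maintainer permissions it implies are assumptions made for the example and are not part of the response shown above.

#!/usr/bin/env python3
"""Sketch: read and update a patch through the Patchwork REST API."""
import os

import requests

BASE = "https://patches.dpdk.org/api"
PATCH_ID = 120889  # the patch shown above

# GET: show a patch (read access needs no authentication).
resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# PATCH: update a patch, e.g. change its state. This assumes an API
# token in PATCHWORK_TOKEN for an account with maintainer rights on
# the project; otherwise the server rejects the request.
headers = {"Authorization": "Token " + os.environ["PATCHWORK_TOKEN"]}
resp = requests.patch(f"{BASE}/patches/{PATCH_ID}/",
                      json={"state": "accepted"},
                      headers=headers)
resp.raise_for_status()
print("new state:", resp.json()["state"])

A PUT request works the same way but expects the full writable representation rather than a partial update, which is why PATCH is usually the more convenient of the two "Update a patch." operations.

The commit message in the "content" field describes a small request/response protocol over a Unix Domain Socket: the DPDK client connects to the AF_XDP device plugin, performs a handshake, and receives the XSKMAP file descriptor via SCM_RIGHTS. The sketch below re-expresses the get_cni_fd() flow from the diff in Python purely for readability; the message strings and the /tmp/afxdp.sock path are taken from the patch, while the function names, framing, and error handling here are invented for the example and are not part of the patch.

import array
import socket

UDS_SOCK = "/tmp/afxdp.sock"   # UDS_SOCK in the patch
MAX_RESP = 128                 # UDS_MAX_CMD_RESP in the patch


def transact(sock, request):
    """Send one request, return (reply text, received fd or None)."""
    sock.sendall(request.encode())
    fds = array.array("i")
    data, ancdata, _flags, _addr = sock.recvmsg(
        MAX_RESP, socket.CMSG_SPACE(fds.itemsize))
    for level, ctype, cdata in ancdata:
        # A file descriptor, if any, arrives as SCM_RIGHTS ancillary data.
        if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
            fds.frombytes(cdata[:fds.itemsize])
    return data.decode(), (fds[0] if len(fds) else None)


def get_xsk_map_fd(if_name):
    """Handshake with the plugin and return the XSKMAP fd for if_name."""
    with socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET) as sock:
        sock.connect(UDS_SOCK)
        # /connect,<hostname>  ->  expect /host_ok
        reply, _ = transact(sock, "/connect," + socket.gethostname())
        assert reply.startswith("/host_ok"), reply
        # /version  ->  plugin replies with its version string
        transact(sock, "/version")
        # /xsk_map_fd,<netdev>  ->  expect /fd_ack plus the XSKMAP fd
        reply, map_fd = transact(sock, "/xsk_map_fd," + if_name)
        assert reply.startswith("/fd_ack") and map_fd is not None, reply
        # /fin  ->  expect /fin_ack, then close the connection
        reply, _ = transact(sock, "/fin")
        assert reply.startswith("/fin_ack"), reply
        return map_fd

In the driver itself the received file descriptor is then passed to bpf_map_update_elem() to insert the AF_XDP socket into the XSKMAP, which is what the use_cni branch of xsk_configure() in the diff above does.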