get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/36787/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 36787,
    "url": "https://patches.dpdk.org/api/patches/36787/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1522431163-25621-6-git-send-email-konstantin.ananyev@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1522431163-25621-6-git-send-email-konstantin.ananyev@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1522431163-25621-6-git-send-email-konstantin.ananyev@intel.com",
    "date": "2018-03-30T17:32:41",
    "name": "[dpdk-dev,v2,5/7] bpf: introduce basic RX/TX BPF filters",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "b641d0c7526a49067c39a66781c080208167657b",
    "submitter": {
        "id": 33,
        "url": "https://patches.dpdk.org/api/people/33/?format=api",
        "name": "Ananyev, Konstantin",
        "email": "konstantin.ananyev@intel.com"
    },
    "delegate": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1522431163-25621-6-git-send-email-konstantin.ananyev@intel.com/mbox/",
    "series": [],
    "comments": "https://patches.dpdk.org/api/patches/36787/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/36787/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id EA1BDA492;\n\tFri, 30 Mar 2018 19:33:12 +0200 (CEST)",
            "from mga04.intel.com (mga04.intel.com [192.55.52.120])\n\tby dpdk.org (Postfix) with ESMTP id 86DEC7288\n\tfor <dev@dpdk.org>; Fri, 30 Mar 2018 19:33:05 +0200 (CEST)",
            "from orsmga002.jf.intel.com ([10.7.209.21])\n\tby fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t30 Mar 2018 10:33:04 -0700",
            "from sivswdev02.ir.intel.com (HELO localhost.localdomain)\n\t([10.237.217.46])\n\tby orsmga002.jf.intel.com with ESMTP; 30 Mar 2018 10:33:03 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.48,382,1517904000\"; d=\"scan'208\";a=\"46664533\"",
        "From": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "To": "dev@dpdk.org",
        "Cc": "Konstantin Ananyev <konstantin.ananyev@intel.com>",
        "Date": "Fri, 30 Mar 2018 18:32:41 +0100",
        "Message-Id": "<1522431163-25621-6-git-send-email-konstantin.ananyev@intel.com>",
        "X-Mailer": "git-send-email 1.7.0.7",
        "In-Reply-To": "<1520613725-9176-1-git-send-email-konstantin.ananyev@intel.com>",
        "References": "<1520613725-9176-1-git-send-email-konstantin.ananyev@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v2 5/7] bpf: introduce basic RX/TX BPF filters",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://dpdk.org/ml/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://dpdk.org/ml/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://dpdk.org/ml/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Introduce API to install BPF based filters on ethdev RX/TX path.\nCurrent implementation is pure SW one, based on ethdev RX/TX\ncallback mechanism.\n\nSigned-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>\n---\n lib/librte_bpf/Makefile            |   2 +\n lib/librte_bpf/bpf_pkt.c           | 607 +++++++++++++++++++++++++++++++++++++\n lib/librte_bpf/meson.build         |   6 +-\n lib/librte_bpf/rte_bpf_ethdev.h    | 100 ++++++\n lib/librte_bpf/rte_bpf_version.map |   4 +\n 5 files changed, 717 insertions(+), 2 deletions(-)\n create mode 100644 lib/librte_bpf/bpf_pkt.c\n create mode 100644 lib/librte_bpf/rte_bpf_ethdev.h",
    "diff": "diff --git a/lib/librte_bpf/Makefile b/lib/librte_bpf/Makefile\nindex 44b12c439..501c49c60 100644\n--- a/lib/librte_bpf/Makefile\n+++ b/lib/librte_bpf/Makefile\n@@ -22,6 +22,7 @@ LIBABIVER := 1\n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf.c\n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_exec.c\n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_load.c\n+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_pkt.c\n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_validate.c\n ifeq ($(CONFIG_RTE_ARCH_X86_64),y)\n SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_jit_x86.c\n@@ -29,5 +30,6 @@ endif\n \n # install header files\n SYMLINK-$(CONFIG_RTE_LIBRTE_BPF)-include += rte_bpf.h\n+SYMLINK-$(CONFIG_RTE_LIBRTE_BPF)-include += rte_bpf_ethdev.h\n \n include $(RTE_SDK)/mk/rte.lib.mk\ndiff --git a/lib/librte_bpf/bpf_pkt.c b/lib/librte_bpf/bpf_pkt.c\nnew file mode 100644\nindex 000000000..287d40564\n--- /dev/null\n+++ b/lib/librte_bpf/bpf_pkt.c\n@@ -0,0 +1,607 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#include <stdarg.h>\n+#include <stdio.h>\n+#include <string.h>\n+#include <errno.h>\n+#include <stdint.h>\n+#include <unistd.h>\n+#include <inttypes.h>\n+\n+#include <sys/types.h>\n+#include <sys/stat.h>\n+#include <fcntl.h>\n+\n+#include <sys/queue.h>\n+#include <sys/stat.h>\n+\n+#include <rte_common.h>\n+#include <rte_byteorder.h>\n+#include <rte_malloc.h>\n+#include <rte_log.h>\n+#include <rte_debug.h>\n+#include <rte_cycles.h>\n+#include <rte_eal.h>\n+#include <rte_per_lcore.h>\n+#include <rte_lcore.h>\n+#include <rte_atomic.h>\n+#include <rte_mbuf.h>\n+#include <rte_ethdev.h>\n+\n+#include <rte_bpf_ethdev.h>\n+#include \"bpf_impl.h\"\n+\n+/*\n+ * information about installed BPF rx/tx callback\n+ */\n+\n+struct bpf_eth_cbi {\n+\t/* used by both data & control path */\n+\tuint32_t use;    /*usage counter */\n+\tvoid *cb;        /* callback handle */\n+\tstruct rte_bpf *bpf;\n+\tstruct rte_bpf_jit jit;\n+\t/* used by control path only */\n+\tLIST_ENTRY(bpf_eth_cbi) 
link;\n+\tuint16_t port;\n+\tuint16_t queue;\n+} __rte_cache_aligned;\n+\n+/*\n+ * Odd number means that callback is used by datapath.\n+ * Even number means that callback is not used by datapath.\n+ */\n+#define BPF_ETH_CBI_INUSE  1\n+\n+/*\n+ * List to manage RX/TX installed callbacks.\n+ */\n+LIST_HEAD(bpf_eth_cbi_list, bpf_eth_cbi);\n+\n+enum {\n+\tBPF_ETH_RX,\n+\tBPF_ETH_TX,\n+\tBPF_ETH_NUM,\n+};\n+\n+/*\n+ * information about all installed BPF rx/tx callbacks\n+ */\n+struct bpf_eth_cbh {\n+\trte_spinlock_t lock;\n+\tstruct bpf_eth_cbi_list list;\n+\tuint32_t type;\n+};\n+\n+static struct bpf_eth_cbh rx_cbh = {\n+\t.lock = RTE_SPINLOCK_INITIALIZER,\n+\t.list = LIST_HEAD_INITIALIZER(list),\n+\t.type = BPF_ETH_RX,\n+};\n+\n+static struct bpf_eth_cbh tx_cbh = {\n+\t.lock = RTE_SPINLOCK_INITIALIZER,\n+\t.list = LIST_HEAD_INITIALIZER(list),\n+\t.type = BPF_ETH_TX,\n+};\n+\n+/*\n+ * Marks given callback as used by datapath.\n+ */\n+static __rte_always_inline void\n+bpf_eth_cbi_inuse(struct bpf_eth_cbi *cbi)\n+{\n+\tcbi->use++;\n+\t/* make sure no store/load reordering could happen */\n+\trte_smp_mb();\n+}\n+\n+/*\n+ * Marks given callback list as not used by datapath.\n+ */\n+static __rte_always_inline void\n+bpf_eth_cbi_unuse(struct bpf_eth_cbi *cbi)\n+{\n+\t/* make sure all previous loads are completed */\n+\trte_smp_rmb();\n+\tcbi->use++;\n+}\n+\n+/*\n+ * Waits till datapath finished using given callback.\n+ */\n+static void\n+bpf_eth_cbi_wait(const struct bpf_eth_cbi *cbi)\n+{\n+\tuint32_t nuse, puse;\n+\n+\t/* make sure all previous loads and stores are completed */\n+\trte_smp_mb();\n+\n+\tpuse = cbi->use;\n+\n+\t/* in use, busy wait till current RX/TX iteration is finished */\n+\tif ((puse & BPF_ETH_CBI_INUSE) != 0) {\n+\t\tdo {\n+\t\t\trte_pause();\n+\t\t\trte_compiler_barrier();\n+\t\t\tnuse = cbi->use;\n+\t\t} while (nuse == puse);\n+\t}\n+}\n+\n+static void\n+bpf_eth_cbi_cleanup(struct bpf_eth_cbi *bc)\n+{\n+\tbc->bpf = NULL;\n+\tmemset(&bc->jit, 0, 
sizeof(bc->jit));\n+}\n+\n+static struct bpf_eth_cbi *\n+bpf_eth_cbh_find(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\n+\tLIST_FOREACH(cbi, &cbh->list, link) {\n+\t\tif (cbi->port == port && cbi->queue == queue)\n+\t\t\tbreak;\n+\t}\n+\treturn cbi;\n+}\n+\n+static struct bpf_eth_cbi *\n+bpf_eth_cbh_add(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\n+\t/* return an existing one */\n+\tcbi = bpf_eth_cbh_find(cbh, port, queue);\n+\tif (cbi != NULL)\n+\t\treturn cbi;\n+\n+\tcbi = rte_zmalloc(NULL, sizeof(*cbi), RTE_CACHE_LINE_SIZE);\n+\tif (cbi != NULL) {\n+\t\tcbi->port = port;\n+\t\tcbi->queue = queue;\n+\t\tLIST_INSERT_HEAD(&cbh->list, cbi, link);\n+\t}\n+\treturn cbi;\n+}\n+\n+/*\n+ * BPF packet processing routinies.\n+ */\n+\n+static inline uint32_t\n+apply_filter(struct rte_mbuf *mb[], const uint64_t rc[], uint32_t num,\n+\tuint32_t drop)\n+{\n+\tuint32_t i, j, k;\n+\tstruct rte_mbuf *dr[num];\n+\n+\tfor (i = 0, j = 0, k = 0; i != num; i++) {\n+\n+\t\t/* filter matches */\n+\t\tif (rc[i] != 0)\n+\t\t\tmb[j++] = mb[i];\n+\t\t/* no match */\n+\t\telse\n+\t\t\tdr[k++] = mb[i];\n+\t}\n+\n+\tif (drop != 0) {\n+\t\t/* free filtered out mbufs */\n+\t\tfor (i = 0; i != k; i++)\n+\t\t\trte_pktmbuf_free(dr[i]);\n+\t} else {\n+\t\t/* copy filtered out mbufs beyond good ones */\n+\t\tfor (i = 0; i != k; i++)\n+\t\t\tmb[j + i] = dr[i];\n+\t}\n+\n+\treturn j;\n+}\n+\n+static inline uint32_t\n+pkt_filter_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,\n+\tuint32_t drop)\n+{\n+\tuint32_t i;\n+\tvoid *dp[num];\n+\tuint64_t rc[num];\n+\n+\tfor (i = 0; i != num; i++)\n+\t\tdp[i] = rte_pktmbuf_mtod(mb[i], void *);\n+\n+\trte_bpf_exec_burst(bpf, dp, rc, num);\n+\treturn apply_filter(mb, rc, num, drop);\n+}\n+\n+static inline uint32_t\n+pkt_filter_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],\n+\tuint32_t num, uint32_t drop)\n+{\n+\tuint32_t i, n;\n+\tvoid 
*dp;\n+\tuint64_t rc[num];\n+\n+\tn = 0;\n+\tfor (i = 0; i != num; i++) {\n+\t\tdp = rte_pktmbuf_mtod(mb[i], void *);\n+\t\trc[i] = jit->func(dp);\n+\t\tn += (rc[i] == 0);\n+\t}\n+\n+\tif (n != 0)\n+\t\tnum = apply_filter(mb, rc, num, drop);\n+\n+\treturn num;\n+}\n+\n+static inline uint32_t\n+pkt_filter_mb_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,\n+\tuint32_t drop)\n+{\n+\tuint64_t rc[num];\n+\n+\trte_bpf_exec_burst(bpf, (void **)mb, rc, num);\n+\treturn apply_filter(mb, rc, num, drop);\n+}\n+\n+static inline uint32_t\n+pkt_filter_mb_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],\n+\tuint32_t num, uint32_t drop)\n+{\n+\tuint32_t i, n;\n+\tuint64_t rc[num];\n+\n+\tn = 0;\n+\tfor (i = 0; i != num; i++) {\n+\t\trc[i] = jit->func(mb[i]);\n+\t\tn += (rc[i] == 0);\n+\t}\n+\n+\tif (n != 0)\n+\t\tnum = apply_filter(mb, rc, num, drop);\n+\n+\treturn num;\n+}\n+\n+/*\n+ * RX/TX callbacks for raw data bpf.\n+ */\n+\n+static uint16_t\n+bpf_rx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts,\n+\t__rte_unused uint16_t max_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_vm(cbi->bpf, pkt, nb_pkts, 1) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_rx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts,\n+\t__rte_unused uint16_t max_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_jit(&cbi->jit, pkt, nb_pkts, 1) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_tx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts, void 
*user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_vm(cbi->bpf, pkt, nb_pkts, 0) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_tx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_jit(&cbi->jit, pkt, nb_pkts, 0) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+/*\n+ * RX/TX callbacks for mbuf.\n+ */\n+\n+static uint16_t\n+bpf_rx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts,\n+\t__rte_unused uint16_t max_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 1) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_rx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts,\n+\t__rte_unused uint16_t max_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 1) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_tx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 0) 
:\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static uint16_t\n+bpf_tx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,\n+\tstruct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)\n+{\n+\tstruct bpf_eth_cbi *cbi;\n+\tuint16_t rc;\n+\n+\tcbi = user_param;\n+\tbpf_eth_cbi_inuse(cbi);\n+\trc = (cbi->cb != NULL) ?\n+\t\tpkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 0) :\n+\t\tnb_pkts;\n+\tbpf_eth_cbi_unuse(cbi);\n+\treturn rc;\n+}\n+\n+static rte_rx_callback_fn\n+select_rx_callback(enum rte_bpf_prog_type ptype, uint32_t flags)\n+{\n+\tif (flags & RTE_BPF_ETH_F_JIT) {\n+\t\tif (ptype == RTE_BPF_PROG_TYPE_UNSPEC)\n+\t\t\treturn bpf_rx_callback_jit;\n+\t\telse if (ptype == RTE_BPF_PROG_TYPE_MBUF)\n+\t\t\treturn bpf_rx_callback_mb_jit;\n+\t} else if (ptype == RTE_BPF_PROG_TYPE_UNSPEC)\n+\t\treturn bpf_rx_callback_vm;\n+\telse if (ptype == RTE_BPF_PROG_TYPE_MBUF)\n+\t\treturn bpf_rx_callback_mb_vm;\n+\n+\treturn NULL;\n+}\n+\n+static rte_tx_callback_fn\n+select_tx_callback(enum rte_bpf_prog_type ptype, uint32_t flags)\n+{\n+\tif (flags & RTE_BPF_ETH_F_JIT) {\n+\t\tif (ptype == RTE_BPF_PROG_TYPE_UNSPEC)\n+\t\t\treturn bpf_tx_callback_jit;\n+\t\telse if (ptype == RTE_BPF_PROG_TYPE_MBUF)\n+\t\t\treturn bpf_tx_callback_mb_jit;\n+\t} else if (ptype == RTE_BPF_PROG_TYPE_UNSPEC)\n+\t\treturn bpf_tx_callback_vm;\n+\telse if (ptype == RTE_BPF_PROG_TYPE_MBUF)\n+\t\treturn bpf_tx_callback_mb_vm;\n+\n+\treturn NULL;\n+}\n+\n+/*\n+ * helper function to perform BPF unload for given port/queue.\n+ * have to introduce extra complexity (and possible slowdown) here,\n+ * as right now there is no safe generic way to remove RX/TX callback\n+ * while IO is active.\n+ * Still don't free memory allocated for callback handle itself,\n+ * again right now there is no safe way to do that without stopping RX/TX\n+ * on given port/queue first.\n+ */\n+static void\n+bpf_eth_cbi_unload(struct bpf_eth_cbi *bc)\n+{\n+\t/* mark this cbi as empty */\n+\tbc->cb 
= NULL;\n+\trte_smp_mb();\n+\n+\t/* make sure datapath doesn't use bpf anymore, then destroy bpf */\n+\tbpf_eth_cbi_wait(bc);\n+\trte_bpf_destroy(bc->bpf);\n+\tbpf_eth_cbi_cleanup(bc);\n+}\n+\n+static void\n+bpf_eth_unload(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)\n+{\n+\tstruct bpf_eth_cbi *bc;\n+\n+\tbc = bpf_eth_cbh_find(cbh, port, queue);\n+\tif (bc == NULL || bc->cb == NULL)\n+\t\treturn;\n+\n+\tif (cbh->type == BPF_ETH_RX)\n+\t\trte_eth_remove_rx_callback(port, queue, bc->cb);\n+\telse\n+\t\trte_eth_remove_tx_callback(port, queue, bc->cb);\n+\n+\tbpf_eth_cbi_unload(bc);\n+}\n+\n+\n+__rte_experimental void\n+rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)\n+{\n+\tstruct bpf_eth_cbh *cbh;\n+\n+\tcbh = &rx_cbh;\n+\trte_spinlock_lock(&cbh->lock);\n+\tbpf_eth_unload(cbh, port, queue);\n+\trte_spinlock_unlock(&cbh->lock);\n+}\n+\n+__rte_experimental void\n+rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)\n+{\n+\tstruct bpf_eth_cbh *cbh;\n+\n+\tcbh = &tx_cbh;\n+\trte_spinlock_lock(&cbh->lock);\n+\tbpf_eth_unload(cbh, port, queue);\n+\trte_spinlock_unlock(&cbh->lock);\n+}\n+\n+static int\n+bpf_eth_elf_load(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue,\n+\tconst struct rte_bpf_prm *prm, const char *fname, const char *sname,\n+\tuint32_t flags)\n+{\n+\tint32_t rc;\n+\tstruct bpf_eth_cbi *bc;\n+\tstruct rte_bpf *bpf;\n+\trte_rx_callback_fn frx;\n+\trte_tx_callback_fn ftx;\n+\tstruct rte_bpf_jit jit;\n+\n+\tfrx = NULL;\n+\tftx = NULL;\n+\n+\tif (prm == NULL || rte_eth_dev_is_valid_port(port) == 0 ||\n+\t\t\tqueue >= RTE_MAX_QUEUES_PER_PORT)\n+\t\treturn -EINVAL;\n+\n+\tif (cbh->type == BPF_ETH_RX)\n+\t\tfrx = select_rx_callback(prm->prog_type, flags);\n+\telse\n+\t\tftx = select_tx_callback(prm->prog_type, flags);\n+\n+\tif (frx == NULL && ftx == NULL) {\n+\t\tRTE_BPF_LOG(ERR, \"%s(%u, %u): no callback selected;\\n\",\n+\t\t\t__func__, port, queue);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tbpf = rte_bpf_elf_load(prm, fname, sname);\n+\tif (bpf 
== NULL)\n+\t\treturn -rte_errno;\n+\n+\trte_bpf_get_jit(bpf, &jit);\n+\n+\tif ((flags & RTE_BPF_ETH_F_JIT) != 0 && jit.func == NULL) {\n+\t\tRTE_BPF_LOG(ERR, \"%s(%u, %u): no JIT generated;\\n\",\n+\t\t\t__func__, port, queue);\n+\t\trte_bpf_destroy(bpf);\n+\t\trc = -ENOTSUP;\n+\t}\n+\n+\t/* setup/update global callback info */\n+\tbc = bpf_eth_cbh_add(cbh, port, queue);\n+\tif (bc == NULL)\n+\t\treturn -ENOMEM;\n+\n+\t/* remove old one, if any */\n+\tif (bc->cb != NULL)\n+\t\tbpf_eth_unload(cbh, port, queue);\n+\n+\tbc->bpf = bpf;\n+\tbc->jit = jit;\n+\n+\tif (cbh->type == BPF_ETH_RX)\n+\t\tbc->cb = rte_eth_add_rx_callback(port, queue, frx, bc);\n+\telse\n+\t\tbc->cb = rte_eth_add_tx_callback(port, queue, ftx, bc);\n+\n+\tif (bc->cb == NULL) {\n+\t\trc = -rte_errno;\n+\t\trte_bpf_destroy(bpf);\n+\t\tbpf_eth_cbi_cleanup(bc);\n+\t} else\n+\t\trc = 0;\n+\n+\treturn rc;\n+}\n+\n+__rte_experimental int\n+rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,\n+\tconst struct rte_bpf_prm *prm, const char *fname, const char *sname,\n+\tuint32_t flags)\n+{\n+\tint32_t rc;\n+\tstruct bpf_eth_cbh *cbh;\n+\n+\tcbh = &rx_cbh;\n+\trte_spinlock_lock(&cbh->lock);\n+\trc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);\n+\trte_spinlock_unlock(&cbh->lock);\n+\n+\treturn rc;\n+}\n+\n+__rte_experimental int\n+rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,\n+\tconst struct rte_bpf_prm *prm, const char *fname, const char *sname,\n+\tuint32_t flags)\n+{\n+\tint32_t rc;\n+\tstruct bpf_eth_cbh *cbh;\n+\n+\tcbh = &tx_cbh;\n+\trte_spinlock_lock(&cbh->lock);\n+\trc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);\n+\trte_spinlock_unlock(&cbh->lock);\n+\n+\treturn rc;\n+}\ndiff --git a/lib/librte_bpf/meson.build b/lib/librte_bpf/meson.build\nindex 67ca30533..39b464041 100644\n--- a/lib/librte_bpf/meson.build\n+++ b/lib/librte_bpf/meson.build\n@@ -5,15 +5,17 @@ allow_experimental_apis = true\n sources = files('bpf.c',\n \t\t'bpf_exec.c',\n 
\t\t'bpf_load.c',\n+\t\t'bpf_pkt.c',\n \t\t'bpf_validate.c')\n \n if arch_subdir == 'x86'\n \tsources += files('bpf_jit_x86.c')\n endif\n \n-install_headers = files('rte_bpf.h')\n+install_headers = files('rte_bpf.h',\n+\t\t\t'rte_bpf_ethdev.h')\n \n-deps += ['mbuf', 'net']\n+deps += ['mbuf', 'net', 'ethdev']\n \n dep = dependency('libelf', required: false)\n if dep.found() == false\ndiff --git a/lib/librte_bpf/rte_bpf_ethdev.h b/lib/librte_bpf/rte_bpf_ethdev.h\nnew file mode 100644\nindex 000000000..33ce0c6c7\n--- /dev/null\n+++ b/lib/librte_bpf/rte_bpf_ethdev.h\n@@ -0,0 +1,100 @@\n+/* SPDX-License-Identifier: BSD-3-Clause\n+ * Copyright(c) 2018 Intel Corporation\n+ */\n+\n+#ifndef _RTE_BPF_ETHDEV_H_\n+#define _RTE_BPF_ETHDEV_H_\n+\n+#include <rte_bpf.h>\n+\n+#ifdef __cplusplus\n+extern \"C\" {\n+#endif\n+\n+enum {\n+\tRTE_BPF_ETH_F_NONE = 0,\n+\tRTE_BPF_ETH_F_JIT  = 0x1, /*< use compiled into native ISA code */\n+};\n+\n+/**\n+ * API to install BPF filter as RX/TX callbacks for eth devices.\n+ * Note that right now:\n+ * - it is not MT safe, i.e. it is not allowed to do load/unload for the\n+ *   same port/queue from different threads in parallel.\n+ * - though it allows to do load/unload at runtime\n+ *   (while RX/TX is ongoing on given port/queue).\n+ * - allows only one BPF program per port/queue,\n+ * i.e. 
new load will replace previously loaded for that port/queue BPF program.\n+ * Filter behaviour - if BPF program returns zero value for a given packet,\n+ * then it will be dropped inside callback and no further processing\n+ *   on RX - it will be dropped inside callback and no further processing\n+ *   for that packet will happen.\n+ *   on TX - packet will remain unsent, and it is responsibility of the user\n+ *   to handle such situation (drop, try to send again, etc.).\n+ */\n+\n+/**\n+ * Unload previously loaded BPF program (if any) from given RX port/queue\n+ * and remove appropriate RX port/queue callback.\n+ *\n+ * @param port\n+ *   The identifier of the ethernet port\n+ * @param queue\n+ *   The identifier of the RX queue on the given port\n+ */\n+void rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue);\n+\n+/**\n+ * Unload previously loaded BPF program (if any) from given TX port/queue\n+ * and remove appropriate TX port/queue callback.\n+ *\n+ * @param port\n+ *   The identifier of the ethernet port\n+ * @param queue\n+ *   The identifier of the TX queue on the given port\n+ */\n+void rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue);\n+\n+/**\n+ * Load BPF program from the ELF file and install callback to execute it\n+ * on given RX port/queue.\n+ *\n+ * @param port\n+ *   The identifier of the ethernet port\n+ * @param queue\n+ *   The identifier of the RX queue on the given port\n+ * @param fname\n+ *  Pathname for a ELF file.\n+ * @param sname\n+ *  Name of the executable section within the file to load.\n+ * @return\n+ *   Zero on successful completion or negative error code otherwise.\n+ */\n+int rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,\n+\tconst struct rte_bpf_prm *prm, const char *fname, const char *sname,\n+\tuint32_t flags);\n+\n+/**\n+ * Load BPF program from the ELF file and install callback to execute it\n+ * on given TX port/queue.\n+ *\n+ * @param port\n+ *   The identifier of the ethernet port\n+ * @param queue\n+ *   
The identifier of the TX queue on the given port\n+ * @param fname\n+ *  Pathname for a ELF file.\n+ * @param sname\n+ *  Name of the executable section within the file to load.\n+ * @return\n+ *   Zero on successful completion or negative error code otherwise.\n+ */\n+int rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,\n+\tconst struct rte_bpf_prm *prm, const char *fname, const char *sname,\n+\tuint32_t flags);\n+\n+#ifdef __cplusplus\n+}\n+#endif\n+\n+#endif /* _RTE_BPF_ETHDEV_H_ */\ndiff --git a/lib/librte_bpf/rte_bpf_version.map b/lib/librte_bpf/rte_bpf_version.map\nindex ff65144df..a203e088e 100644\n--- a/lib/librte_bpf/rte_bpf_version.map\n+++ b/lib/librte_bpf/rte_bpf_version.map\n@@ -3,6 +3,10 @@ EXPERIMENTAL {\n \n \trte_bpf_destroy;\n \trte_bpf_elf_load;\n+\trte_bpf_eth_rx_elf_load;\n+\trte_bpf_eth_rx_unload;\n+\trte_bpf_eth_tx_elf_load;\n+\trte_bpf_eth_tx_unload;\n \trte_bpf_exec;\n \trte_bpf_exec_burst;\n \trte_bpf_get_jit;\n",
    "prefixes": [
        "dpdk-dev",
        "v2",
        "5/7"
    ]
}