bpf: don't verify classic bpfs

Message ID 20240512055109.96609-1-yoav.w@claroty.com (mailing list archive)
State Superseded
Delegated to: Thomas Monjalon
Series: bpf: don't verify classic bpfs

Checks

Context                       Check    Description
ci/checkpatch                 warning  coding style issues
ci/loongarch-compilation      success  Compilation OK
ci/loongarch-unit-testing     success  Unit Testing PASS
ci/Intel-compilation          success  Compilation OK
ci/github-robot: build        fail     github build: failed
ci/intel-Functional           success  Functional PASS
ci/intel-Testing              success  Testing PASS
ci/iol-mellanox-Performance   success  Performance Testing PASS
ci/iol-intel-Performance      success  Performance Testing PASS
ci/iol-abi-testing            warning  Testing issues
ci/iol-compile-amd64-testing  success  Testing PASS
ci/iol-sample-apps-testing    success  Testing PASS
ci/iol-unit-arm64-testing     success  Testing PASS
ci/iol-unit-amd64-testing     success  Testing PASS
ci/iol-intel-Functional       success  Functional Testing PASS
ci/iol-broadcom-Performance   success  Performance Testing PASS
ci/iol-broadcom-Functional    success  Functional Testing PASS
ci/iol-compile-arm64-testing  success  Testing PASS

Commit Message

Yoav Winstein May 12, 2024, 5:51 a.m. UTC
I noticed that when classic BPF filters with lots of branching instructions are compiled, __rte_bpf_validate() runs far too slowly. A simple filter such as 'ether host a0:38:6d:af:17:eb or b3:a3:ff:b6:c1:ef or ...' with 12 MAC addresses results in ~1 minute of BPF validation. This patch makes rte_bpf_load() aware of bpf_prm structures originating from classic BPF, allowing the validation step to be safely skipped for them.

Signed-off-by: Yoav Winstein <yoav.w@claroty.com>
---
 app/test/test_bpf.c   | 2 ++
 lib/bpf/bpf_convert.c | 1 +
 lib/bpf/bpf_load.c    | 6 ++++--
 lib/bpf/rte_bpf.h     | 1 +
 4 files changed, 8 insertions(+), 2 deletions(-)
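
Below is a minimal sketch (not part of the patch) of the call flow this change speeds up: a classic BPF filter is compiled with libpcap, translated with rte_bpf_convert(), which with this patch marks the returned prm with skip_verification, and then loaded with rte_bpf_load(), which can skip __rte_bpf_validate(). The load_cbpf_filter() helper and its error handling are illustrative only; the rte_bpf_convert()/rte_bpf_load() calls follow the existing library API.

#include <pcap/pcap.h>

#include <rte_bpf.h>
#include <rte_malloc.h>

/*
 * Illustrative helper: compile a classic BPF filter string and load it
 * through the eBPF translation path. With this patch, rte_bpf_convert()
 * sets prm->skip_verification, so rte_bpf_load() no longer spends time
 * in __rte_bpf_validate() on the translated program.
 */
static struct rte_bpf *
load_cbpf_filter(const char *filter)
{
	pcap_t *pc;
	struct bpf_program fcode;
	struct rte_bpf_prm *prm;
	struct rte_bpf *bpf = NULL;

	pc = pcap_open_dead(DLT_EN10MB, 65535);
	if (pc == NULL)
		return NULL;

	if (pcap_compile(pc, &fcode, filter, 1, PCAP_NETMASK_UNKNOWN) != 0) {
		pcap_close(pc);
		return NULL;
	}

	prm = rte_bpf_convert(&fcode);
	if (prm != NULL) {
		bpf = rte_bpf_load(prm);	/* validation skipped for cBPF */
		rte_free(prm);
	}

	pcap_freecode(&fcode);
	pcap_close(pc);
	return bpf;
}

This is the path the new "Performance" entry added to sample_filters[] in app/test/test_bpf.c is meant to exercise.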
  

Patch

diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 53e3a31123..7aae290c1a 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -3407,6 +3407,8 @@  static const char * const sample_filters[] = {
 	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)"
 	" and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)"
 	" and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))",
+	/* Performance */
+	"ether host a0:38:6d:af:17:eb or b3:a3:ff:b6:c1:ef or 4a:e8:e7:5b:76:ce or 0d:87:fa:7a:a6:6d or bb:fd:c6:4b:bc:ae or 4e:28:dc:f1:1c:f6 or 3d:f2:b7:99:54:55 or 1c:5a:90:4b:82:ce or a7:28:08:ea:c9:84 or 1f:c0:2f:2f:0a:01 or c6:64:91:e9:78:f2 or 5b:cc:3f:90:39:ae or 4c:38:8f:ed:16:5c or cb:89:cb:54:0f:4f or 1e:0d:d7:b2:21:84 or 91:07:2b:88:e0:96 or 6c:ca:cf:bf:cf:3e or b9:df:f1:d6:dd:11 or ea:34:0b:b3:96:9e or 70:e5:18:9f:22:93",
 	/* Other */
 	"len = 128",
 };
diff --git a/lib/bpf/bpf_convert.c b/lib/bpf/bpf_convert.c
index d7ff2b4325..bc3c9a5d8e 100644
--- a/lib/bpf/bpf_convert.c
+++ b/lib/bpf/bpf_convert.c
@@ -567,6 +567,7 @@  rte_bpf_convert(const struct bpf_program *prog)
 	/* Classic BPF programs use mbufs */
 	prm->prog_arg.type = RTE_BPF_ARG_PTR_MBUF;
 	prm->prog_arg.size = sizeof(struct rte_mbuf);
+	prm->skip_verification = true;
 
 	return prm;
 }
diff --git a/lib/bpf/bpf_load.c b/lib/bpf/bpf_load.c
index de43347405..e30797cb49 100644
--- a/lib/bpf/bpf_load.c
+++ b/lib/bpf/bpf_load.c
@@ -108,13 +108,15 @@  rte_bpf_load(const struct rte_bpf_prm *prm)
 		return NULL;
 	}
 
-	rc = __rte_bpf_validate(bpf);
+	if (!prm->skip_verification)
+		rc = __rte_bpf_validate(bpf);
+
 	if (rc == 0) {
 		__rte_bpf_jit(bpf);
 		if (mprotect(bpf, bpf->sz, PROT_READ) != 0)
 			rc = -ENOMEM;
 	}
-
+	
 	if (rc != 0) {
 		rte_bpf_destroy(bpf);
 		rte_errno = -rc;
diff --git a/lib/bpf/rte_bpf.h b/lib/bpf/rte_bpf.h
index 80ebb0210f..6f3b4e3c31 100644
--- a/lib/bpf/rte_bpf.h
+++ b/lib/bpf/rte_bpf.h
@@ -94,6 +94,7 @@  struct rte_bpf_prm {
 	/**< array of external symbols that eBPF code is allowed to reference */
 	uint32_t nb_xsym; /**< number of elements in xsym */
 	struct rte_bpf_arg prog_arg; /**< eBPF program input arg description */
+	bool skip_verification; /**< should skip eBPF verification before load */
 };
 
 /**
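
For completeness, the new field can also be set directly: a caller that already trusts its eBPF bytecode may set skip_verification before calling rte_bpf_load(). A minimal sketch under that assumption follows; the two-instruction program below is a placeholder, not part of the patch.

#include <stdint.h>

#include <rte_bpf.h>
#include <rte_common.h>

/* Placeholder program: simply return 1. */
static const struct ebpf_insn trusted_prog[] = {
	{
		.code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
		.dst_reg = EBPF_REG_0,
		.imm = 1,
	},
	{
		.code = (BPF_JMP | EBPF_EXIT),
	},
};

static struct rte_bpf *
load_trusted(void)
{
	struct rte_bpf_prm prm = {
		.ins = trusted_prog,
		.nb_ins = RTE_DIM(trusted_prog),
		.prog_arg = {
			.type = RTE_BPF_ARG_RAW,
			.size = sizeof(uint64_t),
		},
		/* caller vouches for the bytecode; skip __rte_bpf_validate() */
		.skip_verification = true,
	};

	return rte_bpf_load(&prm);
}

Callers that zero-initialize their rte_bpf_prm keep the current behaviour, since the field defaults to false and verification still runs.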