@@ -218,10 +218,6 @@ F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
F: app/test/test_mbuf.c
-Packet buffer offload - EXPERIMENTAL
-M: Declan Doherty <declan.doherty@intel.com>
-F: lib/librte_mbuf_offload/
-
Ethernet API
M: Thomas Monjalon <thomas.monjalon@6wind.com>
F: lib/librte_ether/
@@ -35,7 +35,6 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_mbuf_offload.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
@@ -48,7 +47,7 @@ static enum rte_cryptodev_type gbl_cryptodev_type;
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
struct rte_cryptodev_config conf;
struct rte_cryptodev_qp_conf qp_conf;
@@ -62,7 +61,6 @@ struct crypto_unittest_params {
struct rte_cryptodev_session *sess;
- struct rte_mbuf_offload *ol;
struct rte_crypto_op *op;
struct rte_mbuf *obuf, *ibuf;
@@ -104,7 +102,7 @@ setup_test_string(struct rte_mempool *mpool,
return m;
}
-#if HEX_DUMP
+#ifdef HEX_DUMP
static void
hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
{
@@ -112,27 +110,29 @@ hexdump_mbuf_data(FILE *f, const char *title, struct rte_mbuf *m)
}
#endif
-static struct rte_mbuf *
-process_crypto_request(uint8_t dev_id, struct rte_mbuf *ibuf)
+static struct rte_crypto_op *
+process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
{
- struct rte_mbuf *obuf = NULL;
-#if HEX_DUMP
+#ifdef HEX_DUMP
-	hexdump_mbuf_data(stdout, "Enqueued Packet", ibuf);
+	hexdump_mbuf_data(stdout, "Enqueued Packet", op->sym.m_src);
#endif
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &ibuf, 1) != 1) {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
printf("Error sending packet for encryption");
return NULL;
}
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &obuf, 1) == 0)
+
+ op = NULL;
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
rte_pause();
-#if HEX_DUMP
+#ifdef HEX_DUMP
-	if (obuf)
-		hexdump_mbuf_data(stdout, "Dequeued Packet", obuf);
+	if (op)
+		hexdump_mbuf_data(stdout, "Dequeued Packet", op->sym.m_src);
#endif
- return obuf;
+ return op;
}
static struct crypto_testsuite_params testsuite_params = { NULL };
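With the helper reworked above, callers hand in a fully built crypto op and read results back off the same op instead of a returned mbuf. A minimal caller sketch (illustration only, not patch content; assumes a valid dev_id and an op populated as in the converted tests below):

	struct rte_crypto_op *result = process_crypto_request(dev_id, op);

	if (result == NULL || result->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		printf("crypto op processing failed\n");
	else
		/* processed data stays reachable through the op */
		rte_pktmbuf_dump(stdout, result->sym.m_src, 64);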
@@ -161,12 +161,13 @@ testsuite_setup(void)
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "MBUF_OFFLOAD_POOL",
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "MBUF_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
NUM_MBUFS, MBUF_CACHE_SIZE,
DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
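For reference, the full lifecycle the suite now exercises against this pool, using only the calls the patch introduces; the pool name here is illustrative:

	struct rte_mempool *mp = rte_crypto_op_pool_create(
			"EXAMPLE_SYM_OP_POOL", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			NUM_MBUFS, MBUF_CACHE_SIZE,
			DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
			rte_socket_id());

	struct rte_crypto_op *op = rte_crypto_op_alloc(mp,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op != NULL) {
		/* populate op->sym, enqueue/dequeue, then return it */
		rte_crypto_op_free(op);
	}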
@@ -251,10 +252,9 @@ testsuite_teardown(void)
rte_mempool_count(ts_params->mbuf_pool));
}
-
- if (ts_params->mbuf_ol_pool != NULL) {
+ if (ts_params->op_mpool != NULL) {
RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
- rte_mempool_count(ts_params->mbuf_ol_pool));
+ rte_mempool_count(ts_params->op_mpool));
}
}
@@ -324,8 +324,8 @@ ut_teardown(void)
}
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
/*
* free mbuf - both obuf and ibuf are usually the same,
@@ -678,76 +678,77 @@ static uint8_t aes_cbc_iv[] = {
/* ***** AES-CBC / HMAC-SHA1 Hash Tests ***** */
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_ciphertext[] = {
- 0x8B, 0X4D, 0XDA, 0X1B, 0XCF, 0X04, 0XA0, 0X31,
- 0XB4, 0XBF, 0XBD, 0X68, 0X43, 0X20, 0X7E, 0X76,
- 0XB1, 0X96, 0X8B, 0XA2, 0X7C, 0XA2, 0X83, 0X9E,
- 0X39, 0X5A, 0X2F, 0X7E, 0X92, 0XB4, 0X48, 0X1A,
- 0X3F, 0X6B, 0X5D, 0XDF, 0X52, 0X85, 0X5F, 0X8E,
- 0X42, 0X3C, 0XFB, 0XE9, 0X1A, 0X24, 0XD6, 0X08,
- 0XDD, 0XFD, 0X16, 0XFB, 0XE9, 0X55, 0XEF, 0XF0,
- 0XA0, 0X8D, 0X13, 0XAB, 0X81, 0XC6, 0X90, 0X01,
- 0XB5, 0X18, 0X84, 0XB3, 0XF6, 0XE6, 0X11, 0X57,
- 0XD6, 0X71, 0XC6, 0X3C, 0X3F, 0X2F, 0X33, 0XEE,
- 0X24, 0X42, 0X6E, 0XAC, 0X0B, 0XCA, 0XEC, 0XF9,
- 0X84, 0XF8, 0X22, 0XAA, 0X60, 0XF0, 0X32, 0XA9,
- 0X75, 0X75, 0X3B, 0XCB, 0X70, 0X21, 0X0A, 0X8D,
- 0X0F, 0XE0, 0XC4, 0X78, 0X2B, 0XF8, 0X97, 0XE3,
- 0XE4, 0X26, 0X4B, 0X29, 0XDA, 0X88, 0XCD, 0X46,
- 0XEC, 0XAA, 0XF9, 0X7F, 0XF1, 0X15, 0XEA, 0XC3,
- 0X87, 0XE6, 0X31, 0XF2, 0XCF, 0XDE, 0X4D, 0X80,
- 0X70, 0X91, 0X7E, 0X0C, 0XF7, 0X26, 0X3A, 0X92,
- 0X4F, 0X18, 0X83, 0XC0, 0X8F, 0X59, 0X01, 0XA5,
- 0X88, 0XD1, 0XDB, 0X26, 0X71, 0X27, 0X16, 0XF5,
- 0XEE, 0X10, 0X82, 0XAC, 0X68, 0X26, 0X9B, 0XE2,
- 0X6D, 0XD8, 0X9A, 0X80, 0XDF, 0X04, 0X31, 0XD5,
- 0XF1, 0X35, 0X5C, 0X3B, 0XDD, 0X9A, 0X65, 0XBA,
- 0X58, 0X34, 0X85, 0X61, 0X1C, 0X42, 0X10, 0X76,
- 0X73, 0X02, 0X42, 0XC9, 0X23, 0X18, 0X8E, 0XB4,
- 0X6F, 0XB4, 0XA3, 0X54, 0X6E, 0X88, 0X3B, 0X62,
- 0X7C, 0X02, 0X8D, 0X4C, 0X9F, 0XC8, 0X45, 0XF4,
- 0XC9, 0XDE, 0X4F, 0XEB, 0X22, 0X83, 0X1B, 0XE4,
- 0X49, 0X37, 0XE4, 0XAD, 0XE7, 0XCD, 0X21, 0X54,
- 0XBC, 0X1C, 0XC2, 0X04, 0X97, 0XB4, 0X10, 0X61,
- 0XF0, 0XE4, 0XEF, 0X27, 0X63, 0X3A, 0XDA, 0X91,
- 0X41, 0X25, 0X62, 0X1C, 0X5C, 0XB6, 0X38, 0X4A,
- 0X88, 0X71, 0X59, 0X5A, 0X8D, 0XA0, 0X09, 0XAF,
- 0X72, 0X94, 0XD7, 0X79, 0X5C, 0X60, 0X7C, 0X8F,
- 0X4C, 0XF5, 0XD9, 0XA1, 0X39, 0X6D, 0X81, 0X28,
- 0XEF, 0X13, 0X28, 0XDF, 0XF5, 0X3E, 0XF7, 0X8E,
- 0X09, 0X9C, 0X78, 0X18, 0X79, 0XB8, 0X68, 0XD7,
- 0XA8, 0X29, 0X62, 0XAD, 0XDE, 0XE1, 0X61, 0X76,
- 0X1B, 0X05, 0X16, 0XCD, 0XBF, 0X02, 0X8E, 0XA6,
- 0X43, 0X6E, 0X92, 0X55, 0X4F, 0X60, 0X9C, 0X03,
- 0XB8, 0X4F, 0XA3, 0X02, 0XAC, 0XA8, 0XA7, 0X0C,
- 0X1E, 0XB5, 0X6B, 0XF8, 0XC8, 0X4D, 0XDE, 0XD2,
- 0XB0, 0X29, 0X6E, 0X40, 0XE6, 0XD6, 0XC9, 0XE6,
- 0XB9, 0X0F, 0XB6, 0X63, 0XF5, 0XAA, 0X2B, 0X96,
- 0XA7, 0X16, 0XAC, 0X4E, 0X0A, 0X33, 0X1C, 0XA6,
- 0XE6, 0XBD, 0X8A, 0XCF, 0X40, 0XA9, 0XB2, 0XFA,
- 0X63, 0X27, 0XFD, 0X9B, 0XD9, 0XFC, 0XD5, 0X87,
- 0X8D, 0X4C, 0XB6, 0XA4, 0XCB, 0XE7, 0X74, 0X55,
- 0XF4, 0XFB, 0X41, 0X25, 0XB5, 0X4B, 0X0A, 0X1B,
- 0XB1, 0XD6, 0XB7, 0XD9, 0X47, 0X2A, 0XC3, 0X98,
- 0X6A, 0XC4, 0X03, 0X73, 0X1F, 0X93, 0X6E, 0X53,
- 0X19, 0X25, 0X64, 0X15, 0X83, 0XF9, 0X73, 0X2A,
- 0X74, 0XB4, 0X93, 0X69, 0XC4, 0X72, 0XFC, 0X26,
- 0XA2, 0X9F, 0X43, 0X45, 0XDD, 0XB9, 0XEF, 0X36,
- 0XC8, 0X3A, 0XCD, 0X99, 0X9B, 0X54, 0X1A, 0X36,
- 0XC1, 0X59, 0XF8, 0X98, 0XA8, 0XCC, 0X28, 0X0D,
- 0X73, 0X4C, 0XEE, 0X98, 0XCB, 0X7C, 0X58, 0X7E,
- 0X20, 0X75, 0X1E, 0XB7, 0XC9, 0XF8, 0XF2, 0X0E,
- 0X63, 0X9E, 0X05, 0X78, 0X1A, 0XB6, 0XA8, 0X7A,
- 0XF9, 0X98, 0X6A, 0XA6, 0X46, 0X84, 0X2E, 0XF6,
- 0X4B, 0XDC, 0X9B, 0X8F, 0X9B, 0X8F, 0XEE, 0XB4,
- 0XAA, 0X3F, 0XEE, 0XC0, 0X37, 0X27, 0X76, 0XC7,
- 0X95, 0XBB, 0X26, 0X74, 0X69, 0X12, 0X7F, 0XF1,
- 0XBB, 0XFF, 0XAE, 0XB5, 0X99, 0X6E, 0XCB, 0X0C
+ 0x8B, 0x4D, 0xDA, 0x1B, 0xCF, 0x04, 0xA0, 0x31,
+ 0xB4, 0xBF, 0xBD, 0x68, 0x43, 0x20, 0x7E, 0x76,
+ 0xB1, 0x96, 0x8B, 0xA2, 0x7C, 0xA2, 0x83, 0x9E,
+ 0x39, 0x5A, 0x2F, 0x7E, 0x92, 0xB4, 0x48, 0x1A,
+ 0x3F, 0x6B, 0x5D, 0xDF, 0x52, 0x85, 0x5F, 0x8E,
+ 0x42, 0x3C, 0xFB, 0xE9, 0x1A, 0x24, 0xD6, 0x08,
+ 0xDD, 0xFD, 0x16, 0xFB, 0xE9, 0x55, 0xEF, 0xF0,
+ 0xA0, 0x8D, 0x13, 0xAB, 0x81, 0xC6, 0x90, 0x01,
+ 0xB5, 0x18, 0x84, 0xB3, 0xF6, 0xE6, 0x11, 0x57,
+ 0xD6, 0x71, 0xC6, 0x3C, 0x3F, 0x2F, 0x33, 0xEE,
+ 0x24, 0x42, 0x6E, 0xAC, 0x0B, 0xCA, 0xEC, 0xF9,
+ 0x84, 0xF8, 0x22, 0xAA, 0x60, 0xF0, 0x32, 0xA9,
+ 0x75, 0x75, 0x3B, 0xCB, 0x70, 0x21, 0x0A, 0x8D,
+ 0x0F, 0xE0, 0xC4, 0x78, 0x2B, 0xF8, 0x97, 0xE3,
+ 0xE4, 0x26, 0x4B, 0x29, 0xDA, 0x88, 0xCD, 0x46,
+ 0xEC, 0xAA, 0xF9, 0x7F, 0xF1, 0x15, 0xEA, 0xC3,
+ 0x87, 0xE6, 0x31, 0xF2, 0xCF, 0xDE, 0x4D, 0x80,
+ 0x70, 0x91, 0x7E, 0x0C, 0xF7, 0x26, 0x3A, 0x92,
+ 0x4F, 0x18, 0x83, 0xC0, 0x8F, 0x59, 0x01, 0xA5,
+ 0x88, 0xD1, 0xDB, 0x26, 0x71, 0x27, 0x16, 0xF5,
+ 0xEE, 0x10, 0x82, 0xAC, 0x68, 0x26, 0x9B, 0xE2,
+ 0x6D, 0xD8, 0x9A, 0x80, 0xDF, 0x04, 0x31, 0xD5,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x73, 0x02, 0x42, 0xC9, 0x23, 0x18, 0x8E, 0xB4,
+ 0x6F, 0xB4, 0xA3, 0x54, 0x6E, 0x88, 0x3B, 0x62,
+ 0x7C, 0x02, 0x8D, 0x4C, 0x9F, 0xC8, 0x45, 0xF4,
+ 0xC9, 0xDE, 0x4F, 0xEB, 0x22, 0x83, 0x1B, 0xE4,
+ 0x49, 0x37, 0xE4, 0xAD, 0xE7, 0xCD, 0x21, 0x54,
+ 0xBC, 0x1C, 0xC2, 0x04, 0x97, 0xB4, 0x10, 0x61,
+ 0xF0, 0xE4, 0xEF, 0x27, 0x63, 0x3A, 0xDA, 0x91,
+ 0x41, 0x25, 0x62, 0x1C, 0x5C, 0xB6, 0x38, 0x4A,
+ 0x88, 0x71, 0x59, 0x5A, 0x8D, 0xA0, 0x09, 0xAF,
+ 0x72, 0x94, 0xD7, 0x79, 0x5C, 0x60, 0x7C, 0x8F,
+ 0x4C, 0xF5, 0xD9, 0xA1, 0x39, 0x6D, 0x81, 0x28,
+ 0xEF, 0x13, 0x28, 0xDF, 0xF5, 0x3E, 0xF7, 0x8E,
+ 0x09, 0x9C, 0x78, 0x18, 0x79, 0xB8, 0x68, 0xD7,
+ 0xA8, 0x29, 0x62, 0xAD, 0xDE, 0xE1, 0x61, 0x76,
+ 0x1B, 0x05, 0x16, 0xCD, 0xBF, 0x02, 0x8E, 0xA6,
+ 0x43, 0x6E, 0x92, 0x55, 0x4F, 0x60, 0x9C, 0x03,
+ 0xB8, 0x4F, 0xA3, 0x02, 0xAC, 0xA8, 0xA7, 0x0C,
+ 0x1E, 0xB5, 0x6B, 0xF8, 0xC8, 0x4D, 0xDE, 0xD2,
+ 0xB0, 0x29, 0x6E, 0x40, 0xE6, 0xD6, 0xC9, 0xE6,
+ 0xB9, 0x0F, 0xB6, 0x63, 0xF5, 0xAA, 0x2B, 0x96,
+ 0xA7, 0x16, 0xAC, 0x4E, 0x0A, 0x33, 0x1C, 0xA6,
+ 0xE6, 0xBD, 0x8A, 0xCF, 0x40, 0xA9, 0xB2, 0xFA,
+ 0x63, 0x27, 0xFD, 0x9B, 0xD9, 0xFC, 0xD5, 0x87,
+ 0x8D, 0x4C, 0xB6, 0xA4, 0xCB, 0xE7, 0x74, 0x55,
+ 0xF4, 0xFB, 0x41, 0x25, 0xB5, 0x4B, 0x0A, 0x1B,
+ 0xB1, 0xD6, 0xB7, 0xD9, 0x47, 0x2A, 0xC3, 0x98,
+ 0x6A, 0xC4, 0x03, 0x73, 0x1F, 0x93, 0x6E, 0x53,
+ 0x19, 0x25, 0x64, 0x15, 0x83, 0xF9, 0x73, 0x2A,
+ 0x74, 0xB4, 0x93, 0x69, 0xC4, 0x72, 0xFC, 0x26,
+ 0xA2, 0x9F, 0x43, 0x45, 0xDD, 0xB9, 0xEF, 0x36,
+ 0xC8, 0x3A, 0xCD, 0x99, 0x9B, 0x54, 0x1A, 0x36,
+ 0xC1, 0x59, 0xF8, 0x98, 0xA8, 0xCC, 0x28, 0x0D,
+ 0x73, 0x4C, 0xEE, 0x98, 0xCB, 0x7C, 0x58, 0x7E,
+ 0x20, 0x75, 0x1E, 0xB7, 0xC9, 0xF8, 0xF2, 0x0E,
+ 0x63, 0x9E, 0x05, 0x78, 0x1A, 0xB6, 0xA8, 0x7A,
+ 0xF9, 0x98, 0x6A, 0xA6, 0x46, 0x84, 0x2E, 0xF6,
+ 0x4B, 0xDC, 0x9B, 0x8F, 0x9B, 0x8F, 0xEE, 0xB4,
+ 0xAA, 0x3F, 0xEE, 0xC0, 0x37, 0x27, 0x76, 0xC7,
+ 0x95, 0xBB, 0x26, 0x74, 0x69, 0x12, 0x7F, 0xF1,
+ 0xBB, 0xFF, 0xAE, 0xB5, 0x99, 0x6E, 0xCB, 0x0C
};
static const uint8_t catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest[] = {
- 0x9a, 0X4f, 0X88, 0X1b, 0Xb6, 0X8f, 0Xd8, 0X60,
- 0X42, 0X1a, 0X7d, 0X3d, 0Xf5, 0X82, 0X80, 0Xf1,
- 0X18, 0X8c, 0X1d, 0X32 };
+ 0x9a, 0x4f, 0x88, 0x1b, 0xb6, 0x8f, 0xd8, 0x60,
+ 0x42, 0x1a, 0x7d, 0x3d, 0xf5, 0x82, 0x80, 0xf1,
+ 0x18, 0x8c, 0x1d, 0x32
+};
static int
@@ -774,7 +775,6 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* Setup HMAC Parameters */
-
ut_params->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
ut_params->auth_xform.next = NULL;
@@ -784,58 +784,63 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
ut_params->auth_xform.auth.key.data = hmac_sha1_key;
ut_params->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
- /* Create Crypto session*/
+	/* Create crypto session */
ut_params->sess = rte_cryptodev_session_create(ts_params->valid_devs[0],
&ut_params->cipher_xform);
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
- /* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
+ /* Generate crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- ut_params->op = &ut_params->ol->op.crypto;
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- /* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ /* Set crypto operation data parameters */
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
/* Validate obuf */
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC,
+ uint8_t *ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym.m_src,
+ uint8_t *, CIPHER_IV_LENGTH_AES_CBC);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(ciphertext,
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
- "Ciphertext data not as expected");
+ "ciphertext data not as expected");
- TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
- CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
+ uint8_t *digest = ciphertext + QUOTE_512_BYTES;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(digest,
catch_22_quote_2_512_bytes_AES_CBC_HMAC_SHA1_digest,
gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD ?
TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 :
@@ -860,60 +865,65 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest_sessionless(void)
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
- TEST_ASSERT_NOT_NULL(rte_pktmbuf_offload_alloc_crypto_xforms(
- ut_params->ol, 2),
+ TEST_ASSERT_NOT_NULL(rte_crypto_sym_op_xforms_alloc(ut_params->op, 2),
"failed to allocate space for crypto transforms");
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
/* Set crypto operation data parameters */
- ut_params->op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
+ sym_op->xform->type = RTE_CRYPTO_XFORM_CIPHER;
/* cipher parameters */
- ut_params->op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- ut_params->op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- ut_params->op->xform->cipher.key.data = aes_cbc_key;
- ut_params->op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+ sym_op->xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ sym_op->xform->cipher.key.data = aes_cbc_key;
+ sym_op->xform->cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
/* hash parameters */
- ut_params->op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
+ sym_op->xform->next->type = RTE_CRYPTO_XFORM_AUTH;
- ut_params->op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- ut_params->op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- ut_params->op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
- ut_params->op->xform->next->auth.key.data = hmac_sha1_key;
- ut_params->op->xform->next->auth.digest_length =
+ sym_op->xform->next->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ sym_op->xform->next->auth.key.length = HMAC_KEY_LENGTH_SHA1;
+ sym_op->xform->next->auth.key.data = hmac_sha1_key;
+ sym_op->xform->next->auth.digest_length =
DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -981,42 +991,46 @@ test_AES_CBC_HMAC_SHA1_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
-
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1089,41 +1103,46 @@ test_AES_CBC_HMAC_SHA256_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1191,42 +1210,47 @@ test_AES_CBC_HMAC_SHA256_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1307,41 +1331,47 @@ test_AES_CBC_HMAC_SHA512_encrypt_digest(void)
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
+
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1440,43 +1470,47 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_session *sess,
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1545,37 +1579,41 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- ut_params->op->iv.data = (uint8_t *)
- rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1584,6 +1622,7 @@ test_AES_CBC_HMAC_AES_XCBC_encrypt_digest(void)
catch_22_quote_2_512_bytes_AES_CBC_ciphertext,
QUOTE_512_BYTES,
"Ciphertext data not as expected");
+
TEST_ASSERT_BUFFERS_ARE_EQUAL(
rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) +
CIPHER_IV_LENGTH_AES_CBC + QUOTE_512_BYTES,
@@ -1638,35 +1677,41 @@ test_AES_CBC_HMAC_AES_XCBC_decrypt_digest_verify(void)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(ut_params->ibuf,
- CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
+
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ ut_params->obuf = ut_params->op->sym.m_src;
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
@@ -1826,50 +1871,48 @@ test_not_in_place_crypto(void)
DIGEST_BYTE_LENGTH_SHA512);
/* Generate Crypto op data structure */
- ut_params->ol = rte_pktmbuf_offload_alloc(ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ut_params->ol,
- "Failed to allocate pktmbuf offload");
-
- ut_params->op = &ut_params->ol->op.crypto;
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(ut_params->op, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&ut_params->op->sym, ut_params->sess);
- ut_params->op->digest.data = ut_params->digest;
- ut_params->op->digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ struct rte_crypto_sym_op *sym_op = &ut_params->op->sym;
+
+ sym_op->auth.digest.data = ut_params->digest;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, QUOTE_512_BYTES);
- ut_params->op->digest.length = DIGEST_BYTE_LENGTH_SHA512;
+ sym_op->auth.digest.length = DIGEST_BYTE_LENGTH_SHA512;
+
+ sym_op->auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->auth.data.length = QUOTE_512_BYTES;
+
- ut_params->op->iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->iv.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(
ut_params->ibuf, 0);
- ut_params->op->iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(ut_params->op->iv.data, aes_cbc_iv,
+ rte_memcpy(sym_op->cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- ut_params->op->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_cipher.length = QUOTE_512_BYTES;
-
- ut_params->op->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- ut_params->op->data.to_hash.length = QUOTE_512_BYTES;
-
- ut_params->op->dst.m = dst_m;
- ut_params->op->dst.offset = 0;
+ sym_op->cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ sym_op->cipher.data.length = QUOTE_512_BYTES;
- rte_pktmbuf_offload_attach(ut_params->ibuf, ut_params->ol);
+ sym_op->m_dst = dst_m;
+ sym_op->m_src = ut_params->ibuf;
/* Process crypto operation */
- ut_params->obuf = process_crypto_request(ts_params->valid_devs[0],
- ut_params->ibuf);
- TEST_ASSERT_NOT_NULL(ut_params->obuf, "failed to retrieve obuf");
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+			ut_params->op), "failed to process sym crypto op");
/* Validate obuf */
TEST_ASSERT_BUFFERS_ARE_EQUAL(
- rte_pktmbuf_mtod(ut_params->op->dst.m, char *),
+ rte_pktmbuf_mtod(ut_params->op->sym.m_dst, char *),
catch_22_quote,
QUOTE_512_BYTES,
"Plaintext data not as expected");
@@ -32,8 +32,6 @@
#ifndef TEST_CRYPTODEV_H_
#define TEST_CRYPTODEV_H_
-#define HEX_DUMP 0
-
#define FALSE 0
#define TRUE 1
@@ -47,8 +45,9 @@
#define DEFAULT_NUM_XFORMS (2)
#define NUM_MBUFS (8191)
#define MBUF_CACHE_SIZE (250)
-#define MBUF_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512 + \
- sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATAPAYLOAD_SIZE (2048 + DIGEST_BYTE_LENGTH_SHA512)
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE)
#define BYTE_LENGTH(x) (x/8)
/* HASH DIGEST LENGTHS */
@@ -62,6 +61,7 @@
#define AES_XCBC_MAC_KEY_SZ (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA1 (12)
+#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA224 (14)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA256 (16)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
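The MBUF_SIZE split above makes the payload budget explicit. A sanity sketch of the arithmetic, droppable into any test body (DIGEST_BYTE_LENGTH_SHA512 is BYTE_LENGTH(512) == 64; RTE_BUILD_BUG_ON from rte_common.h is assumed available):

	/* data payload: 2048 bytes of text plus the largest digest */
	RTE_BUILD_BUG_ON(MBUF_DATAPAYLOAD_SIZE != 2048 + 64);
	/* total: mbuf header + headroom + payload, digest counted once */
	RTE_BUILD_BUG_ON(MBUF_SIZE != sizeof(struct rte_mbuf) +
			RTE_PKTMBUF_HEADROOM + MBUF_DATAPAYLOAD_SIZE);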
@@ -32,7 +32,6 @@
#include <rte_common.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -50,7 +49,7 @@
struct crypto_testsuite_params {
struct rte_mempool *mbuf_mp;
- struct rte_mempool *mbuf_ol_pool;
+ struct rte_mempool *op_mpool;
uint16_t nb_queue_pairs;
@@ -69,7 +68,6 @@ struct crypto_unittest_params {
struct rte_cryptodev_session *sess;
struct rte_crypto_op *op;
- struct rte_mbuf_offload *ol;
struct rte_mbuf *obuf[MAX_NUM_OF_OPS_PER_UT];
struct rte_mbuf *ibuf[MAX_NUM_OF_OPS_PER_UT];
@@ -79,7 +77,7 @@ struct crypto_unittest_params {
static struct rte_mbuf *
setup_test_string(struct rte_mempool *mpool,
- const char *string, size_t len, uint8_t blocksize)
+ const uint8_t *data, size_t len, uint8_t blocksize)
{
struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
size_t t_len = len - (blocksize ? (len % blocksize) : 0);
@@ -92,7 +90,7 @@ setup_test_string(struct rte_mempool *mpool,
return NULL;
}
- rte_memcpy(dst, string, t_len);
+ rte_memcpy(dst, (const void *)data, t_len);
}
return m;
}
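With the parameter retyped to const uint8_t *, call sites pass the byte vectors straight through; a sketch mirroring the converted call sites below:

	struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
			data_params[0].expected.ciphertext,
			data_params[0].length, 0);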
@@ -112,23 +110,23 @@ testsuite_setup(void)
ts_params->mbuf_mp = rte_mempool_lookup("CRYPTO_PERF_MBUFPOOL");
if (ts_params->mbuf_mp == NULL) {
/* Not already created so create */
- ts_params->mbuf_mp = rte_mempool_create("CRYPTO_PERF_MBUFPOOL", NUM_MBUFS,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ ts_params->mbuf_mp = rte_pktmbuf_pool_create(
+ "CRYPTO_PERF_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
if (ts_params->mbuf_mp == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_PERF_MBUFPOOL\n");
return TEST_FAILED;
}
}
- ts_params->mbuf_ol_pool = rte_pktmbuf_offload_pool_create("CRYPTO_OP_POOL",
- NUM_MBUFS, MBUF_CACHE_SIZE,
- DEFAULT_NUM_XFORMS *
- sizeof(struct rte_crypto_xform),
- rte_socket_id());
- if (ts_params->mbuf_ol_pool == NULL) {
+
+ ts_params->op_mpool = rte_crypto_op_pool_create("CRYPTO_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS * sizeof(struct rte_crypto_xform),
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
return TEST_FAILED;
}
@@ -255,8 +253,8 @@ ut_teardown(void)
ut_params->sess);
/* free crypto operation structure */
- if (ut_params->ol)
- rte_pktmbuf_offload_free(ut_params->ol);
+ if (ut_params->op)
+ rte_crypto_op_free(ut_params->op);
for (i = 0; i < MAX_NUM_OF_OPS_PER_UT; i++) {
if (ut_params->obuf[i])
@@ -1697,11 +1695,12 @@ struct crypto_data_params aes_cbc_hmac_sha256_output[MAX_PACKET_SIZE_INDEX] = {
static int
test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
{
- uint32_t num_to_submit = 2048, max_outstanding_reqs = 512;
- struct rte_mbuf *rx_mbufs[num_to_submit], *tx_mbufs[num_to_submit];
+ uint32_t num_to_submit = 4096;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
uint64_t failed_polls, retries, start_cycles, end_cycles, total_cycles = 0;
uint32_t burst_sent, burst_received;
- uint32_t b, burst_size, num_sent, num_received;
+ uint32_t i, burst_size, num_sent, num_received;
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1738,46 +1737,50 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
/* Generate Crypto op data structure(s) */
- for (b = 0; b < num_to_submit ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
- (const char *)data_params[0].expected.ciphertext,
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = setup_test_string(ts_params->mbuf_mp,
+ data_params[0].expected.ciphertext,
data_params[0].length, 0);
- TEST_ASSERT_NOT_NULL(tx_mbufs[b], "Failed to allocate tx_buf");
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(tx_mbufs[b],
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
DIGEST_BYTE_LENGTH_SHA256);
TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
rte_memcpy(ut_params->digest, data_params[0].expected.digest,
DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool, RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
- struct rte_crypto_op *cop = &ol->op.crypto;
+		struct rte_crypto_op *op =
+				rte_crypto_op_alloc(ts_params->op_mpool,
+						RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+		TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto op");
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ rte_crypto_sym_op_attach_session(&op->sym, ut_params->sess);
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(tx_mbufs[b],
+ op->sym.auth.digest.data = ut_params->digest;
+ op->sym.auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
data_params[0].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym.auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
+
+ op->sym.auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.auth.data.length = data_params[0].length;
+
+ op->sym.cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(m,
+ CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ rte_memcpy(op->sym.cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.cipher.data.length = data_params[0].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[0].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[0].length;
+ op->sym.m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[i] = op;
}
printf("\nTest to measure the IA cycle cost using AES128_CBC_SHA256_HMAC "
@@ -1788,17 +1791,17 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
printf("\nDev No\tQP No\tNum Sent\tNum Received\tTx/Rx burst");
printf("\tRetries (Device Busy)\tAverage IA cycle cost "
"(assuming 0 retries)");
- for (b = 2; b <= 128 ; b *= 2) {
+ for (i = 2; i <= 128 ; i *= 2) {
num_sent = 0;
num_received = 0;
retries = 0;
failed_polls = 0;
- burst_size = b;
+ burst_size = i;
total_cycles = 0;
while (num_sent < num_to_submit) {
start_cycles = rte_rdtsc_precise();
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0,
- &tx_mbufs[num_sent],
+ burst_sent = rte_cryptodev_enqueue_burst(dev_num,
+ 0, &c_ops[num_sent],
((num_to_submit-num_sent) < burst_size) ?
num_to_submit-num_sent : burst_size);
if (burst_sent == 0)
@@ -1813,8 +1816,8 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
rte_delay_ms(1);
start_cycles = rte_rdtsc_precise();
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1822,12 +1825,15 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
end_cycles = rte_rdtsc_precise();
total_cycles += end_cycles - start_cycles;
}
+
while (num_received != num_to_submit) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
- burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, burst_size);
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops, burst_size);
if (burst_received == 0)
failed_polls++;
else
@@ -1841,16 +1847,9 @@ test_perf_crypto_qp_vary_burst_size(uint16_t dev_num)
}
printf("\n");
- for (b = 0; b < max_outstanding_reqs ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym.m_src);
+ rte_crypto_op_free(c_ops[i]);
}
return TEST_SUCCESS;
}
@@ -1860,11 +1859,14 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
{
uint16_t index;
uint32_t burst_sent, burst_received;
- uint32_t b, num_sent, num_received, throughput;
+ uint32_t b, num_sent, num_received;
uint64_t failed_polls, retries, start_cycles, end_cycles;
const uint64_t mhz = rte_get_tsc_hz()/1000000;
- double mmps;
- struct rte_mbuf *rx_mbufs[DEFAULT_BURST_SIZE], *tx_mbufs[DEFAULT_BURST_SIZE];
+ double throughput, mmps;
+
+ struct rte_crypto_op *c_ops[DEFAULT_BURST_SIZE];
+ struct rte_crypto_op *proc_ops[DEFAULT_BURST_SIZE];
+
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
struct crypto_data_params *data_params = aes_cbc_hmac_sha256_output;
@@ -1903,7 +1905,7 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
"AES128_CBC_SHA256_HMAC requests with a constant burst "
"size of %u while varying payload sizes", DEFAULT_BURST_SIZE);
printf("\nDev No\tQP No\tReq Size(B)\tNum Sent\tNum Received\t"
- "Mrps\tThoughput(Mbps)");
+			"Mrps\tThroughput(Gbps)");
printf("\tRetries (Attempted a burst, but the device was busy)");
for (index = 0; index < MAX_PACKET_SIZE_INDEX; index++) {
num_sent = 0;
@@ -1913,94 +1915,106 @@ test_perf_AES_CBC_HMAC_SHA256_encrypt_digest_vary_req_size(uint16_t dev_num)
/* Generate Crypto op data structure(s) */
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- tx_mbufs[b] = setup_test_string(ts_params->mbuf_mp,
+ struct rte_mbuf *m = setup_test_string(
+ ts_params->mbuf_mp,
+ (const uint8_t *)
data_params[index].plaintext,
data_params[index].length,
0);
- ut_params->digest = (uint8_t *)rte_pktmbuf_append(
- tx_mbufs[b], DIGEST_BYTE_LENGTH_SHA256);
- TEST_ASSERT_NOT_NULL(ut_params->digest, "no room to append digest");
+ ut_params->digest = (uint8_t *)rte_pktmbuf_append(m,
+ DIGEST_BYTE_LENGTH_SHA256);
+			TEST_ASSERT_NOT_NULL(ut_params->digest,
+					"no room to append digest");
- rte_memcpy(ut_params->digest, data_params[index].expected.digest,
- DIGEST_BYTE_LENGTH_SHA256);
+ rte_memcpy(ut_params->digest,
+ data_params[index].expected.digest,
+ DIGEST_BYTE_LENGTH_SHA256);
- struct rte_mbuf_offload *ol = rte_pktmbuf_offload_alloc(
- ts_params->mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
- TEST_ASSERT_NOT_NULL(ol, "Failed to allocate pktmbuf offload");
+			struct rte_crypto_op *op = rte_crypto_op_alloc(
+					ts_params->op_mpool,
+					RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+			TEST_ASSERT_NOT_NULL(op,
+					"Failed to allocate crypto op");
- struct rte_crypto_op *cop = &ol->op.crypto;
+ rte_crypto_sym_op_attach_session(&op->sym,
+ ut_params->sess);
- rte_crypto_op_attach_session(cop, ut_params->sess);
+ op->sym.auth.digest.data = ut_params->digest;
+ op->sym.auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(m,
+ data_params[index].length);
+ op->sym.auth.digest.length = DIGEST_BYTE_LENGTH_SHA256;
- cop->digest.data = ut_params->digest;
- cop->digest.phys_addr = rte_pktmbuf_mtophys_offset(
- tx_mbufs[b], data_params[index].length);
- cop->digest.length = DIGEST_BYTE_LENGTH_SHA256;
+ op->sym.auth.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.auth.data.length = data_params[index].length;
- cop->iv.data = (uint8_t *)rte_pktmbuf_prepend(tx_mbufs[b],
+ op->sym.cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ m, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.iv.phys_addr = rte_pktmbuf_mtophys(m);
+ op->sym.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ rte_memcpy(op->sym.cipher.iv.data, aes_cbc_iv,
CIPHER_IV_LENGTH_AES_CBC);
- cop->iv.phys_addr = rte_pktmbuf_mtophys(tx_mbufs[b]);
- cop->iv.length = CIPHER_IV_LENGTH_AES_CBC;
- rte_memcpy(cop->iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ op->sym.cipher.data.offset = CIPHER_IV_LENGTH_AES_CBC;
+ op->sym.cipher.data.length = data_params[index].length;
- cop->data.to_cipher.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_cipher.length = data_params[index].length;
- cop->data.to_hash.offset = CIPHER_IV_LENGTH_AES_CBC;
- cop->data.to_hash.length = data_params[index].length;
+ op->sym.m_src = m;
- rte_pktmbuf_offload_attach(tx_mbufs[b], ol);
+ c_ops[b] = op;
}
+
start_cycles = rte_rdtsc_precise();
while (num_sent < DEFAULT_NUM_REQS_TO_SUBMIT) {
- burst_sent = rte_cryptodev_enqueue_burst(dev_num, 0, tx_mbufs,
- ((DEFAULT_NUM_REQS_TO_SUBMIT-num_sent) < DEFAULT_BURST_SIZE) ?
- DEFAULT_NUM_REQS_TO_SUBMIT-num_sent : DEFAULT_BURST_SIZE);
+ uint16_t burst_size = (DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent) < DEFAULT_BURST_SIZE ?
+ DEFAULT_NUM_REQS_TO_SUBMIT -
+ num_sent : DEFAULT_BURST_SIZE;
+
+ burst_sent = rte_cryptodev_enqueue_burst(
+ dev_num, 0, c_ops, burst_size);
if (burst_sent == 0)
retries++;
else
num_sent += burst_sent;
burst_received = rte_cryptodev_dequeue_burst(dev_num,
- 0, rx_mbufs, DEFAULT_BURST_SIZE);
+ 0, proc_ops, DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
while (num_received != DEFAULT_NUM_REQS_TO_SUBMIT) {
- if (gbl_cryptodev_preftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD)
- rte_cryptodev_enqueue_burst(dev_num, 0, NULL, 0);
-
- burst_received = rte_cryptodev_dequeue_burst(dev_num, 0,
- rx_mbufs, DEFAULT_BURST_SIZE);
+ if (gbl_cryptodev_preftest_devtype ==
+ RTE_CRYPTODEV_AESNI_MB_PMD)
+ rte_cryptodev_enqueue_burst(dev_num, 0,
+ NULL, 0);
+
+ burst_received = rte_cryptodev_dequeue_burst(
+ dev_num, 0, proc_ops,
+ DEFAULT_BURST_SIZE);
if (burst_received == 0)
failed_polls++;
else
num_received += burst_received;
}
end_cycles = rte_rdtsc_precise();
- mmps = (double)num_received*mhz/(end_cycles - start_cycles);
- throughput = mmps*data_params[index].length*8;
+ mmps = ((double)num_received * mhz) /
+ (end_cycles - start_cycles);
+ throughput = (mmps * data_params[index].length * 8) / 1000;
+
printf("\n%u\t%u\t%u\t\t%u\t%u", dev_num, 0,
- data_params[index].length, num_sent, num_received);
- printf("\t%.2f\t%u", mmps, throughput);
+ data_params[index].length,
+ num_sent, num_received);
+ printf("\t%.2f\t%.2f", mmps, throughput);
printf("\t\t%"PRIu64, retries);
for (b = 0; b < DEFAULT_BURST_SIZE ; b++) {
- struct rte_mbuf_offload *ol = tx_mbufs[b]->offload_ops;
-
- if (ol) {
- do {
- rte_pktmbuf_offload_free(ol);
- ol = ol->next;
- } while (ol != NULL);
- }
- rte_pktmbuf_free(tx_mbufs[b]);
+ rte_pktmbuf_free(c_ops[b]->sym.m_src);
+ rte_crypto_op_free(c_ops[b]);
}
}
+
printf("\n");
return TEST_SUCCESS;
}
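The reworked arithmetic also sorts out the units: mmps is millions of requests per second, so mmps * length * 8 is Mbit/s, and the new division by 1000 is what justifies the Gbps column header. A worked example, assuming a 1024-byte payload:

	/*
	 * mmps   = num_received * mhz / cycles	e.g.  2.50 M req/s
	 * Mbit/s = mmps * length * 8		2.50 * 1024 * 8 = 20480
	 * Gbit/s = Mbit/s / 1000		20.48  (value now printed)
	 */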
@@ -362,13 +362,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
@@ -379,13 +379,6 @@ CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
#
-# Compile librte_mbuf_offload
-# EXPERIMENTAL: API may change without prior notice
-#
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD=y
-CONFIG_RTE_LIBRTE_MBUF_OFFLOAD_DEBUG=n
-
-#
# Compile librte_timer
#
CONFIG_RTE_LIBRTE_TIMER=y
@@ -104,7 +104,6 @@ There are many libraries, so their headers may be grouped by topics:
- **containers**:
[mbuf] (@ref rte_mbuf.h),
- [mbuf_offload] (@ref rte_mbuf_offload.h),
[ring] (@ref rte_ring.h),
[distributor] (@ref rte_distributor.h),
[reorder] (@ref rte_reorder.h),
@@ -38,7 +38,6 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include <rte_mbuf_offload.h>
#include "rte_aesni_mb_pmd_private.h"
@@ -297,27 +296,30 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/** Get multi buffer session */
static struct aesni_mb_session *
-get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
- struct aesni_mb_session *sess;
+ struct aesni_mb_session *sess = NULL;
- if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
- if (unlikely(crypto_op->session->type !=
+ if (op->sym.type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym.session->type !=
RTE_CRYPTODEV_AESNI_MB_PMD))
return NULL;
- sess = (struct aesni_mb_session *)crypto_op->session->_private;
+ sess = (struct aesni_mb_session *)op->sym.session->_private;
} else {
- struct rte_cryptodev_session *c_sess = NULL;
+ void *_sess = NULL;
- if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)c_sess->_private;
+ sess = (struct aesni_mb_session *)
+ ((struct rte_cryptodev_session *)_sess)->_private;
if (unlikely(aesni_mb_set_session_parameters(qp->ops,
- sess, crypto_op->xform) != 0))
- return NULL;
+ sess, op->sym.xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
}
return sess;
@@ -336,11 +338,14 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
- struct rte_crypto_op *c_op, struct aesni_mb_session *session)
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
+ struct aesni_mb_session *session)
{
JOB_AES_HMAC *job;
+ struct rte_mbuf *m_src = op->sym.m_src, *m_dst;
+ uint16_t m_offset = 0;
+
job = (*qp->ops->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL))
return job;
@@ -369,49 +374,65 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
}
/* Mutable crypto operation parameters */
+ if (op->sym.m_dst) {
+ m_src = m_dst = op->sym.m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym.m_src));
+ if (odata == NULL) {
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+ return NULL;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym.m_src, void*),
+ rte_pktmbuf_data_len(op->sym.m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym.cipher.data.offset;
+ }
/* Set digest output location */
if (job->cipher_direction == DECRYPT) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m,
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
- if (job->auth_tag_output)
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
- else
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+ get_digest_byte_length(job->hash_alg));
+
} else {
- job->auth_tag_output = c_op->digest.data;
+ job->auth_tag_output = op->sym.auth.digest.data;
}
/*
- * Multiple buffer library current only support returning a truncated
+ * Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = c_op->iv.data;
- job->iv_len_in_bytes = c_op->iv.length;
+ job->iv = op->sym.cipher.iv.data;
+ job->iv_len_in_bytes = op->sym.cipher.iv.length;
/* Data Parameter */
- job->src = rte_pktmbuf_mtod(m, uint8_t *);
- job->dst = c_op->dst.m ?
- rte_pktmbuf_mtod(c_op->dst.m, uint8_t *) +
- c_op->dst.offset :
- rte_pktmbuf_mtod(m, uint8_t *) +
- c_op->data.to_cipher.offset;
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- job->cipher_start_src_offset_in_bytes = c_op->data.to_cipher.offset;
- job->msg_len_to_cipher_in_bytes = c_op->data.to_cipher.length;
+ job->cipher_start_src_offset_in_bytes = op->sym.cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym.cipher.data.length;
- job->hash_start_src_offset_in_bytes = c_op->data.to_hash.offset;
- job->msg_len_to_hash_in_bytes = c_op->data.to_hash.length;
+ job->hash_start_src_offset_in_bytes = op->sym.auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym.auth.data.length;
/* Set user data to be crypto operation data struct */
- job->user_data = m;
- job->user_data2 = c_op;
+ job->user_data = op;
+ job->user_data2 = m_dst;
return job;
}
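
The job now carries the crypto op and destination mbuf in its opaque fields, so completion handling no longer walks an offload chain. The recovery step, mirroring post_process_mb_job() in the next hunk, is simply:

	/* recover what process_crypto_op() stashed in the job */
	struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
	struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
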
@@ -426,43 +447,41 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_mbuf *m,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_mbuf *
+static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m;
- struct rte_crypto_op *c_op;
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
- if (job->user_data == NULL)
+ if (op == NULL || m_dst == NULL)
return NULL;
- /* handled retrieved job */
- m = (struct rte_mbuf *)job->user_data;
- c_op = (struct rte_crypto_op *)job->user_data2;
-
/* set status as successful by default */
- c_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* check if job has been processed */
if (unlikely(job->status != STS_COMPLETED)) {
- c_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return m;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
} else if (job->chain_order == HASH_CIPHER) {
/* Verify digest if required */
- if (memcmp(job->auth_tag_output, c_op->digest.data,
+ if (memcmp(job->auth_tag_output, op->sym.auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
- c_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
/* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, get_digest_byte_length(job->hash_alg));
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/* Free session if a session-less crypto op */
- if (c_op->type == RTE_CRYPTO_OP_SESSIONLESS) {
- rte_mempool_put(qp->sess_mp, c_op->session);
- c_op->session = NULL;
+ if (op->sym.type == RTE_CRYPTO_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym.session);
+ op->sym.session = NULL;
}
- return m;
+ return op;
}
/**
@@ -478,16 +497,16 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_mbuf *m = NULL;
+ struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
while (job) {
processed_jobs++;
- m = post_process_mb_job(qp, job);
- if (m)
- rte_ring_enqueue(qp->processed_pkts, (void *)m);
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
else
- qp->qp_stats.dequeue_err_count++;
+ qp->stats.dequeue_err_count++;
job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
}
@@ -496,52 +515,49 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
}
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_mbuf **bufs,
- uint16_t nb_bufs)
+aesni_mb_pmd_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
-
struct aesni_mb_session *sess;
- struct aesni_mb_qp *qp = queue_pair;
+ struct aesni_mb_qp *_qp = qp;
JOB_AES_HMAC *job = NULL;
int i, processed_jobs = 0;
- for (i = 0; i < nb_bufs; i++) {
- ol = rte_pktmbuf_offload_get(bufs[i], RTE_PKTMBUF_OL_CRYPTO);
- if (unlikely(ol == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ for (i = 0; i < nb_ops; i++) {
+ if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- sess = get_session(qp, &ol->op.crypto);
+ sess = get_session(_qp, ops[i]);
if (unlikely(sess == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
- job = process_crypto_op(qp, bufs[i], &ol->op.crypto, sess);
+ job = process_crypto_op(_qp, ops[i], sess);
if (unlikely(job == NULL)) {
- qp->qp_stats.enqueue_err_count++;
+ _qp->stats.enqueue_err_count++;
goto flush_jobs;
}
/* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ job = (*_qp->ops->job.submit)(&_qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(_qp, job);
}
if (processed_jobs == 0)
goto flush_jobs;
else
- qp->qp_stats.enqueued_count += processed_jobs;
+ _qp->stats.enqueued_count += processed_jobs;
return i;
flush_jobs:
@@ -549,26 +565,22 @@ flush_jobs:
* If we haven't processed any jobs in submit loop, then flush jobs
* queue to stop the output stalling
*/
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ job = (*_qp->ops->job.flush_job)(&_qp->mb_mgr);
if (job)
- qp->qp_stats.enqueued_count += handle_completed_jobs(qp, job);
+ _qp->stats.enqueued_count += handle_completed_jobs(_qp, job);
return i;
}
static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair,
- struct rte_mbuf **bufs, uint16_t nb_bufs)
+aesni_mb_pmd_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct aesni_mb_qp *qp = queue_pair;
-
- unsigned nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)bufs, nb_bufs);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ struct aesni_mb_qp *_qp = qp;
- return nb_dequeued;
+ unsigned nb_dequeued = rte_ring_dequeue_burst(_qp->processed_pkts,
+ (void **)ops, nb_ops);
+
+ _qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
}
@@ -76,11 +76,11 @@ aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- stats->enqueued_count += qp->qp_stats.enqueued_count;
- stats->dequeued_count += qp->qp_stats.dequeued_count;
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
- stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
- stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
}
}
@@ -93,7 +93,7 @@ aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
}
}
@@ -196,7 +196,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp->sess_mp = dev->data->session_pool;
- memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
(*qp->ops->job.init_mgr)(&qp->mb_mgr);
@@ -150,7 +150,7 @@ struct aesni_mb_qp {
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
/**< Session Mempool */
- struct rte_cryptodev_stats qp_stats;
+ struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
} __rte_cache_aligned;
@@ -59,7 +59,6 @@
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
-#include <rte_mbuf_offload.h>
#include <rte_hexdump.h>
#include "qat_logs.h"
@@ -72,7 +71,7 @@ static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -275,15 +274,15 @@ unsigned qat_crypto_sym_get_session_private_size(
}
-uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+uint16_t qat_pmd_enqueue_sym_op_burst(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
register struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
register uint32_t nb_pkts_sent = 0;
- register struct rte_mbuf **cur_tx_pkt = tx_pkts;
+ register struct rte_crypto_op **cur_op = ops;
register int ret;
- uint16_t nb_pkts_possible = nb_pkts;
+ uint16_t nb_ops_possible = nb_ops;
register uint8_t *base_addr;
register uint32_t tail;
int overflow;
@@ -294,19 +293,18 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+ overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- queue->max_inflights);
if (overflow > 0) {
rte_atomic16_sub(&tmp_qp->inflights16, overflow);
- nb_pkts_possible = nb_pkts - overflow;
- if (nb_pkts_possible == 0)
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
return 0;
}
- while (nb_pkts_sent != nb_pkts_possible) {
+ while (nb_pkts_sent != nb_ops_possible) {
- ret = qat_alg_write_mbuf_entry(*cur_tx_pkt,
- base_addr + tail);
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
if (nb_pkts_sent == 0)
@@ -316,7 +314,7 @@ uint16_t qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
tail = adf_modulo(tail + queue->msg_size, queue->modulo);
nb_pkts_sent++;
- cur_tx_pkt++;
+ cur_op++;
}
kick_tail:
WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -327,13 +325,13 @@ kick_tail:
}
uint16_t
-qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qat_pmd_dequeue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- struct rte_mbuf_offload *ol;
struct qat_queue *queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
- struct rte_mbuf *rx_mbuf;
+ struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
queue = &(tmp_qp->rx_q);
@@ -341,16 +339,20 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
((uint8_t *)queue->base_addr + queue->head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_pkts) {
- rx_mbuf = (struct rte_mbuf *)(resp_msg->opaque_data);
- ol = rte_pktmbuf_offload_get(rx_mbuf, RTE_PKTMBUF_OL_CRYPTO);
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- ol->op.crypto.status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
@@ -359,9 +361,8 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
resp_msg = (struct icp_qat_fw_comn_resp *)
((uint8_t *)queue->base_addr +
queue->head);
-
- *rx_pkts = rx_mbuf;
- rx_pkts++;
+ *ops = rx_op;
+ ops++;
msg_counter++;
}
if (msg_counter > 0) {
@@ -375,74 +376,64 @@ qat_crypto_pkt_rx_burst(void *qp, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
static inline int
-qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
- struct rte_mbuf_offload *ol;
-
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- ol = rte_pktmbuf_offload_get(mbuf, RTE_PKTMBUF_OL_CRYPTO);
- if (unlikely(ol == NULL)) {
- PMD_DRV_LOG(ERR, "No valid crypto off-load operation attached "
- "to (%p) mbuf.", mbuf);
- return -EINVAL;
- }
-
- if (unlikely(ol->op.crypto.type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (unlikely(op->sym.type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests mbuf (%p) is sessionless.", mbuf);
+ " requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(ol->op.crypto.session->type != RTE_CRYPTODEV_QAT_PMD)) {
+ if (unlikely(op->sym.session->type != RTE_CRYPTODEV_QAT_PMD)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)ol->op.crypto.session->_private;
+ ctx = (struct qat_session *)op->sym.session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
*qat_req = ctx->fw_req;
- qat_req->comn_mid.opaque_data = (uint64_t)mbuf;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
/*
* The following code assumes:
* - single entry buffer.
- * - always in place.
*/
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length = mbuf->data_len;
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(mbuf);
+ qat_req->comn_mid.dst_length = qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym.m_src);
+ qat_req->comn_mid.dest_data_addr = qat_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys(op->sym.m_src);
cipher_param = (void *)&qat_req->serv_specif_rqpars;
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- cipher_param->cipher_length = ol->op.crypto.data.to_cipher.length;
- cipher_param->cipher_offset = ol->op.crypto.data.to_cipher.offset;
- if (ol->op.crypto.iv.length &&
- (ol->op.crypto.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
+ cipher_param->cipher_length = op->sym.cipher.data.length;
+ cipher_param->cipher_offset = op->sym.cipher.data.offset;
+ if (op->sym.cipher.iv.length && (op->sym.cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
rte_memcpy(cipher_param->u.cipher_IV_array,
- ol->op.crypto.iv.data, ol->op.crypto.iv.length);
+ op->sym.cipher.iv.data,
+ op->sym.cipher.iv.length);
} else {
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = ol->op.crypto.iv.phys_addr;
+ cipher_param->u.s.cipher_IV_ptr = op->sym.cipher.iv.phys_addr;
}
- if (ol->op.crypto.digest.phys_addr) {
+ if (op->sym.auth.digest.phys_addr) {
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = ol->op.crypto.digest.phys_addr;
+ auth_param->auth_res_addr = op->sym.auth.digest.phys_addr;
}
- auth_param->auth_off = ol->op.crypto.data.to_hash.offset;
- auth_param->auth_len = ol->op.crypto.data.to_hash.length;
- auth_param->u1.aad_adr = ol->op.crypto.additional_auth.phys_addr;
+ auth_param->auth_off = op->sym.auth.data.offset;
+ auth_param->auth_len = op->sym.auth.data.length;
+
+ auth_param->u1.aad_adr = op->sym.auth.aad.phys_addr;
/* (GCM) aad length(240 max) will be at this location after precompute */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
@@ -454,9 +445,19 @@ qat_alg_write_mbuf_entry(struct rte_mbuf *mbuf, uint8_t *out_msg)
}
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym.m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym.m_src));
+ rte_hexdump(stdout, "iv:", op->sym.cipher.iv.data,
+ op->sym.cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym.auth.digest.data,
+ op->sym.auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym.auth.aad.data,
+ op->sym.auth.aad.length);
#endif
return 0;
}
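
The QAT conversion hinges on the descriptor's opaque_data field: the enqueue path stores the op pointer in the hardware request, and the dequeue path recovers it from the firmware response. Condensed from the hunks above:

	/* enqueue side: stash the op pointer in the hardware request */
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/* dequeue side: recover the op from the firmware response */
	rx_op = (struct rte_crypto_op *)(uintptr_t)resp_msg->opaque_data;
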
@@ -115,10 +115,12 @@ extern void
qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-uint16_t
-qat_crypto_pkt_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_enqueue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
-uint16_t
-qat_crypto_pkt_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+extern uint16_t
+qat_pmd_dequeue_sym_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
#endif /* _QAT_CRYPTO_H_ */
@@ -92,8 +92,8 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->dev_type = RTE_CRYPTODEV_QAT_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
- cryptodev->enqueue_burst = qat_crypto_pkt_tx_burst;
- cryptodev->dequeue_burst = qat_crypto_pkt_rx_burst;
+ cryptodev->enqueue_burst = qat_pmd_enqueue_sym_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_sym_op_burst;
internals = cryptodev->data->dev_private;
@@ -62,7 +62,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_mbuf_offload.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
@@ -85,6 +84,7 @@
*/
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
+
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -104,6 +104,11 @@ struct pkt_buffer {
struct rte_mbuf *buffer[MAX_PKT_BURST];
};
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
@@ -154,14 +159,16 @@ struct lcore_queue_conf {
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
- struct pkt_buffer crypto_pkt_buf[RTE_MAX_ETHPORTS];
- struct pkt_buffer tx_pkt_buf[RTE_MAX_ETHPORTS];
+ struct op_buffer op_buf[RTE_MAX_ETHPORTS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
@@ -175,7 +182,7 @@ static const struct rte_eth_conf port_conf = {
};
struct rte_mempool *l2fwd_pktmbuf_pool;
-struct rte_mempool *l2fwd_mbuf_ol_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
/* Per-port statistics struct */
struct l2fwd_port_statistics {
@@ -200,14 +207,17 @@ struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS];
/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
/* default period is 10 seconds */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
-uint64_t total_packets_dropped = 0, total_packets_tx = 0, total_packets_rx = 0,
- total_packets_enqueued = 0, total_packets_dequeued = 0,
- total_packets_errors = 0;
+uint64_t total_packets_dropped = 0,
+ total_packets_tx = 0,
+ total_packets_rx = 0,
+ total_packets_enqueued = 0,
+ total_packets_dequeued = 0,
+ total_packets_errors = 0;
/* Print out statistics on packets dropped */
static void
@@ -284,20 +294,21 @@ static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
{
- struct rte_mbuf **pkt_buffer;
+ struct rte_crypto_op **op_buffer;
unsigned ret;
- pkt_buffer = (struct rte_mbuf **)
- qconf->crypto_pkt_buf[cparams->dev_id].buffer;
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
- ret = rte_cryptodev_enqueue_burst(cparams->dev_id, cparams->qp_id,
- pkt_buffer, (uint16_t) n);
crypto_statistics[cparams->dev_id].enqueued += ret;
if (unlikely(ret < n)) {
crypto_statistics[cparams->dev_id].errors += (n - ret);
do {
- rte_pktmbuf_offload_free(pkt_buffer[ret]->offload_ops);
- rte_pktmbuf_free(pkt_buffer[ret]);
+ rte_pktmbuf_free(op_buffer[ret]->sym.m_src);
+ rte_crypto_op_free(op_buffer[ret]);
} while (++ret < n);
}
@@ -305,7 +316,8 @@ l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
}
static int
-l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -313,23 +325,23 @@ l2fwd_crypto_enqueue(struct rte_mbuf *m, struct l2fwd_crypto_params *cparams)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->crypto_pkt_buf[cparams->dev_id].len;
- qconf->crypto_pkt_buf[cparams->dev_id].buffer[len] = m;
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
len++;
- /* enough pkts to be sent */
+ /* enough ops to be sent */
if (len == MAX_PKT_BURST) {
l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
len = 0;
}
- qconf->crypto_pkt_buf[cparams->dev_id].len = len;
+ qconf->op_buf[cparams->dev_id].len = len;
return 0;
}
static int
l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
- struct rte_mbuf_offload *ol,
+ struct rte_crypto_op *op,
struct l2fwd_crypto_params *cparams)
{
struct ether_hdr *eth_hdr;
@@ -367,43 +379,43 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
}
/* Set crypto operation data parameters */
- rte_crypto_op_attach_session(&ol->op.crypto, cparams->session);
+ rte_crypto_sym_op_attach_session(&op->sym, cparams->session);
/* Append space for digest to end of packet */
- ol->op.crypto.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ op->sym.auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
cparams->digest_length);
- ol->op.crypto.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym.auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
- ol->op.crypto.digest.length = cparams->digest_length;
+ op->sym.auth.digest.length = cparams->digest_length;
+
+ op->sym.auth.data.offset = ipdata_offset;
+ op->sym.auth.data.length = data_len;
- ol->op.crypto.iv.data = cparams->iv_key.data;
- ol->op.crypto.iv.phys_addr = cparams->iv_key.phys_addr;
- ol->op.crypto.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_cipher.offset = ipdata_offset;
- ol->op.crypto.data.to_cipher.length = data_len;
+ op->sym.cipher.iv.data = cparams->iv_key.data;
+ op->sym.cipher.iv.phys_addr = cparams->iv_key.phys_addr;
+ op->sym.cipher.iv.length = cparams->iv_key.length;
- ol->op.crypto.data.to_hash.offset = ipdata_offset;
- ol->op.crypto.data.to_hash.length = data_len;
+ op->sym.cipher.data.offset = ipdata_offset;
+ op->sym.cipher.data.length = data_len;
- rte_pktmbuf_offload_attach(m, ol);
+ op->sym.m_src = m;
- return l2fwd_crypto_enqueue(m, cparams);
+ return l2fwd_crypto_enqueue(op, cparams);
}
/* Send the burst of packets on an output interface */
static int
-l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint8_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
- unsigned queueid = 0;
- pkt_buffer = (struct rte_mbuf **)qconf->tx_pkt_buf[port].buffer;
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
- ret = rte_eth_tx_burst(port, (uint16_t) queueid, pkt_buffer,
- (uint16_t)n);
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
port_statistics[port].tx += ret;
if (unlikely(ret < n)) {
port_statistics[port].dropped += (n - ret);
@@ -425,8 +437,8 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
- len = qconf->tx_pkt_buf[port].len;
- qconf->tx_pkt_buf[port].buffer[len] = m;
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
len++;
/* enough pkts to be sent */
@@ -435,7 +447,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
len = 0;
}
- qconf->tx_pkt_buf[port].len = len;
+ qconf->pkt_buf[port].len = len;
return 0;
}
@@ -495,6 +507,8 @@ static void
l2fwd_main_loop(struct l2fwd_crypto_options *options)
{
struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
unsigned i, j, portid, nb_rx;
@@ -555,12 +569,12 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- if (qconf->tx_pkt_buf[portid].len == 0)
+ if (qconf->pkt_buf[portid].len == 0)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
- qconf->tx_pkt_buf[portid].len,
+ qconf->pkt_buf[portid].len,
(uint8_t) portid);
- qconf->tx_pkt_buf[portid].len = 0;
+ qconf->pkt_buf[portid].len = 0;
}
/* if timer is enabled */
@@ -589,7 +603,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
* Read packet from RX queues
*/
for (i = 0; i < qconf->nb_rx_ports; i++) {
- struct rte_mbuf_offload *ol;
+ struct rte_crypto_op *op;
portid = qconf->rx_port_list[i];
@@ -603,15 +617,14 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
/* Enqueue packets from Crypto device*/
for (j = 0; j < nb_rx; j++) {
m = pkts_burst[j];
- ol = rte_pktmbuf_offload_alloc(
- l2fwd_mbuf_ol_pool,
- RTE_PKTMBUF_OL_CRYPTO);
+ op = rte_crypto_op_alloc(l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
/*
- * If we can't allocate a offload, then drop
+ * If we can't allocate a crypto_op, then drop
* the rest of the burst and dequeue and
- * process the packets to free offload structs
+ * process the packets to free the crypto operations
*/
- if (unlikely(ol == NULL)) {
+ if (unlikely(op == NULL)) {
for (; j < nb_rx; j++) {
rte_pktmbuf_free(pkts_burst[j]);
port_statistics[portid].dropped++;
@@ -620,24 +633,31 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
}
rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- rte_prefetch0((void *)ol);
+ rte_prefetch0((void *)op);
- l2fwd_simple_crypto_enqueue(m, ol, cparams);
+ l2fwd_simple_crypto_enqueue(m, op, cparams);
}
/* Dequeue packets from Crypto device */
- nb_rx = rte_cryptodev_dequeue_burst(
- cparams->dev_id, cparams->qp_id,
- pkts_burst, MAX_PKT_BURST);
- crypto_statistics[cparams->dev_id].dequeued += nb_rx;
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
- /* Forward crypto'd packets */
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_pktmbuf_offload_free(m->offload_ops);
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym.m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ rte_prefetch0(rte_pktmbuf_mtod(m,
+ void *));
+
+ l2fwd_simple_forward(m, portid);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
}
}
}
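
The new do/while matters because a single fixed-size dequeue could strand completed ops on the queue pair; polling continues while full bursts come back. The drain pattern, reduced to its essentials (dev_id, qp_id and ops stand in for the example's variables):

	uint16_t n;

	do {
		n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops,
				MAX_PKT_BURST);

		/* handle the n completed ops: recover m_src, free the op,
		 * forward the packet */
	} while (n == MAX_PKT_BURST);	/* partial burst: ring is drained */
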
@@ -750,26 +770,17 @@ parse_key(struct rte_crypto_key *key __rte_unused,
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- if (strcmp("SHA1", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA1;
- return 0;
- } else if (strcmp("SHA1_HMAC", optarg) == 0) {
+ if (strcmp("SHA1_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
return 0;
- } else if (strcmp("SHA224", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA224;
- return 0;
} else if (strcmp("SHA224_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
return 0;
- } else if (strcmp("SHA256", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
- return 0;
} else if (strcmp("SHA256_HMAC", optarg) == 0) {
*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
return 0;
- } else if (strcmp("SHA512", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256;
+ } else if (strcmp("SHA384_HMAC", optarg) == 0) {
+ *algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
return 0;
} else if (strcmp("SHA512_HMAC", optarg) == 0) {
- *algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ *algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
@@ -800,7 +811,7 @@ static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
struct option *lgopts, int option_index)
{
- if (strcmp(lgopts[option_index].name, "cdev_type") == 0)
+ if (strcmp(lgopts[option_index].name, "cdev") == 0)
return parse_cryptodev_type(&options->cdev_type, optarg);
else if (strcmp(lgopts[option_index].name, "chain") == 0)
@@ -825,12 +836,10 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
/* Authentication options */
else if (strcmp(lgopts[option_index].name, "auth_algo") == 0)
- return parse_auth_algo(&options->cipher_xform.auth.algo,
- optarg);
+ return parse_auth_algo(&options->auth_xform.auth.algo, optarg);
else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
- return parse_auth_op(&options->cipher_xform.auth.op,
- optarg);
+ return parse_auth_op(&options->auth_xform.auth.op, optarg);
else if (strcmp(lgopts[option_index].name, "auth_key") == 0)
return parse_key(&options->auth_xform.auth.key,
@@ -896,16 +905,16 @@ l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
const char *q_arg)
{
char *end = NULL;
- long int n;
+ unsigned long n;
/* parse number string */
- n = strtol(q_arg, &end, 10);
+ n = strtoul(q_arg, &end, 10);
if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
n = 0;
if (n >= MAX_TIMER_PERIOD) {
- printf("Warning refresh period specified %ld is greater than "
- "max value %d! using max value",
+ printf("Warning refresh period specified %lu is greater than "
+ "max value %lu! using max value",
n, MAX_TIMER_PERIOD);
n = MAX_TIMER_PERIOD;
}
@@ -925,9 +934,9 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->nb_ports_per_lcore = 1;
options->refresh_period = 10000;
options->single_lcore = 0;
+ options->sessionless = 0;
options->cdev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
- options->sessionless = 0;
options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
/* Cipher Data */
@@ -984,39 +993,6 @@ l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
printf("sessionless crypto: %s\n",
options->sessionless ? "enabled" : "disabled");
-#if 0
- options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
-
- /* Cipher Data */
- options->cipher_xform.type = RTE_CRYPTO_XFORM_CIPHER;
- options->cipher_xform.next = NULL;
-
- options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
-
- generate_random_key(options->ckey_data, sizeof(options->ckey_data));
-
- options->cipher_xform.cipher.key.data = options->ckey_data;
- options->cipher_xform.cipher.key.phys_addr = 0;
- options->cipher_xform.cipher.key.length = 16;
-
-
- /* Authentication Data */
- options->auth_xform.type = RTE_CRYPTO_XFORM_AUTH;
- options->auth_xform.next = NULL;
-
- options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
-
- options->auth_xform.auth.add_auth_data_length = 0;
- options->auth_xform.auth.digest_length = 20;
-
- generate_random_key(options->akey_data, sizeof(options->akey_data));
-
- options->auth_xform.auth.key.data = options->akey_data;
- options->auth_xform.auth.key.phys_addr = 0;
- options->auth_xform.auth.key.length = 20;
-#endif
}
/* Parse the argument given in the command line of the application */
@@ -1030,7 +1006,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
static struct option lgopts[] = {
{ "sessionless", no_argument, 0, 0 },
- { "cdev_type", required_argument, 0, 0 },
+ { "cdev", required_argument, 0, 0 },
{ "chain", required_argument, 0, 0 },
{ "cipher_algo", required_argument, 0, 0 },
@@ -1044,6 +1020,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "iv", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+
{ NULL, 0, 0, 0 }
};
@@ -1361,15 +1338,17 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 128,
- 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* create crypto op pool */
- l2fwd_mbuf_ol_pool = rte_pktmbuf_offload_pool_create(
- "mbuf_offload_pool", NB_MBUF, 128, 0, rte_socket_id());
- if (l2fwd_mbuf_ol_pool == NULL)
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, 0,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
/* Enable Ethernet ports */
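
Note the op pool here is created with priv_size 0, which is enough because the example attaches pre-built sessions. A sessionless variant would need private space for the xform chain; a sketch of that call (the two-xform size is illustrative):

	/* hypothetical sessionless setup: leave room for a cipher+auth chain
	 * so rte_crypto_sym_op_xforms_alloc() can succeed later */
	l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128,
			2 * sizeof(struct rte_crypto_xform), rte_socket_id());
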
@@ -36,7 +36,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
-DIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += librte_mbuf_offload
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
@@ -347,12 +347,15 @@ enum rte_crypto_op_sess_type {
enum rte_crypto_op_status {
RTE_CRYPTO_OP_STATUS_SUCCESS,
/**< Operation completed successfully */
- RTE_CRYPTO_OP_STATUS_NO_SUBMITTED,
+ RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED,
/**< Operation not yet submitted to a cryptodev */
RTE_CRYPTO_OP_STATUS_ENQUEUED,
/**< Operation is enqueued on device */
RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
/**< Authentication verification failed */
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ /**< Operation failed due to invalid session arguments, or failure
+ * to create a session in session-less mode */
RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
/**< Operation failed due to invalid arguments in request */
RTE_CRYPTO_OP_STATUS_ERROR,
@@ -360,20 +363,16 @@ enum rte_crypto_op_status {
};
/**
- * Cryptographic Operation Data.
+ * Symmetric Cryptographic Operation.
*
- * This structure contains data relating to performing cryptographic processing
- * on a data buffer. This request is used with rte_crypto_enqueue_burst() call
- * for performing cipher, hash, or a combined hash and cipher operations.
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
*/
-struct rte_crypto_op {
+struct rte_crypto_sym_op {
enum rte_crypto_op_sess_type type;
- enum rte_crypto_op_status status;
- struct {
- struct rte_mbuf *m; /**< Destination mbuf */
- uint8_t offset; /**< Data offset */
- } dst;
+ struct rte_mbuf *m_src; /**< source mbuf */
+ struct rte_mbuf *m_dst; /**< destination mbuf */
union {
struct rte_cryptodev_session *session;
@@ -384,7 +383,7 @@ struct rte_crypto_op {
struct {
struct {
- uint32_t offset;
+ uint16_t offset;
/**< Starting point for cipher processing, specified
* as number of bytes from start of data in the source
* buffer. The result of the cipher operation will be
@@ -392,7 +391,7 @@ struct rte_crypto_op {
* this location.
*/
- uint32_t length;
+ uint16_t length;
/**< The message length, in bytes, of the source buffer
* on which the cryptographic operation will be
* computed. This must be a multiple of the block size
@@ -411,17 +410,68 @@ struct rte_crypto_op {
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
* field should be set to 0.
*/
- } to_cipher; /**< Data offsets and length for ciphering */
+ } data; /**< Data offsets and length for ciphering */
+
+ struct {
+ uint8_t *data;
+ /**< Initialisation Vector or Counter.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * Initialisation Vector (IV) value.
+ *
+ * - For block ciphers in CTR mode, this is the counter.
+ *
+ * - For GCM mode, this is either the IV (if the length
+ * is 96 bits) or J0 (for other sizes), where J0 is as
+ * defined by NIST SP800-38D. Regardless of the IV
+ * length, a full 16 bytes needs to be allocated.
+ *
+ * - For CCM mode, the first byte is reserved, and the
+ * nonce should be written starting at &iv[1] (to allow
+ * space for the implementation to write in the flags
+ * in the first byte). Note that a full 16 bytes should
+ * be allocated, even though the length field will
+ * have a value less than this.
+ *
+ * - For AES-XTS, this is the 128bit tweak, i, from
+ * IEEE Std 1619-2007.
+ *
+ * For optimum performance, the data pointed to SHOULD
+ * be 8-byte aligned.
+ */
+ phys_addr_t phys_addr;
+ uint16_t length;
+ /**< Length of valid IV data.
+ *
+ * - For block ciphers in CBC or F8 mode, or for Kasumi
+ * in F8 mode, or for SNOW3G in UEA2 mode, this is the
+ * length of the IV (which must be the same as the
+ * block length of the cipher).
+ *
+ * - For block ciphers in CTR mode, this is the length
+ * of the counter (which must be the same as the block
+ * length of the cipher).
+ *
+ * - For GCM mode, this is either 12 (for 96-bit IVs)
+ * or 16, in which case data points to J0.
+ *
+ * - For CCM mode, this is the length of the nonce,
+ * which can be in the range 7 to 13 inclusive.
+ */
+ } iv; /**< Initialisation vector parameters */
+ } cipher;
+ struct {
struct {
- uint32_t offset;
+ uint16_t offset;
/**< Starting point for hash processing, specified as
* number of bytes from start of packet in source
* buffer.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
+ * ignored. The @ref aad field
* should be set instead.
*
* @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
@@ -429,180 +479,398 @@ struct rte_crypto_op {
* of the AAD data in the source buffer.
*/
- uint32_t length;
+ uint16_t length;
/**< The message length, in bytes, of the source
* buffer that the hash will be computed on.
*
* @note
* For CCM and GCM modes of operation, this field is
- * ignored. The field @ref additional_auth field
- * should be set instead.
+ * ignored. The @ref aad field should be set
+ * instead.
*
* @note
* For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
* of operation, this field specifies the length of
* the AAD data in the source buffer.
*/
- } to_hash; /**< Data offsets and length for authentication */
- } data; /**< Details of data to be operated on */
+ } data; /**< Data offsets and length for authentication */
- struct {
- uint8_t *data;
- /**< Initialisation Vector or Counter.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the Initialisation
- * Vector (IV) value.
- *
- * - For block ciphers in CTR mode, this is the counter.
- *
- * - For GCM mode, this is either the IV (if the length is 96
- * bits) or J0 (for other sizes), where J0 is as defined by
- * NIST SP800-38D. Regardless of the IV length, a full 16 bytes
- * needs to be allocated.
- *
- * - For CCM mode, the first byte is reserved, and the nonce
- * should be written starting at &iv[1] (to allow space for the
- * implementation to write in the flags in the first byte).
- * Note that a full 16 bytes should be allocated, even though
- * the length field will have a value less than this.
- *
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std
- * 1619-2007.
- *
- * For optimum performance, the data pointed to SHOULD be
- * 8-byte aligned.
- */
- phys_addr_t phys_addr;
- size_t length;
- /**< Length of valid IV data.
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8
- * mode, or for SNOW3G in UEA2 mode, this is the length of the
- * IV (which must be the same as the block length of the
- * cipher).
- *
- * - For block ciphers in CTR mode, this is the length of the
- * counter (which must be the same as the block length of the
- * cipher).
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in
- * which case data points to J0.
- *
- * - For CCM mode, this is the length of the nonce, which can
- * be in the range 7 to 13 inclusive.
- */
- } iv; /**< Initialisation vector parameters */
-
- struct {
- uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result should be
- * inserted (in the case of digest generation) or where the
- * purported digest exists (in the case of digest
- * verification).
- *
- * At session creation time, the client specified the digest
- * result length with the digest_length member of the @ref
- * rte_crypto_auth_xform structure. For physical crypto
- * devices the caller must allocate at least digest_length of
- * physically contiguous memory at this location.
- *
- * For digest generation, the digest result will overwrite
- * any data at this location.
- *
- * @note
- * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
- * "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is understood
- * to be in the destination buffer for digest generation, and
- * in the source buffer for digest verification. The location
- * of the digest result in this case is immediately following
- * the region over which the digest is computed.
- */
- phys_addr_t phys_addr; /**< Physical address of digest */
- uint32_t length; /**< Length of digest */
- } digest; /**< Digest parameters */
+ struct {
+ uint8_t *data;
+ /**< If this member of this structure is set this is a
+ * pointer to the location where the digest result
+ * should be inserted (in the case of digest generation)
+ * or where the purported digest exists (in the case of
+ * digest verification).
+ *
+ * At session creation time, the client specified the
+ * digest result length with the digest_length member
+ * of the @ref rte_crypto_auth_xform structure. For
+ * physical crypto devices the caller must allocate at
+ * least digest_length of physically contiguous memory
+ * at this location.
+ *
+ * For digest generation, the digest result will
+ * overwrite any data at this location.
+ *
+ * @note
+ * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
+ * "digest result" read "authentication tag T".
+ *
+ * If this member is not set the digest result is
+ * understood to be in the destination buffer for
+ * digest generation, and in the source buffer for
+ * digest verification. The location of the digest
+ * result in this case is immediately following the
+ * region over which the digest is computed.
+ */
+ phys_addr_t phys_addr;
+ /**< Physical address of digest */
+ uint16_t length;
+ /**< Length of digest */
+ } digest; /**< Digest parameters */
- struct {
- uint8_t *data;
- /**< Pointer to Additional Authenticated Data (AAD) needed for
- * authenticated cipher mechanisms (CCM and GCM), and to the IV
- * for SNOW3G authentication
- * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
- * authentication mechanisms this pointer is ignored.
- *
- * The length of the data pointed to by this field is set up
- * for the session in the @ref rte_crypto_auth_xform structure
- * as part of the @ref rte_cryptodev_session_create function
- * call. This length must not exceed 240 bytes.
- *
- * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), the
- * caller should setup this field as follows:
- *
- * - the nonce should be written starting at an offset of one
- * byte into the array, leaving room for the implementation
- * to write in the flags to the first byte.
- *
- * - the additional authentication data itself should be
- * written starting at an offset of 18 bytes into the array,
- * leaving room for the length encoding in the first two
- * bytes of the second block.
- *
- * - the array should be big enough to hold the above fields,
- * plus any padding to round this up to the nearest multiple
- * of the block size (16 bytes). Padding will be added by
- * the implementation.
- *
- * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
- * caller should setup this field as follows:
- *
- * - the AAD is written in starting at byte 0
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- *
- * @note
- * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
- * operation, this field is not used and should be set to 0.
- * Instead the AAD data should be placed in the source buffer.
- */
- phys_addr_t phys_addr; /**< physical address */
- uint32_t length; /**< Length of digest */
- } additional_auth;
- /**< Additional authentication parameters */
-
- struct rte_mempool *pool;
- /**< mempool used to allocate crypto op */
+ struct {
+ uint8_t *data;
+ /**< Pointer to Additional Authenticated Data (AAD)
+ * needed for authenticated cipher mechanisms (CCM and
+ * GCM), and to the IV for SNOW3G authentication
+ * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other
+ * authentication mechanisms this pointer is ignored.
+ *
+ * The length of the data pointed to by this field is
+ * set up for the session in the @ref
+ * rte_crypto_auth_xform structure as part of the @ref
+ * rte_cryptodev_session_create function call. This
+ * length must not exceed 240 bytes.
+ *
+ * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
+ * the caller should setup this field as follows:
+ *
+ * - the nonce should be written starting at an offset
+ * of one byte into the array, leaving room for the
+ * implementation to write in the flags to the first
+ * byte.
+ *
+ * - the additional authentication data itself should
+ * be written starting at an offset of 18 bytes into
+ * the array, leaving room for the length encoding in
+ * the first two bytes of the second block.
+ *
+ * - the array should be big enough to hold the above
+ * fields, plus any padding to round this up to the
+ * nearest multiple of the block size (16 bytes).
+ * Padding will be added by the implementation.
+ *
+ * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+ * caller should setup this field as follows:
+ *
+ * - the AAD is written in starting at byte 0
+ * - the array must be big enough to hold the AAD, plus
+ * any space to round this up to the nearest multiple
+ * of the block size (16 bytes).
+ *
+ * @note
+ * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+ * operation, this field is not used and should be set
+ * to 0. Instead the AAD data should be placed in the
+ * source buffer.
+ */
+ phys_addr_t phys_addr; /**< physical address */
+ uint16_t length; /**< Length of additional authenticated data */
+ } aad;
+ /**< Additional authentication parameters */
+ } auth;
void *user_data;
/**< opaque pointer for user data */
};
+/** Crypto operation types */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, /**< Symmetric operation */
+};
/**
- * Reset the fields of a crypto operation to their default values.
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. This operation structure is used to contain any operation which
+ * is supported by the cryptodev API; PMDs should check the type parameter to
+ * verify that the operation is a function supported by the device. Crypto
+ * operations are enqueued to and dequeued from crypto PMDs using the
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() calls.
+ */
+struct rte_crypto_op {
+ enum rte_crypto_op_type type;
+ /**< operation type */
+
+ enum rte_crypto_op_status status;
+ /**< operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED on allocation from mempool and
+ * should be set to RTE_CRYPTO_OP_STATUS_SUCCESS after the operation
+ * is successfully processed by a crypto PMD */
+
+ struct rte_mempool *mempool;
+ /**< mempool the crypto op was allocated from */
+
+ union {
+ struct rte_crypto_sym_op sym;
+ /**< Symmetric operation parameters */
+ }; /**< operation specific parameters */
+};
+
+
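
Putting the new layout to work: a minimal sketch of filling a symmetric op for an in-place cipher/hash pass with the IV prepended to the payload. The helper and its parameters are illustrative; the digest handling follows the l2fwd-crypto hunk earlier in this patch.

	static void
	fill_sym_op(struct rte_crypto_op *op, struct rte_mbuf *m,
			uint8_t *iv, phys_addr_t iv_phys, uint16_t iv_len,
			uint16_t data_len, uint16_t digest_len)
	{
		struct rte_crypto_sym_op *sym = &op->sym;

		sym->m_src = m;		/* m_dst left NULL: operate in place */

		sym->cipher.data.offset = iv_len;
		sym->cipher.data.length = data_len;
		sym->cipher.iv.data = iv;
		sym->cipher.iv.phys_addr = iv_phys;
		sym->cipher.iv.length = iv_len;

		sym->auth.data.offset = iv_len;
		sym->auth.data.length = data_len;

		/* append space for the digest and record where it landed */
		sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
				digest_len);
		sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
				rte_pktmbuf_pkt_len(m) - digest_len);
		sym->auth.digest.length = digest_len;
	}
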
+/**
+ * Reset the fields of a symmetric operation to their default values.
*
* @param op The crypto operation to be reset.
*/
static inline void
-__rte_crypto_op_reset(struct rte_crypto_op *op)
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
{
op->type = RTE_CRYPTO_OP_SESSIONLESS;
- op->dst.m = NULL;
- op->dst.offset = 0;
+
+ op->m_src = NULL;
+ op->m_dst = NULL;
+}
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param op The crypto operation to be reset.
+ * @param type The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_SUBMITTED;
+
+ if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ op->type = type;
+ __rte_crypto_sym_op_reset(&op->sym);
+ }
}
-/** Attach a session to a crypto operation */
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param op crypto operation
+ * @param sess cryptodev session
+ */
static inline void
-rte_crypto_op_attach_session(struct rte_crypto_op *op,
+rte_crypto_sym_op_attach_session(struct rte_crypto_sym_op *op,
struct rte_cryptodev_session *sess)
{
op->session = sess;
op->type = RTE_CRYPTO_OP_WITH_SESSION;
}
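
A sketch of the intended allocation flow with a pre-created session (rte_crypto_op_alloc() is declared further down; the pool, session, mbuf and device ids are assumed to exist):

	static int
	submit_one(struct rte_mempool *op_pool,
			struct rte_cryptodev_session *sess,
			struct rte_mbuf *m, uint8_t dev_id, uint16_t qp_id)
	{
		struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC);

		if (op == NULL)
			return -1;

		rte_crypto_sym_op_attach_session(&op->sym, sess);
		op->sym.m_src = m;
		/* ... cipher/auth fields as in the struct above ... */

		if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
			rte_crypto_op_free(op);
			return -1;
		}
		return 0;
	}
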
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+ enum rte_crypto_op_type type;
+ uint16_t priv_size;
+ /**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object by
+ * the mempool
+ *
+ * @param mempool rte_crypto_op mempool
+ *
+ * @return private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param name pool name
+ * @param type crypto operation type, use
+ * RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ * supports all operation types
+ * @param size number of elements in pool
+ * @param cache_size Number of elements for core cache
+ * @param priv_size Size of private data to allocate with each
+ * operation
+ * @param socket_id Socket to allocate memory on
+ *
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned size, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+/**
+ * Allocate a raw element from the mempool and return it as a crypto operation
+ *
+ * @param mempool crypto operation mempool.
+ * @param type crypto operation type.
+ *
+ * @returns
+ * - On success a rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+__rte_crypto_op_raw_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op_pool_private *priv =
+ rte_mempool_get_priv(mempool);
+
+ if (unlikely(priv->type != type))
+ return NULL;
+
+ void *buf = NULL;
+
+ if (rte_mempool_get(mempool, &buf) < 0)
+ return NULL;
+
+ return (struct rte_crypto_op *)buf;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param mempool crypto operation mempool
+ * @param type operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+ struct rte_crypto_op *op = __rte_crypto_op_raw_alloc(mempool, type);
+
+ if (likely(op != NULL))
+ __rte_crypto_op_reset(op, type);
+
+ return op;
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param m mbuf in which to allocate crypto operation in.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation, whose
+ * m_src parameter is set to the mbuf in which the operation was
+ * allocated.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_priv(struct rte_mbuf *m)
+{
+ if (unlikely(m == NULL))
+ return NULL;
+
+ /* check that the mbuf's private data size is sufficient to contain a
+ * crypto operation */
+ if (unlikely(m->priv_size < sizeof(struct rte_crypto_op)))
+ return NULL;
+
+ /* private data starts immediately after the mbuf header in the mbuf. */
+ struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+ __rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ op->mempool = NULL;
+ op->sym.m_src = m;
+
+ return op;
+}
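
This pairs with mbuf pools whose per-mbuf private area is at least sizeof(struct rte_crypto_op), as the l2fwd-crypto main() change above arranges. A sketch (pool parameters follow that example):

	struct rte_mempool *mb_pool = rte_pktmbuf_pool_create("mbuf_pool",
			NB_MBUF, 512, sizeof(struct rte_crypto_op),
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	struct rte_mbuf *m = rte_pktmbuf_alloc(mb_pool);
	struct rte_crypto_op *op = rte_crypto_sym_op_alloc_priv(m);

	/* op->sym.m_src is already m; op->mempool is NULL, so
	 * rte_crypto_op_free() is a no-op and the op's storage is
	 * reclaimed together with the mbuf itself */
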
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for requested size.
+ *
+ * @param op crypto operation.
+ * @param size size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+ uint32_t priv_size;
+
+ if (likely(op->mempool != NULL)) {
+ priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+ if (likely(priv_size >= size))
+ return (void *)(op + 1);
+ }
+
+ return NULL;
+}
+
+/**
+ * Allocate space for crypto xforms in the private data space of the
+ * symmetric crypto operation. This also defaults each crypto xform type to
+ * RTE_CRYPTO_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation.
+ *
+ * @param op symmetric crypto operation.
+ * @param nb_xforms number of xforms to allocate and chain.
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in the crypto
+ * operation's chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_xform *
+rte_crypto_sym_op_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+ struct rte_crypto_xform *xform;
+ void *priv_data;
+ uint32_t size;
+
+ size = sizeof(struct rte_crypto_xform) * nb_xforms;
+
+ priv_data = __rte_crypto_op_get_priv_data(op, size);
+ if (priv_data == NULL)
+ return NULL;
+
+ op->sym.xform = xform = (struct rte_crypto_xform *)priv_data;
+
+ do {
+ xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
+ xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+ } while (xform);
+
+ return op->sym.xform;
+}
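A sketch of laying out a two-element cipher-then-auth chain with
rte_crypto_sym_op_xforms_alloc(), given an op allocated as above from a pool
whose private area holds two xforms; the algorithm parameters are omitted:

	struct rte_crypto_xform *xform = rte_crypto_sym_op_xforms_alloc(op, 2);
	if (xform == NULL)
		return -1; /* insufficient private data space in the op */

	xform->type = RTE_CRYPTO_XFORM_CIPHER; /* first xform: cipher */
	xform->next->type = RTE_CRYPTO_XFORM_AUTH; /* second, chained: auth */
	/* xform->cipher.* and xform->next->auth.* would be filled in here */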
+
+/**
+ * Free a crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, it is returned to
+ * the mempool.
+ *
+ * @param op crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
+
+
#ifdef __cplusplus
}
#endif
@@ -71,6 +71,15 @@
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
+
+const char *cryptodev_pmd_names[] = {
+ [RTE_CRYPTODEV_NULL_PMD] = CRYPTODEV_NAME_NULL_PMD,
+ [RTE_CRYPTODEV_AESNI_MB_PMD] = CRYPTODEV_NAME_AESNI_MB_PMD,
+ [RTE_CRYPTODEV_QAT_PMD] = CRYPTODEV_NAME_QAT_PMD
+};
+
+const char **rte_cryptodev_names = &cryptodev_pmd_names[0];
+
struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
@@ -1093,3 +1102,71 @@ rte_cryptodev_session_free(uint8_t dev_id, struct rte_cryptodev_session *sess)
return NULL;
}
+
+/** Initialize rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+ void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned i)
+{
+ struct rte_crypto_op *op = _op_data;
+ enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ __rte_crypto_op_reset(op, type);
+
+ op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned size, unsigned cache_size, uint16_t priv_size,
+ int socket_id)
+{
+ struct rte_crypto_op_pool_private *priv;
+ unsigned elt_size = sizeof(struct rte_crypto_op) + priv_size;
+
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (priv->priv_size < priv_size || mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < size) {
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ size,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_crypto_op_pool_private),
+ NULL,
+ NULL,
+ rte_crypto_op_init,
+ &type,
+ socket_id,
+ 0);
+
+ if (mp == NULL)
+ return NULL;
+
+ priv = (struct rte_crypto_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->priv_size = priv_size;
+ priv->type = type;
+
+ return mp;
+}
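Note the reuse semantics above: a second create call with the same name returns
the existing mempool provided it is at least as large and as capable as
requested. In this sketch p2 aliases p1 rather than a fresh allocation:

	struct rte_mempool *p1 = rte_crypto_op_pool_create("OP_POOL",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 64,
			rte_socket_id());
	struct rte_mempool *p2 = rte_crypto_op_pool_create("OP_POOL",
			RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 64,
			rte_socket_id());
	/* p2 == p1: the parameters match, so the existing pool is returned */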
@@ -67,6 +67,9 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_QAT_PMD, /**< QAT PMD */
};
+
+extern const char **rte_cryptodev_names;
+
/* Logging Macros */
#define CDEV_LOG_ERR(fmt, args...) \
@@ -440,12 +443,12 @@ rte_cryptodev_callback_unregister(uint8_t dev_id,
rte_cryptodev_cb_fn cb_fn, void *cb_arg);
-typedef uint16_t (*dequeue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Dequeue processed packets from queue pair of a device. */
-typedef uint16_t (*enqueue_pkt_burst_t)(void *qp, struct rte_mbuf **pkts,
- uint16_t nb_pkts);
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
/**< Enqueue packets for processing on queue pair of a device. */
@@ -516,68 +519,72 @@ struct rte_cryptodev_data {
} __rte_cache_aligned;
extern struct rte_cryptodev *rte_cryptodevs;
+
/**
*
- * Dequeue a burst of processed packets from a queue of the crypto device.
- * The dequeued packets are stored in *rte_mbuf* structures whose pointers are
- * supplied in the *pkts* array.
+ * Dequeue a burst of processed symmetric operations from a queue on the crypto
+ * device. The dequeued operations are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
*
- * The rte_crypto_dequeue_burst() function returns the number of packets
- * actually dequeued, which is the number of *rte_mbuf* data structures
- * effectively supplied into the *pkts* array.
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
*
- * A return value equal to *nb_pkts* indicates that the queue contained
- * at least *rx_pkts* packets, and this is likely to signify that other
- * received packets remain in the input queue. Applications implementing
- * a "retrieve as much received packets as possible" policy can check this
- * specific case and keep invoking the rte_crypto_dequeue_burst() function
- * until a value less than *nb_pkts* is returned.
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
*
- * The rte_crypto_dequeue_burst() function does not provide any error
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
* notification to avoid the corresponding overhead.
*
- * @param dev_id The identifier of the device.
+ * @param dev_id The symmetric crypto device identifier
* @param qp_id The index of the queue pair from which to
* retrieve processed packets. The value must be
* in the range [0, nb_queue_pair - 1] previously
* supplied to rte_cryptodev_configure().
- * @param pkts The address of an array of pointers to
- * *rte_mbuf* structures that must be large enough
- * to store *nb_pkts* pointers in it.
- * @param nb_pkts The maximum number of packets to dequeue.
+ * @param ops The address of an array of pointers to
+ * *rte_crypto_op* structures that must be
+ * large enough to store *nb_ops* pointers in it.
+ * @param nb_ops The maximum number of operations to dequeue.
*
* @return
- * - The number of packets actually dequeued, which is the number
- * of pointers to *rte_mbuf* structures effectively supplied to the
- * *pkts* array.
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_crypto_op* structures effectively supplied to the
+ * *ops* array.
*/
static inline uint16_t
rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
- nb_pkts = (*dev->dequeue_burst)
- (dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
- return nb_pkts;
+ return nb_ops;
}
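The "retrieve as many processed operations as possible" policy described above
can be sketched as a drain loop; BURST, dev_id, qp_id, and ops are illustrative
locals:

	uint16_t nb;
	do {
		nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST);
		/* handle the nb completed operations here, checking each
		 * op's completion status before using its output */
	} while (nb == BURST);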
/**
- * Enqueue a burst of packets for processing on a crypto device.
+ * Enqueue a burst of symmetric operations for processing on a crypto device.
*
- * The rte_crypto_enqueue_burst() function is invoked to place packets
- * on the queue *queue_id* of the device designated by its *dev_id*.
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * symmetric crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
*
- * The *nb_pkts* parameter is the number of packets to process which are
- * supplied in the *pkts* array of *rte_mbuf* structures.
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
*
- * The rte_crypto_enqueue_burst() function returns the number of packets it
- * actually sent. A return value equal to *nb_pkts* means that all packets
- * have been sent.
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
*
- * Each mbuf in the *pkts* array must have a valid *rte_mbuf_offload* structure
- * attached which contains a valid crypto operation.
+ * Each operation in the *ops* array must have a valid *rte_mbuf* structure
+ * attached via its m_src field, which contains the source data on which the
+ * crypto operation is to be performed.
*
* @param dev_id The identifier of the device.
* @param qp_id The index of the queue pair which packets are
@@ -585,25 +592,25 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
* must be in the range [0, nb_queue_pairs - 1]
* previously supplied to
* *rte_cryptodev_configure*.
- * @param pkts The address of an array of *nb_pkts* pointers
- * to *rte_mbuf* structures which contain the
- * output packets.
- * @param nb_pkts The number of packets to transmit.
+ * @param ops The address of an array of *nb_ops* pointers
+ * to *rte_crypto_op* structures which contain
+ * the symmetric crypto operations to be processed.
+ * @param nb_ops The number of operations to process.
*
* @return
- * The number of packets actually enqueued on the crypto device. The return
- * value can be less than the value of the *nb_pkts* parameter when the
- * crypto devices queue is full or has been filled up.
- * The number of packets is 0 if the device hasn't been started.
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
*/
static inline uint16_t
rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
- struct rte_mbuf **pkts, uint16_t nb_pkts)
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
return (*dev->enqueue_burst)(
- dev->data->queue_pairs[qp_id], pkts, nb_pkts);
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
}
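Since the enqueue may accept fewer than *nb_ops* operations when the device's
queue is full, callers typically retry the remainder; a sketch with
illustrative locals:

	uint16_t nb_tx = 0;
	while (nb_tx < nb_ops)
		nb_tx += rte_cryptodev_enqueue_burst(dev_id, qp_id,
				ops + nb_tx, nb_ops - nb_tx);
	/* note: an op rejected for invalid parameters will never be accepted,
	 * so a real application bounds this loop or drops such ops */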
@@ -27,6 +27,7 @@ DPDK_2.2 {
rte_cryptodev_queue_pair_setup;
rte_cryptodev_queue_pair_start;
rte_cryptodev_queue_pair_stop;
+ rte_crypto_op_pool_create;
local: *;
};
\ No newline at end of file
@@ -728,9 +728,6 @@ typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
* with a single assignment */
-/** Opaque rte_mbuf_offload structure declarations */
-struct rte_mbuf_offload;
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
@@ -847,9 +844,6 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
-
- /* Chain of off-load operations to perform on mbuf */
- struct rte_mbuf_offload *offload_ops;
} __rte_cache_aligned;
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
deleted file mode 100644
@@ -1,52 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2015 Intel Corporation. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_mbuf_offload.a
-
-CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-
-EXPORT_MAP := rte_mbuf_offload_version.map
-
-LIBABIVER := 1
-
-# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) := rte_mbuf_offload.c
-
-# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD)-include := rte_mbuf_offload.h
-
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF_OFFLOAD) += lib/librte_cryptodev
-
-include $(RTE_SDK)/mk/rte.lib.mk
deleted file mode 100644
@@ -1,100 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <rte_common.h>
-
-#include "rte_mbuf_offload.h"
-
-/** Initialize rte_mbuf_offload structure */
-static void
-rte_pktmbuf_offload_init(struct rte_mempool *mp,
- __rte_unused void *opaque_arg,
- void *_op_data,
- __rte_unused unsigned i)
-{
- struct rte_mbuf_offload *ol = _op_data;
-
- memset(_op_data, 0, mp->elt_size);
-
- ol->type = RTE_PKTMBUF_OL_NOT_SPECIFIED;
- ol->mp = mp;
-}
-
-
-struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id)
-{
- struct rte_pktmbuf_offload_pool_private *priv;
- unsigned elt_size = sizeof(struct rte_mbuf_offload) + priv_size;
-
-
- /* lookup mempool in case already allocated */
- struct rte_mempool *mp = rte_mempool_lookup(name);
-
- if (mp != NULL) {
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- if (priv->offload_priv_size < priv_size ||
- mp->elt_size != elt_size ||
- mp->cache_size < cache_size ||
- mp->size < size) {
- mp = NULL;
- return NULL;
- }
- return mp;
- }
-
- mp = rte_mempool_create(
- name,
- size,
- elt_size,
- cache_size,
- sizeof(struct rte_pktmbuf_offload_pool_private),
- NULL,
- NULL,
- rte_pktmbuf_offload_init,
- NULL,
- socket_id,
- 0);
-
- if (mp == NULL)
- return NULL;
-
- priv = (struct rte_pktmbuf_offload_pool_private *)
- rte_mempool_get_priv(mp);
-
- priv->offload_priv_size = priv_size;
- return mp;
-}
deleted file mode 100644
@@ -1,307 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MBUF_OFFLOAD_H_
-#define _RTE_MBUF_OFFLOAD_H_
-
-/**
- * @file
- * RTE mbuf offload
- *
- * The rte_mbuf_offload library provides the ability to specify a device generic
- * off-load operation independent of the current Rx/Tx Ethernet offloads
- * supported within the rte_mbuf structure, and add supports for multiple
- * off-load operations and offload device types.
- *
- * The rte_mbuf_offload specifies the particular off-load operation type, such
- * as a crypto operation, and provides a container for the operations
- * parameter's inside the op union. These parameters are then used by the
- * device which supports that operation to perform the specified offload.
- *
- * This library provides an API to create pre-allocated mempool of offload
- * operations, with supporting allocate and free functions. It also provides
- * APIs for attaching an offload to a mbuf, as well as an API to retrieve a
- * specified offload type from an mbuf offload chain.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- */
-
-#include <rte_mbuf.h>
-#include <rte_crypto.h>
-
-
-/** packet mbuf offload operation types */
-enum rte_mbuf_ol_op_type {
- RTE_PKTMBUF_OL_NOT_SPECIFIED = 0,
- /**< Off-load not specified */
- RTE_PKTMBUF_OL_CRYPTO
- /**< Crypto offload operation */
-};
-
-/**
- * Generic packet mbuf offload
- * This is used to specify a offload operation to be performed on a rte_mbuf.
- * Multiple offload operations can be chained to the same mbuf, but only a
- * single offload operation of a particular type can be in the chain
- */
-struct rte_mbuf_offload {
- struct rte_mbuf_offload *next; /**< next offload in chain */
- struct rte_mbuf *m; /**< mbuf offload is attached to */
- struct rte_mempool *mp; /**< mempool offload allocated from */
-
- enum rte_mbuf_ol_op_type type; /**< offload type */
- union {
- struct rte_crypto_op crypto; /**< Crypto operation */
- } op;
-};
-
-/**< private data structure belonging to packet mbug offload mempool */
-struct rte_pktmbuf_offload_pool_private {
- uint16_t offload_priv_size;
- /**< Size of private area in each mbuf_offload. */
-};
-
-
-/**
- * Creates a mempool of rte_mbuf_offload objects
- *
- * EXPERIMENTAL: this API file may change without prior notice
- *
- * @param name mempool name
- * @param size number of objects in mempool
- * @param cache_size cache size of objects for each core
- * @param priv_size size of private data to be allocated with each
- * rte_mbuf_offload object
- * @param socket_id Socket on which to allocate mempool objects
- *
- * @return
- * - On success returns a valid mempool of rte_mbuf_offload objects
- * - On failure return NULL
- */
-extern struct rte_mempool *
-rte_pktmbuf_offload_pool_create(const char *name, unsigned size,
- unsigned cache_size, uint16_t priv_size, int socket_id);
-
-
-/**
- * Returns private data size allocated with each rte_mbuf_offload object by
- * the mempool
- *
- * @param mpool rte_mbuf_offload mempool
- *
- * @return private data size
- */
-static inline uint16_t
-__rte_pktmbuf_offload_priv_size(struct rte_mempool *mpool)
-{
- struct rte_pktmbuf_offload_pool_private *priv =
- rte_mempool_get_priv(mpool);
-
- return priv->offload_priv_size;
-}
-
-/**
- * Get specified off-load operation type from mbuf.
- *
- * @param m packet mbuf.
- * @param type offload operation type requested.
- *
- * @return
- * - On success retruns rte_mbuf_offload pointer
- * - On failure returns NULL
- *
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_get(struct rte_mbuf *m, enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol;
-
- for (ol = m->offload_ops; ol != NULL; ol = ol->next)
- if (ol->type == type)
- return ol;
-
- return ol;
-}
-
-/**
- * Attach a rte_mbuf_offload to a mbuf. We only support a single offload of any
- * one type in our chain of offloads.
- *
- * @param m packet mbuf.
- * @param ol rte_mbuf_offload strucutre to be attached
- *
- * @returns
- * - On success returns the pointer to the offload we just added
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_attach(struct rte_mbuf *m, struct rte_mbuf_offload *ol)
-{
- struct rte_mbuf_offload **ol_last;
-
- for (ol_last = &m->offload_ops; ol_last[0] != NULL;
- ol_last = &ol_last[0]->next)
- if (ol_last[0]->type == ol->type)
- return NULL;
-
- ol_last[0] = ol;
- ol_last[0]->m = m;
- ol_last[0]->next = NULL;
-
- return ol_last[0];
-}
-
-
-/** Rearms rte_mbuf_offload default parameters */
-static inline void
-__rte_pktmbuf_offload_reset(struct rte_mbuf_offload *ol,
- enum rte_mbuf_ol_op_type type)
-{
- ol->m = NULL;
- ol->type = type;
-
- switch (type) {
- case RTE_PKTMBUF_OL_CRYPTO:
- __rte_crypto_op_reset(&ol->op.crypto); break;
- default:
- break;
- }
-}
-
-/** Allocate rte_mbuf_offload from mempool */
-static inline struct rte_mbuf_offload *
-__rte_pktmbuf_offload_raw_alloc(struct rte_mempool *mp)
-{
- void *buf = NULL;
-
- if (rte_mempool_get(mp, &buf) < 0)
- return NULL;
-
- return (struct rte_mbuf_offload *)buf;
-}
-
-/**
- * Allocate a rte_mbuf_offload with a specified operation type from
- * rte_mbuf_offload mempool
- *
- * @param mpool rte_mbuf_offload mempool
- * @param type offload operation type
- *
- * @returns
- * - On success returns a valid rte_mbuf_offload structure
- * - On failure returns NULL
- */
-static inline struct rte_mbuf_offload *
-rte_pktmbuf_offload_alloc(struct rte_mempool *mpool,
- enum rte_mbuf_ol_op_type type)
-{
- struct rte_mbuf_offload *ol = __rte_pktmbuf_offload_raw_alloc(mpool);
-
- if (ol != NULL)
- __rte_pktmbuf_offload_reset(ol, type);
-
- return ol;
-}
-
-/**
- * free rte_mbuf_offload structure
- */
-static inline void
-rte_pktmbuf_offload_free(struct rte_mbuf_offload *ol)
-{
- if (ol != NULL && ol->mp != NULL)
- rte_mempool_put(ol->mp, ol);
-}
-
-/**
- * Checks if the private data of a rte_mbuf_offload has enough capacity for
- * requested size
- *
- * @returns
- * - if sufficient space available returns pointer to start of private data
- * - if insufficient space returns NULL
- */
-static inline void *
-__rte_pktmbuf_offload_check_priv_data_size(struct rte_mbuf_offload *ol,
- uint16_t size)
-{
- uint16_t priv_size;
-
- if (likely(ol->mp != NULL)) {
- priv_size = __rte_pktmbuf_offload_priv_size(ol->mp);
-
- if (likely(priv_size >= size))
- return (void *)(ol + 1);
- }
- return NULL;
-}
-
-/**
- * Allocate space for crypto xforms in the private data space of the
- * rte_mbuf_offload. This also defaults the crypto xform type and configures
- * the chaining of the xform in the crypto operation
- *
- * @return
- * - On success returns pointer to first crypto xform in crypto operations chain
- * - On failure returns NULL
- */
-static inline struct rte_crypto_xform *
-rte_pktmbuf_offload_alloc_crypto_xforms(struct rte_mbuf_offload *ol,
- unsigned nb_xforms)
-{
- struct rte_crypto_xform *xform;
- void *priv_data;
- uint16_t size;
-
- size = sizeof(struct rte_crypto_xform) * nb_xforms;
- priv_data = __rte_pktmbuf_offload_check_priv_data_size(ol, size);
-
- if (priv_data == NULL)
- return NULL;
-
- ol->op.crypto.xform = xform = (struct rte_crypto_xform *)priv_data;
-
- do {
- xform->type = RTE_CRYPTO_XFORM_NOT_SPECIFIED;
- xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
- } while (xform);
-
- return ol->op.crypto.xform;
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MBUF_OFFLOAD_H_ */
deleted file mode 100644
@@ -1,7 +0,0 @@
-DPDK_2.2 {
- global:
-
- rte_pktmbuf_offload_pool_create;
-
- local: *;
-};