@@ -416,6 +416,19 @@ Runtime Config Options
With the above configuration, PMD would allocate meta buffers of size 512 for
inline inbound IPsec processing second pass.
+- ``Enable Rx inject for inbound inline IPsec second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   with the ``rx_inj_ena`` ``devargs`` parameter.
+   This option is supported on the OCTEON CN106-B0/CN103XX SoC family.
+
+ For example::
+
+ -a 0002:02:00.0,rx_inj_ena=1
+
+   With the above configuration, the driver would enable packet injection from
+   ARM cores to the crypto hardware; the processed packets are then delivered
+   back on the Rx path.
+
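+   For reference, an application can drive this path through the experimental
+   ``rte_security`` Rx inject API. A minimal sketch, assuming an ethdev
+   security context and already-created inbound sessions (error handling
+   omitted)::
+
+      void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+
+      /* After dev_configure, before dev_start */
+      ret = rte_security_rx_inject_configure(sec_ctx, port_id, true);
+
+      /* Inject plain inbound packets; after CPT processing they are
+       * delivered back on the Rx path as if received inline.
+       */
+      nb_inj = rte_security_inb_pkt_rx_inject(sec_ctx, pkts, sess, nb_pkts);
+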
.. note::
Above devarg parameters are configurable per device, user needs to pass the
@@ -613,6 +626,20 @@ Runtime Config Options for inline device
With the above configuration, driver would poll for aging flows every 50
seconds.
+- ``Enable Rx inject for inbound inline IPsec second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   with the ``rx_inj_ena`` ``devargs`` parameter on both the inline device and
+   the ethdev device.
+   This option is supported on the OCTEON CN106-B0/CN103XX SoC family.
+
+ For example::
+
+ -a 0002:1d:00.0,rx_inj_ena=1
+
+   With the above configuration, the driver would enable packet injection from
+   ARM cores to the crypto hardware; the processed packets are then delivered
+   back on the Rx path.
+
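+   As the option must be enabled on both devices, a typical invocation passes
+   it for the ethdev and the inline device together, for example::
+
+      -a 0002:02:00.0,rx_inj_ena=1 -a 0002:1d:00.0,rx_inj_ena=1
+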
Debugging Options
-----------------
@@ -593,6 +593,10 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)
cn10k_nix_rx_queue_meta_aura_update(eth_dev);
+	/* Rx inject feature reuses the reassembly Rx path */
+ if (roc_idev_nix_rx_inject_get(nix->port_id))
+ dev->rx_offload_flags |= NIX_RX_SEC_REASSEMBLY_F;
+
cn10k_eth_set_tx_function(eth_dev);
cn10k_eth_set_rx_function(eth_dev);
return 0;
@@ -1253,6 +1253,52 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
*idx += nb_caps;
}
+static uint16_t __rte_hot
+cn10k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+ struct rte_security_session **sess, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ return cn10k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+static int
+cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+ struct cnxk_ethdev_inj_cfg *inj_cfg;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_cpt_lf *inl_lf;
+ uint64_t sa_base;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -EINVAL;
+
+ if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+ return -EBUSY;
+
+ if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+ return -ENOTSUP;
+
+ roc_idev_nix_rx_inject_set(port_id, enable);
+
+ inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+ sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+ inj_cfg = &dev->inj_cfg;
+ inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+ inj_cfg->io_addr = inl_lf->io_addr;
+ inj_cfg->lmt_base = nix->lmt_base;
+ channel = roc_nix_get_base_chan(nix);
+ pf_func = roc_nix_inl_dev_pffunc_get();
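+	/* CPT inst word0: PF func at bit 48, match id at 32, channel at 4 */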
+ inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+ return 0;
+}
+
void
cn10k_eth_sec_ops_override(void)
{
@@ -1287,4 +1333,6 @@ cn10k_eth_sec_ops_override(void)
cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
cnxk_eth_sec_ops.macsec_sc_stats_get = cnxk_eth_macsec_sc_stats_get;
cnxk_eth_sec_ops.macsec_sa_stats_get = cnxk_eth_macsec_sa_stats_get;
+ cnxk_eth_sec_ops.rx_inject_configure = cn10k_eth_sec_rx_inject_config;
+ cnxk_eth_sec_ops.inb_pkt_rx_inject = cn10k_eth_sec_inb_rx_inject;
}
@@ -5,6 +5,7 @@
#define __CN10K_RX_H__
#include <rte_ethdev.h>
+#include <rte_security_driver.h>
#include <rte_vect.h>
#include "cn10k_rxtx.h"
@@ -487,8 +488,19 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
- /* Update dynamic field with userdata */
- *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	/* A cryptodev-injected packet is identified by SA index 0xFFFFFFFF,
+	 * and an ethdev-injected packet by match ID 0xFFFF.
+	 */
+ if (flags & NIX_RX_REAS_F && (sa_idx == 0xFFFFFFFF || hdr->w0.match_id == 0xFFFFU)) {
+ *(uint64_t *)(&inner->rearm_data) = (mbuf_init & ~(BIT_ULL(16) - 1)) |
+ inner->data_off;
+ if (hdr->w0.match_id == 0xFFFFU)
+ *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+ } else {
+ /* Update dynamic field with userdata */
+ *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+ *(uint64_t *)(&inner->rearm_data) = mbuf_init;
+ }
/* Get ucc from cpt parse header */
ucc = hdr->w3.hw_ccode;
@@ -502,7 +514,6 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
inner->pkt_len = len;
inner->data_len = len;
- *(uint64_t *)(&inner->rearm_data) = mbuf_init;
inner->ol_flags = ((CPT_COMP_HWGOOD_MASK & (1U << ucc)) ?
RTE_MBUF_F_RX_SEC_OFFLOAD :
@@ -567,11 +578,20 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
RTE_MBUF_F_RX_IP_CKSUM_MASK);
- /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
- inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
- /* Update dynamic field with userdata */
- *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+ if (flags & NIX_RX_REAS_F && !inb_sa) {
+		/* Clear and update original lower 16 bits of data offset */
+ *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;
+ } else {
+		/* Get SPI from CPT_PARSE_S's cookie (already swapped) */
+ inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
+ /* Update dynamic field with userdata */
+ *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+ }
+
+	/* Clear and update original lower 16 bits of data offset */
+ if (flags & NIX_RX_REAS_F && hdr->w0.match_id == 0xFFFFU)
+ *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;
/* Mark inner mbuf as get */
if (!(flags & NIX_RX_REAS_F) ||
@@ -604,8 +624,10 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
*rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);
} else {
/* Reassembly failure */
- nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
- *ol_flags |= inner->ol_flags;
+ if (inb_sa) {
+ nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
+ *ol_flags |= inner->ol_flags;
+ }
}
} else if (flags & NIX_RX_REAS_F) {
/* Without fragmentation but may have to handle OOP session */
@@ -703,7 +725,14 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
cq_w5 = *((const uint64_t *)rx + 4);
/* Use inner rx parse for meta pkts sg list */
if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
- const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
+ const uint64_t *wqe;
+		/* Rx injected packets carry match ID 0xFFFF; for these, the
+		 * WQE pointer is read from the address stored at mbuf + 1.
+		 */
+ if ((flags & NIX_RX_REAS_F) && hdr->w0.match_id == 0xFFFFU)
+ wqe = (const uint64_t *)*((uint64_t *)(mbuf + 1));
+ else
+ wqe = (const uint64_t *)(mbuf + 1);
if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
rx = (const union nix_rx_parse_u *)(wqe + 1);
@@ -1191,6 +1220,187 @@ cn10k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
return nb_pkts;
}
+#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline uint16_t
+cn10k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+ union nix_send_sg_s *sg, l_sg;
+ struct rte_mbuf *m_next;
+ uint16_t segdw, nb_segs;
+ uint64_t len, dlen;
+ uint64_t *slist;
+
+ sg = (union nix_send_sg_s *)cmd;
+ l_sg.u = sg->u;
+ l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+ l_sg.subdc = NIX_SUBDC_SG;
+ nb_segs = m->nb_segs;
+ len = m->pkt_len;
+ slist = &cmd[1];
+
+ /* Fill mbuf segments */
+ do {
+ *slist = rte_pktmbuf_iova(m);
+ dlen = m->data_len;
+ len -= dlen;
+
+ /* Set the segment length */
+ l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+ l_sg.segs += 1;
+ slist++;
+ nb_segs--;
+ if (l_sg.segs > 2 && nb_segs) {
+ sg->u = l_sg.u;
+ /* Next SG subdesc */
+ sg = (union nix_send_sg_s *)slist;
+ l_sg.u = sg->u;
+ l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+ l_sg.subdc = NIX_SUBDC_SG;
+ slist++;
+ }
+ m_next = m->next;
+ m->next = NULL;
+ m = m_next;
+ } while (nb_segs);
+
+ /* Add remaining bytes of data to last seg */
+ if (len) {
+ uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+ dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+ l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+ l_sg.u |= dlen << shft;
+ }
+ /* Write the last subdc out */
+ sg->u = l_sg.u;
+
+ segdw = (uint64_t *)slist - cmd;
+ /* Roundup extra dwords to multiple of 2 */
+ segdw = (segdw >> 1) + (segdw & 0x1);
+ return segdw;
+}
+
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ uintptr_t c_lbase = inj_cfg->lmt_base;
+ struct cn10k_sec_sess_priv sess_priv;
+ uint64_t sa_base = inj_cfg->sa_base;
+ uint16_t c_lmt_id, burst, left, i;
+ uintptr_t cptres, rxphdr, dptr;
+ struct rte_mbuf *m, *last;
+ uint8_t lnum, shft, loff;
+ uint64x2_t cmd01, cmd23;
+ uint64_t ucode_cmd[4];
+ rte_iova_t c_io_addr;
+ uint64_t *laddr;
+ uint64_t sa, w0;
+ uint16_t segdw;
+
+ /* Get LMT base address and LMT ID as lcore id */
+ ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+ c_io_addr = inj_cfg->io_addr;
+
+ left = nb_pkts;
+again:
+ burst = left > 32 ? 32 : left;
+
+ lnum = 0;
+ loff = 0;
+ shft = 16;
+
+ for (i = 0; i < burst; i++) {
+ m = tx_pkts[i];
+ sess_priv.u64 = sess[i]->fast_mdata;
+ last = rte_pktmbuf_lastseg(m);
+
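+		/* 128B-aligned area after pkt data for CPT result and WQE hdr */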
+ cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+ cptres += BIT_ULL(7);
+ cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+
+ if (m->nb_segs > 1) {
+			/* Reserve a NIX Rx descriptor with an SG list after the
+			 * end of the last mbuf's data; a pointer to it is stored
+			 * at mbuf + 1 for Rx path multi-seg processing.
+			 */
+ /* Pointer to WQE header */
+ *(uint64_t *)(m + 1) = cptres;
+ /* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+ rxphdr = cptres + 8;
+ dptr = rxphdr + 7 * 8;
+ /* Prepare Multiseg SG list */
+ segdw = cn10k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+ *(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+ cptres += 64 + segdw * 16;
+ ucode_cmd[1] = dptr | ((uint64_t)m->nb_segs << 60);
+ } else {
+ dptr = (uint64_t)rte_pktmbuf_iova(m);
+ ucode_cmd[1] = dptr;
+ }
+
+ /* Prepare CPT instruction */
+ /* CPT word 0 and 1 */
+ cmd01 = vdupq_n_u64(0);
+ w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+ cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+ cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+ /* CPT word 2 and 3 */
+ cmd23 = vdupq_n_u64(0);
+ /* Set PF func */
+ w0 &= 0xFFFF000000000000UL;
+ cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+ cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
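+		/* SA base has port id in low bits; strip it before indexing */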
+ sa_base &= ~0xFFFFUL;
+ sa = (uintptr_t)roc_nix_inl_ot_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+ ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+ ((uint64_t)sess_priv.chksum) << 32 |
+ ((uint64_t)sess_priv.dec_ttl) << 34 | m->pkt_len);
+
+ ucode_cmd[2] = 0;
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+
+ /* Move to our line */
+ laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+ /* Write CPT instruction to lmt line */
+ vst1q_u64(laddr, cmd01);
+ vst1q_u64((laddr + 2), cmd23);
+
+ *(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+ *(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+ loff = !loff;
+ lnum = lnum + (loff ? 0 : 1);
+ shft = shft + (loff ? 0 : 3);
+ }
+
+ left -= burst;
+ tx_pkts += burst;
+ sess += burst;
+
+ cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+ rte_io_wmb();
+ if (left)
+ goto again;
+
+ return nb_pkts;
+}
+#else
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ RTE_SET_USED(sess);
+ RTE_SET_USED(inj_cfg);
+ RTE_SET_USED(tx_pkts);
+ RTE_SET_USED(nb_pkts);
+ return 0;
+}
+#endif
+
#if defined(RTE_ARCH_ARM64)
static __rte_always_inline uint64_t
@@ -1558,6 +1768,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
uint64x2_t inner0, inner1, inner2, inner3;
uint64x2_t wqe01, wqe23, sa01, sa23;
uint16x4_t lens, l2lens, ltypes;
+ uint64x2_t mask01, mask23;
uint8x8_t ucc;
cpth0 = (uintptr_t)mbuf0 + d_off;
@@ -1587,6 +1798,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
sa01 = vshrq_n_u64(sa01, 32);
sa23 = vshrq_n_u64(sa23, 32);
+
+ /* Crypto Look-aside Rx Inject case */
+ mask01 = vceqq_u64(sa01, vdupq_n_u64(0xFFFFFFFF));
+ mask23 = vceqq_u64(sa23, vdupq_n_u64(0xFFFFFFFF));
+
sa01 = vshlq_n_u64(sa01,
ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
sa23 = vshlq_n_u64(sa23,
@@ -1594,6 +1810,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
sa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));
sa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));
+ if (flags & NIX_RX_REAS_F) {
+ sa01 = vbicq_u64(sa01, mask01);
+ sa23 = vbicq_u64(sa23, mask23);
+ }
+
const uint8x16x2_t tbl = {{
{
/* ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM */
@@ -191,4 +191,61 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
}
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+ /* We have two CPT instructions per LMTLine */
+ const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+ uint64_t data;
+
+ /* This will be moved to addr area */
+ data = dw_m1 << 16;
+ data |= dw_m1 << 19;
+ data |= dw_m1 << 22;
+ data |= dw_m1 << 25;
+ data |= dw_m1 << 28;
+ data |= dw_m1 << 31;
+ data |= dw_m1 << 34;
+ data |= dw_m1 << 37;
+ data |= dw_m1 << 40;
+ data |= dw_m1 << 43;
+ data |= dw_m1 << 46;
+ data |= dw_m1 << 49;
+ data |= dw_m1 << 52;
+ data |= dw_m1 << 55;
+ data |= dw_m1 << 58;
+ data |= dw_m1 << 61;
+
+ return data;
+}
+
+static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+ uint8_t loff, uint8_t shft)
+{
+ uint64_t data;
+ uintptr_t pa;
+
+ /* Check if there is any CPT instruction to submit */
+ if (!lnum && !loff)
+ return;
+
+ data = cn10k_cpt_tx_steor_data();
+ /* Update lmtline use for partial end line */
+ if (loff) {
+ data &= ~(0x7ULL << shft);
+ /* Update it to half full i.e 64B */
+ data |= (0x3UL << shft);
+ }
+
+ pa = io_addr | ((data >> 16) & 0x7) << 4;
+ data &= ~(0x7ULL << 16);
+ /* Update lines - 1 that contain valid data */
+ data |= ((uint64_t)(lnum + loff - 1)) << 12;
+ data |= (uint64_t)lmt_id;
+
+ /* STEOR */
+ roc_lmt_submit_steorl(data, pa);
+}
+
#endif /* __CN10K_RXTX_H__ */
@@ -314,34 +314,6 @@ cn10k_nix_tx_steor_vec_data(const uint16_t flags)
return data;
}
-static __rte_always_inline uint64_t
-cn10k_cpt_tx_steor_data(void)
-{
- /* We have two CPT instructions per LMTLine */
- const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
- uint64_t data;
-
- /* This will be moved to addr area */
- data = dw_m1 << 16;
- data |= dw_m1 << 19;
- data |= dw_m1 << 22;
- data |= dw_m1 << 25;
- data |= dw_m1 << 28;
- data |= dw_m1 << 31;
- data |= dw_m1 << 34;
- data |= dw_m1 << 37;
- data |= dw_m1 << 40;
- data |= dw_m1 << 43;
- data |= dw_m1 << 46;
- data |= dw_m1 << 49;
- data |= dw_m1 << 52;
- data |= dw_m1 << 55;
- data |= dw_m1 << 58;
- data |= dw_m1 << 61;
-
- return data;
-}
-
static __rte_always_inline void
cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
const uint16_t flags, const uint16_t static_sz)
@@ -461,35 +433,6 @@ cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
goto again;
}
-static __rte_always_inline void
-cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
- uint8_t loff, uint8_t shft)
-{
- uint64_t data;
- uintptr_t pa;
-
- /* Check if there is any CPT instruction to submit */
- if (!lnum && !loff)
- return;
-
- data = cn10k_cpt_tx_steor_data();
- /* Update lmtline use for partial end line */
- if (loff) {
- data &= ~(0x7ULL << shft);
- /* Update it to half full i.e 64B */
- data |= (0x3UL << shft);
- }
-
- pa = io_addr | ((data >> 16) & 0x7) << 4;
- data &= ~(0x7ULL << 16);
- /* Update lines - 1 that contain valid data */
- data |= ((uint64_t)(lnum + loff - 1)) << 12;
- data |= (uint64_t)lmt_id;
-
- /* STEOR */
- roc_lmt_submit_steorl(data, pa);
-}
-
#if defined(RTE_ARCH_ARM64)
static __rte_always_inline void
cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
@@ -424,6 +424,9 @@ struct cnxk_eth_dev {
/* MCS device */
struct cnxk_mcs_dev *mcs_dev;
struct cnxk_macsec_sess_list mcs_list;
+
+	/* Rx packet inject config */
+ struct cnxk_ethdev_inj_cfg inj_cfg;
};
struct cnxk_eth_rxq_sp {
@@ -279,6 +279,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
#define CNXK_SQB_SLACK "sqb_slack"
#define CNXK_NIX_META_BUF_SZ "meta_buf_sz"
#define CNXK_FLOW_AGING_POLL_FREQ "aging_poll_freq"
+#define CNXK_NIX_RX_INJ_ENABLE "rx_inj_ena"
int
cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -305,6 +306,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
uint32_t meta_buf_sz = 0;
uint16_t no_inl_dev = 0;
uint8_t lock_rx_ctx = 0;
+ uint8_t rx_inj_ena = 0;
memset(&sdp_chan, 0, sizeof(sdp_chan));
memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info));
@@ -355,6 +357,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
rte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz);
rte_kvargs_process(kvlist, CNXK_FLOW_AGING_POLL_FREQ, &parse_val_u16,
&aging_thread_poll_freq);
+ rte_kvargs_process(kvlist, CNXK_NIX_RX_INJ_ENABLE, &parse_flag, &rx_inj_ena);
rte_kvargs_free(kvlist);
null_devargs:
@@ -387,6 +390,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
dev->npc.pre_l2_size_offset_mask = pre_l2_info.pre_l2_size_off_mask;
dev->npc.pre_l2_size_shift_dir = pre_l2_info.pre_l2_size_shift_dir;
dev->npc.flow_age.aging_poll_freq = aging_thread_poll_freq;
+ if (roc_feature_nix_has_rx_inject())
+ dev->nix.rx_inj_ena = rx_inj_ena;
return 0;
exit:
return -EINVAL;
@@ -409,4 +414,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
CNXK_CUSTOM_SA_ACT "=1"
CNXK_SQB_SLACK "=<12-512>"
- CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>");
+ CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"
+ CNXK_NIX_RX_INJ_ENABLE "=1");
@@ -4,6 +4,7 @@
#ifndef __CNXK_ETHDEV_DP_H__
#define __CNXK_ETHDEV_DP_H__
+#include <rte_security_driver.h>
#include <rte_mbuf.h>
/* If PTP is enabled additional SEND MEM DESC is required which
@@ -82,6 +83,13 @@ struct cnxk_timesync_info {
uint64_t *tx_tstamp;
} __plt_cache_aligned;
+struct cnxk_ethdev_inj_cfg {
+	uintptr_t lmt_base; /* LMT lines base address */
+	uint64_t io_addr;   /* CPT LF LMTST I/O address */
+	uint64_t sa_base;   /* Inbound SA base with port id in low 16 bits */
+	uint64_t cmd_w0;    /* Precomputed CPT instruction word 0 */
+} __plt_cache_aligned;
+
/* Inlines */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
@@ -16,6 +16,7 @@
#define CNXK_NIX_INL_META_BUF_SZ "meta_buf_sz"
#define CNXK_NIX_SOFT_EXP_POLL_FREQ "soft_exp_poll_freq"
#define CNXK_MAX_IPSEC_RULES "max_ipsec_rules"
+#define CNXK_NIX_INL_RX_INJ_ENABLE "rx_inj_ena"
/* Default soft expiry poll freq in usec */
#define CNXK_NIX_SOFT_EXP_POLL_FREQ_DFLT 100
@@ -192,6 +193,19 @@ parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
return 0;
}
+static int
+parse_inl_rx_inj_ena(const char *key, const char *value, void *extra_args)
+{
+	uint32_t val;
+
+	RTE_SET_USED(key);
+
+	val = atoi(value);
+	*(uint8_t *)extra_args = (val == 1);
+
+ return 0;
+}
+
int
cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
uint32_t spi)
@@ -352,6 +366,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
struct rte_kvargs *kvlist;
uint32_t nb_meta_bufs = 0;
uint32_t meta_buf_sz = 0;
+ uint8_t rx_inj_ena = 0;
uint8_t selftest = 0;
memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -378,6 +393,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
rte_kvargs_process(kvlist, CNXK_NIX_SOFT_EXP_POLL_FREQ,
&parse_val_u32, &soft_exp_poll_freq);
rte_kvargs_process(kvlist, CNXK_MAX_IPSEC_RULES, &parse_max_ipsec_rules, &max_ipsec_rules);
+ rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_inl_rx_inj_ena, &rx_inj_ena);
rte_kvargs_free(kvlist);
null_devargs:
@@ -391,6 +407,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
inl_dev->meta_buf_sz = meta_buf_sz;
inl_dev->soft_exp_poll_freq = soft_exp_poll_freq;
inl_dev->max_ipsec_rules = max_ipsec_rules;
+ if (roc_feature_nix_has_rx_inject())
+ inl_dev->rx_inj_ena = rx_inj_ena;
return 0;
exit:
return -EINVAL;
@@ -518,4 +536,5 @@ RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
CNXK_NIX_INL_NB_META_BUFS "=<1-U32_MAX>"
CNXK_NIX_INL_META_BUF_SZ "=<1-U32_MAX>"
CNXK_NIX_SOFT_EXP_POLL_FREQ "=<0-U32_MAX>"
- CNXK_MAX_IPSEC_RULES "=<1-4095>");
+ CNXK_MAX_IPSEC_RULES "=<1-4095>"
+ CNXK_NIX_INL_RX_INJ_ENABLE "=1");