@@ -204,6 +204,9 @@ cn10k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
txq->cpt_io_addr = inl_lf->io_addr;
txq->cpt_fc = inl_lf->fc_addr;
+ txq->cpt_fc_sw = (int32_t *)((uintptr_t)dev->outb.fc_sw_mem +
+ crypto_qid * RTE_CACHE_LINE_SIZE);
+
txq->cpt_desc = inl_lf->nb_desc * 0.7;
txq->sa_base = (uint64_t)dev->outb.sa_base;
txq->sa_base |= eth_dev->data->port_id;
@@ -19,6 +19,7 @@ struct cn10k_eth_txq {
uint64_t sa_base;
uint64_t *cpt_fc;
uint16_t cpt_desc;
+ int32_t *cpt_fc_sw;
uint64_t lso_tun_fmt;
uint64_t ts_mem;
uint64_t mark_flag : 8;
@@ -209,6 +209,37 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
}
static __rte_always_inline void
+cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
+{
+ int32_t nb_desc, val, newval;
+ int32_t *fc_sw;
+ volatile uint64_t *fc;
+
+ /* Check if there is any CPT instruction to submit */
+ if (!nb_pkts)
+ return;
+
+again:
+ fc_sw = txq->cpt_fc_sw;
+ val = __atomic_sub_fetch(fc_sw, nb_pkts, __ATOMIC_RELAXED); /* Reserve nb_pkts CPT credits from the SW counter */
+ if (likely(val >= 0))
+ return; /* Enough SW credits cached; skip the HW poll */
+
+ nb_desc = txq->cpt_desc;
+ fc = txq->cpt_fc;
+ while (true) { /* Busy-poll HW flow-control count until nb_pkts slots free up */
+ newval = nb_desc - __atomic_load_n(fc, __ATOMIC_RELAXED);
+ newval -= nb_pkts;
+ if (newval >= 0)
+ break;
+ }
+
+ if (!__atomic_compare_exchange_n(fc_sw, &val, newval, false, /* Publish HW-derived credit count */
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ goto again; /* NOTE(review): counter changed concurrently; retry re-reserves — assumes a later successful CAS overwrites any interim subtraction, confirm for multi-producer use */
+}
+
+static __rte_always_inline void
cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
uint8_t loff, uint8_t shft)
{
@@ -995,6 +1026,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
/* Reduce pkts to be sent to CPT */
burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
c_shft);
}
@@ -1138,6 +1170,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
/* Reduce pkts to be sent to CPT */
burst -= ((c_lnum << 1) + c_loff);
+ cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
c_shft);
}
@@ -2682,9 +2715,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
left -= burst;
/* Submit CPT instructions if any */
- if (flags & NIX_TX_OFFLOAD_SECURITY_F)
+ if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+ cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
c_shft);
+ }
/* Trigger LMTST */
if (lnum > 16) {
@@ -155,9 +155,19 @@ nix_security_setup(struct cnxk_eth_dev *dev)
dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
dev->outb.sa_bmap_mem = mem;
dev->outb.sa_bmap = bmap;
+
+ dev->outb.fc_sw_mem = plt_zmalloc(dev->outb.nb_crypto_qs *
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!dev->outb.fc_sw_mem) {
+ plt_err("Outbound fc sw mem alloc failed");
+ goto sa_bmap_free;
+ }
}
return 0;
+sa_bmap_free:
+ plt_free(dev->outb.sa_bmap_mem);
sa_dptr_free:
if (dev->inb.sa_dptr)
plt_free(dev->inb.sa_dptr);
@@ -253,6 +263,9 @@ nix_security_release(struct cnxk_eth_dev *dev)
plt_free(dev->outb.sa_dptr);
dev->outb.sa_dptr = NULL;
}
+
+ plt_free(dev->outb.fc_sw_mem);
+ dev->outb.fc_sw_mem = NULL;
}
dev->inb.inl_dev = false;
@@ -321,6 +321,9 @@ struct cnxk_eth_dev_sec_outb {
/* Crypto queues => CPT lf count */
uint16_t nb_crypto_qs;
+ /* FC sw mem */
+ uint64_t *fc_sw_mem;
+
/* Active sessions */
uint16_t nb_sess;