From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
Add a mailbox message to mask and set nix_rq_ctx
parameters and to enable RQ masking in ipsec_cfg1
so that the second pass is applied to all RQs.
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 4 +-
drivers/common/cnxk/roc_mbox.h | 23 ++++++++-
drivers/common/cnxk/roc_nix_inl.c | 81 +++++++++++++++++++++++++++++++
3 files changed, 106 insertions(+), 2 deletions(-)
@@ -1242,7 +1242,9 @@ struct nix_cn10k_rq_ctx_s {
uint64_t ipsech_ena : 1;
uint64_t ena_wqwd : 1;
uint64_t cq : 20;
- uint64_t rsvd_36_24 : 13;
+ uint64_t rsvd_34_24 : 11;
+ uint64_t port_ol4_dis : 1;
+ uint64_t port_il4_dis : 1;
uint64_t lenerr_dis : 1;
uint64_t csum_il4_dis : 1;
uint64_t csum_ol4_dis : 1;
@@ -265,7 +265,9 @@ struct mbox_msghdr {
msg_rsp) \
M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
- msg_req, nix_inline_ipsec_cfg)
+ msg_req, nix_inline_ipsec_cfg) \
+ M(NIX_LF_INLINE_RQ_CFG, 0x8024, nix_lf_inline_rq_cfg, \
+ nix_rq_cpt_field_mask_cfg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -1088,6 +1090,25 @@ struct nix_mark_format_cfg_rsp {
uint8_t __io mark_format_idx;
};
+/* Request payload for the NIX_LF_INLINE_RQ_CFG (0x8024) mailbox message.
+ * Carries a set/mask pair covering the CN10K RQ context plus the
+ * inline-IPsec cfg1 fields used to enable RQ masking and the SPB CPT pool.
+ */
+struct nix_rq_cpt_field_mask_cfg_req {
+ struct mbox_msghdr hdr;
+/* Number of 64-bit words overlaying struct nix_cn10k_rq_ctx_s for masking. */
+#define RQ_CTX_MASK_MAX 6
+ /* Field values to program: raw word view and structured view of the
+ * same storage.
+ */
+ union {
+ uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
+ struct nix_cn10k_rq_ctx_s rq_set;
+ };
+ /* Per-bit mask selecting which rq_set bits the AF applies to each RQ.
+ * NOTE(review): callers start from all-ones words and clear the bits
+ * they program — presumably a mask bit of 0 means "apply this field";
+ * confirm against the AF driver's handling of this message.
+ */
+ union {
+ uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ };
+ /* Inline-IPsec cfg1: SPB CPT aura/buffer size and the enable flags for
+ * RQ masking and the SPB CPT pool.
+ */
+ struct nix_lf_rx_ipec_cfg1_req {
+ uint32_t __io spb_cpt_aura;
+ uint8_t __io rq_mask_enable;
+ uint8_t __io spb_cpt_sizem1;
+ uint8_t __io spb_cpt_enable;
+ } ipsec_cfg1;
+};
+
struct nix_lso_format_cfg {
struct mbox_msghdr hdr;
uint64_t __io field_mask;
@@ -416,6 +416,70 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
}
+/* Program the RQ context field mask via the NIX_LF_INLINE_RQ_CFG mailbox so
+ * the AF applies the second-pass RQ settings to every RQ, and enable/disable
+ * RQ masking and the SPB CPT pool through ipsec_cfg1.
+ *
+ * Mask convention: all mask words start as all-ones ("leave field as is");
+ * a mask bit of 0 selects the corresponding rq_set bit for update.
+ *
+ * Fix vs. the previous revision: the masks were computed as
+ * ~(rq_set.<field>), which truncates to 1 for the fields being CLEARED
+ * (lpb/spb/xqe_drop_ena, set = 0) — leaving those clears masked out and
+ * never applied. Set the mask explicitly to 0 for every configured field.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct mbox *mbox = (&nix->dev)->mbox;
+ struct idev_nix_inl_cfg *inl_cfg;
+ uint64_t aura_handle;
+ int rc = -ENOSPC;
+ int i;
+
+ if (!idev)
+ return rc;
+
+ inl_cfg = &idev->inl_cfg;
+ msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+ if (msk_req == NULL)
+ return rc;
+
+ /* Start with every field masked out (all-ones = untouched). */
+ for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+ msk_req->rq_ctx_word_mask[i] = 0xFFFFFFFFFFFFFFFFULL;
+
+ /* Disable length/checksum/port verification for second-pass pkts. */
+ msk_req->rq_set.len_ol3_dis = 1;
+ msk_req->rq_set.len_ol4_dis = 1;
+ msk_req->rq_set.len_il3_dis = 1;
+
+ msk_req->rq_set.len_il4_dis = 1;
+ msk_req->rq_set.csum_ol4_dis = 1;
+ msk_req->rq_set.csum_il4_dis = 1;
+
+ msk_req->rq_set.lenerr_dis = 1;
+ msk_req->rq_set.port_ol4_dis = 1;
+ msk_req->rq_set.port_il4_dis = 1;
+
+ /* Disable pkt drop on LPB/SPB/XQE exhaustion for second pass. */
+ msk_req->rq_set.lpb_drop_ena = 0;
+ msk_req->rq_set.spb_drop_ena = 0;
+ msk_req->rq_set.xqe_drop_ena = 0;
+
+ /* Unmask (0) every field configured above — including the cleared
+ * drop-enable bits — so the AF actually applies them.
+ */
+ msk_req->rq_mask.len_ol3_dis = 0;
+ msk_req->rq_mask.len_ol4_dis = 0;
+ msk_req->rq_mask.len_il3_dis = 0;
+
+ msk_req->rq_mask.len_il4_dis = 0;
+ msk_req->rq_mask.csum_ol4_dis = 0;
+ msk_req->rq_mask.csum_il4_dis = 0;
+
+ msk_req->rq_mask.lenerr_dis = 0;
+ msk_req->rq_mask.port_ol4_dis = 0;
+ msk_req->rq_mask.port_il4_dis = 0;
+
+ msk_req->rq_mask.lpb_drop_ena = 0;
+ msk_req->rq_mask.spb_drop_ena = 0;
+ msk_req->rq_mask.xqe_drop_ena = 0;
+
+ /* Back the SPB CPT pool with the zero aura; both enable flags track
+ * the caller's 'enable'.  NOTE(review): spb_cpt_sizem1 is fed the raw
+ * buf_sz — confirm the AF expects this encoding rather than size-minus-1
+ * in some unit.
+ */
+ aura_handle = roc_npa_zero_aura_handle();
+ msk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);
+ msk_req->ipsec_cfg1.rq_mask_enable = enable;
+ msk_req->ipsec_cfg1.spb_cpt_sizem1 = inl_cfg->buf_sz;
+ msk_req->ipsec_cfg1.spb_cpt_enable = enable;
+
+ return mbox_process(mbox);
+}
+
int
roc_nix_inl_inb_init(struct roc_nix *roc_nix)
{
@@ -472,6 +536,14 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
nix->need_meta_aura = true;
idev->inl_cfg.refs++;
}
+
+ if (roc_model_is_cn10kb_a0()) {
+ rc = nix_inl_rq_mask_cfg(roc_nix, true);
+ if (rc) {
+ plt_err("Failed to get rq mask rc=%d", rc);
+ return rc;
+ }
+ }
nix->inl_inb_ena = true;
return 0;
}
@@ -481,6 +553,7 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
struct idev_cfg *idev = idev_get_cfg();
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ int rc;
if (!nix->inl_inb_ena)
return 0;
@@ -496,6 +569,14 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
nix_inl_meta_aura_destroy();
}
+ if (roc_model_is_cn10kb_a0()) {
+ rc = nix_inl_rq_mask_cfg(roc_nix, false);
+ if (rc) {
+ plt_err("Failed to get rq mask rc=%d", rc);
+ return rc;
+ }
+ }
+
/* Flush Inbound CTX cache entries */
roc_nix_cpt_ctx_cache_sync(roc_nix);