common/cnxk: add REE support
Commit Message
From: Liron Himi <lironh@marvell.com>
Adding REE definitions and related ROC code
Signed-off-by: Liron Himi <lironh@marvell.com>
---
drivers/common/cnxk/hw/ree.h | 165 +++++++
drivers/common/cnxk/hw/rvu.h | 5 +
drivers/common/cnxk/meson.build | 1 +
drivers/common/cnxk/roc_api.h | 4 +
drivers/common/cnxk/roc_constants.h | 2 +
drivers/common/cnxk/roc_mbox.h | 100 +++++
drivers/common/cnxk/roc_platform.c | 1 +
drivers/common/cnxk/roc_platform.h | 2 +
drivers/common/cnxk/roc_priv.h | 3 +
drivers/common/cnxk/roc_ree.c | 647 ++++++++++++++++++++++++++++
drivers/common/cnxk/roc_ree.h | 137 ++++++
drivers/common/cnxk/roc_ree_priv.h | 18 +
drivers/common/cnxk/version.map | 18 +-
13 files changed, 1102 insertions(+), 1 deletion(-)
create mode 100644 drivers/common/cnxk/hw/ree.h
create mode 100644 drivers/common/cnxk/roc_ree.c
create mode 100644 drivers/common/cnxk/roc_ree.h
create mode 100644 drivers/common/cnxk/roc_ree_priv.h
Comments
On Sun, Oct 3, 2021 at 11:53 AM <lironh@marvell.com> wrote:
>
> From: Liron Himi <lironh@marvell.com>
>
> Adding REE definitions and related ROC code
# Mention the full form of REE in the git commit message.
# Patch looks good to me. Some minor comments below.
# Also, please send a patch for switching drivers/regex/octeontx2/ to
the new RoC driver. We plan to merge it in RC2.
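As a rough sketch, the regex PMD probe path could sit on top of the
roc_ree_* API added by this patch along the following lines (the wrapper
name and surrounding glue are hypothetical, error handling trimmed):

static int
ree_pmd_probe_sketch(struct plt_pci_device *pci_dev,
		     struct roc_ree_vf *vf, uint8_t nb_queues)
{
	int rc;

	/* Bind the PCI device and bring up the RoC REE device */
	vf->pci_dev = pci_dev;
	rc = roc_ree_dev_init(vf);
	if (rc)
		return rc;

	/* One LF per queue; stay within what the device reports */
	if (nb_queues > vf->max_queues)
		nb_queues = vf->max_queues;
	rc = roc_ree_queues_attach(vf, nb_queues);
	if (rc)
		goto dev_fini;

	/* MSI-X offsets are needed before registering error interrupts */
	rc = roc_ree_msix_offsets_get(vf);
	if (rc)
		goto queues_detach;
	rc = roc_ree_err_intr_register(vf);
	if (rc)
		goto queues_detach;

	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
dev_fini:
	roc_ree_dev_fini(vf);
	return rc;
}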
# Please split this patch into 3 or 4 more logical patches for
device-specific code, rules API, etc.
>
> Signed-off-by: Liron Himi <lironh@marvell.com>
> ---
> drivers/common/cnxk/hw/ree.h | 165 +++++++
> drivers/common/cnxk/hw/rvu.h | 5 +
> drivers/common/cnxk/meson.build | 1 +
> drivers/common/cnxk/roc_api.h | 4 +
> drivers/common/cnxk/roc_constants.h | 2 +
> drivers/common/cnxk/roc_mbox.h | 100 +++++
> drivers/common/cnxk/roc_platform.c | 1 +
> drivers/common/cnxk/roc_platform.h | 2 +
> drivers/common/cnxk/roc_priv.h | 3 +
> drivers/common/cnxk/roc_ree.c | 647 ++++++++++++++++++++++++++++
> drivers/common/cnxk/roc_ree.h | 137 ++++++
> drivers/common/cnxk/roc_ree_priv.h | 18 +
> drivers/common/cnxk/version.map | 18 +-
> 13 files changed, 1102 insertions(+), 1 deletion(-)
> create mode 100644 drivers/common/cnxk/hw/ree.h
> create mode 100644 drivers/common/cnxk/roc_ree.c
> create mode 100644 drivers/common/cnxk/roc_ree.h
> create mode 100644 drivers/common/cnxk/roc_ree_priv.h
>
> diff --git a/drivers/common/cnxk/hw/ree.h b/drivers/common/cnxk/hw/ree.h
> new file mode 100644
> index 0000000000..0766d35e52
> --- /dev/null
> +++ b/drivers/common/cnxk/hw/ree.h
> @@ -0,0 +1,165 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef __REE_HW_H__
> +#define __REE_HW_H__
> +
> +/* REE instruction queue length */
> +#define REE_IQ_LEN (1 << 13)
> +
> +#define REE_DEFAULT_CMD_QLEN REE_IQ_LEN
> +
> +/* Status register bits */
> +#define REE_STATUS_PMI_EOJ_BIT (1 << 14)
> +#define REE_STATUS_PMI_SOJ_BIT (1 << 13)
> +#define REE_STATUS_MP_CNT_DET_BIT (1 << 7)
> +#define REE_STATUS_MM_CNT_DET_BIT (1 << 6)
> +#define REE_STATUS_ML_CNT_DET_BIT (1 << 5)
> +#define REE_STATUS_MST_CNT_DET_BIT (1 << 4)
> +#define REE_STATUS_MPT_CNT_DET_BIT (1 << 3)
Use macros in drivers/common/cnxk/roc_bits.h
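For example, something like this (assuming roc_bits.h provides the usual
BIT_ULL() helper, as the other RoC headers use):

#define REE_STATUS_PMI_EOJ_BIT		BIT_ULL(14)
#define REE_STATUS_PMI_SOJ_BIT		BIT_ULL(13)
#define REE_STATUS_MP_CNT_DET_BIT	BIT_ULL(7)
#define REE_STATUS_MM_CNT_DET_BIT	BIT_ULL(6)
#define REE_STATUS_ML_CNT_DET_BIT	BIT_ULL(5)
#define REE_STATUS_MST_CNT_DET_BIT	BIT_ULL(4)
#define REE_STATUS_MPT_CNT_DET_BIT	BIT_ULL(3)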
> +
> +/* Register offsets */
> +/* REE LF registers */
> +#define REE_LF_DONE_INT 0x120ull
> +#define REE_LF_DONE_INT_W1S 0x130ull
> +#define REE_LF_DONE_INT_ENA_W1S 0x138ull
> +#define REE_LF_DONE_INT_ENA_W1C 0x140ull
> +#define REE_LF_MISC_INT 0x300ull
> +#define REE_LF_MISC_INT_W1S 0x310ull
> +#define REE_LF_MISC_INT_ENA_W1S 0x320ull
> +#define REE_LF_MISC_INT_ENA_W1C 0x330ull
> +#define REE_LF_ENA 0x10ull
> +#define REE_LF_SBUF_ADDR 0x20ull
> +#define REE_LF_DONE 0x100ull
> +#define REE_LF_DONE_ACK 0x110ull
> +#define REE_LF_DONE_WAIT 0x148ull
> +#define REE_LF_DOORBELL 0x400ull
> +#define REE_LF_OUTSTAND_JOB 0x410ull
> +
> +/* BAR 0 */
> +#define REE_AF_REEXM_MAX_MATCH (0x80c8ull)
> +#define REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (uint64_t)(a) << 3)
> +#define REE_PRIV_LF_CFG(a) (0x41000ull | (uint64_t)(a) << 3)
> +
> +#define REE_AF_QUEX_GMCTL(a) (0x800 | (a) << 3)
> +
> +#define REE_AF_INT_VEC_RAS (0x0ull)
> +#define REE_AF_INT_VEC_RVU (0x1ull)
> +#define REE_AF_INT_VEC_QUE_DONE (0x2ull)
> +#define REE_AF_INT_VEC_AQ (0x3ull)
> +
> +/* ENUMS */
> +
> +#define REE_LF_INT_VEC_QUE_DONE (0x0ull)
> +#define REE_LF_INT_VEC_MISC (0x1ull)
> +
> +#define REE_LF_BAR2(vf, q_id) \
> + ((vf)->dev->bar2 + (((vf)->block_address << 20) | ((q_id) << 12)))
> +
> +#define REE_QUEUE_HI_PRIO 0x1
> +
> +enum ree_desc_type_e {
> + REE_TYPE_JOB_DESC = 0x0,
> + REE_TYPE_RESULT_DESC = 0x1,
> + REE_TYPE_ENUM_LAST = 0x2
> +};
> +
> +union ree_priv_lf_cfg {
> + uint64_t u;
> + struct {
> + uint64_t slot : 8;
> + uint64_t pf_func : 16;
> + uint64_t reserved_24_62 : 39;
> + uint64_t ena : 1;
> + } s;
> +};
> +
> +union ree_lf_sbuf_addr {
> + uint64_t u;
> + struct {
> + uint64_t off : 7;
> + uint64_t ptr : 46;
> + uint64_t reserved_53_63 : 11;
> + } s;
> +};
> +
> +union ree_lf_ena {
> + uint64_t u;
> + struct {
> + uint64_t ena : 1;
> + uint64_t reserved_1_63 : 63;
> + } s;
> +};
> +
> +union ree_af_reexm_max_match {
> + uint64_t u;
> + struct {
> + uint64_t max : 8;
> + uint64_t reserved_8_63 : 56;
> + } s;
> +};
> +
> +union ree_lf_done {
> + uint64_t u;
> + struct {
> + uint64_t done : 20;
> + uint64_t reserved_20_63 : 44;
> + } s;
> +};
All the register definitions above should use the bitfield APIs in
drivers/common/cnxk/roc_bitfield.h.
For context structures like the ones below, it is OK to use a structure
scheme like the existing HW blocks in RoC.
> +
> +union ree_res_status {
> + uint64_t u;
> + struct {
> + uint64_t job_type : 3;
> + uint64_t mpt_cnt_det : 1;
> + uint64_t mst_cnt_det : 1;
> + uint64_t ml_cnt_det : 1;
> + uint64_t mm_cnt_det : 1;
> + uint64_t mp_cnt_det : 1;
> + uint64_t mode : 2;
> + uint64_t reserved_10_11 : 2;
> + uint64_t reserved_12_12 : 1;
> + uint64_t pmi_soj : 1;
> + uint64_t pmi_eoj : 1;
> + uint64_t reserved_15_15 : 1;
> + uint64_t reserved_16_63 : 48;
> + } s;
> +};
> +
> +union ree_res {
> + uint64_t u[8];
> + struct ree_res_s_98 {
> + uint64_t done : 1;
> + uint64_t hwjid : 7;
> + uint64_t ree_res_job_id : 24;
> + uint64_t ree_res_status : 16;
> + uint64_t ree_res_dmcnt : 8;
> + uint64_t ree_res_mcnt : 8;
> + uint64_t ree_meta_ptcnt : 16;
> + uint64_t ree_meta_icnt : 16;
> + uint64_t ree_meta_lcnt : 16;
> + uint64_t ree_pmi_min_byte_ptr : 16;
> + uint64_t ree_err : 1;
> + uint64_t reserved_129_190 : 62;
> + uint64_t doneint : 1;
> + uint64_t reserved_192_255 : 64;
> + uint64_t reserved_256_319 : 64;
> + uint64_t reserved_320_383 : 64;
> + uint64_t reserved_384_447 : 64;
> + uint64_t reserved_448_511 : 64;
> + } s;
> +};
> +
> +union ree_match {
> + uint64_t u;
> + struct {
> + uint64_t ree_rule_id : 32;
> + uint64_t start_ptr : 14;
> + uint64_t reserved_46_47 : 2;
> + uint64_t match_length : 15;
> + uint64_t reserved_63_6 : 1;
> + } s;
> +};
> +
> +#endif /* __REE_HW_H__ */
> diff --git a/drivers/common/cnxk/hw/rvu.h b/drivers/common/cnxk/hw/rvu.h
> index 632d9499ea..daf758f0b5 100644
> --- a/drivers/common/cnxk/hw/rvu.h
> +++ b/drivers/common/cnxk/hw/rvu.h
> @@ -130,6 +130,7 @@
> #define RVU_BLOCK_TYPE_RAD (0xdull)
> #define RVU_BLOCK_TYPE_DFA (0xeull)
> #define RVU_BLOCK_TYPE_HNA (0xfull)
> +#define RVU_BLOCK_TYPE_REE (0xeull)
>
> #define RVU_BLOCK_ADDR_RVUM (0x0ull)
> #define RVU_BLOCK_ADDR_LMT (0x1ull)
> @@ -147,6 +148,8 @@
> #define RVU_BLOCK_ADDR_NDC2 (0xeull)
> #define RVU_BLOCK_ADDR_R_END (0x1full)
> #define RVU_BLOCK_ADDR_R_START (0x14ull)
> +#define RVU_BLOCK_ADDR_REE0 (0x14ull)
> +#define RVU_BLOCK_ADDR_REE1 (0x15ull)
>
> #define RVU_VF_INT_VEC_MBOX (0x0ull)
>
> @@ -167,6 +170,7 @@
> #define NPA_AF_BAR2_SEL (0x9000000ull)
> #define CPT_AF_BAR2_SEL (0x9000000ull)
> #define RVU_AF_BAR2_SEL (0x9000000ull)
> +#define REE_AF_BAR2_SEL (0x9000000ull)
>
> #define AF_BAR2_ALIASX(a, b) \
> (0x9100000ull | (uint64_t)(a) << 12 | (uint64_t)(b))
> @@ -177,6 +181,7 @@
> #define NPA_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
> #define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
> #define RVU_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
> +#define REE_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
>
> /* Structures definitions */
>
> diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
> index 97db5f087b..fa24f385a3 100644
> --- a/drivers/common/cnxk/meson.build
> +++ b/drivers/common/cnxk/meson.build
> @@ -59,6 +59,7 @@ sources = files(
> 'roc_tim.c',
> 'roc_tim_irq.c',
> 'roc_utils.c',
> + 'roc_ree.c',
> )
>
> # Security common code
> diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
> index b8f3667c6c..abf72077f8 100644
> --- a/drivers/common/cnxk/roc_api.h
> +++ b/drivers/common/cnxk/roc_api.h
> @@ -36,6 +36,7 @@
> #include "hw/nix.h"
> #include "hw/npa.h"
> #include "hw/npc.h"
> +#include "hw/ree.h"
> #include "hw/rvu.h"
> #include "hw/sdp.h"
> #include "hw/sso.h"
> @@ -86,6 +87,9 @@
> #include "roc_ie_ot.h"
> #include "roc_se.h"
>
> +/* REE */
> +#include "roc_ree.h"
> +
> /* HASH computation */
> #include "roc_hash.h"
>
> diff --git a/drivers/common/cnxk/roc_constants.h b/drivers/common/cnxk/roc_constants.h
> index 1e6427cf91..e1ffca31d4 100644
> --- a/drivers/common/cnxk/roc_constants.h
> +++ b/drivers/common/cnxk/roc_constants.h
> @@ -37,6 +37,8 @@
> #define PCI_DEVID_CNXK_BPHY 0xA089
> #define PCI_DEVID_CNXK_RVU_NIX_INL_PF 0xA0F0
> #define PCI_DEVID_CNXK_RVU_NIX_INL_VF 0xA0F1
> +#define PCI_DEVID_CNXK_RVU_REE_PF 0xA0f4
> +#define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5
>
> #define PCI_DEVID_CN9K_CGX 0xA059
> #define PCI_DEVID_CN10K_RPM 0xA060
> diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
> index 75d1ff1ef3..4924620084 100644
> --- a/drivers/common/cnxk/roc_mbox.h
> +++ b/drivers/common/cnxk/roc_mbox.h
> @@ -148,6 +148,16 @@ struct mbox_msghdr {
> M(CPT_GET_CAPS, 0xBFD, cpt_caps_get, msg_req, cpt_caps_rsp_msg) \
> M(CPT_GET_ENG_GRP, 0xBFF, cpt_eng_grp_get, cpt_eng_grp_req, \
> cpt_eng_grp_rsp) \
> + /* REE mbox IDs (range 0xE00 - 0xFFF) */ \
> + M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, msg_rsp) \
> + M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg, \
> + ree_rd_wr_reg_msg) \
> + M(REE_RULE_DB_PROG, 0xE03, ree_rule_db_prog, ree_rule_db_prog_req_msg, \
> + msg_rsp) \
> + M(REE_RULE_DB_LEN_GET, 0xE04, ree_rule_db_len_get, ree_req_msg, \
> + ree_rule_db_len_rsp_msg) \
> + M(REE_RULE_DB_GET, 0xE05, ree_rule_db_get, ree_rule_db_get_req_msg, \
> + ree_rule_db_get_rsp_msg) \
> /* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
> M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, \
> msg_rsp) \
> @@ -1394,6 +1404,96 @@ struct cpt_eng_grp_rsp {
> uint8_t __io eng_grp_num;
> };
>
> +/* REE mailbox error codes
> + * Range 1001 - 1100.
> + */
> +enum ree_af_status {
> + REE_AF_ERR_RULE_UNKNOWN_VALUE = -1001,
> + REE_AF_ERR_LF_NO_MORE_RESOURCES = -1002,
> + REE_AF_ERR_LF_INVALID = -1003,
> + REE_AF_ERR_ACCESS_DENIED = -1004,
> + REE_AF_ERR_RULE_DB_PARTIAL = -1005,
> + REE_AF_ERR_RULE_DB_EQ_BAD_VALUE = -1006,
> + REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED = -1007,
> + REE_AF_ERR_BLOCK_NOT_IMPLEMENTED = -1008,
> + REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG = -1009,
> + REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG = -1010,
> + REE_AF_ERR_Q_IS_GRACEFUL_DIS = -1011,
> + REE_AF_ERR_Q_NOT_GRACEFUL_DIS = -1012,
> + REE_AF_ERR_RULE_DB_ALLOC_FAILED = -1013,
> + REE_AF_ERR_RULE_DB_TOO_BIG = -1014,
> + REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE = -1015,
> + REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE = -1016,
> + REE_AF_ERR_RULE_DB_WRONG_LENGTH = -1017,
> + REE_AF_ERR_RULE_DB_WRONG_OFFSET = -1018,
> + REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG = -1019,
> + REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST = -1020,
> + REE_AF_ERR_RULE_DBI_ALLOC_FAILED = -1021,
> + REE_AF_ERR_LF_WRONG_PRIORITY = -1022,
> + REE_AF_ERR_LF_SIZE_TOO_BIG = -1023,
> +};
> +
> +/* REE mbox message formats */
> +
> +struct ree_req_msg {
> + struct mbox_msghdr hdr;
> + uint32_t __io blkaddr;
> +};
> +
> +struct ree_lf_req_msg {
> + struct mbox_msghdr hdr;
> + uint32_t __io blkaddr;
> + uint32_t __io size;
> + uint8_t __io lf;
> + uint8_t __io pri;
> +};
> +
> +struct ree_rule_db_prog_req_msg {
> + struct mbox_msghdr hdr;
> +#define REE_RULE_DB_REQ_BLOCK_SIZE ((64ULL * 1024ULL) >> 1)
> + uint8_t __io rule_db[REE_RULE_DB_REQ_BLOCK_SIZE];
> + uint32_t __io blkaddr; /* REE0 or REE1 */
> + uint32_t __io total_len; /* total len of rule db */
> + uint32_t __io offset; /* offset of current rule db block */
> + uint16_t __io len; /* length of rule db block */
> + uint8_t __io is_last; /* is this the last block */
> + uint8_t __io is_incremental; /* is incremental flow */
> + uint8_t __io is_dbi; /* is rule db incremental */
> +};
> +
> +struct ree_rule_db_get_req_msg {
> + struct mbox_msghdr hdr;
> + uint32_t __io blkaddr;
> + uint32_t __io offset; /* retrieve db from this offset */
> + uint8_t __io is_dbi; /* is request for rule db incremental */
> +};
> +
> +struct ree_rd_wr_reg_msg {
> + struct mbox_msghdr hdr;
> + uint64_t __io reg_offset;
> + uint64_t __io *ret_val;
> + uint64_t __io val;
> + uint32_t __io blkaddr;
> + uint8_t __io is_write;
> +};
> +
> +struct ree_rule_db_len_rsp_msg {
> + struct mbox_msghdr hdr;
> + uint32_t __io blkaddr;
> + uint32_t __io len;
> + uint32_t __io inc_len;
> +};
> +
> +struct ree_rule_db_get_rsp_msg {
> + struct mbox_msghdr hdr;
> +#define REE_RULE_DB_RSP_BLOCK_SIZE (15ULL * 1024ULL)
> + uint8_t __io rule_db[REE_RULE_DB_RSP_BLOCK_SIZE];
> + uint32_t __io total_len; /* total len of rule db */
> + uint32_t __io offset; /* offset of current rule db block */
> + uint16_t __io len; /* length of rule db block */
> + uint8_t __io is_last; /* is this the last block */
> +};
> +
> /* NPC mbox message structs */
>
> #define NPC_MCAM_ENTRY_INVALID 0xFFFF
> diff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c
> index 74dbdeceb9..ebb6225f4d 100644
> --- a/drivers/common/cnxk/roc_platform.c
> +++ b/drivers/common/cnxk/roc_platform.c
> @@ -65,3 +65,4 @@ RTE_LOG_REGISTER(cnxk_logtype_npc, pmd.net.cnxk.flow, NOTICE);
> RTE_LOG_REGISTER(cnxk_logtype_sso, pmd.event.cnxk, NOTICE);
> RTE_LOG_REGISTER(cnxk_logtype_tim, pmd.event.cnxk.timer, NOTICE);
> RTE_LOG_REGISTER(cnxk_logtype_tm, pmd.net.cnxk.tm, NOTICE);
> +RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE);
> diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
> index 241655b334..4f48a8e282 100644
> --- a/drivers/common/cnxk/roc_platform.h
> +++ b/drivers/common/cnxk/roc_platform.h
> @@ -152,6 +152,7 @@ extern int cnxk_logtype_npc;
> extern int cnxk_logtype_sso;
> extern int cnxk_logtype_tim;
> extern int cnxk_logtype_tm;
> +extern int cnxk_logtype_ree;
>
> #define plt_err(fmt, args...) \
> RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", __func__, __LINE__, ##args)
> @@ -176,6 +177,7 @@ extern int cnxk_logtype_tm;
> #define plt_sso_dbg(fmt, ...) plt_dbg(sso, fmt, ##__VA_ARGS__)
> #define plt_tim_dbg(fmt, ...) plt_dbg(tim, fmt, ##__VA_ARGS__)
> #define plt_tm_dbg(fmt, ...) plt_dbg(tm, fmt, ##__VA_ARGS__)
> +#define plt_ree_dbg(fmt, ...) plt_dbg(ree, fmt, ##__VA_ARGS__)
>
> /* Datapath logs */
> #define plt_dp_err(fmt, args...) \
> diff --git a/drivers/common/cnxk/roc_priv.h b/drivers/common/cnxk/roc_priv.h
> index f72bbd568f..b091d519cb 100644
> --- a/drivers/common/cnxk/roc_priv.h
> +++ b/drivers/common/cnxk/roc_priv.h
> @@ -41,4 +41,7 @@
> /* NIX Inline dev */
> #include "roc_nix_inl_priv.h"
>
> +/* REE */
> +#include "roc_ree_priv.h"
> +
> #endif /* _ROC_PRIV_H_ */
> diff --git a/drivers/common/cnxk/roc_ree.c b/drivers/common/cnxk/roc_ree.c
> new file mode 100644
> index 0000000000..35ed9ab3d5
> --- /dev/null
> +++ b/drivers/common/cnxk/roc_ree.c
> @@ -0,0 +1,647 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#include "roc_api.h"
> +#include "roc_priv.h"
> +
> +/* This is temporarily here */
Remove this comment if it is not valid.
> +#define REE0_PF 19
> +#define REE1_PF 20
> +
> +static int
> +roc_ree_available_queues_get(struct roc_ree_vf *vf, uint16_t *nb_queues)
> +{
> + struct free_rsrcs_rsp *rsp;
> + struct dev *dev = vf->dev;
> + int ret;
> +
> + mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
> +
> + ret = mbox_process_msg(dev->mbox, (void *)&rsp);
> + if (ret)
> + return -EIO;
> +
> + if (vf->block_address == RVU_BLOCK_ADDR_REE0)
> + *nb_queues = rsp->ree0;
> + else
> + *nb_queues = rsp->ree1;
> + return 0;
> +}
> +
> +static int
> +roc_ree_max_matches_get(struct roc_ree_vf *vf, uint8_t *max_matches)
> +{
> + uint64_t val;
> + int ret;
> +
> + ret = roc_ree_af_reg_read(vf, REE_AF_REEXM_MAX_MATCH, &val);
> + if (ret)
> + return ret;
> +
> + *max_matches = val;
> + return 0;
> +}
> +
> +int
> +roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues)
> +{
> + struct rsrc_attach_req *req;
> + struct mbox *mbox;
> +
> + mbox = vf->dev->mbox;
> + /* Ask AF to attach required LFs */
> + req = mbox_alloc_msg_attach_resources(mbox);
> + if (req == NULL) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + /* 1 LF = 1 queue */
> + req->reelfs = nb_queues;
> + req->ree_blkaddr = vf->block_address;
> +
> + if (mbox_process(mbox) < 0)
> + return -EIO;
> +
> + /* Update number of attached queues */
> + vf->nb_queues = nb_queues;
> +
> + return 0;
> +}
> +
> +int
> +roc_ree_queues_detach(struct roc_ree_vf *vf)
> +{
> + struct rsrc_detach_req *req;
> + struct mbox *mbox;
> +
> + mbox = vf->dev->mbox;
> + req = mbox_alloc_msg_detach_resources(mbox);
> + if (req == NULL) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> + req->reelfs = true;
> + req->partial = true;
> + if (mbox_process(mbox) < 0)
> + return -EIO;
> +
> + /* Queues have been detached */
> + vf->nb_queues = 0;
> +
> + return 0;
> +}
> +
> +int
> +roc_ree_msix_offsets_get(struct roc_ree_vf *vf)
> +{
> + struct msix_offset_rsp *rsp;
> + struct mbox *mbox;
> + uint32_t i, ret;
> +
> + /* Get REE MSI-X vector offsets */
> + mbox = vf->dev->mbox;
> + mbox_alloc_msg_msix_offset(mbox);
> +
> + ret = mbox_process_msg(mbox, (void *)&rsp);
> + if (ret)
> + return ret;
> +
> + for (i = 0; i < vf->nb_queues; i++) {
> + if (vf->block_address == RVU_BLOCK_ADDR_REE0)
> + vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
> + else
> + vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
> + plt_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
> + }
> +
> + return 0;
> +}
> +
> +static int
> +ree_send_mbox_msg(struct roc_ree_vf *vf)
> +{
> + struct mbox *mbox = vf->dev->mbox;
> + int ret;
> +
> + mbox_msg_send(mbox, 0);
> +
> + ret = mbox_wait_for_rsp(mbox, 0);
> + if (ret < 0) {
> + plt_err("Could not get mailbox response");
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +int
> +roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri, uint32_t size)
> +{
> + struct ree_lf_req_msg *req;
> + struct mbox *mbox;
> + int ret;
> +
> + mbox = vf->dev->mbox;
> + req = mbox_alloc_msg_ree_config_lf(mbox);
> + if (req == NULL) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + req->lf = lf;
> + req->pri = pri ? 1 : 0;
> + req->size = size;
> + req->blkaddr = vf->block_address;
> +
> + ret = mbox_process(mbox);
> + if (ret < 0) {
> + plt_err("Could not get mailbox response");
> + return ret;
> + }
> + return 0;
> +}
> +
> +int
> +roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg, uint64_t *val)
> +{
> + struct ree_rd_wr_reg_msg *msg;
> + struct mbox_dev *mdev;
> + struct mbox *mbox;
> + int ret, off;
> +
> + mbox = vf->dev->mbox;
> + mdev = &mbox->dev[0];
> + msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
> + mbox, 0, sizeof(*msg), sizeof(*msg));
> + if (msg == NULL) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
> + msg->hdr.sig = MBOX_REQ_SIG;
> + msg->hdr.pcifunc = vf->dev->pf_func;
> + msg->is_write = 0;
> + msg->reg_offset = reg;
> + msg->ret_val = val;
> + msg->blkaddr = vf->block_address;
> +
> + ret = ree_send_mbox_msg(vf);
> + if (ret < 0)
> + return ret;
> +
> + off = mbox->rx_start +
> + RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
> + msg = (struct ree_rd_wr_reg_msg *)((uintptr_t)mdev->mbase + off);
> +
> + *val = msg->val;
> +
> + return 0;
> +}
> +
> +int
> +roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg, uint64_t val)
> +{
> + struct ree_rd_wr_reg_msg *msg;
> + struct mbox *mbox;
> +
> + mbox = vf->dev->mbox;
> + msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
> + mbox, 0, sizeof(*msg), sizeof(*msg));
> + if (msg == NULL) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
> + msg->hdr.sig = MBOX_REQ_SIG;
> + msg->hdr.pcifunc = vf->dev->pf_func;
> + msg->is_write = 1;
> + msg->reg_offset = reg;
> + msg->val = val;
> + msg->blkaddr = vf->block_address;
> +
> + return ree_send_mbox_msg(vf);
> +}
> +
> +int
> +roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db, uint32_t rule_db_len,
> + char *rule_dbi, uint32_t rule_dbi_len)
> +{
> + struct ree_rule_db_get_req_msg *req;
> + struct ree_rule_db_get_rsp_msg *rsp;
> + char *rule_db_ptr = (char *)rule_db;
> + struct mbox *mbox;
> + int ret, last = 0;
> + uint32_t len = 0;
> +
> + mbox = vf->dev->mbox;
> + if (!rule_db) {
> + plt_err("Couldn't return rule db due to NULL pointer");
> + return -EFAULT;
> + }
> +
> + while (!last) {
> + req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
> + mbox, 0, sizeof(*req), sizeof(*rsp));
> + if (!req) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
> + req->hdr.sig = MBOX_REQ_SIG;
> + req->hdr.pcifunc = vf->dev->pf_func;
> + req->blkaddr = vf->block_address;
> + req->is_dbi = 0;
> + req->offset = len;
> + ret = mbox_process_msg(mbox, (void *)&rsp);
> + if (ret)
> + return ret;
> + if (rule_db_len < len + rsp->len) {
> + plt_err("Rule db size is too small");
> + return -EFAULT;
> + }
> + mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
> + len += rsp->len;
> + rule_db_ptr = rule_db_ptr + rsp->len;
> + last = rsp->is_last;
> + }
> +
> + if (rule_dbi) {
> + req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
> + mbox, 0, sizeof(*req), sizeof(*rsp));
> + if (!req) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
> + req->hdr.sig = MBOX_REQ_SIG;
> + req->hdr.pcifunc = vf->dev->pf_func;
> + req->blkaddr = vf->block_address;
> + req->is_dbi = 1;
> + req->offset = 0;
> +
> + ret = mbox_process_msg(mbox, (void *)&rsp);
> + if (ret)
> + return ret;
> + if (rule_dbi_len < rsp->len) {
> + plt_err("Rule dbi size is too small");
> + return -EFAULT;
> + }
> + mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
> + }
> + return 0;
> +}
> +
> +int
> +roc_ree_rule_db_len_get(struct roc_ree_vf *vf, uint32_t *rule_db_len,
> + uint32_t *rule_dbi_len)
> +{
> + struct ree_rule_db_len_rsp_msg *rsp;
> + struct ree_req_msg *req;
> + struct mbox *mbox;
> + int ret;
> +
> + mbox = vf->dev->mbox;
> + req = (struct ree_req_msg *)mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
> + sizeof(*rsp));
> + if (!req) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> +
> + req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
> + req->hdr.sig = MBOX_REQ_SIG;
> + req->hdr.pcifunc = vf->dev->pf_func;
> + req->blkaddr = vf->block_address;
> + ret = mbox_process_msg(mbox, (void *)&rsp);
> + if (ret)
> + return ret;
> + if (rule_db_len != NULL)
> + *rule_db_len = rsp->len;
> + if (rule_dbi_len != NULL)
> + *rule_dbi_len = rsp->inc_len;
> +
> + return 0;
> +}
> +
> +static int
> +ree_db_msg(struct roc_ree_vf *vf, const char *db, uint32_t db_len, int inc,
> + int dbi)
> +{
> + uint32_t len_left = db_len, offset = 0;
> + struct ree_rule_db_prog_req_msg *req;
> + const char *rule_db_ptr = db;
> + struct mbox *mbox;
> + struct msg_rsp *rsp;
> + int ret;
> +
> + mbox = vf->dev->mbox;
> + while (len_left) {
> + req = (struct ree_rule_db_prog_req_msg *)mbox_alloc_msg_rsp(
> + mbox, 0, sizeof(*req), sizeof(*rsp));
> + if (!req) {
> + plt_err("Could not allocate mailbox message");
> + return -EFAULT;
> + }
> + req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
> + req->hdr.sig = MBOX_REQ_SIG;
> + req->hdr.pcifunc = vf->dev->pf_func;
> + req->offset = offset;
> + req->total_len = db_len;
> + req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
> + req->is_incremental = inc;
> + req->is_dbi = dbi;
> + req->blkaddr = vf->block_address;
> +
> + if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
> + req->is_last = true;
> + req->len = len_left;
> + }
> + mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
> + ret = mbox_process_msg(mbox, (void *)&rsp);
> + if (ret) {
> + plt_err("Programming mailbox processing failed");
> + return ret;
> + }
> + len_left -= req->len;
> + offset += req->len;
> + rule_db_ptr = rule_db_ptr + req->len;
> + }
> + return 0;
> +}
> +
> +int
> +roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
> + uint32_t rule_db_len, const char *rule_dbi,
> + uint32_t rule_dbi_len)
> +{
> + int inc, ret;
> +
> + if (rule_db_len == 0) {
> + plt_err("Couldn't program empty rule db");
> + return -EFAULT;
> + }
> + inc = (rule_dbi_len != 0);
> + if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
> + plt_err("Couldn't program NULL rule db");
> + return -EFAULT;
> + }
> + if (inc) {
> + ret = ree_db_msg(vf, rule_dbi, rule_dbi_len, inc, 1);
> + if (ret)
> + return ret;
> + }
> + return ree_db_msg(vf, rule_db, rule_db_len, inc, 0);
> +}
> +
> +static int
> +ree_get_blkaddr(struct dev *dev)
> +{
> + int pf;
> +
> + pf = dev_get_pf(dev->pf_func);
> + if (pf == REE0_PF)
> + return RVU_BLOCK_ADDR_REE0;
> + else if (pf == REE1_PF)
> + return RVU_BLOCK_ADDR_REE1;
> + else
> + return 0;
> +}
> +
> +uintptr_t
> +roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id)
> +{
> + return REE_LF_BAR2(vf, qp_id);
> +}
> +
> +static void
> +roc_ree_lf_err_intr_handler(void *param)
> +{
> + uintptr_t base = (uintptr_t)param;
> + uint8_t lf_id;
> + uint64_t intr;
> +
> + lf_id = (base >> 12) & 0xFF;
> +
> + intr = plt_read64(base + REE_LF_MISC_INT);
> + if (intr == 0)
> + return;
> +
> + plt_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
> +
> + /* Clear interrupt */
> + plt_write64(intr, base + REE_LF_MISC_INT);
> +}
> +
> +static void
> +roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off,
> + uintptr_t base)
> +{
> + struct rte_pci_device *pci_dev = vf->pci_dev;
> + struct rte_intr_handle *handle = &pci_dev->intr_handle;
> +
> + /* Disable error interrupts */
> + plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
> +
> + dev_irq_unregister(handle, roc_ree_lf_err_intr_handler, (void *)base,
> + msix_off);
> +}
> +
> +void
> +roc_ree_err_intr_unregister(struct roc_ree_vf *vf)
> +{
> + uintptr_t base;
> + uint32_t i;
> +
> + for (i = 0; i < vf->nb_queues; i++) {
> + base = REE_LF_BAR2(vf, i);
> + roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[i], base);
> + }
> +
> + vf->err_intr_registered = 0;
> +}
> +
> +static int
> +roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off,
> + uintptr_t base)
> +{
> + struct rte_pci_device *pci_dev = vf->pci_dev;
> + struct rte_intr_handle *handle = &pci_dev->intr_handle;
> + int ret;
> +
> + /* Disable error interrupts */
> + plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
> +
> + /* Register error interrupt handler */
> + ret = dev_irq_register(handle, roc_ree_lf_err_intr_handler,
> + (void *)base, msix_off);
> + if (ret)
> + return ret;
> +
> + /* Enable error interrupts */
> + plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1S);
> +
> + return 0;
> +}
> +
> +int
> +roc_ree_err_intr_register(struct roc_ree_vf *vf)
> +{
> + uint32_t i, j, ret;
> + uintptr_t base;
> +
> + for (i = 0; i < vf->nb_queues; i++) {
> + if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
> + plt_err("Invalid REE LF MSI-X offset: 0x%x",
> + vf->lf_msixoff[i]);
> + return -EINVAL;
> + }
> + }
> +
> + for (i = 0; i < vf->nb_queues; i++) {
> + base = REE_LF_BAR2(vf, i);
> + ret = roc_ree_lf_err_intr_register(vf, vf->lf_msixoff[i], base);
> + if (ret)
> + goto intr_unregister;
> + }
> +
> + vf->err_intr_registered = 1;
> + return 0;
> +
> +intr_unregister:
> + /* Unregister the ones already registered */
> + for (j = 0; j < i; j++) {
> + base = REE_LF_BAR2(vf, j);
> + roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[j], base);
> + }
> + return ret;
> +}
> +
> +int
> +roc_ree_iq_enable(struct roc_ree_vf *vf, const struct roc_ree_qp *qp,
> + uint8_t pri, uint32_t size_div2)
> +{
> + union ree_lf_sbuf_addr base;
> + union ree_lf_ena lf_ena;
> +
> + /* Set instruction queue size and priority */
> + roc_ree_config_lf(vf, qp->id, pri, size_div2);
> +
> + /* Set instruction queue base address */
> + /* Should be written after SBUF_CTL and before LF_ENA */
> +
> + base.u = plt_read64(qp->base + REE_LF_SBUF_ADDR);
> + base.s.ptr = qp->iq_dma_addr >> 7;
> + plt_write64(base.u, qp->base + REE_LF_SBUF_ADDR);
> +
> + /* Enable instruction queue */
> +
> + lf_ena.u = plt_read64(qp->base + REE_LF_ENA);
> + lf_ena.s.ena = 1;
> + plt_write64(lf_ena.u, qp->base + REE_LF_ENA);
> +
> + return 0;
> +}
> +
> +void
> +roc_ree_iq_disable(struct roc_ree_qp *qp)
> +{
> + union ree_lf_ena lf_ena;
> +
> + /* Stop instruction execution */
> + lf_ena.u = plt_read64(qp->base + REE_LF_ENA);
> + lf_ena.s.ena = 0x0;
> + plt_write64(lf_ena.u, qp->base + REE_LF_ENA);
> +}
> +
> +int
> +roc_ree_dev_init(struct roc_ree_vf *vf)
> +{
> + struct plt_pci_device *pci_dev;
> + struct ree *ree;
> + struct dev *dev;
> + uint8_t max_matches = 0;
> + uint16_t nb_queues = 0;
> + int rc;
> +
> + if (vf == NULL || vf->pci_dev == NULL)
> + return -EINVAL;
> +
> + PLT_STATIC_ASSERT(sizeof(struct ree) <= ROC_REE_MEM_SZ);
> +
> + ree = roc_ree_to_ree_priv(vf);
> + memset(ree, 0, sizeof(*ree));
> + vf->dev = &ree->dev;
> +
> + pci_dev = vf->pci_dev;
> + dev = vf->dev;
> +
> + /* Initialize device */
> + rc = dev_init(dev, pci_dev);
> + if (rc) {
> + plt_err("Failed to init roc device");
> + goto fail;
> + }
> +
> + /* Get REE block address */
> + vf->block_address = ree_get_blkaddr(dev);
> + if (!vf->block_address) {
> + plt_err("Could not determine block PF number");
> + goto fail;
> + }
> +
> + /* Get number of queues available on the device */
> + rc = roc_ree_available_queues_get(vf, &nb_queues);
> + if (rc) {
> + plt_err("Could not determine the number of queues available");
> + goto fail;
> + }
> +
> + /* Don't exceed the limits set per VF */
> + nb_queues = RTE_MIN(nb_queues, REE_MAX_QUEUES_PER_VF);
> +
> + if (nb_queues == 0) {
> + plt_err("No free queues available on the device");
> + goto fail;
> + }
> +
> + vf->max_queues = nb_queues;
> +
> + plt_ree_dbg("Max queues supported by device: %d", vf->max_queues);
> +
> + /* Get number of maximum matches supported on the device */
> + rc = roc_ree_max_matches_get(vf, &max_matches);
> + if (rc) {
> + plt_err("Could not determine the maximum matches supported");
> + goto fail;
> + }
> + /* Don't exceed the limits set per VF */
> + max_matches = RTE_MIN(max_matches, REE_MAX_MATCHES_PER_VF);
> + if (max_matches == 0) {
> + plt_err("Could not determine the maximum matches supported");
> + goto fail;
> + }
> +
> + vf->max_matches = max_matches;
> +
> + plt_ree_dbg("Max matches supported by device: %d", vf->max_matches);
> +fail:
> + return rc;
> +}
> +
> +int
> +roc_ree_dev_fini(struct roc_ree_vf *vf)
> +{
> + if (vf == NULL)
> + return -EINVAL;
> +
> + vf->max_matches = 0;
> + vf->max_queues = 0;
> +
> + return dev_fini(vf->dev, vf->pci_dev);
> +}
> diff --git a/drivers/common/cnxk/roc_ree.h b/drivers/common/cnxk/roc_ree.h
> new file mode 100644
> index 0000000000..e138e4de66
> --- /dev/null
> +++ b/drivers/common/cnxk/roc_ree.h
> @@ -0,0 +1,137 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef _ROC_REE_H_
> +#define _ROC_REE_H_
> +
> +#include "roc_api.h"
> +
> +#define REE_MAX_LFS 36
> +#define REE_MAX_QUEUES_PER_VF 36
> +#define REE_MAX_MATCHES_PER_VF 254
> +
> +#define REE_MAX_PAYLOAD_SIZE (1 << 14)
> +
> +#define REE_NON_INC_PROG 0
> +#define REE_INC_PROG 1
> +
> +#define REE_MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
> +
> +/**
> + * Device vf data
> + */
> +struct roc_ree_vf {
> + struct plt_pci_device *pci_dev;
> + struct dev *dev;
> + /**< Base class */
> + uint16_t max_queues;
> + /**< Max queues supported */
> + uint8_t nb_queues;
> + /**< Number of regex queues attached */
> + uint16_t max_matches;
> + /**< Max matches supported*/
> + uint16_t lf_msixoff[REE_MAX_LFS];
> + /**< MSI-X offsets */
> + uint8_t block_address;
> + /**< REE Block Address */
> + uint8_t err_intr_registered : 1;
> + /**< Are error interrupts registered? */
> +
> +#define ROC_REE_MEM_SZ (6 * 1024)
> + uint8_t reserved[ROC_REE_MEM_SZ] __plt_cache_aligned;
> +} __plt_cache_aligned;
> +
> +struct roc_ree_rid {
> + uintptr_t rid;
> + /** Request id of a ree operation */
> + uint64_t user_id;
> + /* Client data */
> + /**< IOVA address of the pattern to be matched. */
> +};
> +
> +struct roc_ree_pending_queue {
> + uint64_t pending_count;
> + /** Pending requests count */
> + struct roc_ree_rid *rid_queue;
> + /** Array of pending requests */
> + uint16_t enq_tail;
> + /** Tail of queue to be used for enqueue */
> + uint16_t deq_head;
> + /** Head of queue to be used for dequeue */
> +};
> +
> +struct roc_ree_qp {
> + uint32_t id;
> + /**< Queue pair id */
> + uintptr_t base;
> + /**< Base address where BAR is mapped */
> + struct roc_ree_pending_queue pend_q;
> + /**< Pending queue */
> + rte_iova_t iq_dma_addr;
> + /**< Instruction queue address */
> + uint32_t roc_regexdev_jobid;
> + /**< Job ID */
> + uint32_t write_offset;
> + /**< write offset */
> +};
> +
> +union roc_ree_inst {
> + uint64_t u[8];
> + struct {
> + uint64_t doneint : 1;
> + uint64_t reserved_1_3 : 3;
> + uint64_t dg : 1;
> + uint64_t reserved_5_7 : 3;
> + uint64_t ooj : 1;
> + uint64_t reserved_9_15 : 7;
> + uint64_t reserved_16_63 : 48;
> + uint64_t inp_ptr_addr : 64;
> + uint64_t inp_ptr_ctl : 64;
> + uint64_t res_ptr_addr : 64;
> + uint64_t wq_ptr : 64;
> + uint64_t tag : 32;
> + uint64_t tt : 2;
> + uint64_t ggrp : 10;
> + uint64_t reserved_364_383 : 20;
> + uint64_t reserved_384_391 : 8;
> + uint64_t ree_job_id : 24;
> + uint64_t ree_job_ctrl : 16;
> + uint64_t ree_job_length : 15;
> + uint64_t reserved_447_447 : 1;
> + uint64_t ree_job_subset_id_0 : 16;
> + uint64_t ree_job_subset_id_1 : 16;
> + uint64_t ree_job_subset_id_2 : 16;
> + uint64_t ree_job_subset_id_3 : 16;
> + } cn98xx;
> +};
> +
> +int __roc_api roc_ree_dev_init(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_dev_fini(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues);
> +int __roc_api roc_ree_queues_detach(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_msix_offsets_get(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri,
> + uint32_t size);
> +int __roc_api roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg,
> + uint64_t *val);
> +int __roc_api roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg,
> + uint64_t val);
> +int __roc_api roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db,
> + uint32_t rule_db_len, char *rule_dbi,
> + uint32_t rule_dbi_len);
> +int __roc_api roc_ree_rule_db_len_get(struct roc_ree_vf *vf,
> + uint32_t *rule_db_len,
> + uint32_t *rule_dbi_len);
> +int __roc_api roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
> + uint32_t rule_db_len, const char *rule_dbi,
> + uint32_t rule_dbi_len);
> +uintptr_t __roc_api roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id);
> +void __roc_api roc_ree_err_intr_unregister(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_err_intr_register(struct roc_ree_vf *vf);
> +int __roc_api roc_ree_iq_enable(struct roc_ree_vf *vf,
> + const struct roc_ree_qp *qp, uint8_t pri,
> + uint32_t size_div128);
> +void __roc_api roc_ree_iq_disable(struct roc_ree_qp *qp);
> +
> +#endif /* _ROC_REE_H_ */
> diff --git a/drivers/common/cnxk/roc_ree_priv.h b/drivers/common/cnxk/roc_ree_priv.h
> new file mode 100644
> index 0000000000..c39f7cf986
> --- /dev/null
> +++ b/drivers/common/cnxk/roc_ree_priv.h
> @@ -0,0 +1,18 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef _ROC_REE_PRIV_H_
> +#define _ROC_REE_PRIV_H_
> +
> +struct ree {
> + struct dev dev;
> +} __plt_cache_aligned;
> +
> +static inline struct ree *
> +roc_ree_to_ree_priv(struct roc_ree_vf *roc_ree)
> +{
> + return (struct ree *)&roc_ree->reserved[0];
> +}
> +
> +#endif /* _ROC_REE_PRIV_H_ */
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 926d5c2167..e1e96d41d0 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -11,6 +11,7 @@ INTERNAL {
> cnxk_logtype_nix;
> cnxk_logtype_npa;
> cnxk_logtype_npc;
> + cnxk_logtype_ree;
> cnxk_logtype_sso;
> cnxk_logtype_tim;
> cnxk_logtype_tm;
> @@ -314,6 +315,21 @@ INTERNAL {
> roc_tim_lf_enable;
> roc_tim_lf_free;
> roc_se_ctx_swap;
> -
> + roc_ree_af_reg_read;
> + roc_ree_af_reg_write;
> + roc_ree_config_lf;
> + roc_ree_dev_fini;
> + roc_ree_dev_init;
> + roc_ree_err_intr_register;
> + roc_ree_err_intr_unregister;
> + roc_ree_iq_disable;
> + roc_ree_iq_enable;
> + roc_ree_msix_offsets_get;
> + roc_ree_qp_get_base;
> + roc_ree_queues_attach;
> + roc_ree_queues_detach;
> + roc_ree_rule_db_get;
> + roc_ree_rule_db_len_get;
> + roc_ree_rule_db_prog;
> local: *;
> };
> --
> 2.28.0
>
new file mode 100644
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __REE_HW_H__
+#define __REE_HW_H__
+
+/* REE instruction queue length */
+#define REE_IQ_LEN (1 << 13)
+
+#define REE_DEFAULT_CMD_QLEN REE_IQ_LEN
+
+/* Status register bits */
+#define REE_STATUS_PMI_EOJ_BIT (1 << 14)
+#define REE_STATUS_PMI_SOJ_BIT (1 << 13)
+#define REE_STATUS_MP_CNT_DET_BIT (1 << 7)
+#define REE_STATUS_MM_CNT_DET_BIT (1 << 6)
+#define REE_STATUS_ML_CNT_DET_BIT (1 << 5)
+#define REE_STATUS_MST_CNT_DET_BIT (1 << 4)
+#define REE_STATUS_MPT_CNT_DET_BIT (1 << 3)
+
+/* Register offsets */
+/* REE LF registers */
+#define REE_LF_DONE_INT 0x120ull
+#define REE_LF_DONE_INT_W1S 0x130ull
+#define REE_LF_DONE_INT_ENA_W1S 0x138ull
+#define REE_LF_DONE_INT_ENA_W1C 0x140ull
+#define REE_LF_MISC_INT 0x300ull
+#define REE_LF_MISC_INT_W1S 0x310ull
+#define REE_LF_MISC_INT_ENA_W1S 0x320ull
+#define REE_LF_MISC_INT_ENA_W1C 0x330ull
+#define REE_LF_ENA 0x10ull
+#define REE_LF_SBUF_ADDR 0x20ull
+#define REE_LF_DONE 0x100ull
+#define REE_LF_DONE_ACK 0x110ull
+#define REE_LF_DONE_WAIT 0x148ull
+#define REE_LF_DOORBELL 0x400ull
+#define REE_LF_OUTSTAND_JOB 0x410ull
+
+/* BAR 0 */
+#define REE_AF_REEXM_MAX_MATCH (0x80c8ull)
+#define REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (uint64_t)(a) << 3)
+#define REE_PRIV_LF_CFG(a) (0x41000ull | (uint64_t)(a) << 3)
+
+#define REE_AF_QUEX_GMCTL(a) (0x800 | (a) << 3)
+
+#define REE_AF_INT_VEC_RAS (0x0ull)
+#define REE_AF_INT_VEC_RVU (0x1ull)
+#define REE_AF_INT_VEC_QUE_DONE (0x2ull)
+#define REE_AF_INT_VEC_AQ (0x3ull)
+
+/* ENUMS */
+
+#define REE_LF_INT_VEC_QUE_DONE (0x0ull)
+#define REE_LF_INT_VEC_MISC (0x1ull)
+
+#define REE_LF_BAR2(vf, q_id) \
+ ((vf)->dev->bar2 + (((vf)->block_address << 20) | ((q_id) << 12)))
+
+#define REE_QUEUE_HI_PRIO 0x1
+
+enum ree_desc_type_e {
+ REE_TYPE_JOB_DESC = 0x0,
+ REE_TYPE_RESULT_DESC = 0x1,
+ REE_TYPE_ENUM_LAST = 0x2
+};
+
+union ree_priv_lf_cfg {
+ uint64_t u;
+ struct {
+ uint64_t slot : 8;
+ uint64_t pf_func : 16;
+ uint64_t reserved_24_62 : 39;
+ uint64_t ena : 1;
+ } s;
+};
+
+union ree_lf_sbuf_addr {
+ uint64_t u;
+ struct {
+ uint64_t off : 7;
+ uint64_t ptr : 46;
+ uint64_t reserved_53_63 : 11;
+ } s;
+};
+
+union ree_lf_ena {
+ uint64_t u;
+ struct {
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+ } s;
+};
+
+union ree_af_reexm_max_match {
+ uint64_t u;
+ struct {
+ uint64_t max : 8;
+ uint64_t reserved_8_63 : 56;
+ } s;
+};
+
+union ree_lf_done {
+ uint64_t u;
+ struct {
+ uint64_t done : 20;
+ uint64_t reserved_20_63 : 44;
+ } s;
+};
+
+union ree_res_status {
+ uint64_t u;
+ struct {
+ uint64_t job_type : 3;
+ uint64_t mpt_cnt_det : 1;
+ uint64_t mst_cnt_det : 1;
+ uint64_t ml_cnt_det : 1;
+ uint64_t mm_cnt_det : 1;
+ uint64_t mp_cnt_det : 1;
+ uint64_t mode : 2;
+ uint64_t reserved_10_11 : 2;
+ uint64_t reserved_12_12 : 1;
+ uint64_t pmi_soj : 1;
+ uint64_t pmi_eoj : 1;
+ uint64_t reserved_15_15 : 1;
+ uint64_t reserved_16_63 : 48;
+ } s;
+};
+
+union ree_res {
+ uint64_t u[8];
+ struct ree_res_s_98 {
+ uint64_t done : 1;
+ uint64_t hwjid : 7;
+ uint64_t ree_res_job_id : 24;
+ uint64_t ree_res_status : 16;
+ uint64_t ree_res_dmcnt : 8;
+ uint64_t ree_res_mcnt : 8;
+ uint64_t ree_meta_ptcnt : 16;
+ uint64_t ree_meta_icnt : 16;
+ uint64_t ree_meta_lcnt : 16;
+ uint64_t ree_pmi_min_byte_ptr : 16;
+ uint64_t ree_err : 1;
+ uint64_t reserved_129_190 : 62;
+ uint64_t doneint : 1;
+ uint64_t reserved_192_255 : 64;
+ uint64_t reserved_256_319 : 64;
+ uint64_t reserved_320_383 : 64;
+ uint64_t reserved_384_447 : 64;
+ uint64_t reserved_448_511 : 64;
+ } s;
+};
+
+union ree_match {
+ uint64_t u;
+ struct {
+ uint64_t ree_rule_id : 32;
+ uint64_t start_ptr : 14;
+ uint64_t reserved_46_47 : 2;
+ uint64_t match_length : 15;
+ uint64_t reserved_63_6 : 1;
+ } s;
+};
+
+#endif /* __REE_HW_H__ */
@@ -130,6 +130,7 @@
#define RVU_BLOCK_TYPE_RAD (0xdull)
#define RVU_BLOCK_TYPE_DFA (0xeull)
#define RVU_BLOCK_TYPE_HNA (0xfull)
+#define RVU_BLOCK_TYPE_REE (0xeull)
#define RVU_BLOCK_ADDR_RVUM (0x0ull)
#define RVU_BLOCK_ADDR_LMT (0x1ull)
@@ -147,6 +148,8 @@
#define RVU_BLOCK_ADDR_NDC2 (0xeull)
#define RVU_BLOCK_ADDR_R_END (0x1full)
#define RVU_BLOCK_ADDR_R_START (0x14ull)
+#define RVU_BLOCK_ADDR_REE0 (0x14ull)
+#define RVU_BLOCK_ADDR_REE1 (0x15ull)
#define RVU_VF_INT_VEC_MBOX (0x0ull)
@@ -167,6 +170,7 @@
#define NPA_AF_BAR2_SEL (0x9000000ull)
#define CPT_AF_BAR2_SEL (0x9000000ull)
#define RVU_AF_BAR2_SEL (0x9000000ull)
+#define REE_AF_BAR2_SEL (0x9000000ull)
#define AF_BAR2_ALIASX(a, b) \
(0x9100000ull | (uint64_t)(a) << 12 | (uint64_t)(b))
@@ -177,6 +181,7 @@
#define NPA_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(0, b)
#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
#define RVU_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+#define REE_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
/* Structures definitions */
@@ -59,6 +59,7 @@ sources = files(
'roc_tim.c',
'roc_tim_irq.c',
'roc_utils.c',
+ 'roc_ree.c',
)
# Security common code
@@ -36,6 +36,7 @@
#include "hw/nix.h"
#include "hw/npa.h"
#include "hw/npc.h"
+#include "hw/ree.h"
#include "hw/rvu.h"
#include "hw/sdp.h"
#include "hw/sso.h"
@@ -86,6 +87,9 @@
#include "roc_ie_ot.h"
#include "roc_se.h"
+/* REE */
+#include "roc_ree.h"
+
/* HASH computation */
#include "roc_hash.h"
@@ -37,6 +37,8 @@
#define PCI_DEVID_CNXK_BPHY 0xA089
#define PCI_DEVID_CNXK_RVU_NIX_INL_PF 0xA0F0
#define PCI_DEVID_CNXK_RVU_NIX_INL_VF 0xA0F1
+#define PCI_DEVID_CNXK_RVU_REE_PF 0xA0f4
+#define PCI_DEVID_CNXK_RVU_REE_VF 0xA0f5
#define PCI_DEVID_CN9K_CGX 0xA059
#define PCI_DEVID_CN10K_RPM 0xA060
@@ -148,6 +148,16 @@ struct mbox_msghdr {
M(CPT_GET_CAPS, 0xBFD, cpt_caps_get, msg_req, cpt_caps_rsp_msg) \
M(CPT_GET_ENG_GRP, 0xBFF, cpt_eng_grp_get, cpt_eng_grp_req, \
cpt_eng_grp_rsp) \
+ /* REE mbox IDs (range 0xE00 - 0xFFF) */ \
+ M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, msg_rsp) \
+ M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg, \
+ ree_rd_wr_reg_msg) \
+ M(REE_RULE_DB_PROG, 0xE03, ree_rule_db_prog, ree_rule_db_prog_req_msg, \
+ msg_rsp) \
+ M(REE_RULE_DB_LEN_GET, 0xE04, ree_rule_db_len_get, ree_req_msg, \
+ ree_rule_db_len_rsp_msg) \
+ M(REE_RULE_DB_GET, 0xE05, ree_rule_db_get, ree_rule_db_get_req_msg, \
+ ree_rule_db_get_rsp_msg) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, \
msg_rsp) \
@@ -1394,6 +1404,96 @@ struct cpt_eng_grp_rsp {
uint8_t __io eng_grp_num;
};
+/* REE mailbox error codes
+ * Range 1001 - 1100.
+ */
+enum ree_af_status {
+ REE_AF_ERR_RULE_UNKNOWN_VALUE = -1001,
+ REE_AF_ERR_LF_NO_MORE_RESOURCES = -1002,
+ REE_AF_ERR_LF_INVALID = -1003,
+ REE_AF_ERR_ACCESS_DENIED = -1004,
+ REE_AF_ERR_RULE_DB_PARTIAL = -1005,
+ REE_AF_ERR_RULE_DB_EQ_BAD_VALUE = -1006,
+ REE_AF_ERR_RULE_DB_BLOCK_ALLOC_FAILED = -1007,
+ REE_AF_ERR_BLOCK_NOT_IMPLEMENTED = -1008,
+ REE_AF_ERR_RULE_DB_INC_OFFSET_TOO_BIG = -1009,
+ REE_AF_ERR_RULE_DB_OFFSET_TOO_BIG = -1010,
+ REE_AF_ERR_Q_IS_GRACEFUL_DIS = -1011,
+ REE_AF_ERR_Q_NOT_GRACEFUL_DIS = -1012,
+ REE_AF_ERR_RULE_DB_ALLOC_FAILED = -1013,
+ REE_AF_ERR_RULE_DB_TOO_BIG = -1014,
+ REE_AF_ERR_RULE_DB_GEQ_BAD_VALUE = -1015,
+ REE_AF_ERR_RULE_DB_LEQ_BAD_VALUE = -1016,
+ REE_AF_ERR_RULE_DB_WRONG_LENGTH = -1017,
+ REE_AF_ERR_RULE_DB_WRONG_OFFSET = -1018,
+ REE_AF_ERR_RULE_DB_BLOCK_TOO_BIG = -1019,
+ REE_AF_ERR_RULE_DB_SHOULD_FILL_REQUEST = -1020,
+ REE_AF_ERR_RULE_DBI_ALLOC_FAILED = -1021,
+ REE_AF_ERR_LF_WRONG_PRIORITY = -1022,
+ REE_AF_ERR_LF_SIZE_TOO_BIG = -1023,
+};
+
+/* REE mbox message formats */
+
+struct ree_req_msg {
+ struct mbox_msghdr hdr;
+ uint32_t __io blkaddr;
+};
+
+struct ree_lf_req_msg {
+ struct mbox_msghdr hdr;
+ uint32_t __io blkaddr;
+ uint32_t __io size;
+ uint8_t __io lf;
+ uint8_t __io pri;
+};
+
+struct ree_rule_db_prog_req_msg {
+ struct mbox_msghdr hdr;
+#define REE_RULE_DB_REQ_BLOCK_SIZE ((64ULL * 1024ULL) >> 1)
+ uint8_t __io rule_db[REE_RULE_DB_REQ_BLOCK_SIZE];
+ uint32_t __io blkaddr; /* REE0 or REE1 */
+ uint32_t __io total_len; /* total len of rule db */
+ uint32_t __io offset; /* offset of current rule db block */
+ uint16_t __io len; /* length of rule db block */
+ uint8_t __io is_last; /* is this the last block */
+ uint8_t __io is_incremental; /* is incremental flow */
+ uint8_t __io is_dbi; /* is rule db incremental */
+};
+
+struct ree_rule_db_get_req_msg {
+ struct mbox_msghdr hdr;
+ uint32_t __io blkaddr;
+ uint32_t __io offset; /* retrieve db from this offset */
+ uint8_t __io is_dbi; /* is request for rule db incremental */
+};
+
+struct ree_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ uint64_t __io reg_offset;
+ uint64_t __io *ret_val;
+ uint64_t __io val;
+ uint32_t __io blkaddr;
+ uint8_t __io is_write;
+};
+
+struct ree_rule_db_len_rsp_msg {
+ struct mbox_msghdr hdr;
+ uint32_t __io blkaddr;
+ uint32_t __io len;
+ uint32_t __io inc_len;
+};
+
+struct ree_rule_db_get_rsp_msg {
+ struct mbox_msghdr hdr;
+#define REE_RULE_DB_RSP_BLOCK_SIZE (15ULL * 1024ULL)
+ uint8_t __io rule_db[REE_RULE_DB_RSP_BLOCK_SIZE];
+ uint32_t __io total_len; /* total len of rule db */
+ uint32_t __io offset; /* offset of current rule db block */
+ uint16_t __io len; /* length of rule db block */
+ uint8_t __io is_last; /* is this the last block */
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -65,3 +65,4 @@ RTE_LOG_REGISTER(cnxk_logtype_npc, pmd.net.cnxk.flow, NOTICE);
RTE_LOG_REGISTER(cnxk_logtype_sso, pmd.event.cnxk, NOTICE);
RTE_LOG_REGISTER(cnxk_logtype_tim, pmd.event.cnxk.timer, NOTICE);
RTE_LOG_REGISTER(cnxk_logtype_tm, pmd.net.cnxk.tm, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE);
@@ -152,6 +152,7 @@ extern int cnxk_logtype_npc;
extern int cnxk_logtype_sso;
extern int cnxk_logtype_tim;
extern int cnxk_logtype_tm;
+extern int cnxk_logtype_ree;
#define plt_err(fmt, args...) \
RTE_LOG(ERR, PMD, "%s():%u " fmt "\n", __func__, __LINE__, ##args)
@@ -176,6 +177,7 @@ extern int cnxk_logtype_tm;
#define plt_sso_dbg(fmt, ...) plt_dbg(sso, fmt, ##__VA_ARGS__)
#define plt_tim_dbg(fmt, ...) plt_dbg(tim, fmt, ##__VA_ARGS__)
#define plt_tm_dbg(fmt, ...) plt_dbg(tm, fmt, ##__VA_ARGS__)
+#define plt_ree_dbg(fmt, ...) plt_dbg(ree, fmt, ##__VA_ARGS__)
/* Datapath logs */
#define plt_dp_err(fmt, args...) \
@@ -41,4 +41,7 @@
/* NIX Inline dev */
#include "roc_nix_inl_priv.h"
+/* REE */
+#include "roc_ree_priv.h"
+
#endif /* _ROC_PRIV_H_ */
new file mode 100644
@@ -0,0 +1,647 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+/* This is temporarily here */
+#define REE0_PF 19
+#define REE1_PF 20
+
+static int
+roc_ree_available_queues_get(struct roc_ree_vf *vf, uint16_t *nb_queues)
+{
+ struct free_rsrcs_rsp *rsp;
+ struct dev *dev = vf->dev;
+ int ret;
+
+ mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
+
+ ret = mbox_process_msg(dev->mbox, (void *)&rsp);
+ if (ret)
+ return -EIO;
+
+ if (vf->block_address == RVU_BLOCK_ADDR_REE0)
+ *nb_queues = rsp->ree0;
+ else
+ *nb_queues = rsp->ree1;
+ return 0;
+}
+
+static int
+roc_ree_max_matches_get(struct roc_ree_vf *vf, uint8_t *max_matches)
+{
+ uint64_t val;
+ int ret;
+
+ ret = roc_ree_af_reg_read(vf, REE_AF_REEXM_MAX_MATCH, &val);
+ if (ret)
+ return ret;
+
+ *max_matches = val;
+ return 0;
+}
+
+int
+roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues)
+{
+ struct rsrc_attach_req *req;
+ struct mbox *mbox;
+
+ mbox = vf->dev->mbox;
+ /* Ask AF to attach required LFs */
+ req = mbox_alloc_msg_attach_resources(mbox);
+ if (req == NULL) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ /* 1 LF = 1 queue */
+ req->reelfs = nb_queues;
+ req->ree_blkaddr = vf->block_address;
+
+ if (mbox_process(mbox) < 0)
+ return -EIO;
+
+ /* Update number of attached queues */
+ vf->nb_queues = nb_queues;
+
+ return 0;
+}
+
+int
+roc_ree_queues_detach(struct roc_ree_vf *vf)
+{
+ struct rsrc_detach_req *req;
+ struct mbox *mbox;
+
+ mbox = vf->dev->mbox;
+ req = mbox_alloc_msg_detach_resources(mbox);
+ if (req == NULL) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+ req->reelfs = true;
+ req->partial = true;
+ if (mbox_process(mbox) < 0)
+ return -EIO;
+
+ /* Queues have been detached */
+ vf->nb_queues = 0;
+
+ return 0;
+}
+
+int
+roc_ree_msix_offsets_get(struct roc_ree_vf *vf)
+{
+ struct msix_offset_rsp *rsp;
+ struct mbox *mbox;
+ uint32_t i, ret;
+
+ /* Get REE MSI-X vector offsets */
+ mbox = vf->dev->mbox;
+ mbox_alloc_msg_msix_offset(mbox);
+
+ ret = mbox_process_msg(mbox, (void *)&rsp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ if (vf->block_address == RVU_BLOCK_ADDR_REE0)
+ vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
+ else
+ vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
+ plt_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
+ }
+
+ return 0;
+}
+
+static int
+ree_send_mbox_msg(struct roc_ree_vf *vf)
+{
+ struct mbox *mbox = vf->dev->mbox;
+ int ret;
+
+ mbox_msg_send(mbox, 0);
+
+ ret = mbox_wait_for_rsp(mbox, 0);
+ if (ret < 0) {
+ plt_err("Could not get mailbox response");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri, uint32_t size)
+{
+ struct ree_lf_req_msg *req;
+ struct mbox *mbox;
+ int ret;
+
+ mbox = vf->dev->mbox;
+ req = mbox_alloc_msg_ree_config_lf(mbox);
+ if (req == NULL) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ req->lf = lf;
+ req->pri = pri ? 1 : 0;
+ req->size = size;
+ req->blkaddr = vf->block_address;
+
+ ret = mbox_process(mbox);
+ if (ret < 0) {
+ plt_err("Could not get mailbox response");
+ return ret;
+ }
+ return 0;
+}
+
+int
+roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg, uint64_t *val)
+{
+ struct ree_rd_wr_reg_msg *msg;
+ struct mbox_dev *mdev;
+ struct mbox *mbox;
+ int ret, off;
+
+ mbox = vf->dev->mbox;
+ mdev = &mbox->dev[0];
+ msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
+ mbox, 0, sizeof(*msg), sizeof(*msg));
+ if (msg == NULL) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
+ msg->hdr.sig = MBOX_REQ_SIG;
+ msg->hdr.pcifunc = vf->dev->pf_func;
+ msg->is_write = 0;
+ msg->reg_offset = reg;
+ msg->ret_val = val;
+ msg->blkaddr = vf->block_address;
+
+ ret = ree_send_mbox_msg(vf);
+ if (ret < 0)
+ return ret;
+
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msg = (struct ree_rd_wr_reg_msg *)((uintptr_t)mdev->mbase + off);
+
+ *val = msg->val;
+
+ return 0;
+}
+
+int
+roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg, uint64_t val)
+{
+ struct ree_rd_wr_reg_msg *msg;
+ struct mbox *mbox;
+
+ mbox = vf->dev->mbox;
+ msg = (struct ree_rd_wr_reg_msg *)mbox_alloc_msg_rsp(
+ mbox, 0, sizeof(*msg), sizeof(*msg));
+ if (msg == NULL) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
+ msg->hdr.sig = MBOX_REQ_SIG;
+ msg->hdr.pcifunc = vf->dev->pf_func;
+ msg->is_write = 1;
+ msg->reg_offset = reg;
+ msg->val = val;
+ msg->blkaddr = vf->block_address;
+
+ return ree_send_mbox_msg(vf);
+}
+
+int
+roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db, uint32_t rule_db_len,
+ char *rule_dbi, uint32_t rule_dbi_len)
+{
+ struct ree_rule_db_get_req_msg *req;
+ struct ree_rule_db_get_rsp_msg *rsp;
+ char *rule_db_ptr = (char *)rule_db;
+ struct mbox *mbox;
+ int ret, last = 0;
+ uint32_t len = 0;
+
+ mbox = vf->dev->mbox;
+ if (!rule_db) {
+ plt_err("Couldn't return rule db due to NULL pointer");
+ return -EFAULT;
+ }
+
+ while (!last) {
+ req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
+ mbox, 0, sizeof(*req), sizeof(*rsp));
+ if (!req) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
+ req->hdr.sig = MBOX_REQ_SIG;
+ req->hdr.pcifunc = vf->dev->pf_func;
+ req->blkaddr = vf->block_address;
+ req->is_dbi = 0;
+ req->offset = len;
+ ret = mbox_process_msg(mbox, (void *)&rsp);
+ if (ret)
+ return ret;
+ if (rule_db_len < len + rsp->len) {
+ plt_err("Rule db size is too small");
+ return -EFAULT;
+ }
+ mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
+ len += rsp->len;
+ rule_db_ptr = rule_db_ptr + rsp->len;
+ last = rsp->is_last;
+ }
+
+ if (rule_dbi) {
+ req = (struct ree_rule_db_get_req_msg *)mbox_alloc_msg_rsp(
+ mbox, 0, sizeof(*req), sizeof(*rsp));
+ if (!req) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
+ req->hdr.sig = MBOX_REQ_SIG;
+ req->hdr.pcifunc = vf->dev->pf_func;
+ req->blkaddr = vf->block_address;
+ req->is_dbi = 1;
+ req->offset = 0;
+
+ ret = mbox_process_msg(mbox, (void *)&rsp);
+ if (ret)
+ return ret;
+ if (rule_dbi_len < rsp->len) {
+ plt_err("Rule dbi size is too small");
+ return -EFAULT;
+ }
+ mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
+ }
+ return 0;
+}
+
+int
+roc_ree_rule_db_len_get(struct roc_ree_vf *vf, uint32_t *rule_db_len,
+ uint32_t *rule_dbi_len)
+{
+ struct ree_rule_db_len_rsp_msg *rsp;
+ struct ree_req_msg *req;
+ struct mbox *mbox;
+ int ret;
+
+ mbox = vf->dev->mbox;
+ req = (struct ree_req_msg *)mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(*rsp));
+ if (!req) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
+ req->hdr.sig = MBOX_REQ_SIG;
+ req->hdr.pcifunc = vf->dev->pf_func;
+ req->blkaddr = vf->block_address;
+ ret = mbox_process_msg(mbox, (void *)&rsp);
+ if (ret)
+ return ret;
+ if (rule_db_len != NULL)
+ *rule_db_len = rsp->len;
+ if (rule_dbi_len != NULL)
+ *rule_dbi_len = rsp->inc_len;
+
+ return 0;
+}
+
+static int
+ree_db_msg(struct roc_ree_vf *vf, const char *db, uint32_t db_len, int inc,
+ int dbi)
+{
+ uint32_t len_left = db_len, offset = 0;
+ struct ree_rule_db_prog_req_msg *req;
+ const char *rule_db_ptr = db;
+ struct mbox *mbox;
+ struct msg_rsp *rsp;
+ int ret;
+
+ mbox = vf->dev->mbox;
+ while (len_left) {
+ req = (struct ree_rule_db_prog_req_msg *)mbox_alloc_msg_rsp(
+ mbox, 0, sizeof(*req), sizeof(*rsp));
+ if (!req) {
+ plt_err("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+ req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
+ req->hdr.sig = MBOX_REQ_SIG;
+ req->hdr.pcifunc = vf->dev->pf_func;
+ req->offset = offset;
+ req->total_len = db_len;
+ req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
+ req->is_incremental = inc;
+ req->is_dbi = dbi;
+ req->blkaddr = vf->block_address;
+
+ if (len_left < REE_RULE_DB_REQ_BLOCK_SIZE) {
+ req->is_last = true;
+ req->len = len_left;
+ }
+ mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
+ ret = mbox_process_msg(mbox, (void *)&rsp);
+ if (ret) {
+ plt_err("Programming mailbox processing failed");
+ return ret;
+ }
+ len_left -= req->len;
+ offset += req->len;
+ rule_db_ptr = rule_db_ptr + req->len;
+ }
+ return 0;
+}
+
+int
+roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
+ uint32_t rule_db_len, const char *rule_dbi,
+ uint32_t rule_dbi_len)
+{
+ int inc, ret;
+
+ if (rule_db_len == 0) {
+ plt_err("Couldn't program empty rule db");
+ return -EFAULT;
+ }
+ inc = (rule_dbi_len != 0);
+ if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
+ plt_err("Couldn't program NULL rule db");
+ return -EFAULT;
+ }
+ if (inc) {
+ ret = ree_db_msg(vf, rule_dbi, rule_dbi_len, inc, 1);
+ if (ret)
+ return ret;
+ }
+ return ree_db_msg(vf, rule_db, rule_db_len, inc, 0);
+}
+
+static int
+ree_get_blkaddr(struct dev *dev)
+{
+ int pf;
+
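+	/* Map the REE PF number to its RVU block address; 0 if not an REE PF */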
+ pf = dev_get_pf(dev->pf_func);
+ if (pf == REE0_PF)
+ return RVU_BLOCK_ADDR_REE0;
+ else if (pf == REE1_PF)
+ return RVU_BLOCK_ADDR_REE1;
+ else
+ return 0;
+}
+
+uintptr_t
+roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id)
+{
+ return REE_LF_BAR2(vf, qp_id);
+}
+
+static void
+roc_ree_lf_err_intr_handler(void *param)
+{
+ uintptr_t base = (uintptr_t)param;
+ uint8_t lf_id;
+ uint64_t intr;
+
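+	/* The LF id is encoded in bits [19:12] of the per-LF BAR2 base address */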
+ lf_id = (base >> 12) & 0xFF;
+
+ intr = plt_read64(base + REE_LF_MISC_INT);
+ if (intr == 0)
+ return;
+
+ plt_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
+
+ /* Clear interrupt */
+ plt_write64(intr, base + REE_LF_MISC_INT);
+}
+
+static void
+roc_ree_lf_err_intr_unregister(struct roc_ree_vf *vf, uint16_t msix_off,
+ uintptr_t base)
+{
+	struct plt_pci_device *pci_dev = vf->pci_dev;
+	struct plt_intr_handle *handle = &pci_dev->intr_handle;
+
+ /* Disable error interrupts */
+ plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
+
+ dev_irq_unregister(handle, roc_ree_lf_err_intr_handler, (void *)base,
+ msix_off);
+}
+
+void
+roc_ree_err_intr_unregister(struct roc_ree_vf *vf)
+{
+ uintptr_t base;
+ uint32_t i;
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ base = REE_LF_BAR2(vf, i);
+ roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[i], base);
+ }
+
+ vf->err_intr_registered = 0;
+}
+
+static int
+roc_ree_lf_err_intr_register(struct roc_ree_vf *vf, uint16_t msix_off,
+ uintptr_t base)
+{
+	struct plt_pci_device *pci_dev = vf->pci_dev;
+	struct plt_intr_handle *handle = &pci_dev->intr_handle;
+ int ret;
+
+ /* Disable error interrupts */
+ plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1C);
+
+ /* Register error interrupt handler */
+ ret = dev_irq_register(handle, roc_ree_lf_err_intr_handler,
+ (void *)base, msix_off);
+ if (ret)
+ return ret;
+
+ /* Enable error interrupts */
+ plt_write64(~0ull, base + REE_LF_MISC_INT_ENA_W1S);
+
+ return 0;
+}
+
+int
+roc_ree_err_intr_register(struct roc_ree_vf *vf)
+{
+	uintptr_t base;
+	uint32_t i, j;
+	int ret;
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
+ plt_err("Invalid REE LF MSI-X offset: 0x%x",
+ vf->lf_msixoff[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ base = REE_LF_BAR2(vf, i);
+ ret = roc_ree_lf_err_intr_register(vf, vf->lf_msixoff[i], base);
+ if (ret)
+ goto intr_unregister;
+ }
+
+ vf->err_intr_registered = 1;
+ return 0;
+
+intr_unregister:
+ /* Unregister the ones already registered */
+ for (j = 0; j < i; j++) {
+ base = REE_LF_BAR2(vf, j);
+ roc_ree_lf_err_intr_unregister(vf, vf->lf_msixoff[j], base);
+ }
+ return ret;
+}
+
+int
+roc_ree_iq_enable(struct roc_ree_vf *vf, const struct roc_ree_qp *qp,
+		  uint8_t pri, uint32_t size_div128)
+{
+ union ree_lf_sbuf_addr base;
+ union ree_lf_ena lf_ena;
+
+ /* Set instruction queue size and priority */
+	roc_ree_config_lf(vf, qp->id, pri, size_div128);
+
+ /* Set instruction queue base address */
+ /* Should be written after SBUF_CTL and before LF_ENA */
+
+ base.u = plt_read64(qp->base + REE_LF_SBUF_ADDR);
+ base.s.ptr = qp->iq_dma_addr >> 7;
+ plt_write64(base.u, qp->base + REE_LF_SBUF_ADDR);
+
+ /* Enable instruction queue */
+
+ lf_ena.u = plt_read64(qp->base + REE_LF_ENA);
+ lf_ena.s.ena = 1;
+ plt_write64(lf_ena.u, qp->base + REE_LF_ENA);
+
+ return 0;
+}
+
+void
+roc_ree_iq_disable(struct roc_ree_qp *qp)
+{
+ union ree_lf_ena lf_ena;
+
+ /* Stop instruction execution */
+ lf_ena.u = plt_read64(qp->base + REE_LF_ENA);
+ lf_ena.s.ena = 0x0;
+ plt_write64(lf_ena.u, qp->base + REE_LF_ENA);
+}
+
+int
+roc_ree_dev_init(struct roc_ree_vf *vf)
+{
+ struct plt_pci_device *pci_dev;
+ struct ree *ree;
+ struct dev *dev;
+ uint8_t max_matches = 0;
+ uint16_t nb_queues = 0;
+ int rc;
+
+ if (vf == NULL || vf->pci_dev == NULL)
+ return -EINVAL;
+
+ PLT_STATIC_ASSERT(sizeof(struct ree) <= ROC_REE_MEM_SZ);
+
+ ree = roc_ree_to_ree_priv(vf);
+ memset(ree, 0, sizeof(*ree));
+ vf->dev = &ree->dev;
+
+ pci_dev = vf->pci_dev;
+ dev = vf->dev;
+
+ /* Initialize device */
+ rc = dev_init(dev, pci_dev);
+ if (rc) {
+ plt_err("Failed to init roc device");
+ goto fail;
+ }
+
+ /* Get REE block address */
+ vf->block_address = ree_get_blkaddr(dev);
+	if (!vf->block_address) {
+		plt_err("Could not determine the REE block address");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+ /* Get number of queues available on the device */
+ rc = roc_ree_available_queues_get(vf, &nb_queues);
+ if (rc) {
+ plt_err("Could not determine the number of queues available");
+ goto fail;
+ }
+
+ /* Don't exceed the limits set per VF */
+	nb_queues = PLT_MIN(nb_queues, REE_MAX_QUEUES_PER_VF);
+
+	if (nb_queues == 0) {
+		plt_err("No free queues available on the device");
+		rc = -ENODEV;
+		goto fail;
+	}
+
+ vf->max_queues = nb_queues;
+
+ plt_ree_dbg("Max queues supported by device: %d", vf->max_queues);
+
+ /* Get number of maximum matches supported on the device */
+ rc = roc_ree_max_matches_get(vf, &max_matches);
+ if (rc) {
+ plt_err("Could not determine the maximum matches supported");
+ goto fail;
+ }
+ /* Don't exceed the limits set per VF */
+	max_matches = PLT_MIN(max_matches, REE_MAX_MATCHES_PER_VF);
+	if (max_matches == 0) {
+		plt_err("No matches supported on the device");
+		rc = -ENOTSUP;
+		goto fail;
+	}
+
+ vf->max_matches = max_matches;
+
+	plt_ree_dbg("Max matches supported by device: %d", vf->max_matches);
+
+	return 0;
+fail:
+ return rc;
+}
+
+int
+roc_ree_dev_fini(struct roc_ree_vf *vf)
+{
+ if (vf == NULL)
+ return -EINVAL;
+
+ vf->max_matches = 0;
+ vf->max_queues = 0;
+
+ return dev_fini(vf->dev, vf->pci_dev);
+}
diff --git a/drivers/common/cnxk/roc_ree.h b/drivers/common/cnxk/roc_ree.h
new file mode 100644
--- /dev/null
+++ b/drivers/common/cnxk/roc_ree.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _ROC_REE_H_
+#define _ROC_REE_H_
+
+#include "roc_api.h"
+
+#define REE_MAX_LFS 36
+#define REE_MAX_QUEUES_PER_VF 36
+#define REE_MAX_MATCHES_PER_VF 254
+
+#define REE_MAX_PAYLOAD_SIZE (1 << 14)
+
+#define REE_NON_INC_PROG 0
+#define REE_INC_PROG 1
+
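+/* Increment i modulo l (wraps to 0 after l - 1) */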
+#define REE_MOD_INC(i, l) ((i) == ((l) - 1) ? (i) = 0 : (i)++)
+
+/**
+ * Device VF data
+ */
+struct roc_ree_vf {
+ struct plt_pci_device *pci_dev;
+ struct dev *dev;
+ /**< Base class */
+ uint16_t max_queues;
+ /**< Max queues supported */
+ uint8_t nb_queues;
+ /**< Number of regex queues attached */
+ uint16_t max_matches;
+	/**< Max matches supported */
+ uint16_t lf_msixoff[REE_MAX_LFS];
+ /**< MSI-X offsets */
+ uint8_t block_address;
+ /**< REE Block Address */
+ uint8_t err_intr_registered : 1;
+ /**< Are error interrupts registered? */
+
+#define ROC_REE_MEM_SZ (6 * 1024)
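+	/* Backs the ROC-private struct ree, see roc_ree_priv.h */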
+ uint8_t reserved[ROC_REE_MEM_SZ] __plt_cache_aligned;
+} __plt_cache_aligned;
+
+struct roc_ree_rid {
+	uintptr_t rid;
+	/**< Request id of a REE operation */
+	uint64_t user_id;
+	/**< Client data */
+};
+
+struct roc_ree_pending_queue {
+	uint64_t pending_count;
+	/**< Pending requests count */
+	struct roc_ree_rid *rid_queue;
+	/**< Array of pending requests */
+	uint16_t enq_tail;
+	/**< Tail of queue to be used for enqueue */
+	uint16_t deq_head;
+	/**< Head of queue to be used for dequeue */
+};
+
+struct roc_ree_qp {
+ uint32_t id;
+ /**< Queue pair id */
+ uintptr_t base;
+ /**< Base address where BAR is mapped */
+ struct roc_ree_pending_queue pend_q;
+ /**< Pending queue */
+	plt_iova_t iq_dma_addr;
+ /**< Instruction queue address */
+ uint32_t roc_regexdev_jobid;
+ /**< Job ID */
+ uint32_t write_offset;
+ /**< write offset */
+};
+
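+/** REE instruction word layout (8 x 64-bit words), cn98xx variant */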
+union roc_ree_inst {
+ uint64_t u[8];
+ struct {
+ uint64_t doneint : 1;
+ uint64_t reserved_1_3 : 3;
+ uint64_t dg : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t ooj : 1;
+ uint64_t reserved_9_15 : 7;
+ uint64_t reserved_16_63 : 48;
+ uint64_t inp_ptr_addr : 64;
+ uint64_t inp_ptr_ctl : 64;
+ uint64_t res_ptr_addr : 64;
+ uint64_t wq_ptr : 64;
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_364_383 : 20;
+ uint64_t reserved_384_391 : 8;
+ uint64_t ree_job_id : 24;
+ uint64_t ree_job_ctrl : 16;
+ uint64_t ree_job_length : 15;
+ uint64_t reserved_447_447 : 1;
+ uint64_t ree_job_subset_id_0 : 16;
+ uint64_t ree_job_subset_id_1 : 16;
+ uint64_t ree_job_subset_id_2 : 16;
+ uint64_t ree_job_subset_id_3 : 16;
+ } cn98xx;
+};
+
+int __roc_api roc_ree_dev_init(struct roc_ree_vf *vf);
+int __roc_api roc_ree_dev_fini(struct roc_ree_vf *vf);
+int __roc_api roc_ree_queues_attach(struct roc_ree_vf *vf, uint8_t nb_queues);
+int __roc_api roc_ree_queues_detach(struct roc_ree_vf *vf);
+int __roc_api roc_ree_msix_offsets_get(struct roc_ree_vf *vf);
+int __roc_api roc_ree_config_lf(struct roc_ree_vf *vf, uint8_t lf, uint8_t pri,
+ uint32_t size);
+int __roc_api roc_ree_af_reg_read(struct roc_ree_vf *vf, uint64_t reg,
+ uint64_t *val);
+int __roc_api roc_ree_af_reg_write(struct roc_ree_vf *vf, uint64_t reg,
+ uint64_t val);
+int __roc_api roc_ree_rule_db_get(struct roc_ree_vf *vf, char *rule_db,
+ uint32_t rule_db_len, char *rule_dbi,
+ uint32_t rule_dbi_len);
+int __roc_api roc_ree_rule_db_len_get(struct roc_ree_vf *vf,
+ uint32_t *rule_db_len,
+ uint32_t *rule_dbi_len);
+int __roc_api roc_ree_rule_db_prog(struct roc_ree_vf *vf, const char *rule_db,
+ uint32_t rule_db_len, const char *rule_dbi,
+ uint32_t rule_dbi_len);
+uintptr_t __roc_api roc_ree_qp_get_base(struct roc_ree_vf *vf, uint16_t qp_id);
+void __roc_api roc_ree_err_intr_unregister(struct roc_ree_vf *vf);
+int __roc_api roc_ree_err_intr_register(struct roc_ree_vf *vf);
+int __roc_api roc_ree_iq_enable(struct roc_ree_vf *vf,
+ const struct roc_ree_qp *qp, uint8_t pri,
+ uint32_t size_div128);
+void __roc_api roc_ree_iq_disable(struct roc_ree_qp *qp);
+
+#endif /* _ROC_REE_H_ */
diff --git a/drivers/common/cnxk/roc_ree_priv.h b/drivers/common/cnxk/roc_ree_priv.h
new file mode 100644
--- /dev/null
+++ b/drivers/common/cnxk/roc_ree_priv.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _ROC_REE_PRIV_H_
+#define _ROC_REE_PRIV_H_
+
+struct ree {
+ struct dev dev;
+} __plt_cache_aligned;
+
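+/* The ROC-private ree context is carved out of the reserved area of struct roc_ree_vf */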
+static inline struct ree *
+roc_ree_to_ree_priv(struct roc_ree_vf *roc_ree)
+{
+ return (struct ree *)&roc_ree->reserved[0];
+}
+
+#endif /* _ROC_REE_PRIV_H_ */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -11,6 +11,7 @@ INTERNAL {
cnxk_logtype_nix;
cnxk_logtype_npa;
cnxk_logtype_npc;
+ cnxk_logtype_ree;
cnxk_logtype_sso;
cnxk_logtype_tim;
cnxk_logtype_tm;
@@ -314,6 +315,21 @@ INTERNAL {
roc_tim_lf_enable;
roc_tim_lf_free;
roc_se_ctx_swap;
-
+ roc_ree_af_reg_read;
+ roc_ree_af_reg_write;
+ roc_ree_config_lf;
+ roc_ree_dev_fini;
+ roc_ree_dev_init;
+ roc_ree_err_intr_register;
+ roc_ree_err_intr_unregister;
+ roc_ree_iq_disable;
+ roc_ree_iq_enable;
+ roc_ree_msix_offsets_get;
+ roc_ree_qp_get_base;
+ roc_ree_queues_attach;
+ roc_ree_queues_detach;
+ roc_ree_rule_db_get;
+ roc_ree_rule_db_len_get;
+ roc_ree_rule_db_prog;
local: *;
};
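
To illustrate how these exports fit together, below is a minimal, hypothetical probe-time sequence for a regex PMD built on this ROC layer. The function name ree_pmd_probe_sketch, the single-queue configuration and the error handling are illustrative assumptions and not part of this patch; the roc_ree_* calls and types are the ones declared in roc_ree.h above.

/* Illustrative sketch only: a hypothetical caller of the ROC REE API.
 * The rule database (db, db_len) is assumed to be supplied by the
 * application; per-queue IQ setup is omitted for brevity.
 */
#include <string.h>

#include "roc_api.h"
#include "roc_ree.h"

static int
ree_pmd_probe_sketch(struct roc_ree_vf *vf, struct plt_pci_device *pci_dev,
		     const char *db, uint32_t db_len)
{
	int rc;

	memset(vf, 0, sizeof(*vf));
	vf->pci_dev = pci_dev;

	rc = roc_ree_dev_init(vf);
	if (rc)
		return rc;

	/* Attach one queue, then fetch MSI-X offsets before enabling IRQs */
	rc = roc_ree_queues_attach(vf, 1);
	if (rc)
		goto fini;
	rc = roc_ree_msix_offsets_get(vf);
	if (rc)
		goto detach;
	rc = roc_ree_err_intr_register(vf);
	if (rc)
		goto detach;

	/* Program a non-incremental rule database */
	rc = roc_ree_rule_db_prog(vf, db, db_len, NULL, 0);
	if (rc)
		goto unregister;

	return 0;

unregister:
	roc_ree_err_intr_unregister(vf);
detach:
	roc_ree_queues_detach(vf);
fini:
	roc_ree_dev_fini(vf);
	return rc;
}

Per-queue setup would additionally allocate the instruction queue memory, fill a struct roc_ree_qp using roc_ree_qp_get_base() and the IQ DMA address, and call roc_ree_iq_enable(); teardown would call roc_ree_iq_disable() before reversing the sequence above.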