[v1,4/5] net/cpfl: add fxp rule module
Commit Message
Added low level fxp module for rule packing / creation / destroying.
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.h | 4 +
drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_fxp_rule.h | 87 ++++++++++
drivers/net/cpfl/meson.build | 1 +
4 files changed, 380 insertions(+)
create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
Comments
> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
>
> Added low level fxp module for rule packing / creation / destroying.
>
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.h | 4 +
> drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h | 87 ++++++++++
> drivers/net/cpfl/meson.build | 1 +
> 4 files changed, 380 insertions(+)
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index c71f16ac60..63bcc5551f 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -145,10 +145,14 @@ enum cpfl_itf_type {
>
> TAILQ_HEAD(cpfl_flow_list, rte_flow);
>
> +#define CPFL_FLOW_BATCH_SIZE 490
> struct cpfl_itf {
> enum cpfl_itf_type type;
> struct cpfl_adapter_ext *adapter;
> struct cpfl_flow_list flow_list;
> + struct idpf_dma_mem flow_dma;
> + struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> + struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
> void *data;
> };
>
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..936f57e4fa
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,288 @@
<...>
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
> + struct idpf_ctlq_msg q_msg[])
> +{
> + int retries = 0;
> + struct idpf_dma_mem *dma;
> + uint16_t i;
> + uint16_t buff_cnt;
> + int ret = 0;
> +
> + retries = 0;
> + while (retries <= CTLQ_RECEIVE_RETRIES) {
> + rte_delay_us_sleep(10);
> + ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
> +
> + if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
> + ret != CPFL_ERR_CTLQ_ERROR) {
> + PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
> + retries++;
> + continue;
> + }
> +
> + if (ret == CPFL_ERR_CTLQ_NO_WORK) {
> + retries++;
> + continue;
> + }
> +
> + if (ret == CPFL_ERR_CTLQ_EMPTY)
> + break;
> +
> + ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
> + if (ret) {
> + PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
> + break;
Don't break here; we still need to post the buffers back to the receive ring.
Please check the internal fix patch.
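A rough sketch of what I mean (untested; the internal fix patch may differ in detail): keep the error in ret but fall through so the buffers are still posted back, e.g.

	ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
	if (ret)
		PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");

	/* post the receive buffers back to the ring even if processing failed */
	for (i = 0; i < num_q_msg; i++) {
		if (q_msg[i].data_len > 0)
			dma = q_msg[i].ctx.indirect.payload;
		else
			dma = NULL;

		buff_cnt = dma ? 1 : 0;
		if (cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma))
			PMD_INIT_LOG(WARNING, "could not post recv bufs");
	}
	break;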
> + }
> +
> + for (i = 0; i < num_q_msg; i++) {
> + if (q_msg[i].data_len > 0)
> + dma = q_msg[i].ctx.indirect.payload;
> + else
> + dma = NULL;
> +
> + buff_cnt = dma ? 1 : 0;
> + ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
> + if (ret)
> + PMD_INIT_LOG(WARNING, "could not posted recv bufs\n");
> + }
> + break;
> + }
> +
> + if (retries > CTLQ_RECEIVE_RETRIES) {
> + PMD_INIT_LOG(ERR, "timed out while polling for receive response");
> + ret = -1;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
Please follow the function naming style. How about cpfl_mod_rule_pack?
> + struct idpf_ctlq_msg *msg)
<...>
> +
> +static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
'static int' should go on a separate line.
And please rename to cpfl_default_rule_pack.
> + struct idpf_ctlq_msg *msg, bool add)
> +{
<...>
> +
> +static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
'static int' should go on a separate line.
And please rename to cpfl_rule_pack; see the sketch below.
> + struct idpf_ctlq_msg *msg, bool add)
> +{
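For example, keeping the same parameters but using the driver's cpfl_ prefix and putting the storage class and return type on their own line:

	static int
	cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
			       struct idpf_ctlq_msg *msg, bool add)

	static int
	cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		       struct idpf_ctlq_msg *msg, bool add)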
> + int ret = 0;
> +
> + if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> + if (pack_default_rule(rinfo, dma, msg, add) < 0)
> + ret = -1;
> + } else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
> + if (pack_mod_rule(rinfo, dma, msg) < 0)
> + ret = -1;
> + }
Need to check for an invalid rinfo->type, e.g. CPFL_RULE_TYPE_LEM?
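For instance, a minimal guard (exact log text and return value up to you):

	} else {
		PMD_INIT_LOG(ERR, "Invalid rule type %d", rinfo->type);
		ret = -1;
	}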
> +
> + return ret;
> +}
> +
> +int
> +cpfl_rule_update(struct cpfl_itf *itf,
> + struct idpf_ctlq_info *tx_cq,
> + struct idpf_ctlq_info *rx_cq,
> + struct cpfl_rule_info *rinfo,
> + int rule_num,
> + bool add)
> +{
> + struct idpf_hw *hw = &itf->adapter->base.hw;
> + int i;
> + int ret = 0;
> +
> + if (rule_num == 0)
> + return 0;
> +
> + for (i = 0; i < rule_num; i++) {
> + ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Could not create rule");
Should the message be "Could not pack rule"?
> + return ret;
> + }
> + }
> + ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to send rule");
> + return ret;
> + }
> + ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to create rule");
Is this function for updating a rule or creating one?
The function name is rule_update, but it seems to create a rule.
> + return ret;
> + }
> +
> + return 0;
> +}
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
> new file mode 100644
> index 0000000000..68efa8e3f8
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.h
> @@ -0,0 +1,87 @@
<...>
> +
> +int cpfl_rule_update(struct cpfl_itf *itf,
> + struct idpf_ctlq_info *tx_cq,
> + struct idpf_ctlq_info *rx_cq,
> + struct cpfl_rule_info *rinfo,
> + int rule_num,
> + bool add);
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
> + struct idpf_ctlq_msg q_msg[]);
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
No need for the new line after the return type in these prototypes.
> + struct idpf_ctlq_msg q_msg[]);
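I.e. the prototypes could simply be (same signatures, just joined):

	int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
			       struct idpf_ctlq_msg q_msg[]);
	int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
				  struct idpf_ctlq_msg q_msg[]);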
> +#endif /*CPFL_FXP_RULE_H*/
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
> index 222497f7c2..4061123034 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -46,6 +46,7 @@ if js_dep.found()
> 'cpfl_flow_parser.c',
> 'cpfl_rules.c',
> 'cpfl_controlq.c',
> + 'cpfl_fxp_rule.c',
> )
> dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
> ext_deps += js_dep
> --
> 2.25.1
> -----Original Message-----
> From: Zhang, Yuying <yuying.zhang@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.xing@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: Zhang, Yuying <yuying.zhang@intel.com>
> Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
>
> Added low level fxp module for rule packing / creation / destroying.
>
> Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.h | 4 +
> drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h | 87 ++++++++++
> drivers/net/cpfl/meson.build | 1 +
> 4 files changed, 380 insertions(+)
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index c71f16ac60..63bcc5551f 100644
<...>
> +struct cpfl_lem_rule_info {
> + uint16_t prof_id;
> + uint8_t key[CPFL_MAX_KEY_LEN];
> + uint8_t key_byte_len;
> + uint8_t pin_to_cache;
> + uint8_t fixed_fetch;
> +};
Remove LEM related structures and members below.
> +#define CPFL_MAX_MOD_CONTENT_LEN 256
> +struct cpfl_mod_rule_info {
> + uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
> + uint8_t mod_content_byte_len;
> + uint32_t mod_index;
> + uint8_t pin_mod_content;
> + uint8_t mod_obj_size;
> +};
> +
> +enum cpfl_rule_type {
> + CPFL_RULE_TYPE_NONE,
> + CPFL_RULE_TYPE_SEM,
> + CPFL_RULE_TYPE_LEM,
> + CPFL_RULE_TYPE_MOD
> +};
> +
> +struct cpfl_rule_info {
> + enum cpfl_rule_type type;
> + uint64_t cookie;
> + uint8_t host_id;
> + uint8_t port_num;
> + uint8_t resp_req;
> + /* TODO: change this to be dynamically allocated/reallocated */
> + uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
> + uint8_t act_byte_len;
> + /* vsi is used for lem and lpm rules */
> + uint16_t vsi;
> + uint8_t clear_mirror_1st_state;
> + /* mod related fields */
> + union {
> + struct cpfl_mod_rule_info mod;
> + struct cpfl_sem_rule_info sem;
> + struct cpfl_lem_rule_info lem;
> + };
> +};
> +
> +struct cpfl_meter_action_info {
> + uint8_t meter_logic_bank_id;
> + uint32_t meter_logic_idx;
> + uint8_t prof_id;
> + uint8_t slot;
> +};
Remove the meter-related structure.
@@ -145,10 +145,14 @@ enum cpfl_itf_type {
TAILQ_HEAD(cpfl_flow_list, rte_flow);
+#define CPFL_FLOW_BATCH_SIZE 490
struct cpfl_itf {
enum cpfl_itf_type type;
struct cpfl_adapter_ext *adapter;
struct cpfl_flow_list flow_list;
+ struct idpf_dma_mem flow_dma;
+ struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+ struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
void *data;
};
new file mode 100644
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+ struct idpf_ctlq_msg q_msg[])
+{
+ struct idpf_ctlq_msg **msg_ptr_list;
+ uint16_t clean_count = 0;
+ int num_cleaned = 0;
+ int retries = 0;
+ int ret = 0;
+
+ msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+ if (!msg_ptr_list) {
+ PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+ goto send_err;
+ }
+
+ while (retries <= CTLQ_SEND_RETRIES) {
+ clean_count = num_q_msg - num_cleaned;
+ ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+ &msg_ptr_list[num_cleaned]);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+ goto send_err;
+ }
+
+ num_cleaned += clean_count;
+ retries++;
+ if (num_cleaned >= num_q_msg)
+ break;
+ rte_delay_us_sleep(10);
+ }
+
+ if (retries > CTLQ_SEND_RETRIES) {
+ PMD_INIT_LOG(ERR, "timed out while polling for completions");
+ ret = -1;
+ goto send_err;
+ }
+
+send_err:
+ if (msg_ptr_list)
+ free(msg_ptr_list);
+err:
+ return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(uint16_t num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+ uint16_t i;
+ int ret = 0;
+
+ if (!num_q_msg || !q_msg)
+ return -EINVAL;
+
+ for (i = 0; i < num_q_msg; i++) {
+ switch (q_msg[i].status) {
+ case CPFL_CFG_PKT_ERR_OK:
+ continue;
+ case CPFL_CFG_PKT_ERR_EEXIST:
+ PMD_INIT_LOG(ERR, "The rule has confliction with already existed one");
+ return -EINVAL;
+ case CPFL_CFG_PKT_ERR_ENOTFND:
+ PMD_INIT_LOG(ERR, "The rule has already deleted");
+ return -EINVAL;
+ default:
+ PMD_INIT_LOG(ERR, "Invalid rule");
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+ struct idpf_ctlq_msg q_msg[])
+{
+ int retries = 0;
+ struct idpf_dma_mem *dma;
+ uint16_t i;
+ uint16_t buff_cnt;
+ int ret = 0;
+
+ retries = 0;
+ while (retries <= CTLQ_RECEIVE_RETRIES) {
+ rte_delay_us_sleep(10);
+ ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+ if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+ ret != CPFL_ERR_CTLQ_ERROR) {
+ PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+ retries++;
+ continue;
+ }
+
+ if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+ retries++;
+ continue;
+ }
+
+ if (ret == CPFL_ERR_CTLQ_EMPTY)
+ break;
+
+ ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+ if (ret) {
+ PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
+ break;
+ }
+
+ for (i = 0; i < num_q_msg; i++) {
+ if (q_msg[i].data_len > 0)
+ dma = q_msg[i].ctx.indirect.payload;
+ else
+ dma = NULL;
+
+ buff_cnt = dma ? 1 : 0;
+ ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "could not posted recv bufs\n");
+ }
+ break;
+ }
+
+ if (retries > CTLQ_RECEIVE_RETRIES) {
+ PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int
+pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg)
+{
+ struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+ union cpfl_rule_cfg_pkt_record *blob = NULL;
+ struct cpfl_rule_cfg_data cfg = {0};
+
+ /* prepare rule blob */
+ if (!dma->va) {
+ PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+ return -1;
+ }
+ blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+ memset(blob, 0, sizeof(*blob));
+ memset(&cfg, 0, sizeof(cfg));
+
+ /* fill info for both query and add/update */
+ cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+ minfo->pin_mod_content,
+ minfo->mod_index,
+ &cfg.ext.mod_content);
+
+ /* only fill content for add/update */
+ memcpy(blob->mod_blob, minfo->mod_content,
+ minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+ /* pack message */
+ cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+ rinfo->cookie,
+ 0, /* vsi_id not used for mod */
+ rinfo->port_num,
+ NO_HOST_NEEDED,
+ 0, /* time_sel */
+ 0, /* time_sel_val */
+ 0, /* cache_wr_thru */
+ rinfo->resp_req,
+ (u16)sizeof(*blob),
+ (void *)dma,
+ &cfg.common);
+ cpfl_prep_rule_desc(&cfg, msg);
+ return 0;
+}
+
+static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg, bool add)
+{
+ union cpfl_rule_cfg_pkt_record *blob = NULL;
+ enum cpfl_ctlq_rule_cfg_opc opc;
+ struct cpfl_rule_cfg_data cfg;
+ uint16_t cfg_ctrl;
+
+ if (!dma->va) {
+ PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+ return -1;
+ }
+ blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+ memset(blob, 0, sizeof(*blob));
+ memset(msg, 0, sizeof(*msg));
+
+ if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+ cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+ rinfo->sem.sub_prof_id,
+ rinfo->sem.pin_to_cache,
+ rinfo->sem.fixed_fetch);
+ cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+ rinfo->act_bytes, rinfo->act_byte_len,
+ cfg_ctrl, blob);
+ opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+ }
+
+ cpfl_fill_rule_cfg_data_common(opc,
+ rinfo->cookie,
+ rinfo->vsi,
+ rinfo->port_num,
+ rinfo->host_id,
+ 0, /* time_sel */
+ 0, /* time_sel_val */
+ 0, /* cache_wr_thru */
+ rinfo->resp_req,
+ sizeof(union cpfl_rule_cfg_pkt_record),
+ dma,
+ &cfg.common);
+ cpfl_prep_rule_desc(&cfg, msg);
+ return 0;
+}
+
+static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg, bool add)
+{
+ int ret = 0;
+
+ if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+ if (pack_default_rule(rinfo, dma, msg, add) < 0)
+ ret = -1;
+ } else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+ if (pack_mod_rule(rinfo, dma, msg) < 0)
+ ret = -1;
+ }
+
+ return ret;
+}
+
+int
+cpfl_rule_update(struct cpfl_itf *itf,
+ struct idpf_ctlq_info *tx_cq,
+ struct idpf_ctlq_info *rx_cq,
+ struct cpfl_rule_info *rinfo,
+ int rule_num,
+ bool add)
+{
+ struct idpf_hw *hw = &itf->adapter->base.hw;
+ int i;
+ int ret = 0;
+
+ if (rule_num == 0)
+ return 0;
+
+ for (i = 0; i < rule_num; i++) {
+ ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not create rule");
+ return ret;
+ }
+ }
+ ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to send rule");
+ return ret;
+ }
+ ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to create rule");
+ return ret;
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+ uint16_t prof_id;
+ uint8_t sub_prof_id;
+ uint8_t key[CPFL_MAX_KEY_LEN];
+ uint8_t key_byte_len;
+ uint8_t pin_to_cache;
+ uint8_t fixed_fetch;
+};
+
+struct cpfl_lem_rule_info {
+ uint16_t prof_id;
+ uint8_t key[CPFL_MAX_KEY_LEN];
+ uint8_t key_byte_len;
+ uint8_t pin_to_cache;
+ uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+ uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+ uint8_t mod_content_byte_len;
+ uint32_t mod_index;
+ uint8_t pin_mod_content;
+ uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+ CPFL_RULE_TYPE_NONE,
+ CPFL_RULE_TYPE_SEM,
+ CPFL_RULE_TYPE_LEM,
+ CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+ enum cpfl_rule_type type;
+ uint64_t cookie;
+ uint8_t host_id;
+ uint8_t port_num;
+ uint8_t resp_req;
+ /* TODO: change this to be dynamically allocated/reallocated */
+ uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+ uint8_t act_byte_len;
+ /* vsi is used for lem and lpm rules */
+ uint16_t vsi;
+ uint8_t clear_mirror_1st_state;
+ /* mod related fields */
+ union {
+ struct cpfl_mod_rule_info mod;
+ struct cpfl_sem_rule_info sem;
+ struct cpfl_lem_rule_info lem;
+ };
+};
+
+struct cpfl_meter_action_info {
+ uint8_t meter_logic_bank_id;
+ uint32_t meter_logic_idx;
+ uint8_t prof_id;
+ uint8_t slot;
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_update(struct cpfl_itf *itf,
+ struct idpf_ctlq_info *tx_cq,
+ struct idpf_ctlq_info *rx_cq,
+ struct cpfl_rule_info *rinfo,
+ int rule_num,
+ bool add);
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
@@ -46,6 +46,7 @@ if js_dep.found()
'cpfl_flow_parser.c',
'cpfl_rules.c',
'cpfl_controlq.c',
+ 'cpfl_fxp_rule.c',
)
dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
ext_deps += js_dep