[dpdk-dev,v6,20/22] i40e: take flow director flexible payload configuration

Message ID 1416530816-2159-21-git-send-email-jingjing.wu@intel.com (mailing list archive)
State Accepted, archived

Commit Message

Jingjing Wu Nov. 21, 2014, 12:46 a.m. UTC
  Configure flexible payload and flex mask in the i40e driver.
It includes argument verification and HW setting.

Signed-off-by: jingjing.wu <jingjing.wu@intel.com>
---
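For context, a minimal sketch of how an application might populate the flex
configuration that this patch validates and programs. The structures and field
names (fdir_conf.flex_conf, nb_payloads, flex_set, src_offset, nb_flexmasks,
flex_mask) follow the code in the patch; the helper name, the UDPV4 flow type
value, the 16-byte flex length and the surrounding port setup are illustrative
assumptions, not part of this patch.

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

/*
 * Hypothetical helper: fill dev_conf.fdir_conf.flex_conf so that the driver
 * extracts 16 bytes of L4 payload (16 assumes I40E_FDIR_MAX_FLEX_LEN as used
 * in this patch) and compares only the first two of them for UDPv4 flows.
 * Flow director itself must also be enabled in fdir_conf (not shown).
 */
static void
example_fill_flex_conf(struct rte_eth_conf *port_conf)
{
	struct rte_eth_fdir_flex_conf *flex_conf =
			&port_conf->fdir_conf.flex_conf;
	uint16_t i;

	/* one payload rule: flex byte i comes from L4 payload byte i */
	flex_conf->nb_payloads = 1;
	flex_conf->flex_set[0].type = RTE_ETH_L4_PAYLOAD;
	for (i = 0; i < 16; i++)
		flex_conf->flex_set[0].src_offset[i] = i;

	/* one mask rule: compare only the first flex word of UDPv4 flows */
	flex_conf->nb_flexmasks = 1;
	flex_conf->flex_mask[0].flow_type = RTE_ETH_FLOW_TYPE_UDPV4;
	memset(flex_conf->flex_mask[0].mask, 0,
	       sizeof(flex_conf->flex_mask[0].mask));
	flex_conf->flex_mask[0].mask[0] = 0xFF;
	flex_conf->flex_mask[0].mask[1] = 0xFF;
}

The application then passes port_conf to rte_eth_dev_configure(); the checks
added in i40e_check_fdir_flex_conf() below reject offsets, sizes or mask
layouts the hardware cannot express.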
 lib/librte_pmd_i40e/i40e_fdir.c | 283 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 283 insertions(+)
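The mask handling added below (i40e_check_fdir_flex_conf() and
i40e_set_flex_mask_on_pctype()) works on 16-bit words of the per-byte mask: an
all-zero word is not compared, an all-ones word is compared via the word mask,
and any other pattern consumes one of the limited per-pctype bitmask slots and
is stored inverted before being written to hardware. The standalone sketch
below mirrors that classification; the constants are illustrative stand-ins,
not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define FLEX_LEN          16  /* bytes of flex payload (cf. I40E_FDIR_MAX_FLEX_LEN) */
#define BITMASK_NUM_WORD   2  /* partial-word slots (cf. I40E_FDIR_BITMASK_NUM_WORD) */

/* same byte-pair combination as the patch's I40E_WORD(hi, lo) */
#define WORD(hi, lo) ((uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF)))

int
main(void)
{
	/* per-byte mask as an application would pass it in flex_mask[].mask */
	uint8_t mask[FLEX_LEN] = {
		0xFF, 0xFF,	/* word 0: fully compared          */
		0xF0, 0x00,	/* word 1: partial -> bitmask slot */
		0x00, 0x00,	/* word 2: ignored                 */
		/* remaining bytes default to 0x00: ignored */
	};
	uint8_t word_mask = 0, nb_bitmask = 0;
	unsigned int i;

	for (i = 0; i < FLEX_LEN; i += 2) {
		uint16_t w = WORD(mask[i], mask[i + 1]);

		if (w == 0x0)
			continue;		/* word not compared at all */
		word_mask |= 0x80 >> (i / 2);	/* word takes part in comparison */
		if (w != UINT16_MAX) {
			/* partial word: the driver stores the inverted mask */
			printf("bitmask slot %u: word offset %u, hw mask 0x%04x\n",
			       (unsigned)nb_bitmask, i / 2,
			       (unsigned)(uint16_t)~w);
			nb_bitmask++;
		}
	}
	printf("word_mask 0x%02x, %u of %u bitmask slots used\n",
	       (unsigned)word_mask, (unsigned)nb_bitmask,
	       (unsigned)BITMASK_NUM_WORD);
	return 0;
}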
  

Patch
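i40e_srcoff_to_flx_pit() in the patch rewrites the per-destination-byte
src_offset[] array into (src_offset, size, dst_offset) entries describing
contiguous runs, which are then programmed into the PRTQF_FLX_PIT registers.
The following standalone sketch shows the same grouping on sample data; names
and values are illustrative, and the driver additionally keeps the entries
sorted by src_offset and enforces word (2-byte) alignment before programming
the hardware.

#include <stdint.h>
#include <stdio.h>

#define FLEX_LEN 16  /* illustrative; matches I40E_FDIR_MAX_FLEX_LEN in the patch */

struct flx_run {
	uint16_t src_offset;	/* first payload byte extracted       */
	uint16_t size;		/* number of consecutive bytes        */
	uint16_t dst_offset;	/* position in the 16-byte flex area  */
};

int
main(void)
{
	/*
	 * src_offset[d] = payload byte copied into flex byte d.  Here flex
	 * bytes 0-11 take payload bytes 4-15 and flex bytes 12-15 take
	 * payload bytes 20-23, i.e. two contiguous runs.
	 */
	uint16_t src_offset[FLEX_LEN] = {
		 4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		20, 21, 22, 23
	};
	struct flx_run runs[FLEX_LEN];
	uint16_t num = 0;
	uint16_t j, start = 0;

	for (j = 1; j <= FLEX_LEN; j++) {
		/* close the current run when the next byte is not contiguous */
		if (j == FLEX_LEN || src_offset[j] != src_offset[j - 1] + 1) {
			runs[num].src_offset = src_offset[start];
			runs[num].size = j - start;
			runs[num].dst_offset = start;
			num++;
			start = j;
		}
	}
	for (j = 0; j < num; j++)
		printf("run %u: src %u, size %u, dst %u\n", (unsigned)j,
		       (unsigned)runs[j].src_offset, (unsigned)runs[j].size,
		       (unsigned)runs[j].dst_offset);
	return 0;
}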

diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c
index c452a80..54f3e24 100644
--- a/lib/librte_pmd_i40e/i40e_fdir.c
+++ b/lib/librte_pmd_i40e/i40e_fdir.c
@@ -109,6 +109,13 @@ 
 #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
 
 static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
+static int i40e_check_fdir_flex_conf(
+	const struct rte_eth_fdir_flex_conf *conf);
+static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+			 const struct rte_eth_flex_payload_cfg *cfg);
+static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+		enum i40e_filter_pctype pctype,
+		const struct rte_eth_fdir_flex_mask *mask_cfg);
 static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
 				     const struct rte_eth_fdir_input *fdir_input,
 				     unsigned char *raw_pkt);
@@ -364,6 +371,261 @@  i40e_init_flx_pld(struct i40e_pf *pf)
 	}
 }
 
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+
+#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
+	if ((flex_pit2).src_offset < \
+		(flex_pit1).src_offset + (flex_pit1).size) { \
+		PMD_DRV_LOG(ERR, "src_offset should not be" \
+			" less than the previous offset" \
+			" + previous FSIZE."); \
+		return -EINVAL; \
+	} \
+} while (0)
+
+/*
+ * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit structures,
+ * and the flex_pit entries will be sorted by their src_offset values
+ */
+static inline uint16_t
+i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
+			struct i40e_fdir_flex_pit *flex_pit)
+{
+	uint16_t src_tmp, size, num = 0;
+	uint16_t i, k, j = 0;
+
+	while (j < I40E_FDIR_MAX_FLEX_LEN) {
+		size = 1;
+		for (; j < I40E_FDIR_MAX_FLEX_LEN; j++) {
+			if (src_offset[j + 1] == src_offset[j] + 1)
+				size++;
+			else {
+				src_tmp = src_offset[j] + 1 - size;
+				/* the flex_pit entries need to be sorted by src_offset */
+				for (i = 0; i < num; i++) {
+					if (src_tmp < flex_pit[i].src_offset)
+						break;
+				}
+				/* if insert required, move backward */
+				for (k = num; k > i; k--)
+					flex_pit[k] = flex_pit[k - 1];
+				/* insert */
+				flex_pit[i].dst_offset = j + 1 - size;
+				flex_pit[i].src_offset = src_tmp;
+				flex_pit[i].size = size;
+				j++;
+				num++;
+				break;
+			}
+		}
+	}
+	return num;
+}
+
+/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
+static inline int
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+{
+	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+	uint16_t num, i;
+
+	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
+			return -EINVAL;
+		}
+	}
+
+	memset(flex_pit, 0, sizeof(flex_pit));
+	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+	if (num > I40E_MAX_FLXPLD_FIED) {
+		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
+		return -EINVAL;
+	}
+	for (i = 0; i < num; i++) {
+		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
+			flex_pit[i].src_offset & 0x01) {
+			PMD_DRV_LOG(ERR, "flex payload should be measured"
+				" in words");
+			return -EINVAL;
+		}
+		if (i != num - 1)
+			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
+	}
+	return 0;
+}
+
+/*
+ * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
+ * arguments are valid
+ */
+static int
+i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
+{
+	const struct rte_eth_flex_payload_cfg *flex_cfg;
+	const struct rte_eth_fdir_flex_mask *flex_mask;
+	uint16_t mask_tmp;
+	uint8_t nb_bitmask;
+	uint16_t i, j;
+	int ret = 0;
+
+	if (conf == NULL) {
+		PMD_DRV_LOG(INFO, "NULL pointer.");
+		return -EINVAL;
+	}
+	/* check flexible payload setting configuration */
+	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
+		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
+		return -EINVAL;
+	}
+	for (i = 0; i < conf->nb_payloads; i++) {
+		flex_cfg = &conf->flex_set[i];
+		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
+			PMD_DRV_LOG(ERR, "invalid payload type.");
+			return -EINVAL;
+		}
+		ret = i40e_check_fdir_flex_payload(flex_cfg);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
+			return -EINVAL;
+		}
+	}
+
+	/* check flex mask setting configuration */
+	if (conf->nb_flexmasks > RTE_ETH_FLOW_TYPE_FRAG_IPV6) {
+		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
+		return -EINVAL;
+	}
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		flex_mask = &conf->flex_mask[i];
+		if (!I40E_VALID_FLOW_TYPE(flex_mask->flow_type)) {
+			PMD_DRV_LOG(WARNING, "invalid flow type.");
+			return -EINVAL;
+		}
+		nb_bitmask = 0;
+		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
+			mask_tmp = I40E_WORD(flex_mask->mask[j],
+					     flex_mask->mask[j + 1]);
+			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
+				nb_bitmask++;
+				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
+					PMD_DRV_LOG(ERR, "exceeds maximal"
+						" number of bitmasks.");
+					return -EINVAL;
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
+ * @pf: board private structure
+ * @cfg: the rule defining how the byte stream is extracted as flexible payload
+ */
+static void
+i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+			 const struct rte_eth_flex_payload_cfg *cfg)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
+	uint32_t flx_pit;
+	uint16_t num, min_next_off;  /* in words */
+	uint8_t field_idx = 0;
+	uint8_t layer_idx = 0;
+	uint16_t i;
+
+	if (cfg->type == RTE_ETH_L2_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L2_IDX;
+	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L3_IDX;
+	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L4_IDX;
+
+	memset(flex_pit, 0, sizeof(flex_pit));
+	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+
+	for (i = 0; i < num; i++) {
+		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+		/* record the info in fdir structure */
+		pf->fdir.flex_set[field_idx].src_offset =
+			flex_pit[i].src_offset / sizeof(uint16_t);
+		pf->fdir.flex_set[field_idx].size =
+			flex_pit[i].size / sizeof(uint16_t);
+		pf->fdir.flex_set[field_idx].dst_offset =
+			flex_pit[i].dst_offset / sizeof(uint16_t);
+		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+				pf->fdir.flex_set[field_idx].size,
+				pf->fdir.flex_set[field_idx].dst_offset);
+
+		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+	}
+	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+				pf->fdir.flex_set[field_idx].size;
+
+	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+		/* set the unused registers, obeying the register constraints */
+		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+			   NONUSE_FLX_PIT_DEST_OFF);
+		I40E_WRITE_REG(hw,
+			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
+			flx_pit);
+		min_next_off++;
+	}
+}
+
+/*
+ * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
+ * @pf: board private structure
+ * @pctype: packet classification type
+ * @mask_cfg: mask for flexible payload
+ */
+static void
+i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+		enum i40e_filter_pctype pctype,
+		const struct rte_eth_fdir_flex_mask *mask_cfg)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_flex_mask *flex_mask;
+	uint32_t flxinset, fd_mask;
+	uint16_t mask_tmp;
+	uint8_t i, nb_bitmask = 0;
+
+	flex_mask = &pf->fdir.flex_mask[pctype];
+	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
+		if (mask_tmp != 0x0) {
+			flex_mask->word_mask |=
+				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+			if (mask_tmp != UINT16_MAX) {
+				/* set bit mask */
+				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
+				flex_mask->bitmask[nb_bitmask].offset =
+					i / sizeof(uint16_t);
+				nb_bitmask++;
+			}
+		}
+	}
+	/* write mask to hw */
+	flxinset = (flex_mask->word_mask <<
+		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+	I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+	for (i = 0; i < nb_bitmask; i++) {
+		fd_mask = (flex_mask->bitmask[i].mask <<
+			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+			I40E_PRTQF_FD_MSK_MASK_MASK;
+		fd_mask |= ((flex_mask->bitmask[i].offset +
+			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+			I40E_PRTQF_FD_MSK_OFFSET_MASK;
+		I40E_WRITE_REG(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+	}
+}
+
 /*
  * Configure flow director related setting
  */
@@ -372,7 +634,10 @@  i40e_fdir_configure(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_fdir_flex_conf *conf;
+	enum i40e_filter_pctype pctype;
 	uint32_t val;
+	uint8_t i;
 	int ret = 0;
 
 	/*
@@ -396,6 +661,24 @@  i40e_fdir_configure(struct rte_eth_dev *dev)
 		I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
 
 		i40e_init_flx_pld(pf); /* set flex config to default value */
+
+		conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+		ret = i40e_check_fdir_flex_conf(conf);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "invalid configuration arguments.");
+			return -EINVAL;
+		}
+		/* configure flex payload */
+		for (i = 0; i < conf->nb_payloads; i++)
+			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+		/* configure flex mask */
+		for (i = 0; i < conf->nb_flexmasks; i++) {
+			pctype = i40e_flowtype_to_pctype(
+				conf->flex_mask[i].flow_type);
+			i40e_set_flex_mask_on_pctype(pf,
+					pctype,
+					&conf->flex_mask[i]);
+		}
 	} else {
 		/* disable FDIR filter */
 		val &= ~I40E_PFQF_CTL_0_FD_ENA_MASK;