[2/9] net/cxgbe: validate flows offloaded to HASH region

Message ID 79796390c63f1a8e3c67053c026c6079e8abb0ca.1530295732.git.rahul.lakkireddy@chelsio.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series net/cxgbe: add support for offloading flows to HASH region

Checks

Context               Check     Description
ci/checkpatch         success   coding style OK
ci/Intel-compilation  success   Compilation OK

Commit Message

Rahul Lakkireddy June 29, 2018, 6:12 p.m. UTC
  From: Shagun Agrawal <shaguna@chelsio.com>

Fetch the match fields supported by the HASH region. Ensure the mask
is fully set for every supported match field before offloading a flow
to the HASH region. Otherwise, offload the flow to the LE-TCAM region.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
---
 drivers/net/cxgbe/base/common.h  |  3 +++
 drivers/net/cxgbe/base/t4_hw.c   |  7 +++++
 drivers/net/cxgbe/base/t4_regs.h |  3 +++
 drivers/net/cxgbe/cxgbe_filter.h |  1 +
 drivers/net/cxgbe/cxgbe_flow.c   | 57 ++++++++++++++++++++++++++++++++++++++++
 5 files changed, 71 insertions(+)
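
As an illustration of the rule described in the commit message, below is a
minimal sketch (not part of this patch) of an rte_flow rule whose IPv4
addresses and TCP ports are fully masked, which is the precondition
cxgbe_fill_filter_region() checks before marking a flow for the HASH region;
a rule with a partial mask stays in the LE-TCAM region instead. The port_id,
addresses, ports and queue index are made-up values, field names follow the
current rte_flow headers, and whether such a rule actually lands in the HASH
region still depends on the hash filter mask programmed in hardware.

/* Sketch only: fully-masked 4-tuple rule eligible for the HASH region. */
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

static int
validate_exact_match_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
			.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {	/* all-ones: exact match */
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
		},
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = {
			.src_port = RTE_BE16(1234),
			.dst_port = RTE_BE16(80),
		},
	};
	struct rte_flow_item_tcp tcp_mask = {	/* all-ones: exact match */
		.hdr = {
			.src_port = RTE_BE16(0xffff),
			.dst_port = RTE_BE16(0xffff),
		},
	};

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
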
  

Patch

diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index a276a1ef5..ea3e112b9 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -156,6 +156,9 @@  struct tp_params {
 	int vnic_shift;
 	int port_shift;
 	int protocol_shift;
+	int ethertype_shift;
+
+	u64 hash_filter_mask;
 };
 
 struct vpd_params {
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 66d080476..0893b7ba0 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -5032,6 +5032,8 @@  int t4_init_tp_params(struct adapter *adap)
 	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
 	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
 							       F_PROTOCOL);
+	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+								F_ETHERTYPE);
 
 	/*
 	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
@@ -5040,6 +5042,11 @@  int t4_init_tp_params(struct adapter *adap)
 	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
 		adap->params.tp.vnic_shift = -1;
 
+	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+	adap->params.tp.hash_filter_mask = v;
+	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
+
 	return 0;
 }
 
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index a1f6208ea..cbaf415fa 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -946,3 +946,6 @@ 
 #define F_HASHEN    V_HASHEN(1U)
 
 #define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index 6758a1879..27421a475 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -86,6 +86,7 @@  struct ch_filter_specification {
 	 * matching that doesn't exist as a (value, mask) tuple.
 	 */
 	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */
+	uint32_t cap:1;		/* 0 => LE-TCAM, 1 => Hash */
 
 	/*
 	 * Packet dispatch information.  Ingress packets which match the
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index 23b7d00b3..dfb5fac2b 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -48,6 +48,58 @@  cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
 	return 0;
 }
 
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+			 struct ch_filter_specification *fs)
+{
+	struct tp_params *tp = &adap->params.tp;
+	u64 hash_filter_mask = tp->hash_filter_mask;
+	u64 ntuple_mask = 0;
+
+	fs->cap = 0;
+
+	if (!is_hashfilter(adap))
+		return;
+
+	if (fs->type) {
+		uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff,
+				     0xff, 0xff, 0xff, 0xff};
+		uint8_t bitoff[16] = {0};
+
+		if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, biton, sizeof(biton)))
+			return;
+	} else {
+		uint32_t biton  = 0xffffffff;
+		uint32_t bitoff = 0x0U;
+
+		if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+		    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+		    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+		    memcmp(fs->mask.fip, &biton, sizeof(biton)))
+			return;
+	}
+
+	if (!fs->val.lport || fs->mask.lport != 0xffff)
+		return;
+	if (!fs->val.fport || fs->mask.fport != 0xffff)
+		return;
+
+	if (tp->protocol_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+	if (tp->ethertype_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+
+	if (ntuple_mask != hash_filter_mask)
+		return;
+
+	fs->cap = 1;	/* use hash region */
+}
+
 static int
 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
 		     struct ch_filter_specification *fs,
@@ -222,6 +274,8 @@  cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
 static int
 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
 {
+	if (flow->fs.cap)
+		return 0; /* Hash filters */
 	return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
 		cxgbe_validate_fidxonadd(&flow->fs,
 					 ethdev2adap(flow->dev), fidx);
@@ -329,6 +383,7 @@  cxgbe_rtef_parse_items(struct rte_flow *flow,
 		       const struct rte_flow_item items[],
 		       struct rte_flow_error *e)
 {
+	struct adapter *adap = ethdev2adap(flow->dev);
 	const struct rte_flow_item *i;
 	char repeat[ARRAY_SIZE(parseitem)] = {0};
 
@@ -369,6 +424,8 @@  cxgbe_rtef_parse_items(struct rte_flow *flow,
 		}
 	}
 
+	cxgbe_fill_filter_region(adap, &flow->fs);
+
 	return 0;
 }
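
For reference, here is a small standalone sketch of the region-selection
arithmetic introduced in cxgbe_fill_filter_region() above. The shift values
and hash mask below are hypothetical; the driver derives the real shifts from
t4_filter_field_shift() and builds the 64-bit reference mask from the
LE_3/LE_4_DB_HASH_MASK_GEN_IPV4_T6_A registers read in t4_init_tp_params().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical compressed-filter-tuple layout for illustration only. */
#define PROTOCOL_SHIFT   0	/* assumed shift of the IP protocol field */
#define ETHERTYPE_SHIFT  23	/* assumed shift of the ethertype field   */

int main(void)
{
	/* Mask the hardware hashes on (normally read from the LE registers). */
	uint64_t hash_filter_mask = ((uint64_t)0xffff << ETHERTYPE_SHIFT) |
				    ((uint64_t)0xff << PROTOCOL_SHIFT);

	/* Masks taken from a candidate filter specification. */
	uint8_t  proto_mask   = 0xff;	/* fully masked IP protocol */
	uint16_t ethtype_mask = 0xffff;	/* fully masked ethertype   */

	uint64_t ntuple_mask = ((uint64_t)ethtype_mask << ETHERTYPE_SHIFT) |
			       ((uint64_t)proto_mask << PROTOCOL_SHIFT);

	/* Only an exact match against the hardware's hash mask selects the
	 * HASH region; anything else stays in the LE-TCAM region.
	 */
	printf("offload to %s region\n",
	       ntuple_mask == hash_filter_mask ? "HASH" : "LE-TCAM");
	return 0;
}
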