[3/5] net/enic: change Rx queue ordering to enable RSS action

Message ID 20200415010641.5195-3-johndale@cisco.com (mailing list archive)
State Accepted, archived
Delegated to: Ferruh Yigit
Series: [1/5] net/enic: fix action reordering

Checks

Context                Check     Description
ci/checkpatch          success   coding style OK
ci/Intel-compilation   fail      Compilation issues

Commit Message

John Daley (johndale) April 15, 2020, 1:06 a.m. UTC
  Each RTE RQ is represented on the enic as a Start Of Packet (SOP) queue
and an overflow queue (DATA). They were arranged SOP0/DATA0, SOP1/DATA1, ...
but they need to be arranged SOP0, SOP1, ..., DATA0, DATA1, ... so that
rte_flow RSS queue ranges work.
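
  For illustration only (not part of the patch), here is a minimal sketch of
the old and new index mappings. The standalone helper names and the rq_count
parameter are hypothetical stand-ins for the inline helpers in enic.h and for
enic->rq_count; the arithmetic mirrors the hunks below.

  /*
   * Before this patch: SOP and DATA RQs were interleaved per RTE RQ
   * (SOP0/DATA0, SOP1/DATA1, ...).
   */
  static unsigned int old_sop_idx(unsigned int rte_idx)
  {
          return rte_idx * 2;
  }

  static unsigned int old_data_idx(unsigned int rte_idx)
  {
          return rte_idx * 2 + 1;
  }

  /*
   * After this patch: all SOP RQs come first, followed by all DATA RQs
   * (SOP0, SOP1, ..., DATA0, DATA1, ...), so the SOP indices
   * 0..rq_count-1 form the contiguous range that rte_flow RSS queue
   * lists expect.
   */
  static unsigned int new_sop_idx(unsigned int rte_idx)
  {
          return rte_idx;
  }

  static unsigned int new_data_idx(unsigned int rte_idx, unsigned int rq_count)
  {
          return rq_count + rte_idx;
  }

  A side effect, visible in the enic.h hunks, is that the SOP RQ index, the
RTE RQ index, and the CQ index all become identical, which removes the
divide-by-two conversions.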

Signed-off-by: John Daley <johndale@cisco.com>
Reviewed-by: Hyong Youb Kim <hyonkim@cisco.com>
---
 drivers/net/enic/enic.h        | 13 +++++++------
 drivers/net/enic/enic_ethdev.c |  2 +-
 drivers/net/enic/enic_main.c   | 13 ++++++-------
 3 files changed, 14 insertions(+), 14 deletions(-)
  

Patch

diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c9901faf5f..a95e51eea8 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -221,25 +221,26 @@  static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
 /* Get the CQ index from a Start of Packet(SOP) RQ index */
 static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
 {
-	return sop_idx / 2;
+	return sop_idx;
 }
 
 /* Get the RTE RQ index from a Start of Packet(SOP) RQ index */
 static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
 {
-	return sop_idx / 2;
+	return sop_idx;
 }
 
 /* Get the Start of Packet(SOP) RQ index from a RTE RQ index */
 static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
 {
-	return rte_idx * 2;
+	return rte_idx;
 }
 
 /* Get the Data RQ index from a RTE RQ index */
-static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
+static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx,
+						       struct enic *enic)
 {
-	return rte_idx * 2 + 1;
+	return enic->rq_count + rte_idx;
 }
 
 static inline unsigned int enic_vnic_rq_count(struct enic *enic)
@@ -253,7 +254,7 @@  static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned i
 	 * completion queue, so the completion queue number is no
 	 * longer the same as the rq number.
 	 */
-	return rq / 2;
+	return rq;
 }
 
 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index a7a178e41b..32d5397f85 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -940,7 +940,7 @@  static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
 
 	ENICPMD_FUNC_TRACE();
 	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
-	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
 	rq_sop = &enic->rq[sop_queue_idx];
 	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
 	qinfo->mp = rq_sop->mp;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 78e2dd133c..7942b0df6b 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -222,13 +222,12 @@  void enic_init_vnic_resources(struct enic *enic)
 			error_interrupt_enable,
 			error_interrupt_offset);
 
-		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
+		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)];
 		if (data_rq->in_use)
 			vnic_rq_init(data_rq,
 				     cq_idx,
 				     error_interrupt_enable,
 				     error_interrupt_offset);
-
 		vnic_cq_init(&enic->cq[cq_idx],
 			0 /* flow_control_enable */,
 			1 /* color_enable */,
@@ -620,7 +619,7 @@  int enic_enable(struct enic *enic)
 			return err;
 		}
 		err = enic_alloc_rx_queue_mbufs(enic,
-			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
+			&enic->rq[enic_rte_rq_idx_to_data_idx(index, enic)]);
 		if (err) {
 			/* release the allocated mbufs for the sop rq*/
 			enic_rxmbuf_queue_release(enic,
@@ -808,7 +807,7 @@  int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 {
 	int rc;
 	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
-	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
+	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx, enic);
 	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
 	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
 	unsigned int mbuf_size, mbufs_per_pkt;
@@ -1475,7 +1474,7 @@  enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 	int rc = 0;
 
 	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
-	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx, enic)];
 	cq_idx = rq_idx;
 
 	vnic_cq_clean(&enic->cq[cq_idx]);
@@ -1498,8 +1497,8 @@  enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 	if (data_rq->in_use) {
 		vnic_rq_init_start(data_rq,
 				   enic_cq_rq(enic,
-				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
-				   data_rq->ring.desc_count - 1, 1, 0);
+				   enic_rte_rq_idx_to_data_idx(rq_idx, enic)),
+				   0, data_rq->ring.desc_count - 1, 1, 0);
 	}
 
 	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);