In virtualization mode, the target pool must be determined for the
filters. For the ethertype filter, virtualization mode must be enabled
to filter broadcast/multicast packets, due to a hardware limitation.
Fixes: f8e2cfc7702b ("net/txgbe: support ethertype filter add and delete")
Fixes: 77a72b4d9dc0 ("net/txgbe: support ntuple filter add and delete")
Fixes: 983a4ef2265b ("net/txgbe: support syn filter add and delete")
Fixes: 08d61139be0a ("net/txgbe: support flow director filter add and delete")
Fixes: 9fdfed08a5e3 ("net/txgbe: restore RSS filter")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 24 +++++++++++++++++++++---
drivers/net/txgbe/txgbe_fdir.c | 3 +++
drivers/net/txgbe/txgbe_rxtx.c | 8 +++++++-
3 files changed, 31 insertions(+), 4 deletions(-)
@@ -4011,6 +4011,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev,
struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
uint32_t syn_info;
uint32_t synqf;
+ uint16_t queue;
if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
@@ -4020,7 +4021,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev,
if (add) {
if (syn_info & TXGBE_SYNCLS_ENA)
return -EINVAL;
- synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue;
+ else
+ queue = filter->queue;
+ synqf = (uint32_t)TXGBE_SYNCLS_QPID(queue);
synqf |= TXGBE_SYNCLS_ENA;
if (filter->hig_pri)
@@ -4089,7 +4094,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
wr32(hw, TXGBE_5TFPORT(i), sdpqf);
wr32(hw, TXGBE_5TFCTL0(i), ftqf);
- l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ l34timir |= TXGBE_5TFCTL1_QP(RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue);
+ else
+ l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
wr32(hw, TXGBE_5TFCTL1(i), l34timir);
}
@@ -4373,7 +4381,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
if (add) {
etqf = TXGBE_ETFLT_ENA;
etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
- etqs |= TXGBE_ETCLS_QPID(filter->queue);
+ if (RTE_ETH_DEV_SRIOV(dev).active) {
+ int pool, queue;
+
+ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
+ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + filter->queue;
+ etqf |= TXGBE_ETFLT_POOLENA;
+ etqf |= TXGBE_ETFLT_POOL(pool);
+ etqs |= TXGBE_ETCLS_QPID(queue);
+ } else {
+ etqs |= TXGBE_ETCLS_QPID(filter->queue);
+ }
etqs |= TXGBE_ETCLS_QENA;
ethertype_filter.ethertype = filter->ether_type;
@@ -844,6 +844,9 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return -EINVAL;
}
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue;
+
node = txgbe_fdir_filter_lookup(info, &rule->input);
if (node) {
if (!update) {
@@ -5160,6 +5160,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
uint32_t reta;
uint16_t i;
uint16_t j;
+ uint16_t queue;
struct rte_eth_rss_conf rss_conf = {
.rss_key = conf->conf.key_len ?
(void *)(uintptr_t)conf->conf.key : NULL,
@@ -5192,7 +5193,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev,
for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
if (j == conf->conf.queue_num)
j = 0;
- reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ queue = RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx +
+ conf->conf.queue[j];
+ else
+ queue = conf->conf.queue[j];
+ reta = (reta >> 8) | LS32(queue, 24, 0xFF);
if ((i & 3) == 3)
wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
}