[v4] app/testpmd: fix secondary process not forwarding
Checks
Commit Message
Under a multi-process scenario, the secondary process gets the queue state
from the wrong location (the global variable 'ports'). Therefore, the
secondary process cannot forward since "stream_init" is not called.
This commit fixes the issue by calling 'rte_eth_rx/tx_queue_info_get'
to get queue state from shared memory.
Fixes: 3c4426db54fc ("app/testpmd: do not poll stopped queues")
Cc: stable@dpdk.org
Signed-off-by: Shiyang He <shiyangx.he@intel.com>
v2: Add function return value processing
v3: Add return value description
v4: Update queue state in 'start_port()'
---
app/test-pmd/testpmd.c | 72 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 71 insertions(+), 1 deletion(-)
Comments
On 3/8/2023 4:19 PM, Shiyang He wrote:
> Under multi-process scenario, the secondary process gets queue state
> from the wrong location (the global variable 'ports'). Therefore, the
> secondary process can not forward since "stream_init" is not called.
>
> This commit fixes the issue by calling 'rte_eth_rx/tx_queue_info_get'
> to get queue state from shared memory.
>
> Fixes: 3c4426db54fc ("app/testpmd: do not poll stopped queues")
> Cc: stable@dpdk.org
>
> Signed-off-by: Shiyang He <shiyangx.he@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@amd.com>
Applied to dpdk-next-net/main, thanks.
@@ -2379,6 +2379,70 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
}
}
+static void
+update_rx_queue_state(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_rxq_info rx_qinfo;
+ int32_t rc;
+
+ rc = rte_eth_rx_queue_info_get(port_id,
+ queue_id, &rx_qinfo);
+ if (rc == 0) {
+ ports[port_id].rxq[queue_id].state =
+ rx_qinfo.queue_state;
+ } else if (rc == -ENOTSUP) {
+ /*
+ * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
+ * so that PMDs that do not implement
+ * rte_eth_rx_queue_info_get can still forward.
+ */
+ ports[port_id].rxq[queue_id].state =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ TESTPMD_LOG(WARNING,
+ "Failed to get rx queue info\n");
+ }
+}
+
+static void
+update_tx_queue_state(uint16_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_txq_info tx_qinfo;
+ int32_t rc;
+
+ rc = rte_eth_tx_queue_info_get(port_id,
+ queue_id, &tx_qinfo);
+ if (rc == 0) {
+ ports[port_id].txq[queue_id].state =
+ tx_qinfo.queue_state;
+ } else if (rc == -ENOTSUP) {
+ /*
+ * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
+ * so that PMDs that do not implement
+ * rte_eth_tx_queue_info_get can still forward.
+ */
+ ports[port_id].txq[queue_id].state =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ } else {
+ TESTPMD_LOG(WARNING,
+ "Failed to get tx queue info\n");
+ }
+}
+
+static void
+update_queue_state(void)
+{
+ portid_t pi;
+ queueid_t qi;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ for (qi = 0; qi < nb_rxq; qi++)
+ update_rx_queue_state(pi, qi);
+ for (qi = 0; qi < nb_txq; qi++)
+ update_tx_queue_state(pi, qi);
+ }
+}
+
/*
* Launch packet forwarding configuration.
*/
@@ -2418,9 +2482,12 @@ start_packet_forwarding(int with_tx_first)
if (!pkt_fwd_shared_rxq_check())
return;
- if (stream_init != NULL)
+ if (stream_init != NULL) {
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ update_queue_state();
for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
stream_init(fwd_streams[i]);
+ }
port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
if (port_fwd_begin != NULL) {
@@ -3180,6 +3247,9 @@ start_port(portid_t pid)
pl[cfg_pi++] = pi;
}
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ update_queue_state();
+
if (at_least_one_port_successfully_started && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
else if (at_least_one_port_exist & all_ports_already_started)