[v3] net/ice: enable multi-process support
Checks
Commit Message
Add multi-process support for ice. Secondary processes will share
memory and configuration with the primary process, so no further
initialization is needed for secondary processes.
Cc: stable@dpdk.org
Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
---
v3 Update the title and commit message.
v2 Remove limitation for secondary processes control path configuration.
---
drivers/net/ice/ice_ethdev.c | 12 +++++++
drivers/net/ice/ice_ethdev.h | 2 ++
drivers/net/ice/ice_rxtx.c | 74 ++++++++++++++++++++++++++++----------------
3 files changed, 62 insertions(+), 26 deletions(-)
Comments
> -----Original Message-----
> From: Zhang, Xiao
> Sent: Thursday, July 25, 2019 7:19 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Zhang, Xiao <xiao.zhang@intel.com>;
> stable@dpdk.org
> Subject: [v3] net/ice: enable multi-process support
>
> Add multiple processes support for ice, secondary processes will share memory
> and configuration with primary process, do not need further initialization for
> secondary processes.
>
> Cc: stable@dpdk.org
>
> Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
> ---
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel with the Cc to stable removed.
Thanks
Qi
On 07/25, Xiao Zhang wrote:
>Add multiple processes support for ice, secondary processes will share
>memory and configuration with primary process, do not need further
>initialization for secondary processes.
>
>Cc: stable@dpdk.org
This is a new feature, so there is no need to Cc stable@dpdk.org.
Thanks,
Xiaolong
>
>Signed-off-by: Xiao Zhang <xiao.zhang@intel.com>
>---
>v3 Update the title and commit message.
>v2 Remove limitation for secondary processes control path configuration.
>---
> drivers/net/ice/ice_ethdev.c | 12 +++++++
> drivers/net/ice/ice_ethdev.h | 2 ++
> drivers/net/ice/ice_rxtx.c | 74 ++++++++++++++++++++++++++++----------------
> 3 files changed, 62 insertions(+), 26 deletions(-)
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 9ce730c..532f4db 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -1408,6 +1408,15 @@ ice_dev_init(struct rte_eth_dev *dev)
> dev->tx_pkt_burst = ice_xmit_pkts;
> dev->tx_pkt_prepare = ice_prep_pkts;
>
>+ /* for secondary processes, we don't initialise any further as primary
>+ * has already done this work.
>+ */
>+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
>+ ice_set_rx_function(dev);
>+ ice_set_tx_function(dev);
>+ return 0;
>+ }
>+
> ice_set_default_ptype_table(dev);
> pci_dev = RTE_DEV_TO_PCI(dev->device);
> intr_handle = &pci_dev->intr_handle;
>@@ -1638,6 +1647,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct rte_flow *p_flow;
>
>+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
>+ return 0;
>+
> ice_dev_close(dev);
>
> dev->dev_ops = NULL;
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 8a52239..a083616 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -282,6 +282,8 @@ struct ice_adapter {
> struct rte_eth_dev *eth_dev;
> struct ice_pf pf;
> bool rx_bulk_alloc_allowed;
>+ bool rx_vec_allowed;
>+ bool tx_vec_allowed;
> bool tx_simple_allowed;
> /* ptype mapping table */
> uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
>diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
>index 035ed84..d67de8f 100644
>--- a/drivers/net/ice/ice_rxtx.c
>+++ b/drivers/net/ice/ice_rxtx.c
>@@ -2332,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> int i;
> bool use_avx2 = false;
>
>- if (!ice_rx_vec_dev_check(dev)) {
>- for (i = 0; i < dev->data->nb_rx_queues; i++) {
>- rxq = dev->data->rx_queues[i];
>- (void)ice_rxq_vec_setup(rxq);
>- }
>+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
>+ if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
>+ ad->rx_vec_allowed = true;
>+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+ rxq = dev->data->rx_queues[i];
>+ if (rxq && ice_rxq_vec_setup(rxq)) {
>+ ad->rx_vec_allowed = false;
>+ break;
>+ }
>+ }
>+
>+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>+ use_avx2 = true;
>
>- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>- use_avx2 = true;
>+ } else {
>+ ad->rx_vec_allowed = false;
>+ }
>+ }
>
>+ if (ad->rx_vec_allowed) {
> if (dev->data->scattered_rx) {
> PMD_DRV_LOG(DEBUG,
>- "Using %sVector Scattered Rx (port %d).",
>- use_avx2 ? "avx2 " : "",
>- dev->data->port_id);
>+ "Using %sVector Scattered Rx (port %d).",
>+ use_avx2 ? "avx2 " : "",
>+ dev->data->port_id);
> dev->rx_pkt_burst = use_avx2 ?
>- ice_recv_scattered_pkts_vec_avx2 :
>- ice_recv_scattered_pkts_vec;
>+ ice_recv_scattered_pkts_vec_avx2 :
>+ ice_recv_scattered_pkts_vec;
> } else {
> PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
>- use_avx2 ? "avx2 " : "",
>- dev->data->port_id);
>+ use_avx2 ? "avx2 " : "",
>+ dev->data->port_id);
> dev->rx_pkt_burst = use_avx2 ?
>- ice_recv_pkts_vec_avx2 :
>- ice_recv_pkts_vec;
>+ ice_recv_pkts_vec_avx2 :
>+ ice_recv_pkts_vec;
> }
>-
> return;
> }
>+
> #endif
>
> if (dev->data->scattered_rx) {
>@@ -2464,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
> int i;
> bool use_avx2 = false;
>
>- if (!ice_tx_vec_dev_check(dev)) {
>- for (i = 0; i < dev->data->nb_tx_queues; i++) {
>- txq = dev->data->tx_queues[i];
>- (void)ice_txq_vec_setup(txq);
>- }
>+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
>+ if (!ice_tx_vec_dev_check(dev)) {
>+ ad->tx_vec_allowed = true;
>+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
>+ txq = dev->data->tx_queues[i];
>+ if (txq && ice_txq_vec_setup(txq)) {
>+ ad->tx_vec_allowed = false;
>+ break;
>+ }
>+ }
>
>- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>- use_avx2 = true;
>+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
>+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
>+ use_avx2 = true;
>+
>+ } else {
>+ ad->tx_vec_allowed = false;
>+ }
>+ }
>
>+ if (ad->tx_vec_allowed) {
> PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
> use_avx2 ? "avx2 " : "",
> dev->data->port_id);
>--
>2.7.4
>
@@ -1408,6 +1408,15 @@ ice_dev_init(struct rte_eth_dev *dev)
dev->tx_pkt_burst = ice_xmit_pkts;
dev->tx_pkt_prepare = ice_prep_pkts;
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ice_set_rx_function(dev);
+ ice_set_tx_function(dev);
+ return 0;
+ }
+
ice_set_default_ptype_table(dev);
pci_dev = RTE_DEV_TO_PCI(dev->device);
intr_handle = &pci_dev->intr_handle;
@@ -1638,6 +1647,9 @@ ice_dev_uninit(struct rte_eth_dev *dev)
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_flow *p_flow;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
ice_dev_close(dev);
dev->dev_ops = NULL;
@@ -282,6 +282,8 @@ struct ice_adapter {
struct rte_eth_dev *eth_dev;
struct ice_pf pf;
bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_vec_allowed;
bool tx_simple_allowed;
/* ptype mapping table */
uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
@@ -2332,35 +2332,46 @@ ice_set_rx_function(struct rte_eth_dev *dev)
int i;
bool use_avx2 = false;
- if (!ice_rx_vec_dev_check(dev)) {
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)ice_rxq_vec_setup(rxq);
- }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+ ad->rx_vec_allowed = true;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq && ice_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
- use_avx2 = true;
+ } else {
+ ad->rx_vec_allowed = false;
+ }
+ }
+ if (ad->rx_vec_allowed) {
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG,
- "Using %sVector Scattered Rx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
+ "Using %sVector Scattered Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
dev->rx_pkt_burst = use_avx2 ?
- ice_recv_scattered_pkts_vec_avx2 :
- ice_recv_scattered_pkts_vec;
+ ice_recv_scattered_pkts_vec_avx2 :
+ ice_recv_scattered_pkts_vec;
} else {
PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
dev->rx_pkt_burst = use_avx2 ?
- ice_recv_pkts_vec_avx2 :
- ice_recv_pkts_vec;
+ ice_recv_pkts_vec_avx2 :
+ ice_recv_pkts_vec;
}
-
return;
}
+
#endif
if (dev->data->scattered_rx) {
@@ -2464,16 +2475,27 @@ ice_set_tx_function(struct rte_eth_dev *dev)
int i;
bool use_avx2 = false;
- if (!ice_tx_vec_dev_check(dev)) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- (void)ice_txq_vec_setup(txq);
- }
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (!ice_tx_vec_dev_check(dev)) {
+ ad->tx_vec_allowed = true;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq && ice_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
- use_avx2 = true;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ }
+ if (ad->tx_vec_allowed) {
PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);