> --- /dev/null
> +++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
> @@ -0,0 +1,217 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation.
> + */
> +
> +#include <rte_ethdev.h>
> +#include <rte_pci.h>
> +#include <rte_malloc.h>
> +
> +#include "base/ixgbe_type.h"
> +#include "base/ixgbe_vf.h"
> +#include "ixgbe_ethdev.h"
> +#include "ixgbe_rxtx.h"
> +#include "rte_pmd_ixgbe.h"
> +
> +
> +static int
> +ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
> + int wait_to_complete)
> +{
> + struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
> +
> + return ixgbe_dev_link_update_share(representor->pf_ethdev,
> + wait_to_complete, 1);
> +}
> +
> +static int
> +ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
> + struct ether_addr *mac_addr)
> +{
> + struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
> +
> + return rte_pmd_ixgbe_set_vf_mac_addr(
> + representor->pf_ethdev->data->port_id,
> + representor->vf_id, mac_addr);
> +}
> +
> +static void
> +ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
> + struct rte_eth_dev_info *dev_info)
> +{
> + struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
> +
> + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
> + representor->pf_ethdev->data->dev_private);
> +
> + dev_info->device = representor->pf_ethdev->device;
> +
> + dev_info->min_rx_bufsize = 1024;
> + /**< Minimum size of RX buffer. */
> + dev_info->max_rx_pktlen = 9728;
> + /**< Maximum configurable length of RX pkt. */
> + dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
> + /**< Maximum number of RX queues. */
> + dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
> + /**< Maximum number of TX queues. */
Sort of generic question - for representor ports that do only control path -
shouldn't we have max_rx_queues=max_tx_queues=0, zero and make
queue_setup/rx_burst/tx_burst, etc. to return an error?
> +
> + dev_info->max_mac_addrs = hw->mac.num_rar_entries;
> + /**< Maximum number of MAC addresses. */
> +
> + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
> + DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
> + DEV_RX_OFFLOAD_TCP_CKSUM;
> + /**< Device RX offload capabilities. */
> +
> + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
> + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
> + DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
> + DEV_TX_OFFLOAD_TCP_TSO;
> + /**< Device TX offload capabilities. */
> +
> + dev_info->speed_capa =
> + representor->pf_ethdev->data->dev_link.link_speed;
> + /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
> +
> + dev_info->switch_info.name =
> + representor->pf_ethdev->device->name;
> + dev_info->switch_info.domain_id = representor->switch_domain_id;
> + dev_info->switch_info.port_id = representor->vf_id;
> +}
> +
> +static int ixgbe_vf_representor_dev_configure(
> + __rte_unused struct rte_eth_dev *dev)
> +{
> + return 0;
> +}
> +
> +static int ixgbe_vf_representor_rx_queue_setup(
> + __rte_unused struct rte_eth_dev *dev,
> + __rte_unused uint16_t rx_queue_id,
> + __rte_unused uint16_t nb_rx_desc,
> + __rte_unused unsigned int socket_id,
> + __rte_unused const struct rte_eth_rxconf *rx_conf,
> + __rte_unused struct rte_mempool *mb_pool)
> +{
> + return 0;
> +}
> +
> +static int ixgbe_vf_representor_tx_queue_setup(
> + __rte_unused struct rte_eth_dev *dev,
> + __rte_unused uint16_t tx_queue_id,
> + __rte_unused uint16_t nb_tx_desc,
> + __rte_unused unsigned int socket_id,
> + __rte_unused const struct rte_eth_txconf *tx_conf)
> +{
> + return 0;
> +}
> +
@@ -103,6 +103,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
@@ -132,7 +132,7 @@
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
@@ -1043,7 +1043,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
* It returns 0 on success.
*/
static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1226,6 +1226,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize PF if max_vfs not zero */
ixgbe_pf_host_init(eth_dev);
+
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
/* let hardware know driver is loaded */
ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
@@ -1716,16 +1717,72 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
> + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+ if (retval)
+ return retval;
+
+ /* physical port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_%d", pci_dev->device.name, 0);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(name);
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ pf_ethdev->data->dev_private);
+
+ struct ixgbe_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = vfinfo->switch_domain_id,
+ .pf_ethdev = pf_ethdev
+ };
+
+ /* representor port net_bdf_port */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_vf_representor), NULL, NULL,
+ ixgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
}
static struct rte_pci_driver rte_ixgbe_pmd = {
@@ -2868,7 +2925,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_ixgbe_dev_init(dev);
+ ret = eth_ixgbe_dev_init(dev, NULL);
return ret;
}
@@ -3883,7 +3940,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/* return 0 means link status changed, -1 means not changed */
-static int
+int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait_to_complete, int vf)
{
@@ -253,6 +253,7 @@ struct ixgbe_vf_info {
uint16_t vlan_count;
uint8_t spoofchk_enabled;
uint8_t api_version;
+ uint16_t switch_domain_id;
};
/*
@@ -480,6 +481,15 @@ struct ixgbe_adapter {
struct ixgbe_tm_conf tm_conf;
};
+struct ixgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -652,6 +662,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf);
+
/*
* misc function prototypes
*/
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
@@ -122,6 +124,7 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo;
uint16_t vf_num;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -132,6 +135,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
vf_num = dev_num_vf(eth_dev);
if (vf_num == 0)
return;
new file mode 100644
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return ixgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete, 1);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_ixgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+ representor->pf_ethdev->data->dev_private);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
> +static int ixgbe_vf_representor_tx_queue_setup(
> + __rte_unused struct rte_eth_dev *dev,
> + __rte_unused uint16_t tx_queue_id,
> + __rte_unused uint16_t nb_tx_desc,
> + __rte_unused unsigned int socket_id,
> + __rte_unused const struct rte_eth_txconf *tx_conf)
> +{
> + return 0;
> +}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_ixgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+ .dev_infos_get = ixgbe_vf_representor_dev_infos_get,
+
+ .dev_start = ixgbe_vf_representor_dev_start,
+ .dev_configure = ixgbe_vf_representor_dev_configure,
+ .dev_stop = ixgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup,
+
+ .link_update = ixgbe_vf_representor_link_update,
+
+ .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = ixgbe_vf_representor_mac_addr_set,
+};
+
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct ixgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+ /* No data-path so no RX/TX functions */
+ ethdev->rx_pkt_burst = NULL;
+ ethdev->tx_pkt_burst = NULL;
+
> + /* Setting the number queues allocated to the VF */
> + ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
> + ethdev->data->nb_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ representor->pf_ethdev->data->dev_private);
+
+ ethdev->data->mac_addrs = (struct ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
@@ -19,6 +19,7 @@ sources = files(
'ixgbe_pf.c',
'ixgbe_rxtx.c',
'ixgbe_tm.c',
+ 'ixgbe_vf_representor.c',
'rte_pmd_ixgbe.c'
)