@@ -25,18 +25,18 @@ static void
nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
{
uint16_t i;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
uint64_t enabled_queues = 0;
struct nfp_flower_representor *repr;
repr = dev->data->dev_private;
- hw = repr->app_fw_flower->pf_hw;
+ hw = &repr->app_fw_flower->pf_hw->super;
/* Enabling the required TX queues in the device */
for (i = 0; i < dev->data->nb_tx_queues; i++)
enabled_queues |= (1 << i);
- nn_cfg_writeq(&hw->super, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);
enabled_queues = 0;
@@ -44,7 +44,7 @@ nfp_pf_repr_enable_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++)
enabled_queues |= (1 << i);
- nn_cfg_writeq(&hw->super, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}
static void
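/*
 * A minimal sketch of the struct layout this refactor leans on,
 * reconstructed from the accesses in the hunks below; only fields that
 * actually appear in the patch are listed, and their order here is
 * illustrative, not the driver's definition.
 */
struct nfp_hw {
	uint8_t *ctrl_bar;            /* Mapped configuration BAR */
	uint32_t cap;                 /* Capabilities advertised by the device */
	uint32_t cap_ext;             /* Extended capabilities */
	uint32_t ctrl;                /* Feature bits currently enabled */
	rte_spinlock_t reconfig_lock; /* Serializes reconfig cycles */
	struct rte_ether_addr mac_addr;
};

struct nfp_net_hw {
	struct nfp_hw super;          /* Generic part, reached as &net_hw->super */
	/* Net-specific fields: tx_bar, rx_bar, mtu, tlv_caps, ipsec_data, ... */
};

/*
 * The whole series then follows one pattern: fetch the embedded base
 * struct once per function and hand it to the nn_cfg_*() accessors,
 * instead of spelling out &hw->super at every call site.
 */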
@@ -479,11 +479,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
uint16_t port;
uint64_t rx_base;
uint64_t tx_base;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_pf_dev *pf_dev;
struct rte_pci_device *pci_dev;
struct nfp_app_fw_nic *app_fw_nic;
- struct rte_ether_addr *tmp_ether_addr;
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
@@ -503,46 +503,47 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
* Use PF array of physical ports to get pointer to
* this specific port.
*/
- hw = app_fw_nic->ports[port];
+ net_hw = app_fw_nic->ports[port];
+ hw = &net_hw->super;
PMD_INIT_LOG(DEBUG, "Working with physical port number: %hu, "
- "NFP internal port number: %d", port, hw->nfp_idx);
+ "NFP internal port number: %d", port, net_hw->nfp_idx);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- hw->super.ctrl_bar = pci_dev->mem_resource[0].addr;
- if (hw->super.ctrl_bar == NULL) {
- PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. BAR0 not configured");
+ hw->ctrl_bar = pci_dev->mem_resource[0].addr;
+ if (hw->ctrl_bar == NULL) {
+ PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
return -ENODEV;
}
if (port == 0) {
uint32_t min_size;
- hw->super.ctrl_bar = pf_dev->ctrl_bar;
- min_size = NFP_MAC_STATS_SIZE * hw->pf_dev->nfp_eth_table->max_index;
- hw->mac_stats_bar = nfp_rtsym_map(hw->pf_dev->sym_tbl, "_mac_stats",
- min_size, &hw->mac_stats_area);
- if (hw->mac_stats_bar == NULL) {
+ hw->ctrl_bar = pf_dev->ctrl_bar;
+ min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index;
+ net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats",
+ min_size, &net_hw->mac_stats_area);
+ if (net_hw->mac_stats_bar == NULL) {
PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar");
return -EIO;
}
- hw->mac_stats = hw->mac_stats_bar;
+ net_hw->mac_stats = net_hw->mac_stats_bar;
} else {
if (pf_dev->ctrl_bar == NULL)
return -ENODEV;
/* Use port offset in pf ctrl_bar for this ports control bar */
- hw->super.ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
- hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
- (hw->nfp_idx * NFP_MAC_STATS_SIZE);
+ hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ);
+ net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar +
+ (net_hw->nfp_idx * NFP_MAC_STATS_SIZE);
}
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->super.ctrl_bar);
- PMD_INIT_LOG(DEBUG, "MAC stats: %p", hw->mac_stats);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats);
- err = nfp_net_common_init(pci_dev, hw);
+ err = nfp_net_common_init(pci_dev, net_hw);
if (err != 0)
return err;
@@ -558,38 +559,38 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return err;
}
- nfp_net_ethdev_ops_mount(hw, eth_dev);
+ nfp_net_ethdev_ops_mount(net_hw, eth_dev);
- hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
+ net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", sizeof(struct rte_eth_xstat) *
nfp_net_xstats_size(eth_dev), 0);
- if (hw->eth_xstats_base == NULL) {
+ if (net_hw->eth_xstats_base == NULL) {
PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!",
pci_dev->device.name);
return -ENOMEM;
}
/* Work out where in the BAR the queues start. */
- tx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
- rx_base = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
+ tx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
+ rx_base = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
- hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
- hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
- eth_dev->data->dev_private = hw;
+ net_hw->tx_bar = pf_dev->qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
+ net_hw->rx_bar = pf_dev->qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
+ eth_dev->data->dev_private = net_hw;
PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
- hw->super.ctrl_bar, hw->tx_bar, hw->rx_bar);
+ hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
- nfp_net_cfg_queue_setup(hw);
- hw->mtu = RTE_ETHER_MTU;
+ nfp_net_cfg_queue_setup(net_hw);
+ net_hw->mtu = RTE_ETHER_MTU;
/* VLAN insertion is incompatible with LSOv2 */
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO2) != 0)
- hw->super.cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
+ hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
- nfp_net_log_device_information(hw);
+ nfp_net_log_device_information(net_hw);
/* Initializing spinlock for reconfigs */
- rte_spinlock_init(&hw->super.reconfig_lock);
+ rte_spinlock_init(&hw->reconfig_lock);
/* Allocating memory for mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
@@ -599,20 +600,19 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
}
nfp_net_pf_read_mac(app_fw_nic, port);
- nfp_net_write_mac(&hw->super, &hw->super.mac_addr.addr_bytes[0]);
+ nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
- tmp_ether_addr = &hw->super.mac_addr;
- if (rte_is_valid_assigned_ether_addr(tmp_ether_addr) == 0) {
+ if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
PMD_INIT_LOG(INFO, "Using random mac address for port %d", port);
/* Using random mac addresses for VFs */
- rte_eth_random_addr(&hw->super.mac_addr.addr_bytes[0]);
- nfp_net_write_mac(&hw->super, &hw->super.mac_addr.addr_bytes[0]);
+ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
+ nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
}
/* Copying mac address to DPDK eth_dev struct */
- rte_ether_addr_copy(&hw->super.mac_addr, eth_dev->data->mac_addrs);
+ rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
+ if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
@@ -621,13 +621,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
"mac=" RTE_ETHER_ADDR_PRT_FMT,
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id,
- RTE_ETHER_ADDR_BYTES(&hw->super.mac_addr));
+ RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
/* Registering LSC interrupt handler */
rte_intr_callback_register(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)eth_dev);
/* Telling the firmware about the LSC interrupt entry */
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
/* Recording current stats counters values */
nfp_net_stats_reset(eth_dev);
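/*
 * What the port-0/port-N split above sets up, assuming NFP_NET_CFG_BAR_SZ
 * is the size of one port's configuration window and NFP_MAC_STATS_SIZE
 * the size of one port's MAC statistics block:
 *
 *   pf_dev->ctrl_bar:        [ port 0 cfg ][ port 1 cfg ][ ... ]
 *   ports[0]->mac_stats_bar: [ nfp_idx 0 ][ nfp_idx 1 ][ ... ]
 *
 * Port 0 maps the "_mac_stats" rtsym once; every later port just offsets
 * into the two shared areas by port number and nfp_idx respectively.
 */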
@@ -254,7 +254,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
int err;
uint16_t port;
uint32_t start_q;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
+ struct nfp_net_hw *net_hw;
uint64_t tx_bar_off = 0;
uint64_t rx_bar_off = 0;
struct rte_pci_device *pci_dev;
@@ -269,22 +270,23 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
return -ENODEV;
}
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- hw->dev_info = dev_info;
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ net_hw->dev_info = dev_info;
+ hw = &net_hw->super;
- hw->super.ctrl_bar = pci_dev->mem_resource[0].addr;
- if (hw->super.ctrl_bar == NULL) {
- PMD_DRV_LOG(ERR, "hw->super.ctrl_bar is NULL. BAR0 not configured");
+ hw->ctrl_bar = pci_dev->mem_resource[0].addr;
+ if (hw->ctrl_bar == NULL) {
+ PMD_DRV_LOG(ERR, "hw->ctrl_bar is NULL. BAR0 not configured");
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->super.ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
- err = nfp_net_common_init(pci_dev, hw);
+ err = nfp_net_common_init(pci_dev, net_hw);
if (err != 0)
return err;
- nfp_netvf_ethdev_ops_mount(hw, eth_dev);
+ nfp_netvf_ethdev_ops_mount(net_hw, eth_dev);
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -292,37 +294,37 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
+ net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat",
sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0);
- if (hw->eth_xstats_base == NULL) {
+ if (net_hw->eth_xstats_base == NULL) {
PMD_INIT_LOG(ERR, "No memory for xstats base values on device %s!",
pci_dev->device.name);
return -ENOMEM;
}
/* Work out where in the BAR the queues start. */
- start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_TXQ);
+ start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
tx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
- start_q = nn_cfg_readl(&hw->super, NFP_NET_CFG_START_RXQ);
+ start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
rx_bar_off = nfp_qcp_queue_offset(dev_info, start_q);
- hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
- hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
+ net_hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
+ net_hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
- hw->super.ctrl_bar, hw->tx_bar, hw->rx_bar);
+ hw->ctrl_bar, net_hw->tx_bar, net_hw->rx_bar);
- nfp_net_cfg_queue_setup(hw);
- hw->mtu = RTE_ETHER_MTU;
+ nfp_net_cfg_queue_setup(net_hw);
+ net_hw->mtu = RTE_ETHER_MTU;
/* VLAN insertion is incompatible with LSOv2 */
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO2) != 0)
- hw->super.cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+ if ((hw->cap & NFP_NET_CFG_CTRL_LSO2) != 0)
+ hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
- nfp_net_log_device_information(hw);
+ nfp_net_log_device_information(net_hw);
/* Initializing spinlock for reconfigs */
- rte_spinlock_init(&hw->super.reconfig_lock);
+ rte_spinlock_init(&hw->reconfig_lock);
/* Allocating memory for mac addr */
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
@@ -332,18 +334,18 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
goto dev_err_ctrl_map;
}
- nfp_netvf_read_mac(&hw->super);
- if (rte_is_valid_assigned_ether_addr(&hw->super.mac_addr) == 0) {
+ nfp_netvf_read_mac(hw);
+ if (rte_is_valid_assigned_ether_addr(&hw->mac_addr) == 0) {
PMD_INIT_LOG(INFO, "Using random mac address for port %hu", port);
/* Using random mac addresses for VFs */
- rte_eth_random_addr(&hw->super.mac_addr.addr_bytes[0]);
- nfp_net_write_mac(&hw->super, &hw->super.mac_addr.addr_bytes[0]);
+ rte_eth_random_addr(&hw->mac_addr.addr_bytes[0]);
+ nfp_net_write_mac(hw, &hw->mac_addr.addr_bytes[0]);
}
/* Copying mac address to DPDK eth_dev struct */
- rte_ether_addr_copy(&hw->super.mac_addr, eth_dev->data->mac_addrs);
+ rte_ether_addr_copy(&hw->mac_addr, eth_dev->data->mac_addrs);
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
+ if ((hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR) == 0)
eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
@@ -352,14 +354,14 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
"mac=" RTE_ETHER_ADDR_PRT_FMT,
port, pci_dev->id.vendor_id,
pci_dev->id.device_id,
- RTE_ETHER_ADDR_BYTES(&hw->super.mac_addr));
+ RTE_ETHER_ADDR_BYTES(&hw->mac_addr));
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* Registering LSC interrupt handler */
rte_intr_callback_register(pci_dev->intr_handle,
nfp_net_dev_interrupt_handler, (void *)eth_dev);
/* Telling the firmware about the LSC interrupt entry */
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
/* Recording current stats counters values */
nfp_net_stats_reset(eth_dev);
}
@@ -367,7 +369,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
return 0;
dev_err_ctrl_map:
- nfp_cpp_area_free(hw->ctrl_area);
+ nfp_cpp_area_free(net_hw->ctrl_area);
return err;
}
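/*
 * On the VF the queue rings live in BAR2; the config BAR only reports
 * which hardware queue index this vNIC starts at. A condensed view of the
 * computation above (assuming nfp_qcp_queue_offset() maps a queue index
 * to its byte offset within the queue controller BAR):
 *
 *   start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
 *   tx_bar  = (uint8_t *)pci_dev->mem_resource[2].addr +
 *             nfp_qcp_queue_offset(dev_info, start_q);
 */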
@@ -434,7 +434,7 @@ enum nfp_ipsec_df_type {
};
static int
-nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw,
+nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *net_hw,
struct nfp_ipsec_msg *msg)
{
int ret;
@@ -445,9 +445,9 @@ nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw,
msg->rsp = NFP_IPSEC_CFG_MSG_OK;
for (i = 0; i < msg_size; i++)
- nn_cfg_writel(&hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+ nn_cfg_writel(&net_hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
- ret = nfp_net_mbox_reconfig(hw, NFP_NET_CFG_MBOX_CMD_IPSEC);
+ ret = nfp_net_mbox_reconfig(net_hw, NFP_NET_CFG_MBOX_CMD_IPSEC);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to IPsec reconfig mbox");
return ret;
@@ -459,7 +459,7 @@ nfp_ipsec_cfg_cmd_issue(struct nfp_net_hw *hw,
* response. One example where the data is needed is for statistics.
*/
for (i = 0; i < msg_size; i++)
- msg->raw[i] = nn_cfg_readl(&hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i);
+ msg->raw[i] = nn_cfg_readl(&net_hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i);
switch (msg->rsp) {
case NFP_IPSEC_CFG_MSG_OK:
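/*
 * The word-by-word mailbox copy above implies that nfp_ipsec_msg is
 * addressable both as named fields and as raw 32-bit words. A hedged
 * sketch of that shape (field order and the raw[] size are illustrative,
 * not the actual driver definition):
 */
struct nfp_ipsec_msg_sketch {
	union {
		struct {
			uint32_t cmd;    /* E.g. NFP_IPSEC_CFG_MSG_ADD_SA */
			uint32_t sa_idx; /* SA table slot being operated on */
			uint32_t rsp;    /* NFP_IPSEC_CFG_MSG_OK on success */
			/* Command-specific payload follows */
		};
		uint32_t raw[32];        /* Word view used for the mbox copy */
	};
};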
@@ -577,10 +577,10 @@ nfp_aead_map(struct rte_eth_dev *eth_dev,
uint32_t device_id;
const char *iv_str;
const uint32_t *key;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- device_id = hw->device_id;
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ device_id = net_hw->device_id;
offset = 0;
switch (aead->algo) {
@@ -665,10 +665,10 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev,
uint32_t i;
uint32_t device_id;
const uint32_t *key;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- device_id = hw->device_id;
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ device_id = net_hw->device_id;
switch (cipher->algo) {
case RTE_CRYPTO_CIPHER_NULL:
@@ -801,15 +801,15 @@ nfp_auth_map(struct rte_eth_dev *eth_dev,
uint8_t key_length;
uint32_t device_id;
const uint32_t *key;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
if (digest_length == 0) {
PMD_DRV_LOG(ERR, "Auth digest length is illegal!");
return -EINVAL;
}
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- device_id = hw->device_id;
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ device_id = net_hw->device_id;
digest_length = digest_length << 3;
switch (auth->algo) {
@@ -1068,7 +1068,7 @@ nfp_crypto_create_session(void *device,
{
int ret;
int sa_idx;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_ipsec_msg msg;
struct rte_eth_dev *eth_dev;
struct nfp_ipsec_session *priv_session;
@@ -1082,14 +1082,14 @@ nfp_crypto_create_session(void *device,
sa_idx = -1;
eth_dev = device;
priv_session = SECURITY_GET_SESS_PRIV(session);
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- if (hw->ipsec_data->sa_free_cnt == 0) {
+ if (net_hw->ipsec_data->sa_free_cnt == 0) {
PMD_DRV_LOG(ERR, "No space in SA table, spi: %d", conf->ipsec.spi);
return -EINVAL;
}
- nfp_get_sa_entry(hw->ipsec_data, &sa_idx);
+ nfp_get_sa_entry(net_hw->ipsec_data, &sa_idx);
if (sa_idx < 0) {
PMD_DRV_LOG(ERR, "Failed to get SA entry!");
@@ -1105,7 +1105,7 @@ nfp_crypto_create_session(void *device,
msg.cmd = NFP_IPSEC_CFG_MSG_ADD_SA;
msg.sa_idx = sa_idx;
- ret = nfp_ipsec_cfg_cmd_issue(hw, &msg);
+ ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to add SA to nic");
return -EINVAL;
@@ -1118,8 +1118,8 @@ nfp_crypto_create_session(void *device,
priv_session->dev = eth_dev;
priv_session->user_data = conf->userdata;
- hw->ipsec_data->sa_free_cnt--;
- hw->ipsec_data->sa_entries[sa_idx] = priv_session;
+ net_hw->ipsec_data->sa_free_cnt--;
+ net_hw->ipsec_data->sa_entries[sa_idx] = priv_session;
return 0;
}
@@ -1156,19 +1156,19 @@ nfp_security_set_pkt_metadata(void *device,
{
int offset;
uint64_t *sqn;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct rte_eth_dev *eth_dev;
struct nfp_ipsec_session *priv_session;
sqn = params;
eth_dev = device;
priv_session = SECURITY_GET_SESS_PRIV(session);
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
if (priv_session->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
struct nfp_tx_ipsec_desc_msg *desc_md;
- offset = hw->ipsec_data->pkt_dynfield_offset;
+ offset = net_hw->ipsec_data->pkt_dynfield_offset;
desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *);
if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) {
@@ -1223,7 +1223,7 @@ nfp_security_session_get_stats(void *device,
struct rte_security_stats *stats)
{
int ret;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_ipsec_msg msg;
struct rte_eth_dev *eth_dev;
struct ipsec_get_sa_stats *cfg_s;
@@ -1236,9 +1236,9 @@ nfp_security_session_get_stats(void *device,
memset(&msg, 0, sizeof(msg));
msg.cmd = NFP_IPSEC_CFG_MSG_GET_SA_STATS;
msg.sa_idx = priv_session->sa_index;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- ret = nfp_ipsec_cfg_cmd_issue(hw, &msg);
+ ret = nfp_ipsec_cfg_cmd_issue(net_hw, &msg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to get SA stats");
return ret;
@@ -1284,22 +1284,22 @@ nfp_crypto_remove_sa(struct rte_eth_dev *eth_dev,
{
int ret;
uint32_t sa_index;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_ipsec_msg cfg;
sa_index = priv_session->sa_index;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
cfg.cmd = NFP_IPSEC_CFG_MSG_INV_SA;
cfg.sa_idx = sa_index;
- ret = nfp_ipsec_cfg_cmd_issue(hw, &cfg);
+ ret = nfp_ipsec_cfg_cmd_issue(net_hw, &cfg);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to remove SA!");
return -EINVAL;
}
- hw->ipsec_data->sa_free_cnt++;
- hw->ipsec_data->sa_entries[sa_index] = NULL;
+ net_hw->ipsec_data->sa_free_cnt++;
+ net_hw->ipsec_data->sa_entries[sa_index] = NULL;
return 0;
}
@@ -1377,12 +1377,12 @@ nfp_ipsec_init(struct rte_eth_dev *dev)
{
int ret;
uint32_t cap_extend;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_net_ipsec_data *data;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- cap_extend = hw->super.cap_ext;
+ cap_extend = net_hw->super.cap_ext;
if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) {
PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability");
return 0;
@@ -1396,7 +1396,7 @@ nfp_ipsec_init(struct rte_eth_dev *dev)
data->pkt_dynfield_offset = -1;
data->sa_free_cnt = NFP_NET_IPSEC_MAX_SA_CNT;
- hw->ipsec_data = data;
+ net_hw->ipsec_data = data;
ret = nfp_ipsec_ctx_create(dev, data);
if (ret != 0) {
@@ -1424,12 +1424,12 @@ nfp_ipsec_uninit(struct rte_eth_dev *dev)
{
uint16_t i;
uint32_t cap_extend;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_ipsec_session *priv_session;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- cap_extend = hw->super.cap_ext;
+ cap_extend = net_hw->super.cap_ext;
if ((cap_extend & NFP_NET_CFG_CTRL_IPSEC) == 0) {
PMD_INIT_LOG(INFO, "Unsupported IPsec extend capability");
return;
@@ -1437,17 +1437,17 @@ nfp_ipsec_uninit(struct rte_eth_dev *dev)
nfp_ipsec_ctx_destroy(dev);
- if (hw->ipsec_data == NULL) {
+ if (net_hw->ipsec_data == NULL) {
PMD_INIT_LOG(INFO, "IPsec data is NULL!");
return;
}
for (i = 0; i < NFP_NET_IPSEC_MAX_SA_CNT; i++) {
- priv_session = hw->ipsec_data->sa_entries[i];
+ priv_session = net_hw->ipsec_data->sa_entries[i];
if (priv_session != NULL)
memset(priv_session, 0, sizeof(struct nfp_ipsec_session));
}
- rte_free(hw->ipsec_data);
+ rte_free(net_hw->ipsec_data);
}
@@ -336,7 +336,7 @@ nfp_ext_reconfig(struct nfp_hw *hw,
/**
* Reconfigure the firmware via the mailbox
*
- * @param hw
+ * @param net_hw
* Device to reconfigure
* @param mbox_cmd
* The value for the mailbox command
@@ -346,24 +346,24 @@ nfp_ext_reconfig(struct nfp_hw *hw,
* - (-EIO) if I/O err and fail to reconfigure by the mailbox
*/
int
-nfp_net_mbox_reconfig(struct nfp_net_hw *hw,
+nfp_net_mbox_reconfig(struct nfp_net_hw *net_hw,
uint32_t mbox_cmd)
{
int ret;
uint32_t mbox;
- mbox = hw->tlv_caps.mbox_off;
+ mbox = net_hw->tlv_caps.mbox_off;
- rte_spinlock_lock(&hw->super.reconfig_lock);
+ rte_spinlock_lock(&net_hw->super.reconfig_lock);
- nn_cfg_writeq(&hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
- nn_cfg_writel(&hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
+ nn_cfg_writeq(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
+ nn_cfg_writel(&net_hw->super, NFP_NET_CFG_UPDATE, NFP_NET_CFG_UPDATE_MBOX);
rte_wmb();
- ret = nfp_reconfig_real(&hw->super, NFP_NET_CFG_UPDATE_MBOX);
+ ret = nfp_reconfig_real(&net_hw->super, NFP_NET_CFG_UPDATE_MBOX);
- rte_spinlock_unlock(&hw->super.reconfig_lock);
+ rte_spinlock_unlock(&net_hw->super.reconfig_lock);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Error nft net mailbox reconfig: mbox=%#08x update=%#08x",
@@ -371,7 +371,7 @@ nfp_net_mbox_reconfig(struct nfp_net_hw *hw,
return -EIO;
}
- return nn_cfg_readl(&hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
+ return nn_cfg_readl(&net_hw->super, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
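/*
 * Usage, as exercised by nfp_ipsec_cfg_cmd_issue() earlier in this
 * patch: stage the payload in the mailbox value area, fire the command,
 * then treat a non-negative return as the firmware's response code.
 */
for (i = 0; i < msg_size; i++)
	nn_cfg_writel(&net_hw->super, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);

ret = nfp_net_mbox_reconfig(net_hw, NFP_NET_CFG_MBOX_CMD_IPSEC);
if (ret < 0)
	return ret; /* -EIO: firmware never acknowledged the mailbox update */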
/*
@@ -625,6 +625,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
uint32_t
nfp_check_offloads(struct rte_eth_dev *dev)
{
+ uint32_t cap;
uint32_t ctrl = 0;
uint64_t rx_offload;
uint64_t tx_offload;
@@ -632,13 +633,14 @@ nfp_check_offloads(struct rte_eth_dev *dev)
struct rte_eth_conf *dev_conf;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ cap = hw->super.cap;
dev_conf = &dev->data->dev_conf;
rx_offload = dev_conf->rxmode.offloads;
tx_offload = dev_conf->txmode.offloads;
if ((rx_offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0) {
- if ((hw->super.cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
}
@@ -646,25 +648,25 @@ nfp_check_offloads(struct rte_eth_dev *dev)
nfp_net_enable_rxvlan_cap(hw, &ctrl);
if ((rx_offload & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) != 0) {
- if ((hw->super.cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
}
hw->mtu = dev->data->mtu;
if ((tx_offload & RTE_ETH_TX_OFFLOAD_VLAN_INSERT) != 0) {
- if ((hw->super.cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN_V2;
- else if ((hw->super.cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
+ else if ((cap & NFP_NET_CFG_CTRL_TXVLAN) != 0)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
/* L2 broadcast */
- if ((hw->super.cap & NFP_NET_CFG_CTRL_L2BC) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_L2BC) != 0)
ctrl |= NFP_NET_CFG_CTRL_L2BC;
/* L2 multicast */
- if ((hw->super.cap & NFP_NET_CFG_CTRL_L2MC) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_L2MC) != 0)
ctrl |= NFP_NET_CFG_CTRL_L2MC;
/* TX checksum offload */
@@ -676,7 +678,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
/* LSO offload */
if ((tx_offload & RTE_ETH_TX_OFFLOAD_TCP_TSO) != 0 ||
(tx_offload & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO) != 0) {
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_LSO) != 0)
ctrl |= NFP_NET_CFG_CTRL_LSO;
else
ctrl |= NFP_NET_CFG_CTRL_LSO2;
@@ -1194,6 +1196,7 @@ nfp_net_tx_desc_limits(struct nfp_net_hw *hw,
int
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ uint32_t cap;
uint32_t cap_extend;
uint16_t min_rx_desc;
uint16_t max_rx_desc;
@@ -1224,32 +1227,34 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
/* Next should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
- if ((hw->super.cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
+ cap = hw->super.cap;
+
+ if ((cap & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) != 0)
dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
- if ((hw->super.cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_RXQINQ) != 0)
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
- if ((hw->super.cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_RXCSUM) != 0)
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
- if ((hw->super.cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
+ if ((cap & (NFP_NET_CFG_CTRL_TXVLAN | NFP_NET_CFG_CTRL_TXVLAN_V2)) != 0)
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
- if ((hw->super.cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_TXCSUM) != 0)
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
- if ((hw->super.cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
+ if ((cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0) {
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
- if ((hw->super.cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_VXLAN) != 0)
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
}
- if ((hw->super.cap & NFP_NET_CFG_CTRL_GATHER) != 0)
+ if ((cap & NFP_NET_CFG_CTRL_GATHER) != 0)
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
cap_extend = hw->super.cap_ext;
@@ -1292,7 +1297,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
};
- if ((hw->super.cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
+ if ((cap & NFP_NET_CFG_CTRL_RSS_ANY) != 0) {
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
dev_info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
@@ -1615,9 +1620,11 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
uint8_t mask;
uint32_t reta;
uint16_t shift;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
+ struct nfp_net_hw *net_hw;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hw = &net_hw->super;
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured (%hu)"
@@ -1642,7 +1649,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
/* If all 4 entries were set, don't need read RETA register */
if (mask != 0xF)
- reta = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_ITBL + i);
+ reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
for (j = 0; j < 4; j++) {
if ((mask & (0x1 << j)) == 0)
@@ -1655,7 +1662,7 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
reta |= reta_conf[idx].reta[shift + j] << (8 * j);
}
- nn_cfg_writel(&hw->super, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
+ nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift, reta);
}
return 0;
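/*
 * Worked example of the indexing above, assuming the loop steps i by 4
 * with idx = i / RTE_ETH_RETA_GROUP_SIZE and shift = i % RTE_ETH_RETA_GROUP_SIZE
 * (group size 64): entries are one byte wide, packed four per 32-bit
 * register. For i = 68: idx = 1, shift = 4, so the register written is
 * NFP_NET_CFG_RSS_ITBL + (1 * 64) + 4 and it covers table entries 68..71:
 */
reta = 0;
for (j = 0; j < 4; j++)
	reta |= reta_conf[1].reta[4 + j] << (8 * j);
nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (1 * 64) + 4, reta);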
@@ -1702,10 +1709,13 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
uint8_t mask;
uint32_t reta;
uint16_t shift;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
+ struct nfp_net_hw *net_hw;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hw = &net_hw->super;
+
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
return -EINVAL;
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
@@ -1728,7 +1738,7 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
if (mask == 0)
continue;
- reta = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
+ reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift);
for (j = 0; j < 4; j++) {
if ((mask & (0x1 << j)) == 0)
continue;
@@ -1748,15 +1758,17 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
uint8_t i;
uint8_t key;
uint64_t rss_hf;
- struct nfp_net_hw *hw;
+ struct nfp_hw *hw;
+ struct nfp_net_hw *net_hw;
uint32_t cfg_rss_ctrl = 0;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hw = &net_hw->super;
/* Writing the key byte by byte */
for (i = 0; i < rss_conf->rss_key_len; i++) {
memcpy(&key, &rss_conf->rss_key[i], 1);
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_RSS_KEY + i, key);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
}
rss_hf = rss_conf->rss_hf;
@@ -1789,10 +1801,10 @@ nfp_net_rss_hash_write(struct rte_eth_dev *dev,
cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
/* Configuring where to apply the RSS hash */
- nn_cfg_writel(&hw->super, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
+ nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
/* Writing the key size */
- nn_cfg_writeb(&hw->super, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
return 0;
}
@@ -1843,16 +1855,18 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
uint8_t i;
uint8_t key;
uint64_t rss_hf;
+ struct nfp_hw *hw;
uint32_t cfg_rss_ctrl;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ hw = &net_hw->super;
- if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_RSS_ANY) == 0)
return -EINVAL;
rss_hf = rss_conf->rss_hf;
- cfg_rss_ctrl = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_CTRL);
+ cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
if ((cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4) != 0)
rss_hf |= RTE_ETH_RSS_IPV4;
@@ -1882,11 +1896,11 @@ nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
rss_conf->rss_hf = rss_hf;
/* Reading the key size */
- rss_conf->rss_key_len = nn_cfg_readl(&hw->super, NFP_NET_CFG_RSS_KEY_SZ);
+ rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
/* Reading the key byte by byte */
for (i = 0; i < rss_conf->rss_key_len; i++) {
- key = nn_cfg_readb(&hw->super, NFP_NET_CFG_RSS_KEY + i);
+ key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
memcpy(&rss_conf->rss_key[i], &key, 1);
}
@@ -29,15 +29,15 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
uint32_t length;
uint32_t offset;
uint32_t tlv_type;
- struct nfp_net_hw *hw;
+ struct nfp_net_hw *net_hw;
struct nfp_net_tlv_caps *caps;
- hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- caps = &hw->tlv_caps;
+ net_hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ caps = &net_hw->tlv_caps;
nfp_net_tlv_caps_reset(caps);
- data = hw->super.ctrl_bar + NFP_NET_CFG_TLV_BASE;
- end = hw->super.ctrl_bar + NFP_NET_CFG_BAR_SZ;
+ data = net_hw->super.ctrl_bar + NFP_NET_CFG_TLV_BASE;
+ end = net_hw->super.ctrl_bar + NFP_NET_CFG_BAR_SZ;
hdr = rte_read32(data);
if (hdr == 0) {
@@ -46,7 +46,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
}
for (; ; data += length) {
- offset = data - hw->super.ctrl_bar;
+ offset = data - net_hw->super.ctrl_bar;
if (data + NFP_NET_CFG_TLV_VALUE > end) {
PMD_DRV_LOG(ERR, "Reached end of BAR without END TLV");
@@ -87,7 +87,7 @@ nfp_net_tlv_caps_parse(struct rte_eth_dev *dev)
caps->mbox_len = length;
if (length != 0)
- caps->mbox_off = data - hw->super.ctrl_bar;
+ caps->mbox_off = data - net_hw->super.ctrl_bar;
else
caps->mbox_off = 0;
break;
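/*
 * Shape of the TLV stream being walked above, following the layout the
 * NFP drivers use (assumption: a 32-bit header encodes type and length,
 * with the value bytes immediately after it; NFP_NET_CFG_TLV_VALUE is
 * the offset from a header to its value):
 *
 *   ctrl_bar + NFP_NET_CFG_TLV_BASE:
 *     [ hdr(type,length) | value... ][ hdr | value... ] ... [ END TLV ]
 *
 * so "data += length" hops from one value to the next header, and stored
 * offsets such as mbox_off are simply "value pointer - ctrl_bar".
 */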
@@ -336,10 +336,10 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
struct nfp_net_rxq *rxq,
struct rte_mbuf *mb)
{
- struct nfp_net_hw *hw = rxq->hw;
+ uint32_t ctrl = rxq->hw->super.ctrl;
- /* Skip if firmware don't support setting vlan. */
- if ((hw->super.ctrl & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) == 0)
+ /* Skip if hardware does not support setting vlan. */
+ if ((ctrl & (NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_RXVLAN_V2)) == 0)
return;
/*
@@ -347,12 +347,12 @@ nfp_net_parse_meta_vlan(const struct nfp_meta_parsed *meta,
* 1. Using the metadata when NFP_NET_CFG_CTRL_RXVLAN_V2 is set,
* 2. Using the descriptor when NFP_NET_CFG_CTRL_RXVLAN is set.
*/
- if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) {
+ if ((ctrl & NFP_NET_CFG_CTRL_RXVLAN_V2) != 0) {
if (meta->vlan_layer > 0 && meta->vlan[0].offload != 0) {
mb->vlan_tci = rte_cpu_to_le_32(meta->vlan[0].tci);
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
}
- } else if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RXVLAN) != 0) {
+ } else if ((ctrl & NFP_NET_CFG_CTRL_RXVLAN) != 0) {
if ((rxd->rxd.flags & PCIE_DESC_RX_VLAN) != 0) {
mb->vlan_tci = rte_cpu_to_le_32(rxd->rxd.offload_info);
mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
@@ -383,10 +383,10 @@ nfp_net_parse_meta_qinq(const struct nfp_meta_parsed *meta,
struct nfp_net_rxq *rxq,
struct rte_mbuf *mb)
{
- struct nfp_net_hw *hw = rxq->hw;
+ struct nfp_hw *hw = &rxq->hw->super;
- if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RXQINQ) == 0 ||
- (hw->super.cap & NFP_NET_CFG_CTRL_RXQINQ) == 0)
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_RXQINQ) == 0 ||
+ (hw->cap & NFP_NET_CFG_CTRL_RXQINQ) == 0)
return;
if (meta->vlan_layer < NFP_META_MAX_VLANS)
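/*
 * A closing note on the double test in the hunk above: hw->cap holds the
 * feature bits the device advertises, hw->ctrl the bits currently
 * enabled, so QinQ metadata is parsed only when stripping is both
 * supported and switched on. The same cap-versus-ctrl split explains the
 * capability checks throughout this series.
 */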