From patchwork Fri Nov 24 06:33:24 2017
X-Patchwork-Submitter: Jingjing Wu <jingjing.wu@intel.com>
X-Patchwork-Id: 31627
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Jingjing Wu <jingjing.wu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, wenzhuo.lu@intel.com
Date: Fri, 24 Nov 2017 14:33:24 +0800
Message-Id: <1511505206-97333-13-git-send-email-jingjing.wu@intel.com>
X-Mailer: git-send-email 2.4.11
In-Reply-To: <1511505206-97333-1-git-send-email-jingjing.wu@intel.com>
References: <1508488012-82704-1-git-send-email-jingjing.wu@intel.com>
 <1511505206-97333-1-git-send-email-jingjing.wu@intel.com>
Subject: [dpdk-dev] [PATCH v2 12/14] net/avf: enable sse vector Rx Tx func

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
 config/common_base                    |   1 +
 drivers/net/avf/Makefile              |   1 +
 drivers/net/avf/avf.h                 |   4 +
 drivers/net/avf/avf_ethdev.c          |  11 +
 drivers/net/avf/avf_rxtx.c            | 178 ++++++++-
 drivers/net/avf/avf_rxtx.h            |  34 ++
 drivers/net/avf/avf_rxtx_vec_common.h | 238 ++++++++++++
 drivers/net/avf/avf_rxtx_vec_sse.c    | 680 ++++++++++++++++++++++++++++++++++
 8 files changed, 1136 insertions(+), 11 deletions(-)
 create mode 100644 drivers/net/avf/avf_rxtx_vec_common.h
 create mode 100644 drivers/net/avf/avf_rxtx_vec_sse.c

diff --git a/config/common_base b/config/common_base
index b1f1c1c..cdb8735 100644
--- a/config/common_base
+++ b/config/common_base
@@ -233,6 +233,7 @@ CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
 CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
 CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
 CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
+CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
 
 #
 # Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
diff --git a/drivers/net/avf/Makefile b/drivers/net/avf/Makefile
index 6193fa9..ff9d523 100644
--- a/drivers/net/avf/Makefile
+++ b/drivers/net/avf/Makefile
@@ -58,5 +58,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_INC_VECTOR) += avf_rxtx_vec_sse.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/avf/avf.h b/drivers/net/avf/avf.h
index f39bebc..d4c275e 100644
--- a/drivers/net/avf/avf.h
+++ b/drivers/net/avf/avf.h
@@ -146,6 +146,10 @@ struct avf_adapter {
 	struct avf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct avf_info vf;
+
+	/* For vector PMD */
+	bool rx_vec_allowed;
+	bool tx_vec_allowed;
 };
 
 /* AVF_DEV_PRIVATE_TO */
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 8f382ff..1e8d9c0 100644
--- 
a/drivers/net/avf/avf_ethdev.c +++ b/drivers/net/avf/avf_ethdev.c @@ -149,6 +149,17 @@ avf_dev_configure(struct rte_eth_dev *dev) AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; +#ifdef RTE_LIBRTE_AVF_INC_VECTOR + /* Initialize to TRUE. If any of Rx queues doesn't meet the + * vector Rx/Tx preconditions, it will be reset. + */ + ad->rx_vec_allowed = true; + ad->tx_vec_allowed = true; +#else + ad->rx_vec_allowed = false; + ad->tx_vec_allowed = false; +#endif + /* Vlan stripping setting */ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) avf_enable_vlan_strip(ad); diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c index 8e79efd..079d49b 100644 --- a/drivers/net/avf/avf_rxtx.c +++ b/drivers/net/avf/avf_rxtx.c @@ -121,6 +121,38 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh, return 0; } +#ifdef RTE_LIBRTE_AVF_INC_VECTOR +static inline bool +check_rx_vec_allow(struct avf_rx_queue *rxq) +{ + if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST && + rxq->nb_rx_desc % rxq->rx_free_thresh == 0) { + PMD_INIT_LOG(DEBUG, "Vector Rx" + " can be enabled on this rxq."); + return TRUE; + } + + PMD_INIT_LOG(DEBUG, "Vector Rx" + " cannot be enabled on this rxq."); + return FALSE; +} + +static inline bool +check_tx_vec_allow(struct avf_tx_queue *txq) +{ + if (((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS) && + (txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST) && + (txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF)) { + PMD_INIT_LOG(DEBUG, "Vector tx" + " can be enabled on this txq."); + return TRUE; + } + PMD_INIT_LOG(DEBUG, "Vector Tx" + " cannot be enabled on this txq."); + return FALSE; +} +#endif + static inline void reset_rx_queue(struct avf_rx_queue *rxq) { @@ -254,6 +286,14 @@ release_txq_mbufs(struct avf_tx_queue *txq) } } +static const struct avf_rxq_ops def_rxq_ops = { + .release_mbufs = release_rxq_mbufs, +}; + +static const struct avf_txq_ops def_txq_ops = { + .release_mbufs = release_txq_mbufs, +}; + int avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, @@ -353,7 +393,12 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->q_set = TRUE; dev->data->rx_queues[queue_idx] = rxq; rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id); + rxq->ops = &def_rxq_ops; +#ifdef RTE_LIBRTE_AVF_INC_VECTOR + if (check_rx_vec_allow(rxq) == FALSE) + ad->rx_vec_allowed = false; +#endif return 0; } @@ -365,6 +410,8 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf) { struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct avf_adapter *ad = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct avf_tx_queue *txq; const struct rte_memzone *mz; uint32_t ring_size; @@ -444,6 +491,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->q_set = TRUE; dev->data->tx_queues[queue_idx] = txq; txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx); + txq->ops = &def_txq_ops; + +#ifdef RTE_LIBRTE_AVF_INC_VECTOR + if (check_tx_vec_allow(txq) == FALSE) + ad->tx_vec_allowed = false; +#endif return 0; } @@ -542,7 +595,7 @@ avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) } rxq = dev->data->rx_queues[rx_queue_id]; - release_rxq_mbufs(rxq); + rxq->ops->release_mbufs(rxq); reset_rx_queue(rxq); dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; @@ -570,7 +623,7 @@ avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) } txq = 
dev->data->tx_queues[tx_queue_id]; - release_txq_mbufs(txq); + txq->ops->release_mbufs(txq); reset_tx_queue(txq); dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; @@ -585,7 +638,7 @@ avf_dev_rx_queue_release(void *rxq) if (!q) return; - release_rxq_mbufs(q); + q->ops->release_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -599,7 +652,7 @@ avf_dev_tx_queue_release(void *txq) if (!q) return; - release_txq_mbufs(q); + q->ops->release_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -623,7 +676,7 @@ avf_stop_queues(struct rte_eth_dev *dev) txq = dev->data->tx_queues[i]; if (!txq) continue; - release_txq_mbufs(txq); + txq->ops->release_mbufs(txq); reset_tx_queue(txq); dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } @@ -631,7 +684,7 @@ avf_stop_queues(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (!rxq) continue; - release_rxq_mbufs(rxq); + rxq->ops->release_mbufs(rxq); reset_rx_queue(rxq); dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } @@ -1348,6 +1401,28 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_tx; } +static uint16_t +avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + /* TX prep functions */ uint16_t avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, @@ -1400,18 +1475,64 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, void avf_set_rx_function(struct rte_eth_dev *dev) { - if (dev->data->scattered_rx) + struct avf_adapter *adapter = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct avf_rx_queue *rxq; + int i; + + if (adapter->rx_vec_allowed) { + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback" + " (port=%d).", dev->data->port_id); + dev->rx_pkt_burst = avf_recv_scattered_pkts_vec; + } else { + PMD_DRV_LOG(DEBUG, "Using Vector Rx callback" + " (port=%d).", dev->data->port_id); + dev->rx_pkt_burst = avf_recv_pkts_vec; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + avf_rxq_vec_setup(rxq); + } + } else if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).", + dev->data->port_id); dev->rx_pkt_burst = avf_recv_scattered_pkts; - else + } else { + PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).", + dev->data->port_id); dev->rx_pkt_burst = avf_recv_pkts; + } } -/* choose rx function*/ +/* choose tx function*/ void avf_set_tx_function(struct rte_eth_dev *dev) { - dev->tx_pkt_burst = avf_xmit_pkts; - dev->tx_pkt_prepare = avf_prep_pkts; + struct avf_adapter *adapter = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct avf_tx_queue *txq; + int i; + + if (adapter->tx_vec_allowed) { + PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).", + dev->data->port_id); + dev->tx_pkt_burst = avf_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + avf_txq_vec_setup(txq); + } + } else { + PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).", + dev->data->port_id); + 
dev->tx_pkt_burst = avf_xmit_pkts; + dev->tx_pkt_prepare = avf_prep_pkts; + } } void @@ -1533,3 +1654,38 @@ avf_dev_tx_desc_status(void *tx_queue, uint16_t offset) return RTE_ETH_TX_DESC_FULL; } + +uint16_t __attribute__((weak)) +avf_recv_pkts_vec(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +avf_recv_scattered_pkts_vec(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +avf_xmit_fixed_burst_vec(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} +int __attribute__((weak)) +avf_rxq_vec_setup(struct avf_rx_queue __rte_unused *rxq) +{ + return -1; +} + +int __attribute__((weak)) +avf_txq_vec_setup(struct avf_tx_queue __rte_unused *txq) +{ + return -1; +} diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h index 8e1025c..0246a73 100644 --- a/drivers/net/avf/avf_rxtx.h +++ b/drivers/net/avf/avf_rxtx.h @@ -45,6 +45,15 @@ /* used for Rx Bulk Allocate */ #define AVF_RX_MAX_BURST 32 +/* used for Vector PMD */ +#define AVF_VPMD_RX_MAX_BURST 32 +#define AVF_VPMD_TX_MAX_BURST 32 +#define AVF_VPMD_DESCS_PER_LOOP 4 +#define AVF_VPMD_TX_MAX_FREE_BUF 64 + +#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ + ETH_TXQ_FLAGS_NOOFFLOADS) + #define DEFAULT_TX_RS_THRESH 32 #define DEFAULT_TX_FREE_THRESH 32 @@ -74,6 +83,14 @@ #define avf_rx_desc avf_32byte_rx_desc #endif +struct avf_rxq_ops { + void (*release_mbufs)(struct avf_rx_queue *rxq); +}; + +struct avf_txq_ops { + void (*release_mbufs)(struct avf_tx_queue *txq); +}; + /* Structure associated with each Rx queue. 
*/ struct avf_rx_queue { struct rte_mempool *mp; /* mbuf pool to populate Rx ring */ @@ -90,6 +107,11 @@ struct avf_rx_queue { struct rte_mbuf *pkt_last_seg; /* last segment of current packet */ struct rte_mbuf fake_mbuf; /* dummy mbuf */ + /* used for VPMD */ + uint16_t rxrearm_nb; /* number of remaining to be re-armed */ + uint16_t rxrearm_start; /* the idx we start the re-arming from */ + uint64_t mbuf_initializer; /* value to init mbufs */ + uint8_t port_id; /* device port ID */ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ uint16_t queue_id; /* Rx queue index */ @@ -99,6 +121,7 @@ struct avf_rx_queue { bool q_set; /* if rx queue has been configured */ bool rx_deferred_start; /* don't start this queue in dev start */ + const struct avf_rxq_ops *ops; }; struct avf_tx_entry { @@ -130,6 +153,7 @@ struct avf_tx_queue { bool q_set; /* if rx queue has been configured */ bool tx_deferred_start; /* don't start this queue in dev start */ + const struct avf_txq_ops *ops; }; /* Offload features */ @@ -183,6 +207,16 @@ uint32_t avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id); int avf_dev_rx_desc_status(void *rx_queue, uint16_t offset); int avf_dev_tx_desc_status(void *tx_queue, uint16_t offset); +uint16_t avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t avf_recv_scattered_pkts_vec(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int avf_rxq_vec_setup(struct avf_rx_queue *rxq); +int avf_txq_vec_setup(struct avf_tx_queue *txq); + static inline void avf_dump_rx_descriptor(struct avf_rx_queue *rxq, const void *desc, diff --git a/drivers/net/avf/avf_rxtx_vec_common.h b/drivers/net/avf/avf_rxtx_vec_common.h new file mode 100644 index 0000000..726b68e --- /dev/null +++ b/drivers/net/avf/avf_rxtx_vec_common.h @@ -0,0 +1,238 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AVF_RXTX_VEC_COMMON_H_
+#define _AVF_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "avf.h"
+#include "avf_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct avf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+		   uint16_t nb_bufs, uint8_t *split_flags)
+{
+	struct rte_mbuf *pkts[AVF_VPMD_RX_MAX_BURST];
+	struct rte_mbuf *start = rxq->pkt_first_seg;
+	struct rte_mbuf *end = rxq->pkt_last_seg;
+	unsigned pkt_idx, buf_idx;
+
+	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+		if (end != NULL) {
+			/* processing a split packet */
+			end->next = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+			start->nb_segs++;
+			start->pkt_len += rx_bufs[buf_idx]->data_len;
+			end = end->next;
+
+			if (!split_flags[buf_idx]) {
+				/* it's the last packet of the set */
+				start->hash = end->hash;
+				start->ol_flags = end->ol_flags;
+				/* we need to strip crc for the whole packet */
+				start->pkt_len -= rxq->crc_len;
+				if (end->data_len > rxq->crc_len)
+					end->data_len -= rxq->crc_len;
+				else {
+					/* free up last mbuf */
+					struct rte_mbuf *secondlast = start;
+
+					start->nb_segs--;
+					while (secondlast->next != end)
+						secondlast = secondlast->next;
+					secondlast->data_len -= (rxq->crc_len -
+							end->data_len);
+					secondlast->next = NULL;
+					rte_pktmbuf_free_seg(end);
+				}
+				pkts[pkt_idx++] = start;
+				start = end = NULL;
+			}
+		} else {
+			/* not processing a split packet */
+			if (!split_flags[buf_idx]) {
+				/* not a split packet, save and skip */
+				pkts[pkt_idx++] = rx_bufs[buf_idx];
+				continue;
+			}
+			end = start = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+		}
+	}
+
+	/* save the partial packet for next time */
+	rxq->pkt_first_seg = start;
+	rxq->pkt_last_seg = end;
+	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+	return pkt_idx;
+}
+
+static __rte_always_inline int
+avf_tx_free_bufs(struct avf_tx_queue *txq)
+{
+	struct avf_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[AVF_VPMD_TX_MAX_FREE_BUF];
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->next_dd].cmd_type_offset_bsz &
+	     rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
+	    rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * next_dd - (rs_thresh - 1)
+	 */
+	txep = &txq->sw_ring[txq->next_dd - (n - 1)];
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m != NULL)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							     (void *)free,
+							     nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_free = (uint16_t)(txq->nb_free + txq->rs_thresh);
+	txq->next_dd = (uint16_t)(txq->next_dd + txq->rs_thresh);
+	if (txq->next_dd >= txq->nb_tx_desc)
+		txq->next_dd = (uint16_t)(txq->rs_thresh - 1);
+
+	return txq->rs_thresh;
+}
+
+static __rte_always_inline void
+tx_backlog_entry(struct avf_tx_entry *txep,
+		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_avf_rx_queue_release_mbufs_vec(struct avf_rx_queue *rxq)
+{
+	const unsigned mask = rxq->nb_rx_desc - 1;
+	unsigned i;
+
+	if (!rxq->sw_ring || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+		return;
+
+	/* free all mbufs that are valid in the ring */
+	if (rxq->rxrearm_nb == 0) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i] != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+		}
+	} else {
+		for (i = rxq->rx_tail;
+		     i != rxq->rxrearm_start;
+		     i = (i + 1) & mask) {
+			if (rxq->sw_ring[i] != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+		}
+	}
+
+	rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+	/* set all entries to NULL */
+	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_avf_tx_queue_release_mbufs_vec(struct avf_tx_queue *txq)
+{
+	unsigned i;
+	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+	if (!txq->sw_ring || txq->nb_free == max_desc)
+		return;
+
+	i = txq->next_dd - txq->rs_thresh + 1;
+	if (txq->tx_tail < i) {
+		for (; i < txq->nb_tx_desc; i++) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+			txq->sw_ring[i].mbuf = NULL;
+		}
+		i = 0;
+	}
+	for (; i < txq->tx_tail; i++) {
+		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+		txq->sw_ring[i].mbuf = NULL;
+	}
+}
+
+static inline int
+avf_rxq_vec_setup_default(struct avf_rx_queue *rxq)
+{
+	uintptr_t p;
+	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+	mb_def.nb_segs = 1;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.port = rxq->port_id;
+	rte_mbuf_refcnt_set(&mb_def, 1);
+
+	/* prevent compiler reordering: rearm_data covers previous fields */
+	rte_compiler_barrier();
+	p = (uintptr_t)&mb_def.rearm_data;
+	rxq->mbuf_initializer = *(uint64_t *)p;
+	return 0;
+}
+#endif
diff --git a/drivers/net/avf/avf_rxtx_vec_sse.c b/drivers/net/avf/avf_rxtx_vec_sse.c
new file mode 100644
index 0000000..dce55a1
--- /dev/null
+++ b/drivers/net/avf/avf_rxtx_vec_sse.c
@@ -0,0 +1,680 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/avf_prototype.h"
+#include "base/avf_type.h"
+#include "avf.h"
+#include "avf_rxtx.h"
+#include "avf_rxtx_vec_common.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+avf_rxq_rearm(struct avf_rx_queue *rxq)
+{
+	int i;
+	uint16_t rx_id;
+	volatile union avf_rx_desc *rxdp;
+	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf *mb0, *mb1;
+	__m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+					  RTE_PKTMBUF_HEADROOM);
+	__m128i dma_addr0, dma_addr1;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+	/* Pull 'n' more MBUFs into the software ring */
+	if (rte_mempool_get_bulk(rxq->mp, (void *)rxp,
+				 rxq->rx_free_thresh) < 0) {
+		if (rxq->rxrearm_nb + rxq->rx_free_thresh >= rxq->nb_rx_desc) {
+			dma_addr0 = _mm_setzero_si128();
+			for (i = 0; i < AVF_VPMD_DESCS_PER_LOOP; i++) {
+				rxp[i] = &rxq->fake_mbuf;
+				_mm_store_si128((__m128i *)&rxdp[i].read,
+						dma_addr0);
+			}
+		}
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			rxq->rx_free_thresh;
+		return;
+	}
+
+	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
+	for (i = 0; i < rxq->rx_free_thresh; i += 2, rxp += 2) {
+		__m128i vaddr0, vaddr1;
+
+		mb0 = rxp[0];
+		mb1 = rxp[1];
+
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+				 offsetof(struct rte_mbuf, buf_addr) + 8);
+		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+		/* convert pa to dma_addr hdr/data */
+		dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+		dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+		/* add headroom to pa values */
+		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+		/* flush desc with pa dma_addr */
+		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+	}
+
+	rxq->rxrearm_start += rxq->rx_free_thresh;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= rxq->rx_free_thresh;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+		   "rearm_start=%u rearm_nb=%u",
+		   rxq->port_id, rxq->queue_id,
+		   rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
+
+	/* Update the tail pointer on the NIC */
+	AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(struct avf_rx_queue *rxq, __m128i descs[4],
+		  struct rte_mbuf **rx_pkts)
+{
+	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+	__m128i rearm0, rearm1, rearm2, rearm3;
+
+	__m128i vlan0, vlan1, rss, l3_l4e;
+
+	/* mask everything except RSS, flow director and VLAN flags
+	 * bit2 is for VLAN tag, bit11 for flow director indication
+	 * bit13:12 for RSS indication.
+	 */
+	const __m128i rss_vlan_msk = _mm_set_epi32(
+			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+
+	const __m128i cksum_mask = _mm_set_epi32(
+			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+			PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+			PKT_RX_EIP_CKSUM_BAD);
+
+	/* map rss and vlan type to rss hash and vlan flag */
+	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			0, 0, 0, 0);
+
+	const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+			0, 0, 0, 0,
+			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
+			0, 0, PKT_RX_FDIR, 0);
+
+	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+			/* shift right 1 bit to make sure it does not exceed 255 */
+			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			 PKT_RX_IP_CKSUM_BAD) >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+			 PKT_RX_L4_CKSUM_BAD) >> 1,
+			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+			PKT_RX_IP_CKSUM_BAD >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+
+	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
+
+	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+	rss = _mm_srli_epi32(vlan1, 11);
+	rss = _mm_shuffle_epi8(rss_flags, rss);
+
+	l3_l4e = _mm_srli_epi32(vlan1, 22);
+	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+	/* then we shift left 1 bit */
+	l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+	/* we need to mask out the redundant bits */
+	l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
+
+	vlan0 = _mm_or_si128(vlan0, rss);
+	vlan0 = _mm_or_si128(vlan0, l3_l4e);
+
+	/* At this point, we have the 4 sets of flags in the low 16-bits
+	 * of each 32-bit value in vlan0.
+	 * We want to extract these, and merge them with the mbuf init data
+	 * so we can do a single 16-byte write to the mbuf to set the flags
+	 * and all the other initialization fields. Extracting the
+	 * appropriate flags means that we have to do a shift and blend for
+	 * each mbuf before we do the write.
+ */ + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10); + rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10); + + /* write the rearm data and the olflags in one write */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0); + _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1); + _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2); + _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3); +} + +#define PKTLEN_SHIFT 10 + +static inline void +desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +{ + __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]); + __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]); + static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + /* [2] - [21] reserved */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + /* All others reserved */ + }; + + ptype0 = _mm_srli_epi64(ptype0, 30); + ptype1 = _mm_srli_epi64(ptype1, 30); + + rx_pkts[0]->packet_type = type_table[_mm_extract_epi8(ptype0, 0)]; + rx_pkts[1]->packet_type = type_table[_mm_extract_epi8(ptype0, 8)]; + rx_pkts[2]->packet_type = type_table[_mm_extract_epi8(ptype1, 0)]; + rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)]; +} + +/* Notice: + * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet + * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST + * numbers of DD bits + */ +static inline uint16_t +_recv_raw_pkts_vec(struct avf_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union avf_rx_desc *rxdp; + struct rte_mbuf **sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + __m128i shuf_msk; + + __m128i crc_adjust = _mm_set_epi16( + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + /* compile-time check the above crc_adjust layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi16 + * call above. 
+ */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + __m128i dd_check, eop_check; + + /* nb_pkts shall be less equal than AVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, AVF_VPMD_RX_MAX_BURST); + + /* nb_pkts has to be floor-aligned to AVF_VPMD_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, AVF_VPMD_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > rxq->rx_free_thresh) + avf_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << AVF_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* 4 packets DD mask */ + dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL); + + /* 4 packets EOP mask */ + eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); + + /* mask to shuffle from desc. to mbuf */ + shuf_msk = _mm_set_epi8( + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, 0xFF, 0xFF /* pkt_type set as unknown */ + ); + /* Compile-time verify the shuffle mask + * NOTE: some field positions already verified above, but duplicated + * here for completeness in case of future modifications. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * [A*. mask out 4 unused dirty field in desc] + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += AVF_VPMD_DESCS_PER_LOOP, + rxdp += AVF_VPMD_DESCS_PER_LOOP) { + __m128i descs[AVF_VPMD_DESCS_PER_LOOP]; + __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + __m128i zero, staterr, sterr_tmp1, sterr_tmp2; + /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. 
*/
+		__m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+		__m128i mbp2;
+#endif
+
+		/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
+		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+		/* Read desc statuses backwards to avoid race condition */
+		/* A.1 load 4 pkts desc */
+		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+		rte_compiler_barrier();
+
+		/* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.1 load 2 64 bit mbuf points */
+		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
+#endif
+
+		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+		rte_compiler_barrier();
+		/* B.1 load 2 mbuf point */
+		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+		rte_compiler_barrier();
+		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+#if defined(RTE_ARCH_X86_64)
+		/* B.2 copy 2 mbuf point into rx_pkts */
+		_mm_storeu_si128((__m128i *)&rx_pkts[pos + 2], mbp2);
+#endif
+
+		if (split_packet) {
+			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+		}
+
+		/* avoid compiler reorder optimization */
+		rte_compiler_barrier();
+
+		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+		const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+		const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+		/* merge the now-aligned packet length fields back in */
+		descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+		descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
+		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
+		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+		/* C.1 4=>2 status err info only */
+		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+		desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
+
+		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+		const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+		const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+		/* merge the now-aligned packet length fields back in */
+		descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+		descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+		/* C.2 get 4 pkts status err value */
+		zero = _mm_xor_si128(dd_check, dd_check);
+		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+		/* D.3 copy final 3,4 data to rx_pkts */
+		_mm_storeu_si128(
+			(void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			pkt_mb4);
+		_mm_storeu_si128(
+			(void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			pkt_mb3);
+
+		/* D.2 pkt 1,2 remove crc */
+		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+		/* C* extract and record EOP bit */
+		if (split_packet) {
+			__m128i eop_shuf_mask = _mm_set_epi8(
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0x04, 0x0C, 0x00, 0x08
+					);
+
+			/* and with mask to extract bits, flipping 1-0 */
+			__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+			 * packet tracking, we do care, so shuffle. This also
+			 * compresses the 32-bit values to 8-bit
+			 */
+			eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+			/* store the resulting 32-bit value */
+			*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+			split_packet += AVF_VPMD_DESCS_PER_LOOP;
+		}
+
+		/* C.3 calc available number of desc */
+		staterr = _mm_and_si128(staterr, dd_check);
+		staterr = _mm_packs_epi32(staterr, zero);
+
+		/* D.3 copy final 1,2 data to rx_pkts */
+		_mm_storeu_si128(
+			(void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			pkt_mb2);
+		_mm_storeu_si128(
+			(void *)&rx_pkts[pos]->rx_descriptor_fields1,
+			pkt_mb1);
+		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		/* C.4 calc available number of desc */
+		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+		nb_pkts_recd += var;
+		if (likely(var != AVF_VPMD_DESCS_PER_LOOP))
+			break;
+	}
+
+	/* Update our internal tail pointer */
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+	return nb_pkts_recd;
+}
+
+/* Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+avf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		  uint16_t nb_pkts)
+{
+	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			    uint16_t nb_pkts)
+{
+	struct avf_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[AVF_VPMD_RX_MAX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+					      split_flags);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (rxq->pkt_first_seg == NULL &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned i = 0;
+
+	if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble then */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+	}
+	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+				      &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct avf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
+{
+	uint64_t high_qw =
+		(AVF_TX_DESC_DTYPE_DATA |
+		 ((uint64_t)flags << AVF_TXD_QW1_CMD_SHIFT) |
+		 ((uint64_t)pkt->data_len << AVF_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+	__m128i descriptor = _mm_set_epi64x(high_qw,
+					    pkt->buf_iova + pkt->data_off);
+	_mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct avf_tx_desc *txdp, struct rte_mbuf **pkt,
+    uint16_t nb_pkts, uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+avf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+			 uint16_t nb_pkts)
+{
+	struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
+	volatile struct avf_tx_desc *txdp;
+	struct avf_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = AVF_TX_DESC_CMD_EOP | 0x04;  /* bit 2 must be set */
+	uint64_t rs = AVF_TX_DESC_CMD_RS | flags;
+	int i;
+
+	/* crossing rs_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->rs_thresh);
+
+	if (txq->nb_free < txq->free_thresh)
+		avf_tx_free_bufs(txq);
+
+	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_free, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &txq->sw_ring[tx_id];
+
+	txq->nb_free = (uint16_t)(txq->nb_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			vtx1(txdp, *tx_pkts, flags);
+
+		vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->next_rs = (uint16_t)(txq->rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &txq->tx_ring[tx_id];
+		txep = &txq->sw_ring[tx_id];
+	}
+
+	tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->next_rs) {
+		txq->tx_ring[txq->next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)AVF_TX_DESC_CMD_RS) <<
+					 AVF_TXD_QW1_CMD_SHIFT);
+		txq->next_rs =
+			(uint16_t)(txq->next_rs + txq->rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
+		   txq->port_id, txq->queue_id, tx_id, nb_pkts);
+
+	AVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+static void __attribute__((cold))
+avf_rx_queue_release_mbufs_sse(struct avf_rx_queue *rxq)
+{
+	_avf_rx_queue_release_mbufs_vec(rxq);
+}
+
+static void __attribute__((cold))
+avf_tx_queue_release_mbufs_sse(struct avf_tx_queue *txq)
+{
+	_avf_tx_queue_release_mbufs_vec(txq);
+}
+
+static const struct avf_rxq_ops sse_vec_rxq_ops = {
+	.release_mbufs = avf_rx_queue_release_mbufs_sse,
+};
+
+static const struct avf_txq_ops sse_vec_txq_ops = {
+	.release_mbufs = avf_tx_queue_release_mbufs_sse,
+};
+
+int __attribute__((cold))
+avf_txq_vec_setup(struct avf_tx_queue *txq)
+{
+	txq->ops = &sse_vec_txq_ops;
+	return 0;
+}
+
+int __attribute__((cold))
+avf_rxq_vec_setup(struct avf_rx_queue *rxq)
+{
+	rxq->ops = &sse_vec_rxq_ops;
+	return avf_rxq_vec_setup_default(rxq);
+}
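
Note on usage: the vector paths above are opt-in by build flag and queue
configuration. With CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y, avf_set_rx_function()
and avf_set_tx_function() still fall back to the scalar callbacks unless every
queue passes check_rx_vec_allow()/check_tx_vec_allow(). Below is a minimal
sketch, not part of the patch, of application-side queue setup that satisfies
those preconditions; it assumes the DPDK 17.11-era ethdev API (txq_flags and
the ETH_TXQ_FLAGS_* bits), and the helper name and descriptor counts are
illustrative only.

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical helper: configure queue 0 of an AVF port so the PMD can
 * select avf_recv_pkts_vec()/avf_xmit_pkts_vec() at dev_start time.
 */
static int
setup_vec_friendly_queues(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_rxconf rxconf = {
		/* check_rx_vec_allow(): rx_free_thresh must be at least
		 * AVF_VPMD_RX_MAX_BURST (32) and nb_rx_desc must be a
		 * multiple of it (512 % 32 == 0).
		 */
		.rx_free_thresh = 32,
	};
	struct rte_eth_txconf txconf = {
		/* check_tx_vec_allow(): no multi-seg, no offloads, i.e.
		 * the AVF_SIMPLE_FLAGS bits must all be set in txq_flags...
		 */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
		/* ...and AVF_VPMD_TX_MAX_BURST (32) <= tx_rs_thresh <=
		 * AVF_VPMD_TX_MAX_FREE_BUF (64).
		 */
		.tx_rs_thresh = 32,
		.tx_free_thresh = 32,
	};

	if (rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				   &rxconf, mp) != 0)
		return -1;
	return rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &txconf);
}

If any single queue misses these bounds, the corresponding rx_vec_allowed/
tx_vec_allowed flag is cleared for the whole port, which is why the checks run
per queue at setup time rather than once at configure time.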