@@ -19,6 +19,7 @@ SR-IOV = Y
FW version = Y
Jumbo frame = Y
MTU update = Y
+LRO = Y
Linux VFIO = Y
Linux UIO = Y
x86-64 = Y
@@ -286,6 +286,13 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
}
+
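+		/*
+		 * Set jumbo buffer placement for the VNIC, then enable or
+		 * disable TPA (hardware LRO) to match the configured rxmode.
+		 */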
+ bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+ else
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]);
if (rc) {
@@ -1152,19 +1152,15 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
req.flags = rte_cpu_to_le_32(
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT |
HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 | //TODO
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6);
+
req.enables = rte_cpu_to_le_32(
HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
-// HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID);
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;
req.jumbo_thresh = rte_cpu_to_le_16(size);
-// req.hds_threshold = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1174,6 +1170,41 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
return rc;
}
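+
+/*
+ * Configure TPA (hardware LRO) for a VNIC via HWRM_VNIC_TPA_CFG. When
+ * @enable is false the request is sent with zeroed flags, which tells
+ * the firmware to turn TPA off for this VNIC.
+ */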
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
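+		/*
+		 * Fixed aggregation limits; max_aggs and min_agg_len match
+		 * the values used by the Linux bnxt_en driver.
+		 */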
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
struct hwrm_func_cfg_input req = {0};
@@ -1493,6 +1524,9 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
bnxt_hwrm_vnic_free(bp, vnic);
}
/* Ring resources */
@@ -95,6 +95,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable);
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
@@ -31,6 +31,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <unistd.h>
@@ -137,11 +138,23 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
sizeof(struct rx_prod_pkt_bd)) : 0;
- int total_alloc_len = rx_ring_start + rx_ring_len;
- int ag_ring_start = 0;
+ int ag_ring_start = rx_ring_start + rx_ring_len;
+ int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
- ag_ring_start = rx_ring_start + rx_ring_len;
- total_alloc_len = ag_ring_start + rx_ring_len * AGG_RING_SIZE_FACTOR;
+ int ag_bitmap_start = ag_ring_start + ag_ring_len;
+ int ag_bitmap_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
+ rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR)) : 0;
+
+ int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+ int tpa_info_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
+ sizeof(struct bnxt_tpa_info)) : 0;
+
+ int total_alloc_len = tpa_info_start;
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
@@ -230,6 +243,17 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring_info->ag_buf_ring =
(struct bnxt_sw_rx_bd *)rx_ring->vmem;
}
+
+ rx_ring_info->ag_bitmap =
+ rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+ ag_bitmap_start, ag_bitmap_len);
+
+ /* TPA info */
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ rx_ring_info->tpa_info =
+ ((struct bnxt_tpa_info *)((char *)mz->addr +
+ tpa_info_start));
}
cp_ring->bd = ((char *)mz->addr + cp_ring_start);
@@ -57,8 +57,8 @@
#define DEFAULT_RX_RING_SIZE 256
#define DEFAULT_TX_RING_SIZE 256
-#define MAX_TPA 128
-#define AGG_RING_SIZE_FACTOR 2
+#define BNXT_TPA_MAX 64
+#define AGG_RING_SIZE_FACTOR 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
@@ -214,6 +214,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
@@ -236,6 +237,17 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
}
}
}
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
@@ -59,6 +59,8 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+
+ struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
@@ -34,6 +34,7 @@
#include <inttypes.h>
#include <stdbool.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -102,24 +103,28 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
return 0;
}
-#ifdef BNXT_DEBUG
-static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
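+/* Repost @mbuf in the next RX ring slot and advance the producer index. */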
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
struct rte_mbuf *mbuf)
{
- uint16_t prod = rxr->rx_prod;
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
struct bnxt_sw_rx_bd *prod_rx_buf;
- struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+ struct rx_prod_pkt_bd *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
prod_rx_buf->mbuf = mbuf;
prod_bd = &rxr->rx_desc_ring[prod];
- cons_bd = &rxr->rx_desc_ring[cons];
- prod_bd->addr = cons_bd->addr;
+ prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+
+ rxr->rx_prod = prod;
}
+#ifdef BNXT_DEBUG
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
struct rte_mbuf *mbuf)
{
@@ -138,7 +143,192 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
}
#endif
-static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
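+/*
+ * Handle a TPA_START completion: move the mbuf from the RX ring into the
+ * per-agg_id TPA slot, recycle the previous TPA mbuf back onto the ring,
+ * and seed the new mbuf with the offload metadata from the completion.
+ */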
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
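+/*
+ * Completions land in order, so if the last of @agg_bufs aggregation
+ * completions is valid, the whole burst has been written by hardware.
+ */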
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/*
+ * TPA consumes agg buffers out of order; refill only the contiguous
+ * run of freed slots starting at the producer.
+ */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ RTE_LOG(ERR, PMD,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
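+/*
+ * Chain @agg_buf aggregation buffers onto @mbuf, marking each consumed
+ * agg ring slot in the bitmap so it can be refilled later.
+ */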
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+		/*
+		 * The TPA path consumes aggregation buffers out of order, so
+		 * use the bitmap to track freed slots that still need to be
+		 * reallocated and posted to the NIC.
+		 */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
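+/*
+ * Handle a TPA_END completion: attach any trailing aggregation buffers
+ * to the accumulated mbuf and allocate a fresh mbuf for this agg_id.
+ */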
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data)
+ return NULL;
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -148,12 +338,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
uint32_t tmp_raw_cons = *raw_cons;
uint16_t cons, prod, cp_cons =
RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- uint16_t ag_cons, ag_prod = rxr->ag_prod;
- struct bnxt_sw_rx_bd *rx_buf;
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
struct rte_mbuf *mbuf;
int rc = 0;
- uint8_t i;
uint8_t agg_buf = 0;
+ uint16_t cmp_type;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@@ -165,12 +356,34 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+	} else if (cmp_type != 0x11) {
+		/* Not a normal L2 RX completion (CMPL_BASE_TYPE_RX_L2) */
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
prod = rxr->rx_prod;
- /* EW - GRO deferred to phase 3 */
cons = rxcmp->opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- mbuf = rx_buf->mbuf;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
rte_prefetch0(mbuf);
if (mbuf == NULL)
@@ -190,61 +403,8 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
- agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
- >> RX_PKT_CMPL_AGG_BUFS_SFT;
- if (agg_buf) {
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons + agg_buf);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- if (!CMP_VALID(rxcmp, tmp_raw_cons + agg_buf,
- cpr->cp_ring_struct))
- return -EBUSY;
- RTE_LOG(DEBUG, PMD, "JUMBO Frame %d. %x, agg_buf %x,\n",
- mbuf->pkt_len, rxcmp->agg_bufs_v1, agg_buf);
- }
-
- for (i = 0; i < agg_buf; i++) {
- struct bnxt_sw_rx_bd *ag_buf;
- struct rte_mbuf *ag_mbuf;
- tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- ag_cons = rxcmp->opaque;
- ag_buf = &rxr->ag_buf_ring[ag_cons];
- ag_mbuf = ag_buf->mbuf;
- ag_mbuf->nb_segs = 1;
- ag_mbuf->data_len = rxcmp->len;
-
- mbuf->nb_segs++;
- mbuf->pkt_len += ag_mbuf->data_len;
- if (mbuf->next == NULL) {
- mbuf->next = ag_mbuf;
- } else {
- struct rte_mbuf *temp_mbuf = mbuf;
-
- while (temp_mbuf->next != NULL)
- temp_mbuf = temp_mbuf->next;
- temp_mbuf->next = ag_mbuf;
- }
- ag_buf->mbuf = NULL;
-
- ag_prod = RING_NEXT(rxr->ag_ring_struct, ag_prod);
- if (bnxt_alloc_ag_data(rxq, rxr, ag_prod)) {
- RTE_LOG(ERR, PMD,
- "agg mbuf alloc failed: prod=0x%x\n",
- ag_prod);
- rc = -ENOMEM;
- }
- rxr->ag_prod = ag_prod;
-
-#ifdef BNXT_DEBUG
- if (!CMP_VALID((struct cmpl_base *)
- &cpr->cp_desc_ring[cp_cons], tmp_raw_cons,
- cpr->cp_ring_struct))
- return -EBUSY;
-#endif
- }
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
@@ -254,7 +414,6 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN_PKT;
}
- rx_buf->mbuf = NULL;
#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
@@ -294,10 +453,8 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
*rx_pkt = mbuf;
-#ifdef BNXT_DEBUG
+
next_rx:
-#endif
- //rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod);
*raw_cons = tmp_raw_cons;
@@ -333,7 +490,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (likely(!rc))
nb_rx_pkts++;
- else if (rc == -EBUSY) /* partial completion */
+ if (rc == -EBUSY) /* partial completion */
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -341,6 +498,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
}
+ cpr->cp_raw_cons = raw_cons;
if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
/*
* For PMD, there is no need to keep on pushing to REARM
@@ -348,7 +506,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
return nb_rx_pkts;
}
- cpr->cp_raw_cons = raw_cons;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -518,5 +675,15 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf)
+ return -ENOMEM;
+ }
+ }
+ RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+
return 0;
}
@@ -37,6 +37,39 @@
#define B_RX_DB(db, prod) \
(*(uint32_t *)db = (DB_KEY_RX | prod))
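+
+/*
+ * Decode the hdr_info word of a TPA_START completion: the top five bits
+ * carry the L4 header size (defaulting to 32 when unset), the remaining
+ * bits are 9-bit header offsets.
+ */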
+#define BNXT_TPA_L4_SIZE(x)	\
+	({ \
+		typeof(x) hdr_info = (x); \
+		((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32; \
+	})
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
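+/* Per-aggregation-ID state for an in-progress TPA (LRO) flow. */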
+struct bnxt_tpa_info {
+ struct rte_mbuf *mbuf;
+ uint16_t len;
+ unsigned short gso_type;
+ uint32_t flags2;
+ uint32_t metadata;
+ enum pkt_hash_types hash_type;
+ uint32_t rss_hash;
+ uint32_t hdr_info;
+};
+
struct bnxt_sw_rx_bd {
struct rte_mbuf *mbuf; /* data associated with RX descriptor */
};
@@ -57,6 +90,13 @@ struct bnxt_rx_ring_info {
struct bnxt_ring *rx_ring_struct;
struct bnxt_ring *ag_ring_struct;
+
+	/*
+	 * TPA returns aggregation buffers out of order; this bitmap marks
+	 * freed slots that are awaiting a fresh buffer.
+	 */
+ struct rte_bitmap *ag_bitmap;
+
+ struct bnxt_tpa_info *tpa_info;
};
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,