From patchwork Mon Oct 23 14:21:57 2017
X-Patchwork-Submitter: Ophir Munk
X-Patchwork-Id: 30713
X-Patchwork-Delegate: ferruh.yigit@amd.com
From: Ophir Munk
To: Adrien Mazarguil
Cc: dev@dpdk.org, Thomas Monjalon, Olga Shern, Matan Azrad
Date: Mon, 23 Oct 2017 14:21:57 +0000
Message-Id: <1508768520-4810-5-git-send-email-ophirmu@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1508768520-4810-1-git-send-email-ophirmu@mellanox.com>
References: <1508752838-30408-1-git-send-email-ophirmu@mellanox.com>
 <1508768520-4810-1-git-send-email-ophirmu@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 4/7] net/mlx4: merge Tx path functions
List-Id: DPDK patches and discussions

From: Matan Azrad

Merge the tx_burst and mlx4_post_send functions to avoid checking the
remaining send queue (WQ) space twice for every packet. This should
improve performance.

Signed-off-by: Matan Azrad
---
 drivers/net/mlx4/mlx4_rxtx.c | 353 +++++++++++++++++++++----------------------
 1 file changed, 170 insertions(+), 183 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 4a77be8..014a6d3 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -267,183 +267,6 @@ rte_be32_t
 mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
 }
 /**
- * Posts a single work request to a send queue.
- *
- * @param txq
- *   Target Tx queue.
- * @param pkt
- *   Packet to transmit.
- *
- * @return
- *   0 on success, negative errno value otherwise and rte_errno is set.
- */
-static inline int
-mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt)
-{
-	struct mlx4_wqe_ctrl_seg *ctrl;
-	struct mlx4_wqe_data_seg *dseg;
-	struct mlx4_sq *sq = &txq->msq;
-	struct rte_mbuf *buf;
-	union {
-		uint32_t flags;
-		uint16_t flags16[2];
-	} srcrb;
-	uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
-	uintptr_t addr;
-	uint32_t owner_opcode = MLX4_OPCODE_SEND;
-	uint32_t byte_count;
-	int wqe_real_size;
-	int nr_txbbs;
-	struct pv *pv = (struct pv *)txq->bounce_buf;
-	int pv_counter = 0;
-
-	/* Calculate the needed work queue entry size for this packet. */
-	wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
-			pkt->nb_segs * sizeof(struct mlx4_wqe_data_seg);
-	nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
-	/*
-	 * Check that there is room for this WQE in the send queue and that
-	 * the WQE size is legal.
-	 */
-	if (((sq->head - sq->tail) + nr_txbbs +
-	     sq->headroom_txbbs) >= sq->txbb_cnt ||
-	    nr_txbbs > MLX4_MAX_WQE_TXBBS) {
-		return -ENOSPC;
-	}
-	/* Get the control and data entries of the WQE. */
-	ctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);
-	dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
-			sizeof(struct mlx4_wqe_ctrl_seg));
-	/* Fill the data segments with buffer information. */
-	for (buf = pkt; buf != NULL; buf = buf->next, dseg++) {
-		addr = rte_pktmbuf_mtod(buf, uintptr_t);
-		rte_prefetch0((volatile void *)addr);
-		/* Handle WQE wraparound. */
-		if (unlikely(dseg >= (struct mlx4_wqe_data_seg *)sq->eob))
-			dseg = (struct mlx4_wqe_data_seg *)sq->buf;
-		dseg->addr = rte_cpu_to_be_64(addr);
-		/* Memory region key (big endian) for this memory pool. */
-		dseg->lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
-#ifndef NDEBUG
-		if (unlikely(dseg->lkey == rte_cpu_to_be_32((uint32_t)-1))) {
-			/* MR does not exist. */
-			DEBUG("%p: unable to get MP <-> MR association",
-			      (void *)txq);
-			/*
-			 * Restamp entry in case of failure.
-			 * Make sure that size is written correctly
-			 * Note that we give ownership to the SW, not the HW.
-			 */
-			ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
-			mlx4_txq_stamp_freed_wqe(sq, head_idx,
-				(sq->head & sq->txbb_cnt) ? 0 : 1);
-			return -EFAULT;
-		}
-#endif /* NDEBUG */
-		if (likely(buf->data_len)) {
-			byte_count = rte_cpu_to_be_32(buf->data_len);
-		} else {
-			/*
-			 * Zero length segment is treated as inline segment
-			 * with zero data.
-			 */
-			byte_count = RTE_BE32(0x80000000);
-		}
-		/*
-		 * If the data segment is not at the beginning of a
-		 * Tx basic block (TXBB) then write the byte count,
-		 * else postpone the writing to just before updating the
-		 * control segment.
-		 */
-		if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
-			/*
-			 * Need a barrier here before writing the byte_count
-			 * fields to make sure that all the data is visible
-			 * before the byte_count field is set.
-			 * Otherwise, if the segment begins a new cacheline,
-			 * the HCA prefetcher could grab the 64-byte chunk and
-			 * get a valid (!= 0xffffffff) byte count but stale
-			 * data, and end up sending the wrong data.
-			 */
-			rte_io_wmb();
-			dseg->byte_count = byte_count;
-		} else {
-			/*
-			 * This data segment starts at the beginning of a new
-			 * TXBB, so we need to postpone its byte_count writing
-			 * for later.
-			 */
-			pv[pv_counter].dseg = dseg;
-			pv[pv_counter++].val = byte_count;
-		}
-	}
-	/* Write the first DWORD of each TXBB save earlier. */
-	if (pv_counter) {
-		/* Need a barrier here before writing the byte_count. */
-		rte_io_wmb();
-		for (--pv_counter; pv_counter >= 0; pv_counter--)
-			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
-	}
-	/* Fill the control parameters for this packet. */
-	ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
-	/*
-	 * For raw Ethernet, the SOLICIT flag is used to indicate that no ICRC
-	 * should be calculated.
-	 */
-	txq->elts_comp_cd -= nr_txbbs;
-	if (unlikely(txq->elts_comp_cd <= 0)) {
-		txq->elts_comp_cd = txq->elts_comp_cd_init;
-		srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
-				       MLX4_WQE_CTRL_CQ_UPDATE);
-	} else {
-		srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
-	}
-	/* Enable HW checksum offload if requested */
-	if (txq->csum &&
-	    (pkt->ol_flags &
-	     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
-		const uint64_t is_tunneled = (pkt->ol_flags &
-					      (PKT_TX_TUNNEL_GRE |
-					       PKT_TX_TUNNEL_VXLAN));
-
-		if (is_tunneled && txq->csum_l2tun) {
-			owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
-					MLX4_WQE_CTRL_IL4_HDR_CSUM;
-			if (pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-				srcrb.flags |=
-					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
-		} else {
-			srcrb.flags |= RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
-						MLX4_WQE_CTRL_TCP_UDP_CSUM);
-		}
-	}
-	if (txq->lb) {
-		/*
-		 * Copy destination MAC address to the WQE, this allows
-		 * loopback in eSwitch, so that VFs and PF can communicate
-		 * with each other.
-		 */
-		srcrb.flags16[0] = *(rte_pktmbuf_mtod(pkt, uint16_t *));
-		ctrl->imm = *(rte_pktmbuf_mtod_offset(pkt, uint32_t *,
-						      sizeof(uint16_t)));
-	} else {
-		ctrl->imm = 0;
-	}
-	ctrl->srcrb_flags = srcrb.flags;
-	/*
-	 * Make sure descriptor is fully written before
-	 * setting ownership bit (because HW can start
-	 * executing as soon as we do).
-	 */
-	rte_wmb();
-	ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
-					      ((sq->head & sq->txbb_cnt) ?
-					       MLX4_BIT_WQE_OWN : 0));
-	sq->head += nr_txbbs;
-	return 0;
-}
-
-/**
  * DPDK callback for Tx.
  *
  * @param dpdk_txq
@@ -466,7 +289,8 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	unsigned int bytes_sent = 0;
 	unsigned int i;
 	unsigned int max;
-	int err;
+	struct mlx4_sq *sq = &txq->msq;
+	struct pv *pv = (struct pv *)txq->bounce_buf;
 
 	assert(txq->elts_comp_cd != 0);
 	mlx4_txq_complete(txq);
@@ -485,6 +309,20 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
 		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
 		struct txq_elt *elt = &(*txq->elts)[elts_head];
+		uint32_t owner_opcode = MLX4_OPCODE_SEND;
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		struct mlx4_wqe_data_seg *dseg;
+		struct rte_mbuf *sbuf;
+		union {
+			uint32_t flags;
+			uint16_t flags16[2];
+		} srcrb;
+		uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
+		uintptr_t addr;
+		uint32_t byte_count;
+		int wqe_real_size;
+		int nr_txbbs;
+		int pv_counter = 0;
 
 		/* Clean up old buffer. */
 		if (likely(elt->buf != NULL)) {
@@ -503,18 +341,167 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			} while (tmp != NULL);
 		}
 		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
-		/* Post the packet for sending. */
-		err = mlx4_post_send(txq, buf);
-		if (unlikely(err)) {
+
+		/*
+		 * Calculate the needed work queue entry size
+		 * for this packet.
+		 */
+		wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
+			buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
+		nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
+		/*
+		 * Check that there is room for this WQE in the send
+		 * queue and that the WQE size is legal.
+		 */
+		if (((sq->head - sq->tail) + nr_txbbs +
+		     sq->headroom_txbbs) >= sq->txbb_cnt ||
+		    nr_txbbs > MLX4_MAX_WQE_TXBBS) {
 			elt->buf = NULL;
-			goto stop;
+			break;
+		}
+		/* Get the control and data entries of the WQE. */
+		ctrl = (struct mlx4_wqe_ctrl_seg *)
+				mlx4_get_send_wqe(sq, head_idx);
+		dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
+				sizeof(struct mlx4_wqe_ctrl_seg));
+		/* Fill the data segments with buffer information. */
+		for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
+			addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+			rte_prefetch0((volatile void *)addr);
+			/* Handle WQE wraparound. */
+			if (unlikely(dseg >=
+			    (struct mlx4_wqe_data_seg *)sq->eob))
+				dseg = (struct mlx4_wqe_data_seg *)sq->buf;
+			dseg->addr = rte_cpu_to_be_64(addr);
+			/* Memory region key (big endian). */
+			dseg->lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+#ifndef NDEBUG
+			if (unlikely(dseg->lkey ==
+				rte_cpu_to_be_32((uint32_t)-1))) {
+				/* MR does not exist. */
+				DEBUG("%p: unable to get MP <-> MR association",
+				      (void *)txq);
+				/*
+				 * Restamp entry in case of failure.
+				 * Make sure that size is written correctly.
+				 * Note that we give ownership to the SW,
+				 * not the HW.
+				 */
+				ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+				mlx4_txq_stamp_freed_wqe(sq, head_idx,
+					(sq->head & sq->txbb_cnt) ? 0 : 1);
+				elt->buf = NULL;
+				break;
+			}
+#endif /* NDEBUG */
+			if (likely(sbuf->data_len)) {
+				byte_count = rte_cpu_to_be_32(sbuf->data_len);
+			} else {
+				/*
+				 * Zero length segment is treated as inline
+				 * segment with zero data.
+				 */
+				byte_count = RTE_BE32(0x80000000);
+			}
+			/*
+			 * If the data segment is not at the beginning
+			 * of a Tx basic block (TXBB) then write the
+			 * byte count, else postpone the writing to
+			 * just before updating the control segment.
+			 */
+			if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
+				/*
+				 * Need a barrier here before writing the
+				 * byte_count fields to make sure that all the
+				 * data is visible before the byte_count field
+				 * is set. Otherwise, if the segment begins a
+				 * new cacheline, the HCA prefetcher could grab
+				 * the 64-byte chunk and get a valid
+				 * (!= 0xffffffff) byte count but stale data,
+				 * and end up sending the wrong data.
+				 */
+				rte_io_wmb();
+				dseg->byte_count = byte_count;
+			} else {
+				/*
+				 * This data segment starts at the beginning of
+				 * a new TXBB, so we need to postpone its
+				 * byte_count writing for later.
+				 */
+				pv[pv_counter].dseg = dseg;
+				pv[pv_counter++].val = byte_count;
+			}
+		}
+		/* Write the first DWORD of each TXBB saved earlier. */
+		if (pv_counter) {
+			/* Need a barrier before writing the byte_count. */
+			rte_io_wmb();
+			for (--pv_counter; pv_counter >= 0; pv_counter--)
+				pv[pv_counter].dseg->byte_count =
+						pv[pv_counter].val;
+		}
+		/* Fill the control parameters for this packet. */
+		ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+		/*
+		 * For raw Ethernet, the SOLICIT flag is used to indicate
+		 * that no ICRC should be calculated.
+		 */
+		txq->elts_comp_cd -= nr_txbbs;
+		if (unlikely(txq->elts_comp_cd <= 0)) {
+			txq->elts_comp_cd = txq->elts_comp_cd_init;
+			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
+					       MLX4_WQE_CTRL_CQ_UPDATE);
+		} else {
+			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
 		}
+		/* Enable HW checksum offload if requested. */
+		if (txq->csum &&
+		    (buf->ol_flags &
+		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
+			const uint64_t is_tunneled = (buf->ol_flags &
+						      (PKT_TX_TUNNEL_GRE |
+						       PKT_TX_TUNNEL_VXLAN));
+
+			if (is_tunneled && txq->csum_l2tun) {
+				owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
+						MLX4_WQE_CTRL_IL4_HDR_CSUM;
+				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+					srcrb.flags |=
+					    RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
+			} else {
+				srcrb.flags |=
+					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
+						 MLX4_WQE_CTRL_TCP_UDP_CSUM);
+			}
+		}
+		if (txq->lb) {
+			/*
+			 * Copy destination MAC address to the WQE, this allows
+			 * loopback in eSwitch, so that VFs and PF can
+			 * communicate with each other.
+			 */
+			srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
+			ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
+						sizeof(uint16_t)));
+		} else {
+			ctrl->imm = 0;
+		}
+		ctrl->srcrb_flags = srcrb.flags;
+		/*
+		 * Make sure descriptor is fully written before
+		 * setting ownership bit (because HW can start
+		 * executing as soon as we do).
+		 */
+		rte_wmb();
+		ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
+					      ((sq->head & sq->txbb_cnt) ?
+					       MLX4_BIT_WQE_OWN : 0));
+		sq->head += nr_txbbs;
 		elt->buf = buf;
 		bytes_sent += buf->pkt_len;
 		++elts_comp;
 		elts_head = elts_head_next;
 	}
-stop:
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely(i == 0))
 		return 0;
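
For reference, the arithmetic behind the (now single) room check: a WQE is
one 16-byte control segment plus one 16-byte data segment per mbuf segment,
rounded up to 64-byte Tx basic blocks (TXBBs), and head/tail are free-running
TXBB counters, so the queue is full once their difference plus the new WQE and
the headroom reaches the ring capacity. The following standalone sketch shows
that check under these assumptions; struct sim_sq, sq_has_room() and the
unprefixed macros are illustrative stand-ins for struct mlx4_sq and the
MLX4_* definitions, not driver symbols.

#include <stdint.h>
#include <stdio.h>

/* 64-byte Tx basic block, mirroring MLX4_TXBB_SHIFT/MLX4_TXBB_SIZE. */
#define TXBB_SHIFT 6
#define TXBB_SIZE (1 << TXBB_SHIFT)
/* Round a WQE byte size up to whole TXBBs (cf. MLX4_SIZE_TO_TXBBS). */
#define SIZE_TO_TXBBS(size) (((size) + TXBB_SIZE - 1) >> TXBB_SHIFT)
/* Control and data segments are 16 bytes each. */
#define CTRL_SEG_SIZE 16
#define DATA_SEG_SIZE 16

/* Illustrative stand-in for the ring counters of struct mlx4_sq. */
struct sim_sq {
	uint32_t head;           /* TXBBs posted so far (free-running). */
	uint32_t tail;           /* TXBBs completed so far (free-running). */
	uint32_t txbb_cnt;       /* Ring capacity in TXBBs. */
	uint32_t headroom_txbbs; /* TXBBs reserved as headroom. */
};

/*
 * The single per-packet room check kept by this patch: head - tail is
 * the number of TXBBs still in flight, so the WQE fits while that
 * amount plus the new TXBBs plus the headroom stays below capacity.
 * (The driver additionally rejects nr_txbbs > MLX4_MAX_WQE_TXBBS.)
 */
static int
sq_has_room(const struct sim_sq *sq, uint32_t nr_txbbs)
{
	return (sq->head - sq->tail) + nr_txbbs + sq->headroom_txbbs <
	       sq->txbb_cnt;
}

int
main(void)
{
	struct sim_sq sq = {
		.head = 500,
		.tail = 260,
		.txbb_cnt = 256,
		.headroom_txbbs = 8,
	};
	uint32_t nb_segs = 3; /* A three-segment packet. */
	uint32_t wqe_size = CTRL_SEG_SIZE + nb_segs * DATA_SEG_SIZE;
	uint32_t nr_txbbs = SIZE_TO_TXBBS(wqe_size); /* 64 bytes -> 1 TXBB. */

	printf("WQE size %u B -> %u TXBB(s), room in SQ: %s\n",
	       wqe_size, nr_txbbs,
	       sq_has_room(&sq, nr_txbbs) ? "yes" : "no");
	return 0;
}

Because head and tail are free-running unsigned counters, the subtraction
stays correct even after the 32-bit values wrap around.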