get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
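
These operations can be driven with any HTTP client. Below is a minimal sketch using Python's requests library; the token value and the target state are placeholders for illustration (not part of the API response below), and write operations require a Patchwork account with maintainer rights on the project:

import requests

BASE = "http://patches.dpdk.org/api"
# Placeholder token for illustration; generate a real one from your
# Patchwork user profile. Read-only GET requests need no token.
HEADERS = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

# get: show a patch.
patch = requests.get(BASE + "/patches/56455/").json()
print(patch["name"], patch["state"])

# patch: partially update a patch, e.g. move it to a new state.
resp = requests.patch(BASE + "/patches/56455/", headers=HEADERS,
                      json={"state": "accepted"})
resp.raise_for_status()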

GET /api/patches/56455/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 56455,
    "url": "http://patches.dpdk.org/api/patches/56455/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1563199161-29745-5-git-send-email-viacheslavo@mellanox.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1563199161-29745-5-git-send-email-viacheslavo@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1563199161-29745-5-git-send-email-viacheslavo@mellanox.com",
    "date": "2019-07-15T13:59:18",
    "name": "[v2,4/7] net/mlx5: add Tx datapath configuration and setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "a84675b4ec60ec4538881a4e51b67ae8df7fc7a1",
    "submitter": {
        "id": 1102,
        "url": "http://patches.dpdk.org/api/people/1102/?format=api",
        "name": "Slava Ovsiienko",
        "email": "viacheslavo@mellanox.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1563199161-29745-5-git-send-email-viacheslavo@mellanox.com/mbox/",
    "series": [
        {
            "id": 5500,
            "url": "http://patches.dpdk.org/api/series/5500/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=5500",
            "date": "2019-07-15T13:59:14",
            "name": "net/mlx5: consolidate Tx datapath",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/5500/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/56455/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/56455/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 2779F3423;\n\tMon, 15 Jul 2019 15:59:55 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id D1A1D3423\n\tfor <dev@dpdk.org>; Mon, 15 Jul 2019 15:59:52 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE2 (envelope-from\n\tviacheslavo@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 15 Jul 2019 16:59:49 +0300",
            "from pegasus12.mtr.labs.mlnx. (pegasus12.mtr.labs.mlnx\n\t[10.210.17.40])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x6FDxOEN013758;\n\tMon, 15 Jul 2019 16:59:49 +0300"
        ],
        "From": "Viacheslav Ovsiienko <viacheslavo@mellanox.com>",
        "To": "dev@dpdk.org",
        "Cc": "yskoh@mellanox.com",
        "Date": "Mon, 15 Jul 2019 13:59:18 +0000",
        "Message-Id": "<1563199161-29745-5-git-send-email-viacheslavo@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1563199161-29745-1-git-send-email-viacheslavo@mellanox.com>",
        "References": "<1562257767-19035-2-git-send-email-viacheslavo@mellanox.com>\n\t<1563199161-29745-1-git-send-email-viacheslavo@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH v2 4/7] net/mlx5: add Tx datapath configuration\n\tand setup",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch updates the Tx datapath control and configuration\nstructures and code for mananging Tx datapath settings.\n\nSigned-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\n---\n drivers/net/mlx5/mlx5_rxtx.c |   4 +-\n drivers/net/mlx5/mlx5_rxtx.h |  55 +++++++++----\n drivers/net/mlx5/mlx5_txq.c  | 182 ++++++++++++++++++++++++++++++++++++++++---\n 3 files changed, 212 insertions(+), 29 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex f2d6918..13f9431 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -507,7 +507,7 @@\n \t\t\tMKSTR(err_str, \"Unexpected CQE error syndrome \"\n \t\t\t      \"0x%02x CQN = %u SQN = %u wqe_counter = %u \"\n \t\t\t      \"wq_ci = %u cq_ci = %u\", err_cqe->syndrome,\n-\t\t\t      txq_ctrl->cqn, txq->qp_num_8s >> 8,\n+\t\t\t      txq->cqe_s, txq->qp_num_8s >> 8,\n \t\t\t      rte_be_to_cpu_16(err_cqe->wqe_counter),\n \t\t\t      txq->wqe_ci, txq->cq_ci);\n \t\t\tMKSTR(name, \"dpdk_mlx5_port_%u_txq_%u_index_%u_%u\",\n@@ -516,7 +516,7 @@\n \t\t\tmlx5_dump_debug_information(name, NULL, err_str, 0);\n \t\t\tmlx5_dump_debug_information(name, \"MLX5 Error CQ:\",\n \t\t\t\t\t\t    (const void *)((uintptr_t)\n-\t\t\t\t\t\t    &(*txq->cqes)[0]),\n+\t\t\t\t\t\t    txq->cqes),\n \t\t\t\t\t\t    sizeof(*err_cqe) *\n \t\t\t\t\t\t    (1 << txq->cqe_n));\n \t\t\tmlx5_dump_debug_information(name, \"MLX5 Error SQ:\",\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex acde09d..d8c6f35 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -188,37 +188,60 @@ struct mlx5_hrxq {\n \tuint8_t rss_key[]; /* Hash key. */\n };\n \n+/* TX queue send local data. */\n+__extension__\n+struct mlx5_txq_local {\n+\tstruct mlx5_wqe *wqe_last; /* last sent WQE pointer. */\n+\tstruct rte_mbuf *mbuf; /* first mbuf to process. */\n+\tuint16_t pkts_copy; /* packets copied to elts. */\n+\tuint16_t pkts_sent; /* packets sent. */\n+\tuint16_t elts_free; /* available elts remain. */\n+\tuint16_t wqe_free; /* available wqe remain. */\n+\tuint16_t mbuf_off; /* data offset in current mbuf. */\n+\tuint16_t mbuf_nseg; /* number of remaining mbuf. */\n+};\n+\n /* TX queue descriptor. */\n __extension__\n struct mlx5_txq_data {\n \tuint16_t elts_head; /* Current counter in (*elts)[]. */\n \tuint16_t elts_tail; /* Counter of first element awaiting completion. */\n-\tuint16_t elts_comp; /* Counter since last completion request. */\n-\tuint16_t mpw_comp; /* WQ index since last completion request. */\n+\tuint16_t elts_comp; /* elts index since last completion request. */\n+\tuint16_t elts_s; /* Number of mbuf elements. */\n+\tuint16_t elts_m; /* Mask for mbuf elements indices. */\n+\t/* Fields related to elts mbuf storage. */\n+\tuint16_t wqe_ci; /* Consumer index for work queue. */\n+\tuint16_t wqe_pi; /* Producer index for work queue. */\n+\tuint16_t wqe_s; /* Number of WQ elements. */\n+\tuint16_t wqe_m; /* Mask Number for WQ elements. */\n+\tuint16_t wqe_comp; /* WQE index since last completion request. */\n+\tuint16_t wqe_thres; /* WQE threshold to request completion in CQ. */\n+\t/* WQ related fields. */\n \tuint16_t cq_ci; /* Consumer index for completion queue. */\n #ifndef NDEBUG\n-\tuint16_t cq_pi; /* Producer index for completion queue. */\n+\tuint16_t cq_pi; /* Counter of issued CQE \"always\" requests. */\n #endif\n-\tuint16_t wqe_ci; /* Consumer index for work queue. */\n-\tuint16_t wqe_pi; /* Producer index for work queue. */\n-\tuint16_t elts_n:4; /* (*elts)[] length (in log2). */\n+\tuint16_t cqe_s; /* Number of CQ elements. */\n+\tuint16_t cqe_m; /* Mask for CQ indices. */\n+\t/* CQ related fields. */\n+\tuint16_t elts_n:4; /* elts[] length (in log2). */\n \tuint16_t cqe_n:4; /* Number of CQ elements (in log2). */\n-\tuint16_t wqe_n:4; /* Number of of WQ elements (in log2). */\n+\tuint16_t wqe_n:4; /* Number of WQ elements (in log2). 
*/\n \tuint16_t tso_en:1; /* When set hardware TSO is enabled. */\n \tuint16_t tunnel_en:1;\n \t/* When set TX offload for tunneled packets are supported. */\n \tuint16_t swp_en:1; /* Whether SW parser is enabled. */\n-\tuint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */\n-\tuint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */\n-\tuint16_t inline_max_packet_sz; /* Max packet size for inlining. */\n+\tuint16_t inlen_send; /* Ordinary send data inline size. */\n+\tuint16_t inlen_empw; /* eMPW max packet size to inline. */\n+\tuint16_t inlen_mode; /* Minimal data length to inline. */\n \tuint32_t qp_num_8s; /* QP number shifted by 8. */\n \tuint64_t offloads; /* Offloads for Tx Queue. */\n \tstruct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */\n-\tvolatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */\n-\tvolatile void *wqes; /* Work queue (use volatile to write into). */\n+\tstruct mlx5_wqe *wqes; /* Work queue. */\n+\tstruct mlx5_wqe *wqes_end; /* Work queue array limit. */\n+\tvolatile struct mlx5_cqe *cqes; /* Completion queue. */\n \tvolatile uint32_t *qp_db; /* Work queue doorbell. */\n \tvolatile uint32_t *cq_db; /* Completion queue doorbell. */\n-\tstruct rte_mbuf *(*elts)[]; /* TX elements. */\n \tuint16_t port_id; /* Port ID of device. */\n \tuint16_t idx; /* Queue index. */\n \tstruct mlx5_txq_stats stats; /* TX queue counters. */\n@@ -226,6 +249,8 @@ struct mlx5_txq_data {\n \trte_spinlock_t *uar_lock;\n \t/* UAR access lock required for 32bit implementations */\n #endif\n+\tstruct rte_mbuf *elts[0];\n+\t/* Storage for queued packets, must be the last field. */\n } __rte_cache_aligned;\n \n /* Verbs Rx queue elements. */\n@@ -239,7 +264,6 @@ struct mlx5_txq_ibv {\n \n /* TX queue control descriptor. */\n struct mlx5_txq_ctrl {\n-\tstruct mlx5_txq_data txq; /* Data path structure. */\n \tLIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */\n \trte_atomic32_t refcnt; /* Reference counter. */\n \tunsigned int socket; /* CPU socket ID for allocations. */\n@@ -249,8 +273,9 @@ struct mlx5_txq_ctrl {\n \tstruct mlx5_priv *priv; /* Back pointer to private data. */\n \toff_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */\n \tvoid *bf_reg; /* BlueFlame register from Verbs. */\n-\tuint32_t cqn; /* CQ number. */\n \tuint16_t dump_file_n; /* Number of dump files. */\n+\tstruct mlx5_txq_data txq; /* Data path structure. */\n+\t/* Must be the last field in the structure, contains elts[]. 
*/\n };\n \n #define MLX5_TX_BFREG(txq) \\\ndiff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c\nindex 0d2dbfa..147599b 100644\n--- a/drivers/net/mlx5/mlx5_txq.c\n+++ b/drivers/net/mlx5/mlx5_txq.c\n@@ -47,7 +47,7 @@\n \tunsigned int i;\n \n \tfor (i = 0; (i != elts_n); ++i)\n-\t\t(*txq_ctrl->txq.elts)[i] = NULL;\n+\t\ttxq_ctrl->txq.elts[i] = NULL;\n \tDRV_LOG(DEBUG, \"port %u Tx queue %u allocated and configured %u WRs\",\n \t\tPORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx, elts_n);\n \ttxq_ctrl->txq.elts_head = 0;\n@@ -68,7 +68,7 @@\n \tconst uint16_t elts_m = elts_n - 1;\n \tuint16_t elts_head = txq_ctrl->txq.elts_head;\n \tuint16_t elts_tail = txq_ctrl->txq.elts_tail;\n-\tstruct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;\n+\tstruct rte_mbuf *(*elts)[elts_n] = &txq_ctrl->txq.elts;\n \n \tDRV_LOG(DEBUG, \"port %u Tx queue %u freeing WRs\",\n \t\tPORT_ID(txq_ctrl->priv), txq_ctrl->txq.idx);\n@@ -411,7 +411,8 @@ struct mlx5_txq_ibv *\n \tattr.cq = (struct ibv_cq_init_attr_ex){\n \t\t.comp_mask = 0,\n \t};\n-\tcqe_n = desc / MLX5_TX_COMP_THRESH + 1;\n+\tcqe_n = desc / MLX5_TX_COMP_THRESH +\n+\t\t1 + MLX5_TX_COMP_THRESH_INLINE_DIV;\n \ttmpl.cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);\n \tif (tmpl.cq == NULL) {\n \t\tDRV_LOG(ERR, \"port %u Tx queue %u CQ creation failure\",\n@@ -449,7 +450,7 @@ struct mlx5_txq_ibv *\n \t\t.pd = priv->sh->pd,\n \t\t.comp_mask = IBV_QP_INIT_ATTR_PD,\n \t};\n-\tif (txq_data->max_inline)\n+\tif (txq_data->inlen_send)\n \t\tattr.init.cap.max_inline_data = txq_ctrl->max_inline_data;\n \tif (txq_data->tso_en) {\n \t\tattr.init.max_tso_header = txq_ctrl->max_tso_header;\n@@ -523,25 +524,29 @@ struct mlx5_txq_ibv *\n \t\tgoto error;\n \t}\n \ttxq_data->cqe_n = log2above(cq_info.cqe_cnt);\n+\ttxq_data->cqe_s = 1 << txq_data->cqe_n;\n+\ttxq_data->cqe_m = txq_data->cqe_s - 1;\n \ttxq_data->qp_num_8s = tmpl.qp->qp_num << 8;\n \ttxq_data->wqes = qp.sq.buf;\n \ttxq_data->wqe_n = log2above(qp.sq.wqe_cnt);\n+\ttxq_data->wqe_s = 1 << txq_data->wqe_n;\n+\ttxq_data->wqe_m = txq_data->wqe_s - 1;\n+\ttxq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;\n \ttxq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];\n \ttxq_data->cq_db = cq_info.dbrec;\n-\ttxq_data->cqes =\n-\t\t(volatile struct mlx5_cqe (*)[])\n-\t\t(uintptr_t)cq_info.buf;\n+\ttxq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;\n \ttxq_data->cq_ci = 0;\n #ifndef NDEBUG\n \ttxq_data->cq_pi = 0;\n #endif\n \ttxq_data->wqe_ci = 0;\n \ttxq_data->wqe_pi = 0;\n+\ttxq_data->wqe_comp = 0;\n+\ttxq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;\n \ttxq_ibv->qp = tmpl.qp;\n \ttxq_ibv->cq = tmpl.cq;\n \trte_atomic32_inc(&txq_ibv->refcnt);\n \ttxq_ctrl->bf_reg = qp.bf.reg;\n-\ttxq_ctrl->cqn = cq_info.cqn;\n \ttxq_uar_init(txq_ctrl);\n \tif (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {\n \t\ttxq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;\n@@ -663,7 +668,11 @@ struct mlx5_txq_ibv *\n \tunsigned int wqe_size;\n \tconst unsigned int desc = 1 << txq_ctrl->txq.elts_n;\n \n-\twqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;\n+\twqe_size = MLX5_WQE_CSEG_SIZE +\n+\t\t   MLX5_WQE_ESEG_SIZE +\n+\t\t   MLX5_WSEG_SIZE -\n+\t\t   MLX5_ESEG_MIN_INLINE_SIZE +\n+\t\t   txq_ctrl->max_inline_data;\n \treturn rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;\n }\n \n@@ -676,7 +685,156 @@ struct mlx5_txq_ibv *\n static void\n txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)\n {\n-\t(void)txq_ctrl;\n+\tstruct mlx5_priv *priv = txq_ctrl->priv;\n+\tstruct mlx5_dev_config *config = 
&priv->config;\n+\tunsigned int inlen_send; /* Inline data for ordinary SEND.*/\n+\tunsigned int inlen_empw; /* Inline data for enhanced MPW. */\n+\tunsigned int inlen_mode; /* Minimal required Inline data. */\n+\tunsigned int txqs_inline; /* Min Tx queues to enable inline. */\n+\tint tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |\n+\t\t\t\t\t       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |\n+\t\t\t\t\t       DEV_TX_OFFLOAD_GRE_TNL_TSO |\n+\t\t\t\t\t       DEV_TX_OFFLOAD_IP_TNL_TSO |\n+\t\t\t\t\t       DEV_TX_OFFLOAD_UDP_TNL_TSO));\n+\tunsigned int temp;\n+\n+\ttxqs_inline = (config->txqs_inline == MLX5_ARG_UNSET) ?\n+\t\t      MLX5_EMPW_MIN_TXQS :\n+\t\t      (unsigned int)config->txqs_inline;\n+\tinlen_send = (config->txq_inline_max == MLX5_ARG_UNSET) ?\n+\t\t     MLX5_SEND_DEF_INLINE_LEN :\n+\t\t     (unsigned int)config->txq_inline_max;\n+\tinlen_empw = (config->txq_inline_mpw == MLX5_ARG_UNSET) ?\n+\t\t     MLX5_EMPW_DEF_INLINE_LEN :\n+\t\t     (unsigned int)config->txq_inline_mpw;\n+\tinlen_mode = (config->txq_inline_min == MLX5_ARG_UNSET) ?\n+\t\t     0 : (unsigned int)config->txq_inline_min;\n+\n+\t/*\n+\t * If there is requested minimal amount of data to inline\n+\t * we MUST enable inlining. This is a case for ConnectX-4\n+\t * which usually requires L2 inlined for correct operating\n+\t * and ConnectX-4LX which requires L2-L4 inlined to\n+\t * support E-Switch Flows.\n+\t */\n+\tif (inlen_mode) {\n+\t\tif (inlen_mode <= MLX5_ESEG_MIN_INLINE_SIZE) {\n+\t\t\t/*\n+\t\t\t * Optimize minimal inlining for single\n+\t\t\t * segment packets to fill one WQEBB\n+\t\t\t * without gaps.\n+\t\t\t */\n+\t\t\ttemp = MLX5_ESEG_MIN_INLINE_SIZE;\n+\t\t} else {\n+\t\t\ttemp = inlen_mode - MLX5_ESEG_MIN_INLINE_SIZE;\n+\t\t\ttemp = RTE_ALIGN(temp, MLX5_WSEG_SIZE) +\n+\t\t\t       MLX5_ESEG_MIN_INLINE_SIZE;\n+\t\t\ttemp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);\n+\t\t}\n+\t\tif (temp != inlen_mode) {\n+\t\t\tDRV_LOG(INFO,\n+\t\t\t\t\"port %u minimal required inline setting\"\n+\t\t\t\t\" aligned from %u to %u\",\n+\t\t\t\tPORT_ID(priv), inlen_mode, temp);\n+\t\t\tinlen_mode = temp;\n+\t\t}\n+\t}\n+\t/*\n+\t * If there are few Tx queues it is prioritized\n+\t * to save CPU cycles and disable data inlining at all.\n+\t */\n+\tif (inlen_send && priv->txqs_n >= txqs_inline) {\n+\t\t/*\n+\t\t * The data sent with ordinal MLX5_OPCODE_SEND\n+\t\t * may be inlined in Ethernet Segment, align the\n+\t\t * length accordingly to fit entire WQEBBs.\n+\t\t */\n+\t\ttemp = (inlen_send / MLX5_WQE_SIZE) * MLX5_WQE_SIZE +\n+\t\t\tMLX5_ESEG_MIN_INLINE_SIZE + MLX5_WQE_DSEG_SIZE;\n+\t\ttemp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +\n+\t\t\t\t     MLX5_ESEG_MIN_INLINE_SIZE -\n+\t\t\t\t     MLX5_WQE_CSEG_SIZE -\n+\t\t\t\t     MLX5_WQE_ESEG_SIZE -\n+\t\t\t\t     MLX5_WQE_DSEG_SIZE);\n+\t\ttemp = RTE_MIN(temp, MLX5_SEND_MAX_INLINE_LEN);\n+\t\ttemp = RTE_MAX(temp, inlen_mode);\n+\t\tif (temp != inlen_send) {\n+\t\t\tDRV_LOG(INFO,\n+\t\t\t\t\"port %u ordinary send inline setting\"\n+\t\t\t\t\" aligned from %u to %u\",\n+\t\t\t\tPORT_ID(priv), inlen_send, temp);\n+\t\t\tinlen_send = temp;\n+\t\t}\n+\t\t/*\n+\t\t * Not aligned to cache lines, but to WQEs.\n+\t\t * First bytes of data (initial alignment)\n+\t\t * is going to be copied explicitly at the\n+\t\t * beginning of inlining buffer in Ethernet\n+\t\t * Segment.\n+\t\t */\n+\t\tassert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);\n+\t\tassert(inlen_send <= MLX5_WQE_SIZE_MAX +\n+\t\t\t\t     MLX5_ESEG_MIN_INLINE_SIZE -\n+\t\t\t\t     MLX5_WQE_CSEG_SIZE -\n+\t\t\t\t     
MLX5_WQE_ESEG_SIZE -\n+\t\t\t\t     MLX5_WQE_DSEG_SIZE);\n+\t\ttxq_ctrl->txq.inlen_send = inlen_send;\n+\t\ttxq_ctrl->txq.inlen_mode = inlen_mode;\n+\t} else {\n+\t\t/*\n+\t\t * If minimal inlining is requested we must\n+\t\t * enable inlining in general, despite the\n+\t\t * number of configured queues.\n+\t\t */\n+\t\ttxq_ctrl->txq.inlen_send = inlen_mode;\n+\t\ttxq_ctrl->txq.inlen_mode = inlen_mode;\n+\t\ttxq_ctrl->txq.inlen_empw = 0;\n+\t\tinlen_send = 0;\n+\t\tinlen_empw = 0;\n+\t}\n+\tif (inlen_send && inlen_empw && priv->txqs_n >= txqs_inline) {\n+\t\t/*\n+\t\t * The data sent with MLX5_OPCODE_ENHANCED_MPSW\n+\t\t * may be inlined in Data Segment, align the\n+\t\t * length accordingly to fit entire WQEBBs.\n+\t\t */\n+\t\ttemp = (inlen_empw + MLX5_WQE_SIZE - 1) / MLX5_WQE_SIZE;\n+\t\ttemp = temp * MLX5_WQE_SIZE +\n+\t\t       MLX5_DSEG_MIN_INLINE_SIZE - MLX5_WQE_DSEG_SIZE;\n+\t\ttemp = RTE_MIN(temp, MLX5_WQE_SIZE_MAX +\n+\t\t\t\t     MLX5_DSEG_MIN_INLINE_SIZE -\n+\t\t\t\t     MLX5_WQE_CSEG_SIZE -\n+\t\t\t\t     MLX5_WQE_ESEG_SIZE -\n+\t\t\t\t     MLX5_WQE_DSEG_SIZE);\n+\t\ttemp = RTE_MIN(temp, MLX5_EMPW_MAX_INLINE_LEN);\n+\t\tif (temp != inlen_empw) {\n+\t\t\tDRV_LOG(INFO,\n+\t\t\t\t\"port %u enhanced empw inline setting\"\n+\t\t\t\t\" aligned from %u to %u\",\n+\t\t\t\tPORT_ID(priv), inlen_empw, temp);\n+\t\t\tinlen_empw = temp;\n+\t\t}\n+\t\tassert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);\n+\t\tassert(inlen_empw <= MLX5_WQE_SIZE_MAX +\n+\t\t\t\t     MLX5_DSEG_MIN_INLINE_SIZE -\n+\t\t\t\t     MLX5_WQE_CSEG_SIZE -\n+\t\t\t\t     MLX5_WQE_ESEG_SIZE -\n+\t\t\t\t     MLX5_WQE_DSEG_SIZE);\n+\t\ttxq_ctrl->txq.inlen_empw = inlen_send;\n+\t}\n+\ttxq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);\n+\tif (tso) {\n+\t\ttxq_ctrl->max_tso_header = MLX5_MAX_TSO_HEADER;\n+\t\ttxq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->max_inline_data,\n+\t\t\t\t\t\t    MLX5_MAX_TSO_HEADER);\n+\t\ttxq_ctrl->txq.tso_en = 1;\n+\t}\n+\ttxq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;\n+\ttxq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |\n+\t\t\t\t DEV_TX_OFFLOAD_UDP_TNL_TSO |\n+\t\t\t\t DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &\n+\t\t\t\ttxq_ctrl->txq.offloads) && config->swp;\n+\tconfig->tx_inline = txq_ctrl->txq.inlen_send ? 1 : 0;\n }\n \n /**\n@@ -724,6 +882,8 @@ struct mlx5_txq_ctrl *\n \ttmpl->priv = priv;\n \ttmpl->socket = socket;\n \ttmpl->txq.elts_n = log2above(desc);\n+\ttmpl->txq.elts_s = desc;\n+\ttmpl->txq.elts_m = desc - 1;\n \ttmpl->txq.port_id = dev->data->port_id;\n \ttmpl->txq.idx = idx;\n \ttxq_set_params(tmpl);\n@@ -737,8 +897,6 @@ struct mlx5_txq_ctrl *\n \t\trte_errno = ENOMEM;\n \t\tgoto error;\n \t}\n-\ttmpl->txq.elts =\n-\t\t(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);\n \trte_atomic32_inc(&tmpl->refcnt);\n \tLIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);\n \treturn tmpl;\n",
    "prefixes": [
        "v2",
        "4/7"
    ]
}
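
The mbox URL returned in the response above can be fed straight to git. A small sketch, again with Python's requests library, that downloads this patch in mbox form and applies it inside a local DPDK working tree (the temporary filename is arbitrary and illustrative):

import requests
import subprocess

patch = requests.get("http://patches.dpdk.org/api/patches/56455/").json()

# Fetch the patch as an mbox and apply it with git-am; run this from
# inside a DPDK checkout.
mbox = requests.get(patch["mbox"]).text
with open("/tmp/56455.mbox", "w") as f:
    f.write(mbox)
subprocess.run(["git", "am", "/tmp/56455.mbox"], check=True)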