get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
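
A minimal sketch of fetching this resource with Python's requests library. The client library, the Accept header, and the printed fields are illustrative choices, not part of the API page itself:

import requests

# Ask for plain JSON instead of the browsable "?format=api" rendering shown below.
resp = requests.get(
    "https://patches.dpdk.org/api/patches/57235/",
    headers={"Accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[11/11] net/mlx5: allow LRO per Rx queue"
print(patch["state"])  # "accepted"
print(patch["mbox"])   # mbox URL suitable for `git am`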

GET /api/patches/57235/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 57235,
    "url": "https://patches.dpdk.org/api/patches/57235/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1564401209-18752-12-git-send-email-matan@mellanox.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1564401209-18752-12-git-send-email-matan@mellanox.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1564401209-18752-12-git-send-email-matan@mellanox.com",
    "date": "2019-07-29T11:53:29",
    "name": "[11/11] net/mlx5: allow LRO per Rx queue",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "48ebde3f7d1fcd106f1f67f893ccecf836adfb4e",
    "submitter": {
        "id": 796,
        "url": "https://patches.dpdk.org/api/people/796/?format=api",
        "name": "Matan Azrad",
        "email": "matan@mellanox.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1564401209-18752-12-git-send-email-matan@mellanox.com/mbox/",
    "series": [
        {
            "id": 5809,
            "url": "https://patches.dpdk.org/api/series/5809/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=5809",
            "date": "2019-07-29T11:53:21",
            "name": "net/mlx5: LRO fixes and enhancements",
            "version": 1,
            "mbox": "https://patches.dpdk.org/series/5809/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/57235/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/57235/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 4524C1BF91;\n\tMon, 29 Jul 2019 14:16:54 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n\tby dpdk.org (Postfix) with ESMTP id 5820E1BF59\n\tfor <dev@dpdk.org>; Mon, 29 Jul 2019 14:16:34 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE2 (envelope-from\n\tmatan@mellanox.com)\n\twith ESMTPS (AES256-SHA encrypted); 29 Jul 2019 15:16:30 +0300",
            "from pegasus07.mtr.labs.mlnx (pegasus07.mtr.labs.mlnx\n\t[10.210.16.112])\n\tby labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x6TCGS4P021429;\n\tMon, 29 Jul 2019 15:16:29 +0300"
        ],
        "From": "Matan Azrad <matan@mellanox.com>",
        "To": "Shahaf Shuler <shahafs@mellanox.com>, Yongseok Koh <yskoh@mellanox.com>, \n\tViacheslav Ovsiienko <viacheslavo@mellanox.com>",
        "Cc": "dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>",
        "Date": "Mon, 29 Jul 2019 11:53:29 +0000",
        "Message-Id": "<1564401209-18752-12-git-send-email-matan@mellanox.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1564401209-18752-1-git-send-email-matan@mellanox.com>",
        "References": "<1564401209-18752-1-git-send-email-matan@mellanox.com>",
        "Subject": "[dpdk-dev] [PATCH 11/11] net/mlx5: allow LRO per Rx queue",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Enabling LRO offload per queue makes sense because the user will\nprobably want to allocate different mempool for LRO queues - the LRO\nmempool mbuf size may be bigger than non LRO mempool.\n\nChange the LRO offload to be per queue instead of per port.\n\nIf one of the queues is with LRO enabled, all the queues will be\nconfigured via DevX.\n\nIf RSS flows direct TCP packets to queues with different LRO enabling,\nthese flows will not be offloaded with LRO.\n\nSigned-off-by: Matan Azrad <matan@mellanox.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>\n---\n drivers/net/mlx5/mlx5.h          |  3 ---\n drivers/net/mlx5/mlx5_ethdev.c   |  8 +------\n drivers/net/mlx5/mlx5_rxq.c      | 52 +++++++++++++++++++---------------------\n drivers/net/mlx5/mlx5_rxtx.h     |  6 ++---\n drivers/net/mlx5/mlx5_rxtx_vec.c |  4 ++--\n drivers/net/mlx5/mlx5_trigger.c  | 10 +++++---\n 6 files changed, 38 insertions(+), 45 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 5c40091..e812374 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -195,9 +195,6 @@ struct mlx5_hca_attr {\n #define MLX5_LRO_SUPPORTED(dev) \\\n \t(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)\n \n-#define MLX5_LRO_ENABLED(dev) \\\n-\t((dev)->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)\n-\n /* LRO configurations structure. */\n struct mlx5_lro_config {\n \tuint32_t supported:1; /* Whether LRO is supported. */\ndiff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c\nindex 9d11831..9629cfb 100644\n--- a/drivers/net/mlx5/mlx5_ethdev.c\n+++ b/drivers/net/mlx5/mlx5_ethdev.c\n@@ -389,7 +389,6 @@ struct ethtool_link_settings {\n \tconst uint8_t use_app_rss_key =\n \t\t!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;\n \tint ret = 0;\n-\tunsigned int lro_on = mlx5_lro_on(dev);\n \n \tif (use_app_rss_key &&\n \t    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=\n@@ -454,11 +453,6 @@ struct ethtool_link_settings {\n \t\t\t\tj = 0;\n \t\t}\n \t}\n-\tif (lro_on && priv->config.cqe_comp) {\n-\t\t/* CQE compressing is not supported for LRO CQEs. */\n-\t\tDRV_LOG(WARNING, \"Rx CQE compression isn't supported with LRO\");\n-\t\tpriv->config.cqe_comp = 0;\n-\t}\n \tret = mlx5_proc_priv_init(dev);\n \tif (ret)\n \t\treturn ret;\n@@ -571,7 +565,7 @@ struct ethtool_link_settings {\n \tinfo->max_tx_queues = max;\n \tinfo->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;\n \tinfo->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);\n-\tinfo->rx_offload_capa = (mlx5_get_rx_port_offloads(dev) |\n+\tinfo->rx_offload_capa = (mlx5_get_rx_port_offloads() |\n \t\t\t\t info->rx_queue_offload_capa);\n \tinfo->tx_offload_capa = mlx5_get_tx_port_offloads(dev);\n \tinfo->if_index = mlx5_ifindex(dev);\ndiff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c\nindex f7e861c..a1fdeef 100644\n--- a/drivers/net/mlx5/mlx5_rxq.c\n+++ b/drivers/net/mlx5/mlx5_rxq.c\n@@ -124,21 +124,6 @@\n }\n \n /**\n- * Check whether LRO is supported and enabled for the device.\n- *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n- * @return\n- *   0 if disabled, 1 if enabled.\n- */\n-inline int\n-mlx5_lro_on(struct rte_eth_dev *dev)\n-{\n-\treturn (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));\n-}\n-\n-/**\n  * Allocate RX queue elements for Multi-Packet RQ.\n  *\n  * @param rxq_ctrl\n@@ -394,6 +379,8 @@\n \t\t\t     DEV_RX_OFFLOAD_TCP_CKSUM);\n \tif (config->hw_vlan_strip)\n \t\toffloads |= DEV_RX_OFFLOAD_VLAN_STRIP;\n+\tif (MLX5_LRO_SUPPORTED(dev))\n+\t\toffloads |= DEV_RX_OFFLOAD_TCP_LRO;\n \treturn offloads;\n }\n \n@@ -401,19 +388,14 @@\n /**\n  * Returns the per-port supported offloads.\n  *\n- * @param dev\n- *   Pointer to Ethernet device.\n- *\n  * @return\n  *   Supported Rx offloads.\n  */\n uint64_t\n-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)\n+mlx5_get_rx_port_offloads(void)\n {\n \tuint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;\n \n-\tif (MLX5_LRO_SUPPORTED(dev))\n-\t\toffloads |= DEV_RX_OFFLOAD_TCP_LRO;\n \treturn offloads;\n }\n \n@@ -889,7 +871,8 @@\n \tcq_attr.mlx5 = (struct mlx5dv_cq_init_attr){\n \t\t.comp_mask = 0,\n \t};\n-\tif (priv->config.cqe_comp && !rxq_data->hw_timestamp) {\n+\tif (priv->config.cqe_comp && !rxq_data->hw_timestamp &&\n+\t    !rxq_data->lro) {\n \t\tcq_attr.mlx5.comp_mask |=\n \t\t\t\tMLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;\n #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT\n@@ -911,6 +894,10 
@@\n \t\t\t\"port %u Rx CQE compression is disabled for HW\"\n \t\t\t\" timestamp\",\n \t\t\tdev->data->port_id);\n+\t} else if (priv->config.cqe_comp && rxq_data->lro) {\n+\t\tDRV_LOG(DEBUG,\n+\t\t\t\"port %u Rx CQE compression is disabled for LRO\",\n+\t\t\tdev->data->port_id);\n \t}\n #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD\n \tif (priv->config.cqe_pad) {\n@@ -1607,6 +1594,7 @@ struct mlx5_rxq_ctrl *\n \t\tdesc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;\n \tuint64_t offloads = conf->offloads |\n \t\t\t   dev->data->dev_conf.rxmode.offloads;\n+\tunsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);\n \tconst int mprq_en = mlx5_check_mprq_support(dev) > 0;\n \tunsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;\n \tunsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +\n@@ -1646,7 +1634,7 @@ struct mlx5_rxq_ctrl *\n \t * In this case scatter is, for sure, enabled and an empty mbuf may be\n \t * added in the start for the head-room.\n \t */\n-\tif (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&\n+\tif (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&\n \t    non_scatter_min_mbuf_size > mb_len) {\n \t\tstrd_headroom_en = 0;\n \t\tmprq_stride_size = RTE_MIN(max_rx_pkt_len,\n@@ -1693,7 +1681,7 @@ struct mlx5_rxq_ctrl *\n \t\tunsigned int size = non_scatter_min_mbuf_size;\n \t\tunsigned int sges_n;\n \n-\t\tif (mlx5_lro_on(dev) && first_mb_free_size <\n+\t\tif (lro_on_queue && first_mb_free_size <\n \t\t    MLX5_MAX_LRO_HEADER_FIX) {\n \t\t\tDRV_LOG(ERR, \"Not enough space in the first segment(%u)\"\n \t\t\t\t\" to include the max header size(%u) for LRO\",\n@@ -1747,13 +1735,14 @@ struct mlx5_rxq_ctrl *\n \ttmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);\n \t/* By default, FCS (CRC) is stripped by hardware. */\n \ttmpl->rxq.crc_present = 0;\n+\ttmpl->rxq.lro = lro_on_queue;\n \tif (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {\n \t\tif (config->hw_fcs_strip) {\n \t\t\t/*\n \t\t\t * RQs used for LRO-enabled TIRs should not be\n \t\t\t * configured to scatter the FCS.\n \t\t\t */\n-\t\t\tif (mlx5_lro_on(dev))\n+\t\t\tif (lro_on_queue)\n \t\t\t\tDRV_LOG(WARNING,\n \t\t\t\t\t\"port %u CRC stripping has been \"\n \t\t\t\t\t\"disabled but will still be performed \"\n@@ -2204,7 +2193,16 @@ struct mlx5_hrxq *\n \t\t}\n \t} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */\n \t\tstruct mlx5_devx_tir_attr tir_attr;\n-\n+\t\tuint32_t i;\n+\t\tuint32_t lro = 1;\n+\n+\t\t/* Enable TIR LRO only if all the queues were configured for. */\n+\t\tfor (i = 0; i < queues_n; ++i) {\n+\t\t\tif (!(*priv->rxqs)[queues[i]]->lro) {\n+\t\t\t\tlro = 0;\n+\t\t\t\tbreak;\n+\t\t\t}\n+\t\t}\n \t\tmemset(&tir_attr, 0, sizeof(tir_attr));\n \t\ttir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;\n \t\ttir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;\n@@ -2216,7 +2214,7 @@ struct mlx5_hrxq *\n \t\tif (dev->data->dev_conf.lpbk_mode)\n \t\t\ttir_attr.self_lb_block =\n \t\t\t\t\tMLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;\n-\t\tif (mlx5_lro_on(dev)) {\n+\t\tif (lro) {\n \t\t\ttir_attr.lro_timeout_period_usecs =\n \t\t\t\t\tpriv->config.lro.timeout;\n \t\t\ttir_attr.lro_max_msg_sz = priv->max_lro_msg_size;\ndiff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h\nindex 9b58d0a..c209d99 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.h\n+++ b/drivers/net/mlx5/mlx5_rxtx.h\n@@ -115,7 +115,8 @@ struct mlx5_rxq_data {\n \tunsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */\n \tunsigned int err_state:2; /* enum mlx5_rxq_err_state. 
*/\n \tunsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */\n-\tunsigned int :2; /* Remaining bits. */\n+\tunsigned int lro:1; /* Enable LRO. */\n+\tunsigned int :1; /* Remaining bits. */\n \tvolatile uint32_t *rq_db;\n \tvolatile uint32_t *cq_db;\n \tuint16_t port_id;\n@@ -367,9 +368,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,\n int mlx5_hrxq_verify(struct rte_eth_dev *dev);\n struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);\n void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);\n-uint64_t mlx5_get_rx_port_offloads(struct rte_eth_dev *dev);\n+uint64_t mlx5_get_rx_port_offloads(void);\n uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);\n-int mlx5_lro_on(struct rte_eth_dev *dev);\n \n /* mlx5_txq.c */\n \ndiff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c\nindex 3815ff6..3925f4d 100644\n--- a/drivers/net/mlx5/mlx5_rxtx_vec.c\n+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c\n@@ -129,6 +129,8 @@ int __attribute__((cold))\n \t\treturn -ENOTSUP;\n \tif (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)\n \t\treturn -ENOTSUP;\n+\tif (rxq->lro)\n+\t\treturn -ENOTSUP;\n \treturn 1;\n }\n \n@@ -151,8 +153,6 @@ int __attribute__((cold))\n \t\treturn -ENOTSUP;\n \tif (mlx5_mprq_enabled(dev))\n \t\treturn -ENOTSUP;\n-\tif (mlx5_lro_on(dev))\n-\t\treturn -ENOTSUP;\n \t/* All the configured queues should support. */\n \tfor (i = 0; i < priv->rxqs_n; ++i) {\n \t\tstruct mlx5_rxq_data *rxq = (*priv->rxqs)[i];\ndiff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c\nindex 8bc2174..aa323ad 100644\n--- a/drivers/net/mlx5/mlx5_trigger.c\n+++ b/drivers/net/mlx5/mlx5_trigger.c\n@@ -99,10 +99,14 @@\n \tstruct mlx5_priv *priv = dev->data->dev_private;\n \tunsigned int i;\n \tint ret = 0;\n-\tunsigned int lro_on = mlx5_lro_on(dev);\n-\tenum mlx5_rxq_obj_type obj_type = lro_on ? MLX5_RXQ_OBJ_TYPE_DEVX_RQ :\n-\t\t\t\t\t\t   MLX5_RXQ_OBJ_TYPE_IBV;\n+\tenum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;\n \n+\tfor (i = 0; i < priv->rxqs_n; ++i) {\n+\t\tif ((*priv->rxqs)[i]->lro) {\n+\t\t\tobj_type =  MLX5_RXQ_OBJ_TYPE_DEVX_RQ;\n+\t\t\tbreak;\n+\t\t}\n+\t}\n \t/* Allocate/reuse/resize mempool for Multi-Packet RQ. */\n \tif (mlx5_mprq_alloc_mp(dev)) {\n \t\t/* Should not release Rx queues but return immediately. */\n",
    "prefixes": [
        "11/11"
    ]
}
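
The Allow header above also advertises PUT and PATCH ("Update a patch."). A hedged sketch of a partial update follows, assuming the caller holds a Patchwork API token with maintainer rights on this project and that "state" and "archived" are writable on this instance; the token value is a placeholder:

import requests

API_TOKEN = "<your-patchwork-api-token>"  # placeholder, not a real credential

# Partially update the patch via PATCH; only the supplied fields change.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/57235/",
    headers={
        "Accept": "application/json",
        "Authorization": f"Token {API_TOKEN}",  # assumes DRF token authentication
    },
    json={"state": "accepted", "archived": True},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])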