get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/85796/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 85796,
    "url": "https://patches.dpdk.org/api/patches/85796/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20201228095436.14996-9-talshn@nvidia.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201228095436.14996-9-talshn@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201228095436.14996-9-talshn@nvidia.com",
    "date": "2020-12-28T09:54:12",
    "name": "[v5,08/32] net/mlx5: move static_assert calls to global scope",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "1ba59f47ea1cb696d376febb3c2ce23bbf67b66b",
    "submitter": {
        "id": 1893,
        "url": "https://patches.dpdk.org/api/people/1893/?format=api",
        "name": "Tal Shnaiderman",
        "email": "talshn@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "https://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20201228095436.14996-9-talshn@nvidia.com/mbox/",
    "series": [
        {
            "id": 14480,
            "url": "https://patches.dpdk.org/api/series/14480/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=14480",
            "date": "2020-12-28T09:54:04",
            "name": "mlx5 Windows support - part #5",
            "version": 5,
            "mbox": "https://patches.dpdk.org/series/14480/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/85796/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/85796/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 108D0A09FF;\n\tMon, 28 Dec 2020 11:03:52 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 7F43ECC19;\n\tMon, 28 Dec 2020 10:55:44 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id D27D5CA5A\n for <dev@dpdk.org>; Mon, 28 Dec 2020 10:54:51 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n talshn@nvidia.com) with SMTP; 28 Dec 2020 11:54:44 +0200",
            "from nvidia.com (l-wincomp04-vm.mtl.labs.mlnx [10.237.1.5])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 0BS9shDZ012171;\n Mon, 28 Dec 2020 11:54:44 +0200"
        ],
        "From": "Tal Shnaiderman <talshn@nvidia.com>",
        "To": "dev@dpdk.org",
        "Cc": "thomas@monjalon.net, matan@nvidia.com, rasland@nvidia.com,\n ophirmu@nvidia.com",
        "Date": "Mon, 28 Dec 2020 11:54:12 +0200",
        "Message-Id": "<20201228095436.14996-9-talshn@nvidia.com>",
        "X-Mailer": "git-send-email 2.16.1.windows.4",
        "In-Reply-To": "<20201228095436.14996-1-talshn@nvidia.com>",
        "References": "<20201213205005.7300-2-talshn@nvidia.com>\n <20201228095436.14996-1-talshn@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v5 08/32] net/mlx5: move static_assert calls to\n\tglobal scope",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Ophir Munk <ophirmu@nvidia.com>\n\nSome Windows compilers consider static_assert() as calls to another\nfunction rather than a compiler directive which allows checking type\ninformation at compile time.  This only occurs if the static_assert call\nappears inside another function scope. To solve it move the\nstatic_assert calls to global scope in the files where they are used.\n\nSigned-off-by: Ophir Munk <ophirmu@nvidia.com>\nAcked-by: Matan Azrad <matan@nvidia.com>\n---\n drivers/net/mlx5/mlx5_rxtx.c | 98 ++++++++++++++++++++++----------------------\n drivers/net/mlx5/mlx5_txpp.c |  5 ++-\n 2 files changed, 53 insertions(+), 50 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c\nindex d12d746c2f..65a1f997e9 100644\n--- a/drivers/net/mlx5/mlx5_rxtx.c\n+++ b/drivers/net/mlx5/mlx5_rxtx.c\n@@ -79,6 +79,56 @@ static uint16_t mlx5_tx_burst_##func(void *txq, \\\n \n #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},\n \n+/* static asserts */\n+static_assert(MLX5_CQE_STATUS_HW_OWN < 0, \"Must be negative value\");\n+static_assert(MLX5_CQE_STATUS_SW_OWN < 0, \"Must be negative value\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(rte_v128u32_t)),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(struct rte_vlan_hdr) +\n+\t\t 2 * RTE_ETHER_ADDR_LEN),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(rte_v128u32_t)),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(struct rte_vlan_hdr) +\n+\t\t 2 * RTE_ETHER_ADDR_LEN),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(rte_v128u32_t)),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n+\t\t(sizeof(uint16_t) +\n+\t\t sizeof(struct rte_vlan_hdr) +\n+\t\t 2 * RTE_ETHER_ADDR_LEN),\n+\t\t\"invalid Ethernet Segment data size\");\n+static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==\n+\t\t(2 * RTE_ETHER_ADDR_LEN),\n+\t\t\"invalid Data Segment data size\");\n+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, \"invalid min size\");\n+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, \"invalid min size\");\n+static_assert((sizeof(struct rte_vlan_hdr) +\n+\t\t\tsizeof(struct rte_ether_hdr)) ==\n+\t\tMLX5_ESEG_MIN_INLINE_SIZE,\n+\t\t\"invalid min inline data size\");\n+static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=\n+\t\tMLX5_DSEG_MAX, \"invalid WQE max size\");\n+static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,\n+\t\t\"invalid WQE Control Segment size\");\n+static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,\n+\t\t\"invalid WQE Ethernet Segment size\");\n+static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,\n+\t\t\"invalid WQE Data Segment size\");\n+static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,\n+\t\t\"invalid WQE size\");\n+\n static __rte_always_inline uint32_t\n rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,\n \t\t\t\t   volatile struct mlx5_mini_cqe8 *mcqe);\n@@ -2070,8 +2120,6 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,\n \tbool ring_doorbell = false;\n \tint ret;\n \n-\tstatic_assert(MLX5_CQE_STATUS_HW_OWN < 0, \"Must be negative value\");\n-\tstatic_assert(MLX5_CQE_STATUS_SW_OWN < 0, \"Must be negative value\");\n \tdo {\n \t\tvolatile struct mlx5_cqe *cqe;\n \n@@ -2381,15 +2429,6 @@ mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,\n \tes->metadata = MLX5_TXOFF_CONFIG(METADATA) ?\n \t\t       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?\n \t\t       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(rte_v128u32_t)),\n-\t\t      \"invalid Ethernet Segment data size\");\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(struct rte_vlan_hdr) +\n-\t\t\t\t 2 * RTE_ETHER_ADDR_LEN),\n-\t\t      \"invalid Ethernet Segment data size\");\n \tpsrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);\n \tes->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);\n \tes->inline_data = *(unaligned_uint16_t *)psrc;\n@@ -2474,15 +2513,6 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,\n \tes->metadata = MLX5_TXOFF_CONFIG(METADATA) ?\n \t\t       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?\n \t\t       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(rte_v128u32_t)),\n-\t\t      \"invalid Ethernet Segment data size\");\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(struct rte_vlan_hdr) +\n-\t\t\t\t 2 * RTE_ETHER_ADDR_LEN),\n-\t\t      \"invalid Ethernet Segment data size\");\n \tpsrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);\n \tes->inline_hdr_sz = rte_cpu_to_be_16(inlen);\n \tes->inline_data = *(unaligned_uint16_t *)psrc;\n@@ -2697,15 +2727,6 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,\n \tes->metadata = MLX5_TXOFF_CONFIG(METADATA) ?\n \t\t       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?\n \t\t       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(rte_v128u32_t)),\n-\t\t      \"invalid Ethernet Segment data size\");\n-\tstatic_assert(MLX5_ESEG_MIN_INLINE_SIZE ==\n-\t\t\t\t(sizeof(uint16_t) +\n-\t\t\t\t sizeof(struct rte_vlan_hdr) +\n-\t\t\t\t 2 * RTE_ETHER_ADDR_LEN),\n-\t\t      \"invalid Ethernet Segment data size\");\n \tMLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);\n \tpdst = (uint8_t *)&es->inline_data;\n \tif (MLX5_TXOFF_CONFIG(VLAN) && vlan) {\n@@ -2952,9 +2973,6 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,\n \tuint8_t *pdst;\n \n \tMLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);\n-\tstatic_assert(MLX5_DSEG_MIN_INLINE_SIZE ==\n-\t\t\t\t (2 * RTE_ETHER_ADDR_LEN),\n-\t\t      \"invalid Data Segment data size\");\n \tif (!MLX5_TXOFF_CONFIG(MPW)) {\n \t\t/* Store the descriptor byte counter for eMPW sessions. */\n \t\tdseg->bcount = rte_cpu_to_be_32\n@@ -4070,7 +4088,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,\n \tMLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));\n \tMLX5_ASSERT(loc->elts_free && loc->wqe_free);\n \tMLX5_ASSERT(pkts_n > loc->pkts_sent);\n-\tstatic_assert(MLX5_EMPW_MIN_PACKETS >= 2, \"invalid min size\");\n \tpkts += loc->pkts_sent + 1;\n \tpkts_n -= loc->pkts_sent;\n \tfor (;;) {\n@@ -4247,7 +4264,6 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,\n \tMLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));\n \tMLX5_ASSERT(loc->elts_free && loc->wqe_free);\n \tMLX5_ASSERT(pkts_n > loc->pkts_sent);\n-\tstatic_assert(MLX5_EMPW_MIN_PACKETS >= 2, \"invalid min size\");\n \tpkts += loc->pkts_sent + 1;\n \tpkts_n -= loc->pkts_sent;\n \tfor (;;) {\n@@ -4561,10 +4577,6 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,\n \t\t\t    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {\n \t\t\t\tvlan = sizeof(struct rte_vlan_hdr);\n \t\t\t\tinlen += vlan;\n-\t\t\t\tstatic_assert((sizeof(struct rte_vlan_hdr) +\n-\t\t\t\t\t       sizeof(struct rte_ether_hdr)) ==\n-\t\t\t\t\t       MLX5_ESEG_MIN_INLINE_SIZE,\n-\t\t\t\t\t       \"invalid min inline data size\");\n \t\t\t}\n \t\t\t/*\n \t\t\t * If inlining is enabled at configuration time\n@@ -5567,16 +5579,6 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)\n \tuint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;\n \tunsigned int diff = 0, olx = 0, i, m;\n \n-\tstatic_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=\n-\t\t      MLX5_DSEG_MAX, \"invalid WQE max size\");\n-\tstatic_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,\n-\t\t      \"invalid WQE Control Segment size\");\n-\tstatic_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,\n-\t\t      \"invalid WQE Ethernet Segment size\");\n-\tstatic_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,\n-\t\t      \"invalid WQE Data Segment size\");\n-\tstatic_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,\n-\t\t      \"invalid WQE size\");\n \tMLX5_ASSERT(priv);\n \tif (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {\n \t\t/* We should support Multi-Segment Packets. */\ndiff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c\nindex a1ec294942..d61e43e55d 100644\n--- a/drivers/net/mlx5/mlx5_txpp.c\n+++ b/drivers/net/mlx5/mlx5_txpp.c\n@@ -18,6 +18,9 @@\n #include \"mlx5_rxtx.h\"\n #include \"mlx5_common_os.h\"\n \n+static_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),\n+\t\t\"Wrong timestamp CQE part size\");\n+\n static const char * const mlx5_txpp_stat_names[] = {\n \t\"tx_pp_missed_interrupt_errors\", /* Missed service interrupt. */\n \t\"tx_pp_rearm_queue_errors\", /* Rearm Queue errors. */\n@@ -741,8 +744,6 @@ mlx5_txpp_update_timestamp(struct mlx5_dev_ctx_shared *sh)\n \tuint64_t ts;\n \tuint16_t ci;\n \n-\tstatic_assert(sizeof(struct mlx5_cqe_ts) == sizeof(rte_int128_t),\n-\t\t      \"Wrong timestamp CQE part size\");\n \tmlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);\n \tif (to.cts.op_own >> 4) {\n \t\tDRV_LOG(DEBUG, \"Clock Queue error sync lost.\");\n",
    "prefixes": [
        "v5",
        "08/32"
    ]
}