get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
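
For reference, here is a minimal sketch of driving this endpoint from Python with the requests package (an assumption; any HTTP client works). The GET mirrors the response shown below; the PATCH call is illustrative only, since updates require maintainer rights and a Patchwork API token, and the token value here is a placeholder.

import requests

API_URL = "https://patches.dpdk.org/api/patches/92618/"

# GET: fetch the patch metadata (the JSON body shown below).
resp = requests.get(API_URL, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"], patch["check"])

# The "mbox" field points at the raw patch, suitable for git am.
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("patch-92618.mbox", "wb") as f:
    f.write(mbox.content)

# PATCH: partial update of writable fields such as "archived" or "state",
# authenticated with an API token (placeholder value, not a real token).
requests.patch(
    API_URL,
    json={"archived": True},
    headers={"Authorization": "Token 0123456789abcdef"},
    timeout=30,
).raise_for_status()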

GET /api/patches/92618/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 92618,
    "url": "https://patches.dpdk.org/api/patches/92618/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/1619896064-7943-2-git-send-email-timothy.mcdaniel@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1619896064-7943-2-git-send-email-timothy.mcdaniel@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1619896064-7943-2-git-send-email-timothy.mcdaniel@intel.com",
    "date": "2021-05-01T19:07:44",
    "name": "[v3,1/1] event/dlb2: optimize Dequeue Operations",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "82df688eb14c1db809e42fc5ddfe0665cdf21049",
    "submitter": {
        "id": 826,
        "url": "https://patches.dpdk.org/api/people/826/?format=api",
        "name": "Timothy McDaniel",
        "email": "timothy.mcdaniel@intel.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/1619896064-7943-2-git-send-email-timothy.mcdaniel@intel.com/mbox/",
    "series": [
        {
            "id": 16785,
            "url": "https://patches.dpdk.org/api/series/16785/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=16785",
            "date": "2021-05-01T19:07:43",
            "name": "Optimize DLB2 Dequeue Operations",
            "version": 3,
            "mbox": "https://patches.dpdk.org/series/16785/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/92618/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/92618/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A6A49A0546;\n\tSat,  1 May 2021 21:09:25 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id F0965410F6;\n\tSat,  1 May 2021 21:09:22 +0200 (CEST)",
            "from mga06.intel.com (mga06.intel.com [134.134.136.31])\n by mails.dpdk.org (Postfix) with ESMTP id D46ED4014F\n for <dev@dpdk.org>; Sat,  1 May 2021 21:09:20 +0200 (CEST)",
            "from orsmga001.jf.intel.com ([10.7.209.18])\n by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 01 May 2021 12:09:20 -0700",
            "from txasoft-yocto.an.intel.com ([10.123.72.192])\n by orsmga001.jf.intel.com with ESMTP; 01 May 2021 12:09:19 -0700"
        ],
        "IronPort-SDR": [
            "\n yZLv1k8aRaeBjQXOMYxtc9PVsMZJvTcT+NKcFJEN3GoxhhfZBjeBPEph9i30Ksco4N7PVoWMXz\n VPzvOxQyp4zw==",
            "\n 3T6CTzIxAxRoY+ofbLJ//7W0WhF000VW49h0ABQ3F+TnnhTCdQAPh/fCpPIZ3Yq4iZJ/ebROaY\n UyRWIwuShXHw=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6200,9189,9971\"; a=\"258779267\"",
            "E=Sophos;i=\"5.82,266,1613462400\"; d=\"scan'208\";a=\"258779267\"",
            "E=Sophos;i=\"5.82,266,1613462400\"; d=\"scan'208\";a=\"467286414\""
        ],
        "X-ExtLoop1": "1",
        "From": "\"McDaniel, Timothy\" <timothy.mcdaniel@intel.com>",
        "To": "",
        "Cc": "dev@dpdk.org, erik.g.carrillo@intel.com, harry.van.haaren@intel.com,\n jerinj@marvell.com, thomas@monjalon.net,\n Timothy McDaniel <timothy.mcdaniel@intel.com>",
        "Date": "Sat,  1 May 2021 14:07:44 -0500",
        "Message-Id": "<1619896064-7943-2-git-send-email-timothy.mcdaniel@intel.com>",
        "X-Mailer": "git-send-email 1.7.10",
        "In-Reply-To": "<1619896064-7943-1-git-send-email-timothy.mcdaniel@intel.com>",
        "References": "<1616000564-16404-1-git-send-email-timothy.mcdaniel@intel.com>\n <1619896064-7943-1-git-send-email-timothy.mcdaniel@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 1/1] event/dlb2: optimize Dequeue Operations",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Timothy McDaniel <timothy.mcdaniel@intel.com>\n\nConvert code to use x86 vector instructions, thereby significantly\nimproving dequeue performance.\n\nSigned-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>\nSigned-off-by: Harry Van Haaren <harry.van.haaren@intel.com>\n---\n drivers/event/dlb2/dlb2.c      | 445 +++++++++++++++++++++++++++++----\n drivers/event/dlb2/dlb2_priv.h |  22 +-\n 2 files changed, 414 insertions(+), 53 deletions(-)",
    "diff": "diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c\nindex 818b1c367..c8a50cddf 100644\n--- a/drivers/event/dlb2/dlb2.c\n+++ b/drivers/event/dlb2/dlb2.c\n@@ -375,6 +375,26 @@ set_default_depth_thresh(const char *key __rte_unused,\n \treturn 0;\n }\n \n+static int\n+set_vector_opts_disab(const char *key __rte_unused,\n+\tconst char *value,\n+\tvoid *opaque)\n+{\n+\tbool *dlb2_vector_opts_disabled = opaque;\n+\n+\tif (value == NULL || opaque == NULL) {\n+\t\tDLB2_LOG_ERR(\"NULL pointer\\n\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif ((*value == 'y') || (*value == 'Y'))\n+\t\t*dlb2_vector_opts_disabled = true;\n+\telse\n+\t\t*dlb2_vector_opts_disabled = false;\n+\n+\treturn 0;\n+}\n+\n static int\n set_qid_depth_thresh(const char *key __rte_unused,\n \t\t     const char *value,\n@@ -1240,6 +1260,37 @@ dlb2_event_enqueue_forward_burst_delayed(void *event_port,\n \t\t\t\t\t const struct rte_event events[],\n \t\t\t\t\t uint16_t num);\n \n+/* Generate the required bitmask for rotate-style expected QE gen bits.\n+ * This requires a pattern of 1's and zeros, starting with expected as\n+ * 1 bits, so when hardware writes 0's they're \"new\". This requires the\n+ * ring size to be powers of 2 to wrap correctly.\n+ */\n+static void\n+dlb2_hw_cq_bitmask_init(struct dlb2_port *qm_port, uint32_t cq_depth)\n+{\n+\tuint64_t cq_build_mask = 0;\n+\tuint32_t i;\n+\n+\tif (cq_depth > 64)\n+\t\treturn; /* need to fall back to scalar code */\n+\n+\t/*\n+\t * all 1's in first u64, all zeros in second is correct bit pattern to\n+\t * start. Special casing == 64 easier than adapting complex loop logic.\n+\t */\n+\tif (cq_depth == 64) {\n+\t\tqm_port->cq_rolling_mask = 0;\n+\t\tqm_port->cq_rolling_mask_2 = -1;\n+\t\treturn;\n+\t}\n+\n+\tfor (i = 0; i < 64; i += (cq_depth * 2))\n+\t\tcq_build_mask |= ((1ULL << cq_depth) - 1) << (i + cq_depth);\n+\n+\tqm_port->cq_rolling_mask = cq_build_mask;\n+\tqm_port->cq_rolling_mask_2 = cq_build_mask;\n+}\n+\n static int\n dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,\n \t\t\tstruct dlb2_eventdev_port *ev_port,\n@@ -1357,6 +1408,8 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,\n \t/* starting value of gen bit - it toggles at wrap time */\n \tqm_port->gen_bit = 1;\n \n+\tdlb2_hw_cq_bitmask_init(qm_port, qm_port->cq_depth);\n+\n \tqm_port->int_armed = false;\n \n \t/* Save off for later use in info and lookup APIs. 
*/\n@@ -1408,6 +1461,18 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,\n \t\t\t     dequeue_depth,\n \t\t\t     qm_port->credits);\n \t}\n+\n+\tqm_port->use_scalar = false;\n+\n+#if (!defined RTE_ARCH_X86_64)\n+\tqm_port->use_scalar = true;\n+#else\n+\tif ((qm_port->cq_depth > 64) ||\n+\t    (!rte_is_power_of_2(qm_port->cq_depth)) ||\n+\t    (dlb2->vector_opts_disabled == true))\n+\t\tqm_port->use_scalar = true;\n+#endif\n+\n \trte_spinlock_unlock(&handle->resource_lock);\n \n \treturn 0;\n@@ -1553,6 +1618,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,\n \tqm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);\n \t/* starting value of gen bit - it toggles at wrap time */\n \tqm_port->gen_bit = 1;\n+\tdlb2_hw_cq_bitmask_init(qm_port, qm_port->cq_depth);\n \n \tqm_port->int_armed = false;\n \n@@ -1593,6 +1659,16 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,\n \t\t\t     dequeue_depth,\n \t\t\t     credit_high_watermark);\n \t}\n+\n+#if (!defined RTE_ARCH_X86_64)\n+\tqm_port->use_scalar = true;\n+#else\n+\tif ((qm_port->cq_depth > 64) ||\n+\t    (!rte_is_power_of_2(qm_port->cq_depth)) ||\n+\t    (dlb2->vector_opts_disabled == true))\n+\t\tqm_port->use_scalar = true;\n+#endif\n+\n \trte_spinlock_unlock(&handle->resource_lock);\n \n \treturn 0;\n@@ -2987,10 +3063,11 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,\n \t\tint j = 0;\n \n \t\t/* Zero-out QEs */\n-\t\tqm_port->qe4[0].cmd_byte = 0;\n-\t\tqm_port->qe4[1].cmd_byte = 0;\n-\t\tqm_port->qe4[2].cmd_byte = 0;\n-\t\tqm_port->qe4[3].cmd_byte = 0;\n+\t\t_mm_storeu_si128((void *)&qm_port->qe4[0], _mm_setzero_si128());\n+\t\t_mm_storeu_si128((void *)&qm_port->qe4[1], _mm_setzero_si128());\n+\t\t_mm_storeu_si128((void *)&qm_port->qe4[2], _mm_setzero_si128());\n+\t\t_mm_storeu_si128((void *)&qm_port->qe4[3], _mm_setzero_si128());\n+\n \n \t\tfor (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {\n \t\t\tint16_t thresh = qm_port->token_pop_thresh;\n@@ -3020,7 +3097,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,\n \n sw_credit_update:\n \t/* each release returns one credit */\n-\tif (!ev_port->outstanding_releases) {\n+\tif (unlikely(!ev_port->outstanding_releases)) {\n \t\tDLB2_LOG_ERR(\"%s: Outstanding releases underflowed.\\n\",\n \t\t\t     __func__);\n \t\treturn;\n@@ -3137,7 +3214,7 @@ dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,\n \treturn 0;\n }\n \n-static inline int\n+static __rte_noinline int\n dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,\n \t\t\t struct dlb2_port *qm_port,\n \t\t\t struct rte_event *events,\n@@ -3406,8 +3483,7 @@ dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)\n \n \tcq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;\n \n-\tidx = qm_port->cq_idx;\n-\n+\tidx = qm_port->cq_idx_unmasked & qm_port->cq_depth_mask;\n \t/* Load the next 4 QEs */\n \taddr[0] = (uintptr_t)&cq_addr[idx];\n \taddr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];\n@@ -3452,6 +3528,272 @@ dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)\n \treturn __builtin_popcount(gen_bits);\n }\n \n+static inline void\n+_process_deq_qes_vec_impl(struct dlb2_port *qm_port,\n+\t\t\t  struct rte_event *events,\n+\t\t\t  __m128i v_qe_3,\n+\t\t\t  __m128i v_qe_2,\n+\t\t\t  __m128i v_qe_1,\n+\t\t\t  __m128i v_qe_0,\n+\t\t\t  __m128i v_qe_meta,\n+\t\t\t  __m128i v_qe_status,\n+\t\t\t  uint32_t valid_events)\n+{\n+\t/* Look up the event QIDs, using the hardware QIDs to index the\n+\t * port's QID mapping.\n+\t *\n+\t * Each 
v_qe_[0-4] is just a 16-byte load of the whole QE. It is\n+\t * passed along in registers as the QE data is required later.\n+\t *\n+\t * v_qe_meta is an u32 unpack of all 4x QEs. Aka, it contains one\n+\t * 32-bit slice of each QE, so makes up a full SSE register. This\n+\t * allows parallel processing of 4x QEs in a single register.\n+\t */\n+\n+\t__m128i v_qid_done = {0};\n+\tint hw_qid0 = _mm_extract_epi8(v_qe_meta, 2);\n+\tint hw_qid1 = _mm_extract_epi8(v_qe_meta, 6);\n+\tint hw_qid2 = _mm_extract_epi8(v_qe_meta, 10);\n+\tint hw_qid3 = _mm_extract_epi8(v_qe_meta, 14);\n+\n+\tint ev_qid0 = qm_port->qid_mappings[hw_qid0];\n+\tint ev_qid1 = qm_port->qid_mappings[hw_qid1];\n+\tint ev_qid2 = qm_port->qid_mappings[hw_qid2];\n+\tint ev_qid3 = qm_port->qid_mappings[hw_qid3];\n+\n+\tv_qid_done = _mm_insert_epi8(v_qid_done, ev_qid0, 2);\n+\tv_qid_done = _mm_insert_epi8(v_qid_done, ev_qid1, 6);\n+\tv_qid_done = _mm_insert_epi8(v_qid_done, ev_qid2, 10);\n+\tv_qid_done = _mm_insert_epi8(v_qid_done, ev_qid3, 14);\n+\n+\t/* Schedule field remapping using byte shuffle\n+\t * - Full byte containing sched field handled here (op, rsvd are zero)\n+\t * - Note sanitizing the register requires two masking ANDs:\n+\t *   1) to strip prio/msg_type from byte for correct shuffle lookup\n+\t *   2) to strip any non-sched-field lanes from any results to OR later\n+\t * - Final byte result is >> 10 to another byte-lane inside the u32.\n+\t *   This makes the final combination OR easier to make the rte_event.\n+\t */\n+\t__m128i v_sched_done;\n+\t__m128i v_sched_bits;\n+\t{\n+\t\tstatic const uint8_t sched_type_map[16] = {\n+\t\t\t[DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,\n+\t\t\t[DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,\n+\t\t\t[DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,\n+\t\t\t[DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,\n+\t\t};\n+\t\tstatic const uint8_t sched_and_mask[16] = {\n+\t\t\t0x00, 0x00, 0x00, 0x03,\n+\t\t\t0x00, 0x00, 0x00, 0x03,\n+\t\t\t0x00, 0x00, 0x00, 0x03,\n+\t\t\t0x00, 0x00, 0x00, 0x03,\n+\t\t};\n+\t\tconst __m128i v_sched_map = _mm_loadu_si128(\n+\t\t\t\t\t     (const __m128i *)sched_type_map);\n+\t\t__m128i v_sched_mask = _mm_loadu_si128(\n+\t\t\t\t\t     (const __m128i *)&sched_and_mask);\n+\t\tv_sched_bits = _mm_and_si128(v_qe_meta, v_sched_mask);\n+\t\t__m128i v_sched_remapped = _mm_shuffle_epi8(v_sched_map,\n+\t\t\t\t\t\t\t    v_sched_bits);\n+\t\t__m128i v_preshift = _mm_and_si128(v_sched_remapped,\n+\t\t\t\t\t\t   v_sched_mask);\n+\t\tv_sched_done = _mm_srli_epi32(v_preshift, 10);\n+\t}\n+\n+\t/* Priority handling\n+\t * - QE provides 3 bits of priority\n+\t * - Shift << 3 to move to MSBs for byte-prio in rte_event\n+\t * - Mask bits to avoid pollution, leaving only 3 prio MSBs in reg\n+\t */\n+\t__m128i v_prio_done;\n+\t{\n+\t\tstatic const uint8_t prio_mask[16] = {\n+\t\t\t0x00, 0x00, 0x00, 0x07 << 5,\n+\t\t\t0x00, 0x00, 0x00, 0x07 << 5,\n+\t\t\t0x00, 0x00, 0x00, 0x07 << 5,\n+\t\t\t0x00, 0x00, 0x00, 0x07 << 5,\n+\t\t};\n+\t\t__m128i v_prio_mask  = _mm_loadu_si128(\n+\t\t\t\t\t\t(const __m128i *)prio_mask);\n+\t\t__m128i v_prio_shifted = _mm_slli_epi32(v_qe_meta, 3);\n+\t\tv_prio_done = _mm_and_si128(v_prio_shifted, v_prio_mask);\n+\t}\n+\n+\t/* Event Sub/Type handling:\n+\t * we want to keep the lower 12 bits of each QE. 
Shift up by 20 bits\n+\t * to get the sub/ev type data into rte_event location, clearing the\n+\t * lower 20 bits in the process.\n+\t */\n+\t__m128i v_types_done;\n+\t{\n+\t\tstatic const uint8_t event_mask[16] = {\n+\t\t\t0x0f, 0x00, 0x00, 0x00,\n+\t\t\t0x0f, 0x00, 0x00, 0x00,\n+\t\t\t0x0f, 0x00, 0x00, 0x00,\n+\t\t\t0x0f, 0x00, 0x00, 0x00,\n+\t\t};\n+\t\tstatic const uint8_t sub_event_mask[16] = {\n+\t\t\t0xff, 0x00, 0x00, 0x00,\n+\t\t\t0xff, 0x00, 0x00, 0x00,\n+\t\t\t0xff, 0x00, 0x00, 0x00,\n+\t\t\t0xff, 0x00, 0x00, 0x00,\n+\t\t};\n+\t\tstatic const uint8_t flow_mask[16] = {\n+\t\t\t0xff, 0xff, 0x00, 0x00,\n+\t\t\t0xff, 0xff, 0x00, 0x00,\n+\t\t\t0xff, 0xff, 0x00, 0x00,\n+\t\t\t0xff, 0xff, 0x00, 0x00,\n+\t\t};\n+\t\t__m128i v_event_mask  = _mm_loadu_si128(\n+\t\t\t\t\t(const __m128i *)event_mask);\n+\t\t__m128i v_sub_event_mask  = _mm_loadu_si128(\n+\t\t\t\t\t(const __m128i *)sub_event_mask);\n+\t\t__m128i v_flow_mask  = _mm_loadu_si128(\n+\t\t\t\t       (const __m128i *)flow_mask);\n+\t\t__m128i v_sub = _mm_srli_epi32(v_qe_meta, 8);\n+\t\tv_sub = _mm_and_si128(v_sub, v_sub_event_mask);\n+\t\t__m128i v_type = _mm_and_si128(v_qe_meta, v_event_mask);\n+\t\tv_type = _mm_slli_epi32(v_type, 8);\n+\t\tv_types_done = _mm_or_si128(v_type, v_sub);\n+\t\tv_types_done = _mm_slli_epi32(v_types_done, 20);\n+\t\t__m128i v_flow = _mm_and_si128(v_qe_status, v_flow_mask);\n+\t\tv_types_done = _mm_or_si128(v_types_done, v_flow);\n+\t}\n+\n+\t/* Combine QID, Sched and Prio fields, then Shift >> 8 bits to align\n+\t * with the rte_event, allowing unpacks to move/blend with payload.\n+\t */\n+\t__m128i v_q_s_p_done;\n+\t{\n+\t\t__m128i v_qid_sched = _mm_or_si128(v_qid_done, v_sched_done);\n+\t\t__m128i v_q_s_prio = _mm_or_si128(v_qid_sched, v_prio_done);\n+\t\tv_q_s_p_done = _mm_srli_epi32(v_q_s_prio, 8);\n+\t}\n+\n+\t__m128i v_unpk_ev_23, v_unpk_ev_01, v_ev_2, v_ev_3, v_ev_0, v_ev_1;\n+\n+\t/* Unpack evs into u64 metadata, then indiv events */\n+\tv_unpk_ev_23 = _mm_unpackhi_epi32(v_types_done, v_q_s_p_done);\n+\tv_unpk_ev_01 = _mm_unpacklo_epi32(v_types_done, v_q_s_p_done);\n+\n+\tswitch (valid_events) {\n+\tcase 4:\n+\t\tv_ev_3 = _mm_blend_epi16(v_unpk_ev_23, v_qe_3, 0x0F);\n+\t\tv_ev_3 = _mm_alignr_epi8(v_ev_3, v_ev_3, 8);\n+\t\t_mm_storeu_si128((__m128i *)&events[3], v_ev_3);\n+\t\t/* fallthrough */\n+\tcase 3:\n+\t\tv_ev_2 = _mm_unpacklo_epi64(v_unpk_ev_23, v_qe_2);\n+\t\t_mm_storeu_si128((__m128i *)&events[2], v_ev_2);\n+\t\t/* fallthrough */\n+\tcase 2:\n+\t\tv_ev_1 = _mm_blend_epi16(v_unpk_ev_01, v_qe_1, 0x0F);\n+\t\tv_ev_1 = _mm_alignr_epi8(v_ev_1, v_ev_1, 8);\n+\t\t_mm_storeu_si128((__m128i *)&events[1], v_ev_1);\n+\t\t/* fallthrough */\n+\tcase 1:\n+\t\tv_ev_0 = _mm_unpacklo_epi64(v_unpk_ev_01, v_qe_0);\n+\t\t_mm_storeu_si128((__m128i *)&events[0], v_ev_0);\n+\t}\n+}\n+\n+static __rte_always_inline int\n+dlb2_recv_qe_sparse_vec(struct dlb2_port *qm_port, void *events,\n+\t\t\tuint32_t max_events)\n+{\n+\t/* Using unmasked idx for perf, and masking manually */\n+\tuint16_t idx = qm_port->cq_idx_unmasked;\n+\tvolatile struct dlb2_dequeue_qe *cq_addr;\n+\n+\tcq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;\n+\n+\tuintptr_t qe_ptr_3 = (uintptr_t)&cq_addr[(idx + 12) &\n+\t\t\t\t\t\t qm_port->cq_depth_mask];\n+\tuintptr_t qe_ptr_2 = (uintptr_t)&cq_addr[(idx +  8) &\n+\t\t\t\t\t\t qm_port->cq_depth_mask];\n+\tuintptr_t qe_ptr_1 = (uintptr_t)&cq_addr[(idx +  4) &\n+\t\t\t\t\t\t qm_port->cq_depth_mask];\n+\tuintptr_t qe_ptr_0 = (uintptr_t)&cq_addr[(idx +  0) &\n+\t\t\t\t\t\t 
qm_port->cq_depth_mask];\n+\n+\t/* Load QEs from CQ: use compiler barriers to avoid load reordering */\n+\t__m128i v_qe_3 = _mm_loadu_si128((const __m128i *)qe_ptr_3);\n+\trte_compiler_barrier();\n+\t__m128i v_qe_2 = _mm_loadu_si128((const __m128i *)qe_ptr_2);\n+\trte_compiler_barrier();\n+\t__m128i v_qe_1 = _mm_loadu_si128((const __m128i *)qe_ptr_1);\n+\trte_compiler_barrier();\n+\t__m128i v_qe_0 = _mm_loadu_si128((const __m128i *)qe_ptr_0);\n+\n+\t/* Generate the pkt_shuffle mask;\n+\t * - Avoids load in otherwise load-heavy section of code\n+\t * - Moves bytes 3,7,11,15 (gen bit bytes) to LSB bytes in XMM\n+\t */\n+\tconst uint32_t stat_shuf_bytes = (15 << 24) | (11 << 16) | (7 << 8) | 3;\n+\t__m128i v_zeros = _mm_setzero_si128();\n+\t__m128i v_ffff = _mm_cmpeq_epi8(v_zeros, v_zeros);\n+\t__m128i v_stat_shuf_mask = _mm_insert_epi32(v_ffff, stat_shuf_bytes, 0);\n+\n+\t/* Extract u32 components required from the QE\n+\t * - QE[64 to 95 ] for metadata (qid, sched, prio, event type, ...)\n+\t * - QE[96 to 127] for status (cq gen bit, error)\n+\t *\n+\t * Note that stage 1 of the unpacking is re-used for both u32 extracts\n+\t */\n+\t__m128i v_qe_02 = _mm_unpackhi_epi32(v_qe_0, v_qe_2);\n+\t__m128i v_qe_13 = _mm_unpackhi_epi32(v_qe_1, v_qe_3);\n+\t__m128i v_qe_status = _mm_unpackhi_epi32(v_qe_02, v_qe_13);\n+\t__m128i v_qe_meta   = _mm_unpacklo_epi32(v_qe_02, v_qe_13);\n+\n+\t/* Status byte (gen_bit, error) handling:\n+\t * - Shuffle to lanes 0,1,2,3, clear all others\n+\t * - Shift right by 7 for gen bit to MSB, movemask to scalar\n+\t * - Shift right by 2 for error bit to MSB, movemask to scalar\n+\t */\n+\t__m128i v_qe_shuffled = _mm_shuffle_epi8(v_qe_status, v_stat_shuf_mask);\n+\t__m128i v_qes_shift_gen_bit = _mm_slli_epi32(v_qe_shuffled, 7);\n+\tint32_t qe_gen_bits = _mm_movemask_epi8(v_qes_shift_gen_bit) & 0xf;\n+\n+\t/* Expected vs Reality of QE Gen bits\n+\t * - cq_rolling_mask provides expected bits\n+\t * - QE loads, unpacks/shuffle and movemask provides reality\n+\t * - XOR of the two gives bitmask of new packets\n+\t * - POPCNT to get the number of new events\n+\t */\n+\tuint64_t rolling = qm_port->cq_rolling_mask & 0xF;\n+\tuint64_t qe_xor_bits = (qe_gen_bits ^ rolling);\n+\tuint32_t count_new = __builtin_popcount(qe_xor_bits);\n+\tcount_new = RTE_MIN(count_new, max_events);\n+\tif (!count_new)\n+\t\treturn 0;\n+\n+\t/* emulate a 128 bit rotate using 2x 64-bit numbers and bit-shifts */\n+\n+\tuint64_t m_rshift = qm_port->cq_rolling_mask >> count_new;\n+\tuint64_t m_lshift = qm_port->cq_rolling_mask << (64 - count_new);\n+\tuint64_t m2_rshift = qm_port->cq_rolling_mask_2 >> count_new;\n+\tuint64_t m2_lshift = qm_port->cq_rolling_mask_2 << (64 - count_new);\n+\n+\t/* shifted out of m2 into MSB of m */\n+\tqm_port->cq_rolling_mask = (m_rshift | m2_lshift);\n+\n+\t/* shifted out of m \"looped back\" into MSB of m2 */\n+\tqm_port->cq_rolling_mask_2 = (m2_rshift | m_lshift);\n+\n+\t/* Prefetch the next QEs - should run as IPC instead of cycles */\n+\trte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);\n+\trte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);\n+\trte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);\n+\trte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);\n+\n+\t/* Convert QEs from XMM regs to events and store events directly */\n+\t_process_deq_qes_vec_impl(qm_port, events, v_qe_3, v_qe_2, v_qe_1,\n+\t\t\t\t  v_qe_0, v_qe_meta, v_qe_status, count_new);\n+\n+\treturn count_new;\n+}\n+\n static inline void\n dlb2_inc_cq_idx(struct 
dlb2_port *qm_port, int cnt)\n {\n@@ -3469,25 +3811,15 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,\n \t\t       uint16_t max_num,\n \t\t       uint64_t dequeue_timeout_ticks)\n {\n-\tuint64_t timeout;\n \tuint64_t start_ticks = 0ULL;\n \tstruct dlb2_port *qm_port;\n \tint num = 0;\n+\tbool use_scalar;\n+\tuint64_t timeout;\n \n \tqm_port = &ev_port->qm_port;\n+\tuse_scalar = qm_port->use_scalar;\n \n-\t/* We have a special implementation for waiting. Wait can be:\n-\t * 1) no waiting at all\n-\t * 2) busy poll only\n-\t * 3) wait for interrupt. If wakeup and poll time\n-\t * has expired, then return to caller\n-\t * 4) umonitor/umwait repeatedly up to poll time\n-\t */\n-\n-\t/* If configured for per dequeue wait, then use wait value provided\n-\t * to this API. Otherwise we must use the global\n-\t * value from eventdev config time.\n-\t */\n \tif (!dlb2->global_dequeue_wait)\n \t\ttimeout = dequeue_timeout_ticks;\n \telse\n@@ -3495,35 +3827,41 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,\n \n \tstart_ticks = rte_get_timer_cycles();\n \n+\tuse_scalar = use_scalar || (max_num & 0x3);\n+\n \twhile (num < max_num) {\n \t\tstruct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];\n \t\tint num_avail;\n-\n-\t\t/* Copy up to 4 QEs from the current cache line into qes */\n-\t\tnum_avail = dlb2_recv_qe_sparse(qm_port, qes);\n-\n-\t\t/* But don't process more than the user requested */\n-\t\tnum_avail = RTE_MIN(num_avail, max_num - num);\n-\n-\t\tdlb2_inc_cq_idx(qm_port, num_avail << 2);\n-\n-\t\tif (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)\n-\t\t\tnum += dlb2_process_dequeue_four_qes(ev_port,\n-\t\t\t\t\t\t\t      qm_port,\n-\t\t\t\t\t\t\t      &events[num],\n-\t\t\t\t\t\t\t      &qes[0]);\n-\t\telse if (num_avail)\n-\t\t\tnum += dlb2_process_dequeue_qes(ev_port,\n-\t\t\t\t\t\t\t qm_port,\n-\t\t\t\t\t\t\t &events[num],\n-\t\t\t\t\t\t\t &qes[0],\n-\t\t\t\t\t\t\t num_avail);\n-\t\telse if ((timeout == 0) || (num > 0))\n-\t\t\t/* Not waiting in any form, or 1+ events received? 
*/\n-\t\t\tbreak;\n-\t\telse if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,\n-\t\t\t\t\t   timeout, start_ticks))\n-\t\t\tbreak;\n+\t\tif (use_scalar) {\n+\t\t\tnum_avail = dlb2_recv_qe_sparse(qm_port, qes);\n+\t\t\tnum_avail = RTE_MIN(num_avail, max_num - num);\n+\t\t\tdlb2_inc_cq_idx(qm_port, num_avail << 2);\n+\t\t\tif (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)\n+\t\t\t\tnum += dlb2_process_dequeue_four_qes(ev_port,\n+\t\t\t\t\t\t\t\t  qm_port,\n+\t\t\t\t\t\t\t\t  &events[num],\n+\t\t\t\t\t\t\t\t  &qes[0]);\n+\t\t\telse if (num_avail)\n+\t\t\t\tnum += dlb2_process_dequeue_qes(ev_port,\n+\t\t\t\t\t\t\t\tqm_port,\n+\t\t\t\t\t\t\t\t&events[num],\n+\t\t\t\t\t\t\t\t&qes[0],\n+\t\t\t\t\t\t\t\tnum_avail);\n+\t\t} else { /* !use_scalar */\n+\t\t\tnum_avail = dlb2_recv_qe_sparse_vec(qm_port,\n+\t\t\t\t\t\t\t    &events[num],\n+\t\t\t\t\t\t\t    max_num - num);\n+\t\t\tnum += num_avail;\n+\t\t\tdlb2_inc_cq_idx(qm_port, num_avail << 2);\n+\t\t\tDLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_avail);\n+\t\t}\n+\t\tif (!num_avail) {\n+\t\t\tif (num > 0)\n+\t\t\t\tbreak;\n+\t\t\telse if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,\n+\t\t\t\t\t\t   timeout, start_ticks))\n+\t\t\t\tbreak;\n+\t\t}\n \t}\n \n \tqm_port->owed_tokens += num;\n@@ -4083,6 +4421,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev,\n \tdlb2->poll_interval = dlb2_args->poll_interval;\n \tdlb2->sw_credit_quanta = dlb2_args->sw_credit_quanta;\n \tdlb2->default_depth_thresh = dlb2_args->default_depth_thresh;\n+\tdlb2->vector_opts_disabled = dlb2_args->vector_opts_disabled;\n \n \terr = dlb2_iface_open(&dlb2->qm_instance, name);\n \tif (err < 0) {\n@@ -4186,6 +4525,7 @@ dlb2_parse_params(const char *params,\n \t\t\t\t\t     DLB2_POLL_INTERVAL_ARG,\n \t\t\t\t\t     DLB2_SW_CREDIT_QUANTA_ARG,\n \t\t\t\t\t     DLB2_DEPTH_THRESH_ARG,\n+\t\t\t\t\t     DLB2_VECTOR_OPTS_DISAB_ARG,\n \t\t\t\t\t     NULL };\n \n \tif (params != NULL && params[0] != '\\0') {\n@@ -4299,6 +4639,17 @@ dlb2_parse_params(const char *params,\n \t\t\t\treturn ret;\n \t\t\t}\n \n+\t\t\tret = rte_kvargs_process(kvlist,\n+\t\t\t\t\tDLB2_VECTOR_OPTS_DISAB_ARG,\n+\t\t\t\t\tset_vector_opts_disab,\n+\t\t\t\t\t&dlb2_args->vector_opts_disabled);\n+\t\t\tif (ret != 0) {\n+\t\t\t\tDLB2_LOG_ERR(\"%s: Error parsing vector opts disabled\",\n+\t\t\t\t\t     name);\n+\t\t\t\trte_kvargs_free(kvlist);\n+\t\t\t\treturn ret;\n+\t\t\t}\n+\n \t\t\trte_kvargs_free(kvlist);\n \t\t}\n \t}\ndiff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h\nindex cf120c92d..3140764a5 100644\n--- a/drivers/event/dlb2/dlb2_priv.h\n+++ b/drivers/event/dlb2/dlb2_priv.h\n@@ -38,6 +38,7 @@\n #define DLB2_POLL_INTERVAL_ARG \"poll_interval\"\n #define DLB2_SW_CREDIT_QUANTA_ARG \"sw_credit_quanta\"\n #define DLB2_DEPTH_THRESH_ARG \"default_depth_thresh\"\n+#define DLB2_VECTOR_OPTS_DISAB_ARG \"vector_opts_disable\"\n \n /* Begin HW related defines and structs */\n \n@@ -205,9 +206,9 @@ enum dlb2_enqueue_type {\n /* hw-specific format - do not change */\n \n struct dlb2_event_type {\n-\tuint8_t major:4;\n-\tuint8_t unused:4;\n-\tuint8_t sub;\n+\tuint16_t major:4;\n+\tuint16_t unused:4;\n+\tuint16_t sub:8;\n };\n \n union dlb2_opaque_data {\n@@ -351,6 +352,12 @@ struct dlb2_port {\n \tuint16_t cq_idx_unmasked;\n \tuint16_t cq_depth_mask;\n \tuint16_t gen_bit_shift;\n+\tuint64_t cq_rolling_mask; /*\n+\t\t\t\t   * rotate to always have right expected\n+\t\t\t\t   * gen bits\n+\t\t\t\t   */\n+\tuint64_t cq_rolling_mask_2;\n+\tvoid *cq_addr_cached; /* avoid multiple refs */\n \tenum 
dlb2_port_state state;\n \tenum dlb2_configuration_state config_state;\n \tint num_mapped_qids;\n@@ -360,6 +367,7 @@ struct dlb2_port {\n \tstruct dlb2_cq_pop_qe *consume_qe;\n \tstruct dlb2_eventdev *dlb2; /* back ptr */\n \tstruct dlb2_eventdev_port *ev_port; /* back ptr */\n+\tbool use_scalar; /* force usage of scalar code */\n };\n \n /* Per-process per-port mmio and memory pointers */\n@@ -513,9 +521,9 @@ struct dlb2_queue {\n \tuint32_t num_qid_inflights; /* User config */\n \tuint32_t num_atm_inflights; /* User config */\n \tenum dlb2_configuration_state config_state;\n-\tint sched_type; /* LB queue only */\n-\tuint32_t id;\n-\tbool is_directed;\n+\tint  sched_type; /* LB queue only */\n+\tuint8_t id;\n+\tbool\t is_directed;\n };\n \n struct dlb2_eventdev_queue {\n@@ -558,6 +566,7 @@ struct dlb2_eventdev {\n \tuint32_t new_event_limit;\n \tint max_num_events_override;\n \tint num_dir_credits_override;\n+\tbool vector_opts_disabled;\n \tvolatile enum dlb2_run_state run_state;\n \tuint16_t num_dir_queues; /* total num of evdev dir queues requested */\n \tunion {\n@@ -617,6 +626,7 @@ struct dlb2_devargs {\n \tint poll_interval;\n \tint sw_credit_quanta;\n \tint default_depth_thresh;\n+\tbool vector_opts_disabled;\n };\n \n /* End Eventdev related defines and structs */\n",
    "prefixes": [
        "v3",
        "1/1"
    ]
}
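
The diff above introduces a rolling completion-queue gen-bit mask and advances it with a 128-bit rotate emulated from two 64-bit words. The following is a small Python model of that arithmetic, illustrative only: it mirrors dlb2_hw_cq_bitmask_init and the rotate in dlb2_recv_qe_sparse_vec from the patch, and makes no claim to be the driver's actual code path.

U64 = (1 << 64) - 1

def cq_bitmask_init(cq_depth):
    # Expected gen-bit pattern: alternating cq_depth-wide blocks of 0s and 1s
    # (mirrors dlb2_hw_cq_bitmask_init; cq_depth is a power of two <= 64,
    # larger or non-power-of-two depths use the scalar path in the patch).
    if cq_depth == 64:
        return 0, U64                      # special case, as in the patch
    mask = 0
    for i in range(0, 64, cq_depth * 2):
        mask |= ((1 << cq_depth) - 1) << (i + cq_depth)
    return mask & U64, mask & U64          # cq_rolling_mask, cq_rolling_mask_2

def rotate_rolling_mask(m, m2, count_new):
    # 128-bit rotate by count_new (1..63), emulated with two 64-bit halves,
    # as done after counting new QEs in dlb2_recv_qe_sparse_vec.
    m_rshift = m >> count_new
    m_lshift = (m << (64 - count_new)) & U64
    m2_rshift = m2 >> count_new
    m2_lshift = (m2 << (64 - count_new)) & U64
    return (m_rshift | m2_lshift), (m2_rshift | m_lshift)

if __name__ == "__main__":
    m, m2 = cq_bitmask_init(8)             # e.g. an 8-entry CQ
    m, m2 = rotate_rolling_mask(m, m2, 4)  # 4 new QEs observed this poll
    print(hex(m), hex(m2))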