get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.
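
Any HTTP client can retrieve this resource; the GET exchange below shows exactly what the endpoint returns. Here is a minimal sketch in C using libcurl (an assumed dependency, not part of Patchwork itself); the endpoint typically serves JSON to non-browser clients, while ?format=api renders this browsable view. Updating a patch via PUT or PATCH additionally requires an authenticated account with sufficient permissions, which is outside the scope of this sketch.

/* Build with: cc get_patch.c -lcurl */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
	CURL *curl;
	CURLcode res;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	curl = curl_easy_init();
	if (curl == NULL)
		return 1;

	curl_easy_setopt(curl, CURLOPT_URL,
			 "http://patches.dpdk.org/api/patches/139126/");
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
	/* libcurl's default write callback prints the response body to stdout. */
	res = curl_easy_perform(curl);
	if (res != CURLE_OK)
		fprintf(stderr, "GET failed: %s\n", curl_easy_strerror(res));

	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return res == CURLE_OK ? 0 : 1;
}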

GET /api/patches/139126/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 139126,
    "url": "http://patches.dpdk.org/api/patches/139126/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20240405134831.47996-1-mb@smartsharesystems.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20240405134831.47996-1-mb@smartsharesystems.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20240405134831.47996-1-mb@smartsharesystems.com",
    "date": "2024-04-05T13:48:31",
    "name": "[v4] eal/x86: improve rte_memcpy const size 16 performance",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "800ff41ca305fbd3b03dd22e4a7e25c81ed0fb51",
    "submitter": {
        "id": 591,
        "url": "http://patches.dpdk.org/api/people/591/?format=api",
        "name": "Morten Brørup",
        "email": "mb@smartsharesystems.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20240405134831.47996-1-mb@smartsharesystems.com/mbox/",
    "series": [
        {
            "id": 31688,
            "url": "http://patches.dpdk.org/api/series/31688/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=31688",
            "date": "2024-04-05T13:48:31",
            "name": "[v4] eal/x86: improve rte_memcpy const size 16 performance",
            "version": 4,
            "mbox": "http://patches.dpdk.org/series/31688/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/139126/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/139126/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 0CDCC43E0C;\n\tFri,  5 Apr 2024 15:48:37 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8F612402CE;\n\tFri,  5 Apr 2024 15:48:36 +0200 (CEST)",
            "from dkmailrelay1.smartsharesystems.com\n (smartserver.smartsharesystems.com [77.243.40.215])\n by mails.dpdk.org (Postfix) with ESMTP id D1D964028B\n for <dev@dpdk.org>; Fri,  5 Apr 2024 15:48:34 +0200 (CEST)",
            "from smartserver.smartsharesystems.com\n (smartserver.smartsharesys.local [192.168.4.10])\n by dkmailrelay1.smartsharesystems.com (Postfix) with ESMTP id AC99320C85;\n Fri,  5 Apr 2024 15:48:33 +0200 (CEST)",
            "from dkrd2.smartsharesys.local ([192.168.4.12]) by\n smartserver.smartsharesystems.com with Microsoft SMTPSVC(6.0.3790.4675);\n Fri, 5 Apr 2024 15:48:33 +0200"
        ],
        "From": "=?utf-8?q?Morten_Br=C3=B8rup?= <mb@smartsharesystems.com>",
        "To": "bruce.richardson@intel.com, konstantin.v.ananyev@yandex.ru,\n stephen@networkplumber.org",
        "Cc": "mattias.ronnblom@ericsson.com, dev@dpdk.org, =?utf-8?q?Morten_Br=C3=B8ru?=\n\t=?utf-8?q?p?= <mb@smartsharesystems.com>",
        "Subject": "[PATCH v4] eal/x86: improve rte_memcpy const size 16 performance",
        "Date": "Fri,  5 Apr 2024 15:48:31 +0200",
        "Message-Id": "<20240405134831.47996-1-mb@smartsharesystems.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20240302234812.9137-1-mb@smartsharesystems.com>",
        "References": "<20240302234812.9137-1-mb@smartsharesystems.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-OriginalArrivalTime": "05 Apr 2024 13:48:33.0292 (UTC)\n FILETIME=[F3365CC0:01DA875F]",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "When the rte_memcpy() size is 16, the same 16 bytes are copied twice.\nIn the case where the size is known to be 16 at build tine, omit the\nduplicate copy.\n\nReduced the amount of effectively copy-pasted code by using #ifdef\ninside functions instead of outside functions.\n\nSuggested-by: Stephen Hemminger <stephen@networkplumber.org>\nSigned-off-by: Morten Brørup <mb@smartsharesystems.com>\nAcked-by: Bruce Richardson <bruce.richardson@intel.com>\n---\nv4:\n* There are no problems compiling AVX2, only AVX. (Bruce Richardson)\nv3:\n* AVX2 is a superset of AVX;\n  for a block of AVX code, testing for AVX suffices. (Bruce Richardson)\n* Define RTE_MEMCPY_AVX if AVX is available, to avoid copy-pasting the\n  check for older GCC version. (Bruce Richardson)\nv2:\n* For GCC, version 11 is required for proper AVX handling;\n  if older GCC version, treat AVX as SSE.\n  Clang does not have this issue.\n  Note: Original code always treated AVX as SSE, regardless of compiler.\n* Do not add copyright. (Stephen Hemminger)\n---\n lib/eal/x86/include/rte_memcpy.h | 239 +++++++++----------------------\n 1 file changed, 64 insertions(+), 175 deletions(-)",
    "diff": "diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h\nindex 72a92290e0..d687aa7756 100644\n--- a/lib/eal/x86/include/rte_memcpy.h\n+++ b/lib/eal/x86/include/rte_memcpy.h\n@@ -27,6 +27,16 @@ extern \"C\" {\n #pragma GCC diagnostic ignored \"-Wstringop-overflow\"\n #endif\n \n+/*\n+ * GCC older than version 11 doesn't compile AVX properly, so use SSE instead.\n+ * There are no problems with AVX2.\n+ */\n+#if defined __AVX2__\n+#define RTE_MEMCPY_AVX\n+#elif defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 110000))\n+#define RTE_MEMCPY_AVX\n+#endif\n+\n /**\n  * Copy bytes from one location to another. The locations must not overlap.\n  *\n@@ -91,14 +101,6 @@ rte_mov15_or_less(void *dst, const void *src, size_t n)\n \treturn ret;\n }\n \n-#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512\n-\n-#define ALIGNMENT_MASK 0x3F\n-\n-/**\n- * AVX512 implementation below\n- */\n-\n /**\n  * Copy 16 bytes from one location to another,\n  * locations should not overlap.\n@@ -119,10 +121,15 @@ rte_mov16(uint8_t *dst, const uint8_t *src)\n static __rte_always_inline void\n rte_mov32(uint8_t *dst, const uint8_t *src)\n {\n+#if defined RTE_MEMCPY_AVX\n \t__m256i ymm0;\n \n \tymm0 = _mm256_loadu_si256((const __m256i *)src);\n \t_mm256_storeu_si256((__m256i *)dst, ymm0);\n+#else /* SSE implementation */\n+\trte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);\n+\trte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);\n+#endif\n }\n \n /**\n@@ -132,10 +139,15 @@ rte_mov32(uint8_t *dst, const uint8_t *src)\n static __rte_always_inline void\n rte_mov64(uint8_t *dst, const uint8_t *src)\n {\n+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512\n \t__m512i zmm0;\n \n \tzmm0 = _mm512_loadu_si512((const void *)src);\n \t_mm512_storeu_si512((void *)dst, zmm0);\n+#else /* AVX2, AVX & SSE implementation */\n+\trte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);\n+\trte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);\n+#endif\n }\n \n /**\n@@ -156,12 +168,18 @@ rte_mov128(uint8_t *dst, const uint8_t *src)\n static __rte_always_inline void\n rte_mov256(uint8_t *dst, const uint8_t *src)\n {\n-\trte_mov64(dst + 0 * 64, src + 0 * 64);\n-\trte_mov64(dst + 1 * 64, src + 1 * 64);\n-\trte_mov64(dst + 2 * 64, src + 2 * 64);\n-\trte_mov64(dst + 3 * 64, src + 3 * 64);\n+\trte_mov128(dst + 0 * 128, src + 0 * 128);\n+\trte_mov128(dst + 1 * 128, src + 1 * 128);\n }\n \n+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512\n+\n+/**\n+ * AVX512 implementation below\n+ */\n+\n+#define ALIGNMENT_MASK 0x3F\n+\n /**\n  * Copy 128-byte blocks from one location to another,\n  * locations should not overlap.\n@@ -231,12 +249,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)\n \t/**\n \t * Fast way when copy size doesn't exceed 512 bytes\n \t */\n+\tif (__builtin_constant_p(n) && n == 32) {\n+\t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n+\t\treturn ret;\n+\t}\n \tif (n <= 32) {\n \t\trte_mov16((uint8_t *)dst, (const uint8_t *)src);\n+\t\tif (__builtin_constant_p(n) && n == 16)\n+\t\t\treturn ret; /* avoid (harmless) duplicate copy */\n \t\trte_mov16((uint8_t *)dst - 16 + n,\n \t\t\t\t  (const uint8_t *)src - 16 + n);\n \t\treturn ret;\n \t}\n+\tif (__builtin_constant_p(n) && n == 64) {\n+\t\trte_mov64((uint8_t *)dst, (const uint8_t *)src);\n+\t\treturn ret;\n+\t}\n \tif (n <= 64) {\n \t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n \t\trte_mov32((uint8_t *)dst - 32 + n,\n@@ -313,80 +341,13 @@ 
rte_memcpy_generic(void *dst, const void *src, size_t n)\n \tgoto COPY_BLOCK_128_BACK63;\n }\n \n-#elif defined __AVX2__\n-\n-#define ALIGNMENT_MASK 0x1F\n-\n-/**\n- * AVX2 implementation below\n- */\n-\n-/**\n- * Copy 16 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov16(uint8_t *dst, const uint8_t *src)\n-{\n-\t__m128i xmm0;\n-\n-\txmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);\n-\t_mm_storeu_si128((__m128i *)(void *)dst, xmm0);\n-}\n-\n-/**\n- * Copy 32 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov32(uint8_t *dst, const uint8_t *src)\n-{\n-\t__m256i ymm0;\n-\n-\tymm0 = _mm256_loadu_si256((const __m256i *)(const void *)src);\n-\t_mm256_storeu_si256((__m256i *)(void *)dst, ymm0);\n-}\n+#elif defined RTE_MEMCPY_AVX\n \n /**\n- * Copy 64 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov64(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);\n-\trte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);\n-}\n-\n-/**\n- * Copy 128 bytes from one location to another,\n- * locations should not overlap.\n+ * AVX implementation below\n  */\n-static __rte_always_inline void\n-rte_mov128(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);\n-\trte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);\n-\trte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);\n-\trte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);\n-}\n \n-/**\n- * Copy 256 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov256(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);\n-\trte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);\n-\trte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);\n-\trte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);\n-\trte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);\n-\trte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);\n-\trte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);\n-\trte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);\n-}\n+#define ALIGNMENT_MASK 0x1F\n \n /**\n  * Copy 128-byte blocks from one location to another,\n@@ -437,15 +398,14 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)\n \t/**\n \t * Fast way when copy size doesn't exceed 256 bytes\n \t */\n-\tif (n <= 32) {\n-\t\trte_mov16((uint8_t *)dst, (const uint8_t *)src);\n-\t\trte_mov16((uint8_t *)dst - 16 + n,\n-\t\t\t\t(const uint8_t *)src - 16 + n);\n+\tif (__builtin_constant_p(n) && n == 32) {\n+\t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n \t\treturn ret;\n \t}\n-\tif (n <= 48) {\n+\tif (n <= 32) {\n \t\trte_mov16((uint8_t *)dst, (const uint8_t *)src);\n-\t\trte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);\n+\t\tif (__builtin_constant_p(n) && n == 16)\n+\t\t\treturn ret; /* avoid (harmless) duplicate copy */\n \t\trte_mov16((uint8_t *)dst - 16 + n,\n \t\t\t\t(const uint8_t *)src - 16 + n);\n \t\treturn ret;\n@@ -513,90 +473,11 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)\n \n #else /* __AVX512F__ */\n \n-#define ALIGNMENT_MASK 0x0F\n-\n-/**\n- * SSE & AVX implementation 
below\n- */\n-\n-/**\n- * Copy 16 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov16(uint8_t *dst, const uint8_t *src)\n-{\n-\t__m128i xmm0;\n-\n-\txmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);\n-\t_mm_storeu_si128((__m128i *)(void *)dst, xmm0);\n-}\n-\n-/**\n- * Copy 32 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov32(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);\n-\trte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);\n-}\n-\n /**\n- * Copy 64 bytes from one location to another,\n- * locations should not overlap.\n+ * SSE implementation below\n  */\n-static __rte_always_inline void\n-rte_mov64(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);\n-\trte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);\n-\trte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);\n-\trte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);\n-}\n \n-/**\n- * Copy 128 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static __rte_always_inline void\n-rte_mov128(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);\n-\trte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);\n-\trte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);\n-\trte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);\n-\trte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);\n-\trte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);\n-\trte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);\n-\trte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);\n-}\n-\n-/**\n- * Copy 256 bytes from one location to another,\n- * locations should not overlap.\n- */\n-static inline void\n-rte_mov256(uint8_t *dst, const uint8_t *src)\n-{\n-\trte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);\n-\trte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);\n-\trte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);\n-\trte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);\n-\trte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);\n-\trte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);\n-\trte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);\n-\trte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);\n-\trte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);\n-\trte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);\n-\trte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);\n-\trte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);\n-\trte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);\n-\trte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);\n-\trte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);\n-\trte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);\n-}\n+#define ALIGNMENT_MASK 0x0F\n \n /**\n  * Macro for copying unaligned block from one location to another with constant load offset,\n@@ -712,17 +593,15 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)\n \t */\n \tif (n <= 32) {\n \t\trte_mov16((uint8_t *)dst, (const uint8_t 
*)src);\n-\t\trte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);\n-\t\treturn ret;\n-\t}\n-\tif (n <= 48) {\n-\t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n+\t\tif (__builtin_constant_p(n) && n == 16)\n+\t\t\treturn ret; /* avoid (harmless) duplicate copy */\n \t\trte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);\n \t\treturn ret;\n \t}\n \tif (n <= 64) {\n \t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n-\t\trte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);\n+\t\tif (n > 48)\n+\t\t\trte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);\n \t\trte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);\n \t\treturn ret;\n \t}\n@@ -828,8 +707,14 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)\n \t}\n \n \t/* Copy 16 <= size <= 32 bytes */\n+\tif (__builtin_constant_p(n) && n == 32) {\n+\t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n+\t\treturn ret;\n+\t}\n \tif (n <= 32) {\n \t\trte_mov16((uint8_t *)dst, (const uint8_t *)src);\n+\t\tif (__builtin_constant_p(n) && n == 16)\n+\t\t\treturn ret; /* avoid (harmless) duplicate copy */\n \t\trte_mov16((uint8_t *)dst - 16 + n,\n \t\t\t\t(const uint8_t *)src - 16 + n);\n \n@@ -837,6 +722,10 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)\n \t}\n \n \t/* Copy 32 < size <= 64 bytes */\n+\tif (__builtin_constant_p(n) && n == 64) {\n+\t\trte_mov64((uint8_t *)dst, (const uint8_t *)src);\n+\t\treturn ret;\n+\t}\n \tif (n <= 64) {\n \t\trte_mov32((uint8_t *)dst, (const uint8_t *)src);\n \t\trte_mov32((uint8_t *)dst - 32 + n,\n",
    "prefixes": [
        "v4"
    ]
}
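
The optimization in this patch's diff is small but easy to miss: in the generic small-copy path, any n <= 32 is handled by copying the first 16 bytes and then the last 16 bytes at offset n - 16, so when n is exactly 16 both moves cover the same bytes. The patch guards the second move with __builtin_constant_p(n), letting the compiler drop it whenever the size is a build-time constant 16. A standalone sketch of that pattern follows (hypothetical names, not the DPDK code itself; memcpy stands in for the SSE/AVX 16-byte move; __builtin_constant_p is a GCC/Clang builtin):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the 16-byte vector move (rte_mov16 in the real header). */
static inline void
mov16(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 16);
}

/* Hypothetical small-copy path for 16 <= n <= 32, mirroring the patch. */
static inline void *
copy_16_to_32(void *dst, const void *src, size_t n)
{
	mov16((uint8_t *)dst, (const uint8_t *)src);
	if (__builtin_constant_p(n) && n == 16)
		return dst; /* size known at build time: skip the duplicate copy */
	/* Overlapping tail move handles any 17..32-byte size. */
	mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	return dst;
}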