[v8] eal/x86: improve rte_memcpy const size 16 performance

Message ID 20240530154100.25811-1-mb@smartsharesystems.com (mailing list archive)
State Superseded
Delegated to: David Marchand
Series [v8] eal/x86: improve rte_memcpy const size 16 performance

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/loongarch-compilation success Compilation OK
ci/loongarch-unit-testing success Unit Testing PASS
ci/github-robot: build success github build: passed
ci/iol-testing fail build patch failure
ci/Intel-compilation fail Compilation issues

Commit Message

Morten Brørup May 30, 2024, 3:41 p.m. UTC
  When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
In the case where the size is known to be 16 at build time, omit the
duplicate copy.

Reduced the amount of effectively copy-pasted code by using #ifdef
inside functions instead of outside functions.
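
For illustration, a condensed sketch of the changed n <= 32 fast path, taken from
the diff below; the early return is the point of the fix:

	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		/*
		 * For a build-time constant n == 16, the second rte_mov16()
		 * below would copy the exact same 16 bytes again
		 * (dst - 16 + n == dst), so return early instead.
		 */
		if (__rte_constant(n) && n == 16)
			return ret; /* avoid (harmless) duplicate copy */
		rte_mov16((uint8_t *)dst - 16 + n,
				(const uint8_t *)src - 16 + n);
		return ret;
	}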

Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
Depends-on: series-31578 ("provide toolchain abstracted __builtin_constant_p")

v8:
* Keep trying to fix that CI does not understand the dependency...
  Depend on series instead of patch. Github only understands series.
* Fix typo in patch description.
v7:
* Keep trying to fix that CI does not understand the dependency...
  Depend on patch instead of series.
  Move dependency out of the patch description itself, and down to the
  version log.
v6:
* Trying to fix CI not understanding dependency...
  Don't wrap dependency line.
v5:
* Fix for building with MSVC:
  Use __rte_constant() instead of __builtin_constant_p().
  Add dependency on patch providing __rte_constant().
v4:
* There are no problems compiling AVX2, only AVX. (Bruce Richardson)
v3:
* AVX2 is a superset of AVX;
  for a block of AVX code, testing for AVX suffices. (Bruce Richardson)
* Define RTE_MEMCPY_AVX if AVX is available, to avoid copy-pasting the
  check for older GCC version. (Bruce Richardson)
v2:
* For GCC, version 11 is required for proper AVX handling;
  if older GCC version, treat AVX as SSE.
  Clang does not have this issue.
  Note: Original code always treated AVX as SSE, regardless of compiler.
* Do not add copyright. (Stephen Hemminger)
---
 lib/eal/x86/include/rte_memcpy.h | 239 +++++++++----------------------
 1 file changed, 64 insertions(+), 175 deletions(-)
  

Comments

Morten Brørup June 10, 2024, 9:05 a.m. UTC | #1
PING for review.

The CI failures can be ignored: Most of the CI doesn't support the Depends-on tag, and this patch uses __rte_constant(), provided by Tyler's patch series [1].

[1]: https://inbox.dpdk.org/dev/1710970416-27841-1-git-send-email-roretzla@linux.microsoft.com/

-Morten
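
For context, the dependency provides a toolchain-abstracted replacement for
__builtin_constant_p(). A rough sketch of its expected shape (the authoritative
definition lives in Tyler's series, not here):

	/* Sketch only; the real macro comes from the dependency series. */
	#ifdef RTE_TOOLCHAIN_MSVC
	#define __rte_constant(e) 0                       /* MSVC has no __builtin_constant_p() */
	#else
	#define __rte_constant(e) __builtin_constant_p(e) /* GCC/Clang builtin */
	#endif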

> From: Morten Brørup [mailto:mb@smartsharesystems.com]
> Sent: Thursday, 30 May 2024 17.41
> 
> When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> In the case where the size is known to be 16 at build time, omit the
> duplicate copy.
> 
> Reduced the amount of effectively copy-pasted code by using #ifdef
> inside functions instead of outside functions.
> 
> Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
> Depends-on: series-31578 ("provide toolchain abstracted __builtin_constant_p")
> 
> v8:
> * Keep trying to fix that CI does not understand the dependency...
>   Depend on series instead of patch. Github only understands series.
> * Fix typo in patch description.
> v7:
> * Keep trying to fix that CI does not understand the dependency...
>   Depend on patch instead of series.
>   Move dependency out of the patch description itself, and down to the
>   version log.
> v6:
> * Trying to fix CI not understanding dependency...
>   Don't wrap dependency line.
> v5:
> * Fix for building with MSVC:
>   Use __rte_constant() instead of __builtin_constant_p().
>   Add dependency on patch providing __rte_constant().
> v4:
> * There are no problems compiling AVX2, only AVX. (Bruce Richardson)
> v3:
> * AVX2 is a superset of AVX;
>   for a block of AVX code, testing for AVX suffices. (Bruce Richardson)
> * Define RTE_MEMCPY_AVX if AVX is available, to avoid copy-pasting the
>   check for older GCC version. (Bruce Richardson)
> v2:
> * For GCC, version 11 is required for proper AVX handling;
>   if older GCC version, treat AVX as SSE.
>   Clang does not have this issue.
>   Note: Original code always treated AVX as SSE, regardless of compiler.
> * Do not add copyright. (Stephen Hemminger)
> ---
>  lib/eal/x86/include/rte_memcpy.h | 239 +++++++++----------------------
>  1 file changed, 64 insertions(+), 175 deletions(-)
> 
> diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
> index 72a92290e0..1619a8f296 100644
> --- a/lib/eal/x86/include/rte_memcpy.h
> +++ b/lib/eal/x86/include/rte_memcpy.h
> @@ -27,6 +27,16 @@ extern "C" {
>  #pragma GCC diagnostic ignored "-Wstringop-overflow"
>  #endif
> 
> +/*
> + * GCC older than version 11 doesn't compile AVX properly, so use SSE instead.
> + * There are no problems with AVX2.
> + */
> +#if defined __AVX2__
> +#define RTE_MEMCPY_AVX
> +#elif defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 110000))
> +#define RTE_MEMCPY_AVX
> +#endif
> +
>  /**
>   * Copy bytes from one location to another. The locations must not overlap.
>   *
> @@ -91,14 +101,6 @@ rte_mov15_or_less(void *dst, const void *src, size_t n)
>  	return ret;
>  }
> 
> -#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
> -
> -#define ALIGNMENT_MASK 0x3F
> -
> -/**
> - * AVX512 implementation below
> - */
> -
>  /**
>   * Copy 16 bytes from one location to another,
>   * locations should not overlap.
> @@ -119,10 +121,15 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
>  static __rte_always_inline void
>  rte_mov32(uint8_t *dst, const uint8_t *src)
>  {
> +#if defined RTE_MEMCPY_AVX
>  	__m256i ymm0;
> 
>  	ymm0 = _mm256_loadu_si256((const __m256i *)src);
>  	_mm256_storeu_si256((__m256i *)dst, ymm0);
> +#else /* SSE implementation */
> +	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> +	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> +#endif
>  }
> 
>  /**
> @@ -132,10 +139,15 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
>  static __rte_always_inline void
>  rte_mov64(uint8_t *dst, const uint8_t *src)
>  {
> +#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
>  	__m512i zmm0;
> 
>  	zmm0 = _mm512_loadu_si512((const void *)src);
>  	_mm512_storeu_si512((void *)dst, zmm0);
> +#else /* AVX2, AVX & SSE implementation */
> +	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> +	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> +#endif
>  }
> 
>  /**
> @@ -156,12 +168,18 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
>  static __rte_always_inline void
>  rte_mov256(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov64(dst + 0 * 64, src + 0 * 64);
> -	rte_mov64(dst + 1 * 64, src + 1 * 64);
> -	rte_mov64(dst + 2 * 64, src + 2 * 64);
> -	rte_mov64(dst + 3 * 64, src + 3 * 64);
> +	rte_mov128(dst + 0 * 128, src + 0 * 128);
> +	rte_mov128(dst + 1 * 128, src + 1 * 128);
>  }
> 
> +#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
> +
> +/**
> + * AVX512 implementation below
> + */
> +
> +#define ALIGNMENT_MASK 0x3F
> +
>  /**
>   * Copy 128-byte blocks from one location to another,
>   * locations should not overlap.
> @@ -231,12 +249,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	/**
>  	 * Fast way when copy size doesn't exceed 512 bytes
>  	 */
> +	if (__rte_constant(n) && n == 32) {
> +		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +		return ret;
> +	}
>  	if (n <= 32) {
>  		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> +		if (__rte_constant(n) && n == 16)
> +			return ret; /* avoid (harmless) duplicate copy */
>  		rte_mov16((uint8_t *)dst - 16 + n,
>  				  (const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
> +	if (__rte_constant(n) && n == 64) {
> +		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +		return ret;
> +	}
>  	if (n <= 64) {
>  		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
>  		rte_mov32((uint8_t *)dst - 32 + n,
> @@ -313,80 +341,13 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	goto COPY_BLOCK_128_BACK63;
>  }
> 
> -#elif defined __AVX2__
> -
> -#define ALIGNMENT_MASK 0x1F
> -
> -/**
> - * AVX2 implementation below
> - */
> -
> -/**
> - * Copy 16 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> -{
> -	__m128i xmm0;
> -
> -	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
> -	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
> -}
> -
> -/**
> - * Copy 32 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> -{
> -	__m256i ymm0;
> -
> -	ymm0 = _mm256_loadu_si256((const __m256i *)(const void *)src);
> -	_mm256_storeu_si256((__m256i *)(void *)dst, ymm0);
> -}
> +#elif defined RTE_MEMCPY_AVX
> 
>  /**
> - * Copy 64 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> -	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> -}
> -
> -/**
> - * Copy 128 bytes from one location to another,
> - * locations should not overlap.
> + * AVX implementation below
>   */
> -static __rte_always_inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> -	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> -	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> -	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
> -}
> 
> -/**
> - * Copy 256 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> -	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> -	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> -	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
> -	rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
> -	rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
> -	rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
> -	rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
> -}
> +#define ALIGNMENT_MASK 0x1F
> 
>  /**
>   * Copy 128-byte blocks from one location to another,
> @@ -437,15 +398,14 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	/**
>  	 * Fast way when copy size doesn't exceed 256 bytes
>  	 */
> -	if (n <= 32) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n,
> -				(const uint8_t *)src - 16 + n);
> +	if (__rte_constant(n) && n == 32) {
> +		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
>  		return ret;
>  	}
> -	if (n <= 48) {
> +	if (n <= 32) {
>  		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
> +		if (__rte_constant(n) && n == 16)
> +			return ret; /* avoid (harmless) duplicate copy */
>  		rte_mov16((uint8_t *)dst - 16 + n,
>  				(const uint8_t *)src - 16 + n);
>  		return ret;
> @@ -513,90 +473,11 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
> 
>  #else /* __AVX512F__ */
> 
> -#define ALIGNMENT_MASK 0x0F
> -
> -/**
> - * SSE & AVX implementation below
> - */
> -
> -/**
> - * Copy 16 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> -{
> -	__m128i xmm0;
> -
> -	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
> -	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
> -}
> -
> -/**
> - * Copy 32 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -}
> -
>  /**
> - * Copy 64 bytes from one location to another,
> - * locations should not overlap.
> + * SSE implementation below
>   */
> -static __rte_always_inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> -}
> 
> -/**
> - * Copy 128 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static __rte_always_inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> -	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> -	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> -	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> -	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> -}
> -
> -/**
> - * Copy 256 bytes from one location to another,
> - * locations should not overlap.
> - */
> -static inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> -{
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> -	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> -	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> -	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> -	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> -	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
> -	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
> -	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
> -	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
> -	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
> -	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
> -	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
> -	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
> -}
> +#define ALIGNMENT_MASK 0x0F
> 
>  /**
>  * Macro for copying unaligned block from one location to another with constant load offset,
> @@ -712,17 +593,15 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 */
>  	if (n <= 32) {
>  		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> -		return ret;
> -	}
> -	if (n <= 48) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +		if (__rte_constant(n) && n == 16)
> +			return ret; /* avoid (harmless) duplicate copy */
>  		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 64) {
>  		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
> +		if (n > 48)
> +			rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
>  		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
> @@ -828,8 +707,14 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>  	}
> 
>  	/* Copy 16 <= size <= 32 bytes */
> +	if (__rte_constant(n) && n == 32) {
> +		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +		return ret;
> +	}
>  	if (n <= 32) {
>  		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> +		if (__rte_constant(n) && n == 16)
> +			return ret; /* avoid (harmless) duplicate copy */
>  		rte_mov16((uint8_t *)dst - 16 + n,
>  				(const uint8_t *)src - 16 + n);
> 
> @@ -837,6 +722,10 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>  	}
> 
>  	/* Copy 32 < size <= 64 bytes */
> +	if (__rte_constant(n) && n == 64) {
> +		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +		return ret;
> +	}
>  	if (n <= 64) {
>  		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
>  		rte_mov32((uint8_t *)dst - 32 + n,
> --
> 2.17.1
  
Konstantin Ananyev June 10, 2024, 1:40 p.m. UTC | #2
> When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> In the case where the size is known to be 16 at build time, omit the
> duplicate copy.
> 
> Reduced the amount of effectively copy-pasted code by using #ifdef
> inside functions instead of outside functions.
> 
> Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
> Depends-on: series-31578 ("provide toolchain abstracted __builtin_constant_p")
> 
> v8:
> * Keep trying to fix that CI does not understand the dependency...
>   Depend on series instead of patch. Github only understands series.
> * Fix typo in patch description.
> v7:
> * Keep trying to fix that CI does not understand the dependency...
>   Depend on patch instead of series.
>   Move dependency out of the patch description itself, and down to the
>   version log.
> v6:
> * Trying to fix CI not understanding dependency...
>   Don't wrap dependency line.
> v5:
> * Fix for building with MSVC:
>   Use __rte_constant() instead of __builtin_constant_p().
>   Add dependency on patch providing __rte_constant().
> v4:
> * There are no problems compiling AVX2, only AVX. (Bruce Richardson)
> v3:
> * AVX2 is a superset of AVX;
>   for a block of AVX code, testing for AVX suffices. (Bruce Richardson)
> * Define RTE_MEMCPY_AVX if AVX is available, to avoid copy-pasting the
>   check for older GCC version. (Bruce Richardson)
> v2:
> * For GCC, version 11 is required for proper AVX handling;
>   if older GCC version, treat AVX as SSE.
>   Clang does not have this issue.
>   Note: Original code always treated AVX as SSE, regardless of compiler.
> * Do not add copyright. (Stephen Hemminger)

Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>

The code change itself -  LGTM.
Out of interest - do you expect any perf diff with these changes?
On my box I didn’t see any with 'memcpy_perf_autotest'.
Konstantin
  
Morten Brørup June 10, 2024, 1:59 p.m. UTC | #3
> From: Konstantin Ananyev [mailto:konstantin.ananyev@huawei.com]
> Sent: Monday, 10 June 2024 15.40
> 
> > When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> > In the case where the size is known to be 16 at build time, omit the
> > duplicate copy.
> >
> > Reduced the amount of effectively copy-pasted code by using #ifdef
> > inside functions instead of outside functions.
> >
> > Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> > Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> > ---
> > Depends-on: series-31578 ("provide toolchain abstracted __builtin_constant_p")
> >
> > v8:
> > * Keep trying to fix that CI does not understand the dependency...
> >   Depend on series instead of patch. Github only understands series.
> > * Fix typo in patch description.
> > v7:
> > * Keep trying to fix that CI does not understand the dependency...
> >   Depend on patch instead of series.
> >   Move dependency out of the patch description itself, and down to the
> >   version log.
> > v6:
> > * Trying to fix CI not understanding dependency...
> >   Don't wrap dependency line.
> > v5:
> > * Fix for building with MSVC:
> >   Use __rte_constant() instead of __builtin_constant_p().
> >   Add dependency on patch providing __rte_constant().
> > v4:
> > * There are no problems compiling AVX2, only AVX. (Bruce Richardson)
> > v3:
> > * AVX2 is a superset of AVX;
> >   for a block of AVX code, testing for AVX suffices. (Bruce Richardson)
> > * Define RTE_MEMCPY_AVX if AVX is available, to avoid copy-pasting the
> >   check for older GCC version. (Bruce Richardson)
> > v2:
> > * For GCC, version 11 is required for proper AVX handling;
> >   if older GCC version, treat AVX as SSE.
> >   Clang does not have this issue.
> >   Note: Original code always treated AVX as SSE, regardless of compiler.
> > * Do not add copyright. (Stephen Hemminger)
> 
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> 
> The code change itself -  LGTM.
> Out of interest - do you expect any perf diff with these changes?

I don't expect a significant perf diff with these changes, not even for the affected special cases. But the generated code (for the affected cases) is smaller.

Stephen noticed that the code generated from rte_memcpy() was inefficient in some cases [1], so I decided to fix it.

[1]: https://inbox.dpdk.org/dev/20240302090207.428d4853@hermes.local/

The code generated from rte_memcpy() was not incorrect, only slightly inefficient (for the affected cases), so the patch is not a bugfix in need of backporting.

> On my box I didn’t see any with 'memcpy_perf_autotest'.
> Konstantin
>
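
An illustrative (hypothetical) call site for the affected case: the copy size is a
build-time constant 16, so the patched path emits a single 16-byte load/store
instead of two overlapping ones.

	#include <stdint.h>
	#include <rte_memcpy.h>

	/* Hypothetical 16-byte struct, for illustration only. */
	struct flow_key {
		uint64_t lo;
		uint64_t hi;
	};

	static inline void
	copy_key(struct flow_key *dst, const struct flow_key *src)
	{
		/* sizeof(*dst) == 16 is known at build time. */
		rte_memcpy(dst, src, sizeof(*dst));
	}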
  
David Marchand July 9, 2024, 9:24 a.m. UTC | #4
On Mon, Jun 10, 2024 at 3:40 PM Konstantin Ananyev
<konstantin.ananyev@huawei.com> wrote:
> > When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> > In the case where the size is known to be 16 at build time, omit the
> > duplicate copy.
> >
> > Reduced the amount of effectively copy-pasted code by using #ifdef
> > inside functions instead of outside functions.
> >
> > Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> > Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>

Applied, thanks for the cleanup.
  
David Marchand July 9, 2024, 11:42 a.m. UTC | #5
Hello,

On Tue, Jul 9, 2024 at 11:24 AM David Marchand
<david.marchand@redhat.com> wrote:
>
> On Mon, Jun 10, 2024 at 3:40 PM Konstantin Ananyev
> <konstantin.ananyev@huawei.com> wrote:
> > > When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> > > In the case where the size is known to be 16 at build time, omit the
> > > duplicate copy.
> > >
> > > Reduced the amount of effectively copy-pasted code by using #ifdef
> > > inside functions instead of outside functions.
> > >
> > > Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> > > Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> > > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
>
> Applied, thanks for the cleanup.

This breaks OVS compilation (clang and gcc).

make[1]: *** [Makefile:4722: lib/ofp-packet.lo] Error 1
make[1]: *** Waiting for unfinished jobs....
In file included from lib/ofp-print.c:34:
In file included from ./lib/dp-packet.h:25:
In file included from /home/runner/work/ovs/ovs/dpdk-dir/include/rte_mbuf.h:38:
In file included from
/home/runner/work/ovs/ovs/dpdk-dir/include/rte_mempool.h:50:
/home/runner/work/ovs/ovs/dpdk-dir/include/rte_memcpy.h:113:25: error:
cast from 'const uint8_t *' (aka 'const unsigned char *') to 'const
__m128i *' increases required alignment from 1 to 16
[-Werror,-Wcast-align]
xmm0 = _mm_loadu_si128((const __m128i *)src);
^~~~~~~~~~~~~~~~~~~~
/home/runner/work/ovs/ovs/dpdk-dir/include/rte_memcpy.h:114:19: error:
cast from 'uint8_t *' (aka 'unsigned char *') to '__m128i *' increases
required alignment from 1 to 16 [-Werror,-Wcast-align]
_mm_storeu_si128((__m128i *)dst, xmm0);
^~~~~~~~~~~~~~
2 errors generated.
make[1]: *** [Makefile:4722: lib/ofp-print.lo] Error 1
make[1]: Leaving directory '/home/runner/work/ovs/ovs'
make: *** [Makefile:3102: all] Error 2

I dropped this patch from main for now.
Can you have a look please?
  
Morten Brørup July 9, 2024, 12:43 p.m. UTC | #6
> From: David Marchand [mailto:david.marchand@redhat.com]
> Sent: Tuesday, 9 July 2024 13.43
> 
> Hello,
> 
> On Tue, Jul 9, 2024 at 11:24 AM David Marchand
> <david.marchand@redhat.com> wrote:
> >
> > On Mon, Jun 10, 2024 at 3:40 PM Konstantin Ananyev
> > <konstantin.ananyev@huawei.com> wrote:
> > > > When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
> > > > In the case where the size is known to be 16 at build time, omit the
> > > > duplicate copy.
> > > >
> > > > Reduced the amount of effectively copy-pasted code by using #ifdef
> > > > inside functions instead of outside functions.
> > > >
> > > > Suggested-by: Stephen Hemminger <stephen@networkplumber.org>
> > > > Signed-off-by: Morten Brørup <mb@smartsharesystems.com>
> > > > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> >
> > Applied, thanks for the cleanup.
> 
> This breaks OVS compilation (clang and gcc).
> 
> make[1]: *** [Makefile:4722: lib/ofp-packet.lo] Error 1
> make[1]: *** Waiting for unfinished jobs....
> In file included from lib/ofp-print.c:34:
> In file included from ./lib/dp-packet.h:25:
> In file included from /home/runner/work/ovs/ovs/dpdk-dir/include/rte_mbuf.h:38:
> In file included from
> /home/runner/work/ovs/ovs/dpdk-dir/include/rte_mempool.h:50:
> /home/runner/work/ovs/ovs/dpdk-dir/include/rte_memcpy.h:113:25: error:
> cast from 'const uint8_t *' (aka 'const unsigned char *') to 'const
> __m128i *' increases required alignment from 1 to 16
> [-Werror,-Wcast-align]
> xmm0 = _mm_loadu_si128((const __m128i *)src);
> ^~~~~~~~~~~~~~~~~~~~
> /home/runner/work/ovs/ovs/dpdk-dir/include/rte_memcpy.h:114:19: error:
> cast from 'uint8_t *' (aka 'unsigned char *') to '__m128i *' increases
> required alignment from 1 to 16 [-Werror,-Wcast-align]
> _mm_storeu_si128((__m128i *)dst, xmm0);
> ^~~~~~~~~~~~~~
> 2 errors generated.
> make[1]: *** [Makefile:4722: lib/ofp-print.lo] Error 1
> make[1]: Leaving directory '/home/runner/work/ovs/ovs'
> make: *** [Makefile:3102: all] Error 2
> 
> I dropped this patch from main for now.
> Can you have a look please?

It seems the new code casts directly to the vector register type, while the original code first casts to void *, and then to the vector type.

I'll try to fix it and post a new version.

PS: The CI should catch this stuff.
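
The likely shape of the fix, sketched from the removed per-ISA rte_mov16() variants
in the diff, which cast via (const) void * before the vector pointer type to keep
-Wcast-align quiet:

	static __rte_always_inline void
	rte_mov16(uint8_t *dst, const uint8_t *src)
	{
		__m128i xmm0;

		/* Cast through void * before the vector pointer type, as the
		 * removed SSE/AVX2 variants did, to avoid -Wcast-align. */
		xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
		_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
	}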
  
David Marchand July 9, 2024, 12:47 p.m. UTC | #7
On Tue, Jul 9, 2024 at 2:43 PM Morten Brørup <mb@smartsharesystems.com> wrote:
> PS: The CI should catch this stuff.

Restoring OVS tests in CI has been requested and I think it was being worked on.
Not sure where we are atm, Patrick?
  
Morten Brørup July 9, 2024, 12:54 p.m. UTC | #8
> From: David Marchand [mailto:david.marchand@redhat.com]
> Sent: Tuesday, 9 July 2024 14.48
> 
> On Tue, Jul 9, 2024 at 2:43 PM Morten Brørup <mb@smartsharesystems.com>
> wrote:
> > PS: The CI should catch this stuff.

Working on fixing it now; some old variants of rte_mov16() do the extra cast, and some don't. It could be CPU-feature (SSE/AVX/AVX512) specific.

Also, the header file's definition of _mm_loadu_si128() has the wrong parameter type: it is specified as a pointer type that must be aligned, although alignment is not required for this intrinsic. The intrinsic header files are full of bugs like this.

> 
> Restoring OVS tests in CI has been requested and I think it was being
> worked on.
> Not sure where we are atm, Patrick?
> 
> 
> --
> David Marchand
  
Patrick Robb July 9, 2024, 3:26 p.m. UTC | #9
On Tue, Jul 9, 2024 at 8:48 AM David Marchand <david.marchand@redhat.com> wrote:
>
> On Tue, Jul 9, 2024 at 2:43 PM Morten Brørup <mb@smartsharesystems.com> wrote:
> > PS: The CI should catch this stuff.
>
> Restoring OVS tests in CI has been requested and I think it was being worked on.
> Not sure where we are atm, Patrick?
>

OvS and SPDK compile jobs were added about a month ago. So, Morten's
series should be getting flagged for OVS fails.

An example from a series which already has CI finished:
https://mails.dpdk.org/archives/test-report/2024-July/728503.html
  

Patch

diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
index 72a92290e0..1619a8f296 100644
--- a/lib/eal/x86/include/rte_memcpy.h
+++ b/lib/eal/x86/include/rte_memcpy.h
@@ -27,6 +27,16 @@  extern "C" {
 #pragma GCC diagnostic ignored "-Wstringop-overflow"
 #endif
 
+/*
+ * GCC older than version 11 doesn't compile AVX properly, so use SSE instead.
+ * There are no problems with AVX2.
+ */
+#if defined __AVX2__
+#define RTE_MEMCPY_AVX
+#elif defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 110000))
+#define RTE_MEMCPY_AVX
+#endif
+
 /**
  * Copy bytes from one location to another. The locations must not overlap.
  *
@@ -91,14 +101,6 @@  rte_mov15_or_less(void *dst, const void *src, size_t n)
 	return ret;
 }
 
-#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
-
-#define ALIGNMENT_MASK 0x3F
-
-/**
- * AVX512 implementation below
- */
-
 /**
  * Copy 16 bytes from one location to another,
  * locations should not overlap.
@@ -119,10 +121,15 @@  rte_mov16(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov32(uint8_t *dst, const uint8_t *src)
 {
+#if defined RTE_MEMCPY_AVX
 	__m256i ymm0;
 
 	ymm0 = _mm256_loadu_si256((const __m256i *)src);
 	_mm256_storeu_si256((__m256i *)dst, ymm0);
+#else /* SSE implementation */
+	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+#endif
 }
 
 /**
@@ -132,10 +139,15 @@  rte_mov32(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov64(uint8_t *dst, const uint8_t *src)
 {
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
 	__m512i zmm0;
 
 	zmm0 = _mm512_loadu_si512((const void *)src);
 	_mm512_storeu_si512((void *)dst, zmm0);
+#else /* AVX2, AVX & SSE implementation */
+	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+#endif
 }
 
 /**
@@ -156,12 +168,18 @@  rte_mov128(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov256(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov64(dst + 0 * 64, src + 0 * 64);
-	rte_mov64(dst + 1 * 64, src + 1 * 64);
-	rte_mov64(dst + 2 * 64, src + 2 * 64);
-	rte_mov64(dst + 3 * 64, src + 3 * 64);
+	rte_mov128(dst + 0 * 128, src + 0 * 128);
+	rte_mov128(dst + 1 * 128, src + 1 * 128);
 }
 
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
+
+/**
+ * AVX512 implementation below
+ */
+
+#define ALIGNMENT_MASK 0x3F
+
 /**
  * Copy 128-byte blocks from one location to another,
  * locations should not overlap.
@@ -231,12 +249,22 @@  rte_memcpy_generic(void *dst, const void *src, size_t n)
 	/**
 	 * Fast way when copy size doesn't exceed 512 bytes
 	 */
+	if (__rte_constant(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		if (__rte_constant(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n,
 				  (const uint8_t *)src - 16 + n);
 		return ret;
 	}
+	if (__rte_constant(n) && n == 64) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		rte_mov32((uint8_t *)dst - 32 + n,
@@ -313,80 +341,13 @@  rte_memcpy_generic(void *dst, const void *src, size_t n)
 	goto COPY_BLOCK_128_BACK63;
 }
 
-#elif defined __AVX2__
-
-#define ALIGNMENT_MASK 0x1F
-
-/**
- * AVX2 implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-	__m128i xmm0;
-
-	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
-	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-	__m256i ymm0;
-
-	ymm0 = _mm256_loadu_si256((const __m256i *)(const void *)src);
-	_mm256_storeu_si256((__m256i *)(void *)dst, ymm0);
-}
+#elif defined RTE_MEMCPY_AVX
 
 /**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
+ * AVX implementation below
  */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-}
 
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-	rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
-	rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
-	rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
-	rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
-}
+#define ALIGNMENT_MASK 0x1F
 
 /**
  * Copy 128-byte blocks from one location to another,
@@ -437,15 +398,14 @@  rte_memcpy_generic(void *dst, const void *src, size_t n)
 	/**
 	 * Fast way when copy size doesn't exceed 256 bytes
 	 */
-	if (n <= 32) {
-		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n,
-				(const uint8_t *)src - 16 + n);
+	if (__rte_constant(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		return ret;
 	}
-	if (n <= 48) {
+	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
+		if (__rte_constant(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n,
 				(const uint8_t *)src - 16 + n);
 		return ret;
@@ -513,90 +473,11 @@  rte_memcpy_generic(void *dst, const void *src, size_t n)
 
 #else /* __AVX512F__ */
 
-#define ALIGNMENT_MASK 0x0F
-
-/**
- * SSE & AVX implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-	__m128i xmm0;
-
-	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
-	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-}
-
 /**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
+ * SSE implementation below
  */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-}
 
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-}
-
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
-	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
-	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
-	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
-	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
-	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
-	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
-	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
-}
+#define ALIGNMENT_MASK 0x0F
 
 /**
  * Macro for copying unaligned block from one location to another with constant load offset,
@@ -712,17 +593,15 @@  rte_memcpy_generic(void *dst, const void *src, size_t n)
 	 */
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-		return ret;
-	}
-	if (n <= 48) {
-		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		if (__rte_constant(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
 		return ret;
 	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
+		if (n > 48)
+			rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
 		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
 		return ret;
 	}
@@ -828,8 +707,14 @@  rte_memcpy_aligned(void *dst, const void *src, size_t n)
 	}
 
 	/* Copy 16 <= size <= 32 bytes */
+	if (__rte_constant(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		if (__rte_constant(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n,
 				(const uint8_t *)src - 16 + n);
 
@@ -837,6 +722,10 @@  rte_memcpy_aligned(void *dst, const void *src, size_t n)
 	}
 
 	/* Copy 32 < size <= 64 bytes */
+	if (__rte_constant(n) && n == 64) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		rte_mov32((uint8_t *)dst - 32 + n,