[dpdk-dev,v2] Clean up rte_memcpy.h file

Message ID 1429562009-11817-1-git-send-email-rkerur@gmail.com (mailing list archive)
State Changes Requested, archived

Commit Message

Ravi Kerur April 20, 2015, 8:33 p.m. UTC
  Remove unnecessary type casting in functions.

Tested on Ubuntu (14.04 x86_64) with "make test".
"make test" results match the results with baseline.
"Memcpy perf" results match the results with baseline.

Signed-off-by: Ravi Kerur <rkerur@gmail.com>
---
 .../common/include/arch/x86/rte_memcpy.h           | 340 +++++++++++----------
 1 file changed, 175 insertions(+), 165 deletions(-)
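
The change is mechanical: the rte_mov16/rte_mov32/... helpers already take
uint8_t pointers, so the casts at each call site are redundant. The recurring
pattern (taken from the diff below):

	/* before */
	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
	/* after */
	rte_mov32(dst + 0 * 32, src + 0 * 32);

In rte_memcpy() itself, the patch renames the void * parameters to _dst and
_src and converts them once to typed local pointers, so the rest of the body
can drop the casts as well.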
  

Comments

Ravi Kerur May 8, 2015, 11:10 p.m. UTC | #1
Any inputs here? No functionality change, just cleanup. I have run "make
test" and "memcpy_perf_autotest" and have not noticed any changes in the
numbers.

On Mon, Apr 20, 2015 at 1:33 PM, Ravi Kerur <rkerur@gmail.com> wrote:

> Remove unnecessary type casting in functions.
>
> Tested on Ubuntu (14.04 x86_64) with "make test".
> "make test" results match the results with baseline.
> "Memcpy perf" results match the results with baseline.
>
> Signed-off-by: Ravi Kerur <rkerur@gmail.com>
> ---
>  .../common/include/arch/x86/rte_memcpy.h           | 340 +++++++++++----------
>  1 file changed, 175 insertions(+), 165 deletions(-)
>
> [...]
  
Stephen Hemminger Oct. 14, 2015, 12:09 a.m. UTC | #2
On Mon, 20 Apr 2015 13:33:29 -0700
Ravi Kerur <rkerur@gmail.com> wrote:

> Remove unnecessary type casting in functions.
> 
> Tested on Ubuntu (14.04 x86_64) with "make test".
> "make test" results match the results with baseline.
> "Memcpy perf" results match the results with baseline.
> 
> Signed-off-by: Ravi Kerur <rkerur@gmail.com>

Getting rid of casts looks good.
My guess is no one reviewed it because no one is using rte_memcpy much.


Acked-by: Stephen Hemminger <stephen@networkplumber.org>
  
Zhihong Wang Jan. 28, 2016, 4:18 a.m. UTC | #3
> Remove unnecessary type casting in functions.
> 
> Tested on Ubuntu (14.04 x86_64) with "make test".
> "make test" results match the results with baseline.
> "Memcpy perf" results match the results with baseline.
> 
> Signed-off-by: Ravi Kerur <rkerur@gmail.com>
> Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> 
> ---
> .../common/include/arch/x86/rte_memcpy.h           | 340 +++++++++++----------
>  1 file changed, 175 insertions(+), 165 deletions(-)
> 
> diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> index 6a57426..839d4ec 100644
> --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
 
[...]
 
>  /**
> @@ -150,13 +150,16 @@ rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
>  	__m256i ymm0, ymm1;
>  
>  	while (n >= 64) {
> -		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
> +
> +		ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
> +		ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
> +
> +		_mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
> +		_mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
> +
 
Any particular reason to change the order of the statements here? :)
Overall this patch looks good.
 
>  		n -= 64;
> -		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
> -		src = (const uint8_t *)src + 64;
> -		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
> -		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
> -		dst = (uint8_t *)dst + 64;
> +		src = src + 64;
> +		dst = dst + 64;
>  	}
>  }
>
  
Ravi Kerur Feb. 19, 2016, 5:44 p.m. UTC | #4
On Wed, Jan 27, 2016 at 8:18 PM, Zhihong Wang <zhihong.wang@intel.com>
wrote:

> > Remove unnecessary type casting in functions.
> >
> > Tested on Ubuntu (14.04 x86_64) with "make test".
> > "make test" results match the results with baseline.
> > "Memcpy perf" results match the results with baseline.
> >
> > Signed-off-by: Ravi Kerur <rkerur@gmail.com>
> > Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> >
> > ---
> > .../common/include/arch/x86/rte_memcpy.h           | 340 +++++++++++----------
> >  1 file changed, 175 insertions(+), 165 deletions(-)
> >
> > diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> > index 6a57426..839d4ec 100644
> > --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> > +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
>
> [...]
>
> >  /**
> > @@ -150,13 +150,16 @@ rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
> >       __m256i ymm0, ymm1;
> >
> >       while (n >= 64) {
> > -             ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
> > +
> > +             ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
> > +             ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
> > +
> > +             _mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
> > +             _mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
> > +
>
> Any particular reason to change the order of the statements here? :)
> Overall this patch looks good.
>

Sorry for the late response. Let me double-check and get back to you; it's
been a while since I made the changes.


> >               n -= 64;
> > -             ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
> > -             src = (const uint8_t *)src + 64;
> > -             _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
> > -             _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
> > -             dst = (uint8_t *)dst + 64;
> > +             src = src + 64;
> > +             dst = dst + 64;
> >       }
> >  }
> >
>
>
  
Ravi Kerur Feb. 27, 2016, 2:06 p.m. UTC | #5
On Wed, Jan 27, 2016 at 8:18 PM, Zhihong Wang <zhihong.wang@intel.com>
wrote:

> > Remove unnecessary type casting in functions.
> >
> > Tested on Ubuntu (14.04 x86_64) with "make test".
> > "make test" results match the results with baseline.
> > "Memcpy perf" results match the results with baseline.
> >
> > Signed-off-by: Ravi Kerur <rkerur@gmail.com>
> > Acked-by: Stephen Hemminger <stephen@networkplumber.org>
> >
> > ---
> > .../common/include/arch/x86/rte_memcpy.h           | 340 +++++++++++----------
> >  1 file changed, 175 insertions(+), 165 deletions(-)
> >
> > diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> > index 6a57426..839d4ec 100644
> > --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> > +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
>
> [...]
>
> >  /**
> > @@ -150,13 +150,16 @@ rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
> >       __m256i ymm0, ymm1;
> >
> >       while (n >= 64) {
> > -             ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
> > +
> > +             ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
> > +             ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
> > +
> > +             _mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
> > +             _mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
> > +
>
> Any particular reason to change the order of the statements here? :)
> Overall this patch looks good.
>

I checked the code changes. The initial code had the address updates (src
and dst) and the counter decrement scattered between the load and store
instructions. I changed it to do all loads first, then all stores, with the
address/counter updates handled afterwards, without changing functionality.

>
> >               n -= 64;
> > -             ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
> > -             src = (const uint8_t *)src + 64;
> > -             _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
> > -             _mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
> > -             dst = (uint8_t *)dst + 64;
> > +             src = src + 64;
> > +             dst = dst + 64;
> >       }
> >  }
> >
>
>
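
For reference, here is the reordered 64-byte AVX loop from the patch as
plain C (taken directly from the diff, assuming the AVX intrinsics from
<immintrin.h>; the 256-byte variant is grouped the same way):

	static inline void
	rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
	{
		__m256i ymm0, ymm1;

		while (n >= 64) {
			/* all loads first ... */
			ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
			ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
			/* ... then all stores ... */
			_mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
			_mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
			/* ... then the counter and pointer updates */
			n -= 64;
			src = src + 64;
			dst = dst + 64;
		}
	}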
  
Zhihong Wang Feb. 29, 2016, 3:07 p.m. UTC | #6
> -----Original Message-----
> From: Ravi Kerur [mailto:rkerur@gmail.com]
> Sent: Saturday, February 27, 2016 10:06 PM
> To: Wang, Zhihong <zhihong.wang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev,v2] Clean up rte_memcpy.h file
>
> [...]
>
> I checked the code changes. The initial code had the address updates (src
> and dst) and the counter decrement scattered between the load and store
> instructions. I changed it to do all loads first, then all stores, with the
> address/counter updates handled afterwards, without changing functionality.

It's definitely okay to do this. Actually changing it or not won't affect
the final output at all since gcc will optimize it while generating code.
It's C code we're writing after all.

But personally I prefer to keep the original order: it acts as a reminder
that data needed in the future should be calculated as soon as possible,
and that different kinds of instructions (which go to different CPU ports)
should be mixed together. :)
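
(For illustration, this is the original interleaving being referred to, with
comments added; whether it actually helps depends on the CPU's scheduler:)

	while (n >= 64) {
		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
		n -= 64;                            /* scalar update between vector loads */
		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
		src = (const uint8_t *)src + 64;    /* address math overlaps with the stores */
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
		dst = (uint8_t *)dst + 64;
	}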

Could you please rebase this patch, since there have been some changes
already?

  

Patch

diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index 6a57426..839d4ec 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -106,8 +106,8 @@  rte_mov32(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov64(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+	rte_mov32(dst + 0 * 32, src + 0 * 32);
+	rte_mov32(dst + 1 * 32, src + 1 * 32);
 }
 
 /**
@@ -117,10 +117,10 @@  rte_mov64(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov128(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
+	rte_mov32(dst + 0 * 32, src + 0 * 32);
+	rte_mov32(dst + 1 * 32, src + 1 * 32);
+	rte_mov32(dst + 2 * 32, src + 2 * 32);
+	rte_mov32(dst + 3 * 32, src + 3 * 32);
 }
 
 /**
@@ -130,14 +130,14 @@  rte_mov128(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov256(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-	rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
-	rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
-	rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
-	rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
+	rte_mov32(dst + 0 * 32, src + 0 * 32);
+	rte_mov32(dst + 1 * 32, src + 1 * 32);
+	rte_mov32(dst + 2 * 32, src + 2 * 32);
+	rte_mov32(dst + 3 * 32, src + 3 * 32);
+	rte_mov32(dst + 4 * 32, src + 4 * 32);
+	rte_mov32(dst + 5 * 32, src + 5 * 32);
+	rte_mov32(dst + 6 * 32, src + 6 * 32);
+	rte_mov32(dst + 7 * 32, src + 7 * 32);
 }
 
 /**
@@ -150,13 +150,16 @@  rte_mov64blocks(uint8_t *dst, const uint8_t *src, size_t n)
 	__m256i ymm0, ymm1;
 
 	while (n >= 64) {
-		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+
+		ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
+		ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
+
+		_mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
+		_mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
+
 		n -= 64;
-		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
-		src = (const uint8_t *)src + 64;
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
-		dst = (uint8_t *)dst + 64;
+		src = src + 64;
+		dst = dst + 64;
 	}
 }
 
@@ -170,34 +173,39 @@  rte_mov256blocks(uint8_t *dst, const uint8_t *src, size_t n)
 	__m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
 
 	while (n >= 256) {
-		ymm0 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 0 * 32));
+
+		ymm0 = _mm256_loadu_si256((const __m256i *)(src + 0 * 32));
+		ymm1 = _mm256_loadu_si256((const __m256i *)(src + 1 * 32));
+		ymm2 = _mm256_loadu_si256((const __m256i *)(src + 2 * 32));
+		ymm3 = _mm256_loadu_si256((const __m256i *)(src + 3 * 32));
+		ymm4 = _mm256_loadu_si256((const __m256i *)(src + 4 * 32));
+		ymm5 = _mm256_loadu_si256((const __m256i *)(src + 5 * 32));
+		ymm6 = _mm256_loadu_si256((const __m256i *)(src + 6 * 32));
+		ymm7 = _mm256_loadu_si256((const __m256i *)(src + 7 * 32));
+
+		_mm256_storeu_si256((__m256i *)(dst + 0 * 32), ymm0);
+		_mm256_storeu_si256((__m256i *)(dst + 1 * 32), ymm1);
+		_mm256_storeu_si256((__m256i *)(dst + 2 * 32), ymm2);
+		_mm256_storeu_si256((__m256i *)(dst + 3 * 32), ymm3);
+		_mm256_storeu_si256((__m256i *)(dst + 4 * 32), ymm4);
+		_mm256_storeu_si256((__m256i *)(dst + 5 * 32), ymm5);
+		_mm256_storeu_si256((__m256i *)(dst + 6 * 32), ymm6);
+		_mm256_storeu_si256((__m256i *)(dst + 7 * 32), ymm7);
+
 		n -= 256;
-		ymm1 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 1 * 32));
-		ymm2 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 2 * 32));
-		ymm3 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 3 * 32));
-		ymm4 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 4 * 32));
-		ymm5 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 5 * 32));
-		ymm6 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 6 * 32));
-		ymm7 = _mm256_loadu_si256((const __m256i *)((const uint8_t *)src + 7 * 32));
-		src = (const uint8_t *)src + 256;
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 0 * 32), ymm0);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 1 * 32), ymm1);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 2 * 32), ymm2);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 3 * 32), ymm3);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 4 * 32), ymm4);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 5 * 32), ymm5);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 6 * 32), ymm6);
-		_mm256_storeu_si256((__m256i *)((uint8_t *)dst + 7 * 32), ymm7);
-		dst = (uint8_t *)dst + 256;
+		src = src + 256;
+		dst = dst + 256;
 	}
 }
 
 static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy(void *_dst, const void *_src, size_t n)
 {
-	uintptr_t dstu = (uintptr_t)dst;
-	uintptr_t srcu = (uintptr_t)src;
-	void *ret = dst;
+	const uint8_t *src = (const uint8_t *)_src;
+	uint8_t *dst = (uint8_t *)_dst;
+	uintptr_t dstu = (uintptr_t)_dst;
+	uintptr_t srcu = (uintptr_t)_src;
+	void *ret = _dst;
 	size_t dstofss;
 	size_t bits;
 
@@ -230,43 +238,43 @@  rte_memcpy(void *dst, const void *src, size_t n)
 	 * Fast way when copy size doesn't exceed 512 bytes
 	 */
 	if (n <= 32) {
-		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+		rte_mov16(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
 		return ret;
 	}
 	if (n <= 64) {
-		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+		rte_mov32(dst, src);
+		rte_mov32(dst - 32 + n, src - 32 + n);
 		return ret;
 	}
 	if (n <= 512) {
 		if (n >= 256) {
 			n -= 256;
-			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 256;
-			dst = (uint8_t *)dst + 256;
+			rte_mov256(dst, src);
+			src = src + 256;
+			dst = dst + 256;
 		}
 		if (n >= 128) {
 			n -= 128;
-			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 128;
-			dst = (uint8_t *)dst + 128;
+			rte_mov128(dst, src);
+			src = src + 128;
+			dst = dst + 128;
 		}
 		if (n >= 64) {
 			n -= 64;
-			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 64;
-			dst = (uint8_t *)dst + 64;
+			rte_mov64(dst, src);
+			src = src + 64;
+			dst = dst + 64;
 		}
 COPY_BLOCK_64_BACK31:
 		if (n > 32) {
-			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-			rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+			rte_mov32(dst, src);
+			rte_mov32(dst - 32 + n, src - 32 + n);
 			return ret;
 		}
-		if (n > 0) {
-			rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
-		}
+		if (n > 0)
+			rte_mov32(dst - 32 + n, src - 32 + n);
+
 		return ret;
 	}
 
@@ -275,21 +283,21 @@  COPY_BLOCK_64_BACK31:
 	 */
 	dstofss = 32 - ((uintptr_t)dst & 0x1F);
 	n -= dstofss;
-	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-	src = (const uint8_t *)src + dstofss;
-	dst = (uint8_t *)dst + dstofss;
+	rte_mov32(dst, src);
+	src = src + dstofss;
+	dst = dst + dstofss;
 
 	/**
 	 * Copy 256-byte blocks.
 	 * Use copy block function for better instruction order control,
 	 * which is important when load is unaligned.
 	 */
-	rte_mov256blocks((uint8_t *)dst, (const uint8_t *)src, n);
+	rte_mov256blocks(dst, src, n);
 	bits = n;
 	n = n & 255;
 	bits -= n;
-	src = (const uint8_t *)src + bits;
-	dst = (uint8_t *)dst + bits;
+	src = src + bits;
+	dst = dst + bits;
 
 	/**
 	 * Copy 64-byte blocks.
@@ -297,12 +305,12 @@  COPY_BLOCK_64_BACK31:
 	 * which is important when load is unaligned.
 	 */
 	if (n >= 64) {
-		rte_mov64blocks((uint8_t *)dst, (const uint8_t *)src, n);
+		rte_mov64blocks(dst, src, n);
 		bits = n;
 		n = n & 63;
 		bits -= n;
-		src = (const uint8_t *)src + bits;
-		dst = (uint8_t *)dst + bits;
+		src = src + bits;
+		dst = dst + bits;
 	}
 
 	/**
@@ -337,8 +345,8 @@  rte_mov16(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov32(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+	rte_mov16(dst + 0 * 16, src + 0 * 16);
+	rte_mov16(dst + 1 * 16, src + 1 * 16);
 }
 
 /**
@@ -348,10 +356,10 @@  rte_mov32(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov64(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
+	rte_mov16(dst + 0 * 16, src + 0 * 16);
+	rte_mov16(dst + 1 * 16, src + 1 * 16);
+	rte_mov16(dst + 2 * 16, src + 2 * 16);
+	rte_mov16(dst + 3 * 16, src + 3 * 16);
 }
 
 /**
@@ -361,14 +369,14 @@  rte_mov64(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov128(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
+	rte_mov16(dst + 0 * 16, src + 0 * 16);
+	rte_mov16(dst + 1 * 16, src + 1 * 16);
+	rte_mov16(dst + 2 * 16, src + 2 * 16);
+	rte_mov16(dst + 3 * 16, src + 3 * 16);
+	rte_mov16(dst + 4 * 16, src + 4 * 16);
+	rte_mov16(dst + 5 * 16, src + 5 * 16);
+	rte_mov16(dst + 6 * 16, src + 6 * 16);
+	rte_mov16(dst + 7 * 16, src + 7 * 16);
 }
 
 /**
@@ -378,22 +386,22 @@  rte_mov128(uint8_t *dst, const uint8_t *src)
 static inline void
 rte_mov256(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
-	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
-	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
-	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
-	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
-	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
-	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
-	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
+	rte_mov16(dst + 0 * 16, src + 0 * 16);
+	rte_mov16(dst + 1 * 16, src + 1 * 16);
+	rte_mov16(dst + 2 * 16, src + 2 * 16);
+	rte_mov16(dst + 3 * 16, src + 3 * 16);
+	rte_mov16(dst + 4 * 16, src + 4 * 16);
+	rte_mov16(dst + 5 * 16, src + 5 * 16);
+	rte_mov16(dst + 6 * 16, src + 6 * 16);
+	rte_mov16(dst + 7 * 16, src + 7 * 16);
+	rte_mov16(dst + 8 * 16, src + 8 * 16);
+	rte_mov16(dst + 9 * 16, src + 9 * 16);
+	rte_mov16(dst + 10 * 16, src + 10 * 16);
+	rte_mov16(dst + 11 * 16, src + 11 * 16);
+	rte_mov16(dst + 12 * 16, src + 12 * 16);
+	rte_mov16(dst + 13 * 16, src + 13 * 16);
+	rte_mov16(dst + 14 * 16, src + 14 * 16);
+	rte_mov16(dst + 15 * 16, src + 15 * 16);
 }
 
 /**
@@ -411,48 +419,48 @@  rte_mov256(uint8_t *dst, const uint8_t *src)
 ({                                                                                                          \
     int tmp;                                                                                                \
     while (len >= 128 + 16 - offset) {                                                                      \
-        xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));                  \
-        len -= 128;                                                                                         \
-        xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));                  \
-        xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));                  \
-        xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 3 * 16));                  \
-        xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 4 * 16));                  \
-        xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 5 * 16));                  \
-        xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 6 * 16));                  \
-        xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 7 * 16));                  \
-        xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 8 * 16));                  \
+        xmm0 = _mm_loadu_si128((const __m128i *)(src - offset + 0 * 16));                  \
+        len -= 128;                                                                        \
+        xmm1 = _mm_loadu_si128((const __m128i *)(src - offset + 1 * 16));                  \
+        xmm2 = _mm_loadu_si128((const __m128i *)(src - offset + 2 * 16));                  \
+        xmm3 = _mm_loadu_si128((const __m128i *)(src - offset + 3 * 16));                  \
+        xmm4 = _mm_loadu_si128((const __m128i *)(src - offset + 4 * 16));                  \
+        xmm5 = _mm_loadu_si128((const __m128i *)(src - offset + 5 * 16));                  \
+        xmm6 = _mm_loadu_si128((const __m128i *)(src - offset + 6 * 16));                  \
+        xmm7 = _mm_loadu_si128((const __m128i *)(src - offset + 7 * 16));                  \
+        xmm8 = _mm_loadu_si128((const __m128i *)(src - offset + 8 * 16));                  \
         src = (const uint8_t *)src + 128;                                                                   \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));        \
-        _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 2 * 16), _mm_alignr_epi8(xmm3, xmm2, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 3 * 16), _mm_alignr_epi8(xmm4, xmm3, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 4 * 16), _mm_alignr_epi8(xmm5, xmm4, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 5 * 16), _mm_alignr_epi8(xmm6, xmm5, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 6 * 16), _mm_alignr_epi8(xmm7, xmm6, offset));        \
+        _mm_storeu_si128((__m128i *)(dst + 7 * 16), _mm_alignr_epi8(xmm8, xmm7, offset));        \
         dst = (uint8_t *)dst + 128;                                                                         \
     }                                                                                                       \
     tmp = len;                                                                                              \
     len = ((len - 16 + offset) & 127) + 16 - offset;                                                        \
     tmp -= len;                                                                                             \
-    src = (const uint8_t *)src + tmp;                                                                       \
-    dst = (uint8_t *)dst + tmp;                                                                             \
+    src = src + tmp;                                                                                        \
+    dst = dst + tmp;                                                                                        \
     if (len >= 32 + 16 - offset) {                                                                          \
         while (len >= 32 + 16 - offset) {                                                                   \
-            xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16));              \
             len -= 32;                                                                                      \
-            xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 1 * 16));              \
-            xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 2 * 16));              \
-            src = (const uint8_t *)src + 32;                                                                \
-            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));    \
-            _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));    \
-            dst = (uint8_t *)dst + 32;                                                                      \
+            xmm0 = _mm_loadu_si128((const __m128i *)(src - offset + 0 * 16));                               \
+            xmm1 = _mm_loadu_si128((const __m128i *)(src - offset + 1 * 16));                               \
+            xmm2 = _mm_loadu_si128((const __m128i *)(src - offset + 2 * 16));                               \
+            src = src + 32;                                                                                 \
+            _mm_storeu_si128((__m128i *)(dst + 0 * 16), _mm_alignr_epi8(xmm1, xmm0, offset));               \
+            _mm_storeu_si128((__m128i *)(dst + 1 * 16), _mm_alignr_epi8(xmm2, xmm1, offset));               \
+            dst = dst + 32;                                                                                 \
         }                                                                                                   \
         tmp = len;                                                                                          \
         len = ((len - 16 + offset) & 31) + 16 - offset;                                                     \
         tmp -= len;                                                                                         \
-        src = (const uint8_t *)src + tmp;                                                                   \
-        dst = (uint8_t *)dst + tmp;                                                                         \
+        src = src + tmp;                                                                                    \
+        dst = dst + tmp;                                                                                    \
     }                                                                                                       \
 })
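
(For reference, since the hunk above is dense: the loop is the SSSE3 PALIGNR
scheme. Loads are taken 16 bytes apart starting "offset" bytes before src, and
_mm_alignr_epi8 concatenates each adjacent pair and shifts right by "offset"
bytes, so every store writes 16 contiguous source bytes. A minimal standalone
sketch of one such step, with the shift hard-coded to 5 because
_mm_alignr_epi8 needs a compile-time constant -- the helper name is
illustrative, not from the patch:

    #include <stdint.h>
    #include <tmmintrin.h>   /* SSSE3: _mm_alignr_epi8 */

    /* Copy src[0..15] to dst[0..15] when loads must start 5 bytes
     * early; the caller must guarantee src - 5 is readable, as the
     * macro above arranges by construction. */
    static inline void
    copy16_back5(uint8_t *dst, const uint8_t *src)
    {
        __m128i lo = _mm_loadu_si128((const __m128i *)(src - 5));
        __m128i hi = _mm_loadu_si128((const __m128i *)(src - 5 + 16));
        /* (hi:lo) shifted right by 5 bytes == src[0..15] */
        _mm_storeu_si128((__m128i *)dst, _mm_alignr_epi8(hi, lo, 5));
    }

The constant-shift requirement is also why this lives in a macro rather than a
function: each possible offset needs its own PALIGNR immediate.)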
 
@@ -491,12 +499,14 @@  rte_mov256(uint8_t *dst, const uint8_t *src)
 })
 
 static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy(void *_dst, const void *_src, size_t n)
 {
 	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
-	uintptr_t dstu = (uintptr_t)dst;
-	uintptr_t srcu = (uintptr_t)src;
-	void *ret = dst;
+	const uint8_t *src  = (const uint8_t *)_src;
+	uint8_t *dst = (uint8_t *)_dst;
+	uintptr_t dstu = (uintptr_t)_dst;
+	uintptr_t srcu = (uintptr_t)_src;
+	void *ret = _dst;
 	size_t dstofss;
 	size_t srcofs;
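
(The whole patch follows the pattern visible in this hunk: keep the public
void * prototype, alias the arguments once as byte pointers, and return the
untouched _dst. A condensed sketch of the shape -- copy_small is a
hypothetical helper, assuming 16 <= n <= 32 so the two 16-byte moves cover the
range, exactly as in the n <= 32 branch below:

    static inline void *
    copy_small(void *_dst, const void *_src, size_t n)
    {
        uint8_t *dst = (uint8_t *)_dst;
        const uint8_t *src = (const uint8_t *)_src;

        rte_mov16(dst, src);                   /* no casts at call sites */
        rte_mov16(dst - 16 + n, src - 16 + n);
        return _dst;                           /* original pointer */
    }

Aliasing once up front is what lets every rte_mov* call and pointer bump in
the body drop its per-call cast.)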
 
@@ -529,61 +539,61 @@  rte_memcpy(void *dst, const void *src, size_t n)
 	 * Fast way when copy size doesn't exceed 512 bytes
 	 */
 	if (n <= 32) {
-		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+		rte_mov16(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
 		return ret;
 	}
 	if (n <= 48) {
-		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+		rte_mov32(dst, src);
+		rte_mov16(dst - 16 + n, src - 16 + n);
 		return ret;
 	}
 	if (n <= 64) {
-		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+		rte_mov32(dst, src);
+		rte_mov16(dst + 32, src + 32);
+		rte_mov16(dst - 16 + n, src - 16 + n);
 		return ret;
 	}
-	if (n <= 128) {
+	if (n <= 128)
 		goto COPY_BLOCK_128_BACK15;
-	}
+
 	if (n <= 512) {
 		if (n >= 256) {
 			n -= 256;
-			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
-			src = (const uint8_t *)src + 256;
-			dst = (uint8_t *)dst + 256;
+			rte_mov128(dst, src);
+			rte_mov128(dst + 128, src + 128);
+			src = src + 256;
+			dst = dst + 256;
 		}
 COPY_BLOCK_255_BACK15:
 		if (n >= 128) {
 			n -= 128;
-			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 128;
-			dst = (uint8_t *)dst + 128;
+			rte_mov128(dst, src);
+			src = src + 128;
+			dst = dst + 128;
 		}
 COPY_BLOCK_128_BACK15:
 		if (n >= 64) {
 			n -= 64;
-			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 64;
-			dst = (uint8_t *)dst + 64;
+			rte_mov64(dst, src);
+			src = src + 64;
+			dst = dst + 64;
 		}
 COPY_BLOCK_64_BACK15:
 		if (n >= 32) {
 			n -= 32;
-			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-			src = (const uint8_t *)src + 32;
-			dst = (uint8_t *)dst + 32;
+			rte_mov32(dst, src);
+			src = src + 32;
+			dst = dst + 32;
 		}
 		if (n > 16) {
-			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+			rte_mov16(dst, src);
+			rte_mov16(dst - 16 + n, src - 16 + n);
 			return ret;
 		}
-		if (n > 0) {
-			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-		}
+		if (n > 0)
+			rte_mov16(dst - 16 + n, src - 16 + n);
+
 		return ret;
 	}
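
(A worked example of the overlapping-tail trick these branches rely on, say
n = 40 in the n <= 48 branch: rte_mov32(dst, src) writes bytes 0..31, and
rte_mov16(dst - 16 + n, src - 16 + n) writes bytes 24..39. Bytes 24..31 are
stored twice with identical data, which is harmless under memcpy's no-overlap
contract and avoids any byte-granular tail loop. The COPY_BLOCK_*_BACK15
labels chain the same idea: each stage peels off the largest block that still
fits and falls through, and the final 16-byte store is always anchored at
dst + n - 16.)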
 
@@ -595,9 +605,9 @@  COPY_BLOCK_64_BACK15:
 	 */
 	dstofss = 16 - ((uintptr_t)dst & 0x0F) + 16;
 	n -= dstofss;
-	rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-	src = (const uint8_t *)src + dstofss;
-	dst = (uint8_t *)dst + dstofss;
+	rte_mov32(dst, src);
+	src = src + dstofss;
+	dst = dst + dstofss;
 	srcofs = ((uintptr_t)src & 0x0F);
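
(To make the alignment arithmetic concrete: with ((uintptr_t)dst & 0x0F) == 5,
dstofss = 16 - 5 + 16 = 27; rte_mov32 writes 32 bytes, both pointers advance
by 27, and dst is then 16-byte aligned (5 + 27 = 32). For an already-aligned
dst the expression gives 32, so the head copy is always between 17 and 32
bytes and never advances past what rte_mov32 wrote. srcofs, taken after the
advance, is what the code below uses to choose between the aligned 256-byte
loop and the PALIGNR macro shown earlier.)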
 
 	/**
@@ -608,9 +618,9 @@  COPY_BLOCK_64_BACK15:
 		 * Copy 256-byte blocks
 		 */
 		for (; n >= 256; n -= 256) {
-			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
-			dst = (uint8_t *)dst + 256;
-			src = (const uint8_t *)src + 256;
+			rte_mov256(dst, src);
+			dst = dst + 256;
+			src = src + 256;
 		}
 
 		/**