From: Naohiro Tamura <naohirot@fujitsu.com>
To: libc-alpha@sourceware.org
Cc: Naohiro Tamura <naohirot@jp.fujitsu.com>
Subject: [PATCH v2 3/6] aarch64: Added optimized memcpy and memmove for A64FX
Date: Wed, 12 May 2021 09:28:09 +0000
Message-ID: <20210512092809.901182-1-naohirot@fujitsu.com>
In-Reply-To: <20210512092308.900998-1-naohirot@fujitsu.com>

From: Naohiro Tamura <naohirot@jp.fujitsu.com>

This patch optimizes the performance of memcpy/memmove for A64FX [1],
which implements ARMv8-A SVE and has a 64KB L1 cache per core and an
8MB L2 cache per NUMA node.

The performance optimization makes use of the Scalable Vector
Extension (SVE) registers together with several techniques such as
loop unrolling, memory access alignment, cache zero fill, and
software pipelining.
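
For illustration only (this is not part of the patch), here is a
minimal C sketch of the software-pipelining structure used by the
unrolled copy loop: the loads of the next block are issued together
with the stores of the current block so that the load and store
streams overlap.  The names copy_pipelined and BLOCK are hypothetical;
BLOCK stands in for vector_length * 8, and each memcpy () call stands
in for a group of SVE ld1b/st1b instructions.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BLOCK 64  /* stands in for vector_length * 8 */

    static void
    copy_pipelined (uint8_t *dst, const uint8_t *src, size_t n)
    {
      uint8_t cur[BLOCK], next[BLOCK];

      if (n >= BLOCK)
        {
          memcpy (cur, src, BLOCK);       /* prologue: load block 0 */
          src += BLOCK;
          n -= BLOCK;
          while (n >= BLOCK)
            {
              memcpy (next, src, BLOCK);  /* load block i+1 ...        */
              memcpy (dst, cur, BLOCK);   /* ... while storing block i */
              memcpy (cur, next, BLOCK);
              dst += BLOCK;
              src += BLOCK;
              n -= BLOCK;
            }
          memcpy (dst, cur, BLOCK);       /* epilogue: store last block */
          dst += BLOCK;
        }
      memcpy (dst, src, n);               /* copy the remaining tail */
    }

In the large-copy path the assembly additionally issues "dc zva" on
destination cache lines a fixed distance (ZF_DIST) ahead of the store
stream, so the stores do not have to fetch those lines from memory
first; this is the cache zero fill mentioned above.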

The SVE assembly code for memcpy/memmove is implemented as Vector
Length Agnostic (VLA) code, so in principle it can run on any SoC that
supports the ARMv8-A SVE standard.
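
As background only (not part of the patch, which is hand-written
assembly), the Vector Length Agnostic property can be illustrated with
ACLE SVE intrinsics: the same source adapts at run time to the vector
length reported by svcntb (), and the whilelt predicate handles the
tail.  The function name vla_copy is purely illustrative.

    #include <arm_sve.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Build with e.g. -O2 -march=armv8.2-a+sve.  */
    void
    vla_copy (uint8_t *dst, const uint8_t *src, size_t n)
    {
      /* svcntb () returns the SVE vector length in bytes at run time,
         so the same binary works for 128-, 256- or 512-bit SVE.  */
      for (size_t i = 0; i < n; i += svcntb ())
        {
          svbool_t pg = svwhilelt_b8 ((uint64_t) i, (uint64_t) n);
          svuint8_t v = svld1_u8 (pg, src + i);
          svst1_u8 (pg, dst + i, v);
        }
    }

A call such as vla_copy (dst, src, 100) copies 100 bytes regardless of
the hardware vector length.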

We confirmed that all test cases pass by running 'make check' and
'make xcheck', not only on A64FX but also on ThunderX2.

We also confirmed with 'make bench' that the SVE 512-bit vector
implementation is roughly 4 times faster than the Advanced SIMD
128-bit implementation and 8 times faster than the 64-bit scalar
implementation.

[1] https://github.com/fujitsu/A64FX
---
 manual/tunables.texi                          |   3 +-
 sysdeps/aarch64/multiarch/Makefile            |   2 +-
 sysdeps/aarch64/multiarch/ifunc-impl-list.c   |   8 +-
 sysdeps/aarch64/multiarch/init-arch.h         |   4 +-
 sysdeps/aarch64/multiarch/memcpy.c            |  12 +-
 sysdeps/aarch64/multiarch/memcpy_a64fx.S      | 405 ++++++++++++++++++
 sysdeps/aarch64/multiarch/memmove.c           |  12 +-
 .../unix/sysv/linux/aarch64/cpu-features.c    |   4 +
 .../unix/sysv/linux/aarch64/cpu-features.h    |   4 +
 9 files changed, 446 insertions(+), 8 deletions(-)
 create mode 100644 sysdeps/aarch64/multiarch/memcpy_a64fx.S

diff --git a/manual/tunables.texi b/manual/tunables.texi
index 6de647b426..fe7c1313cc 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -454,7 +454,8 @@ This tunable is specific to powerpc, powerpc64 and powerpc64le.
 The @code{glibc.cpu.name=xxx} tunable allows the user to tell @theglibc{} to
 assume that the CPU is @code{xxx} where xxx may have one of these values:
 @code{generic}, @code{falkor}, @code{thunderxt88}, @code{thunderx2t99},
-@code{thunderx2t99p1}, @code{ares}, @code{emag}, @code{kunpeng}.
+@code{thunderx2t99p1}, @code{ares}, @code{emag}, @code{kunpeng},
+@code{a64fx}.
 
 This tunable is specific to aarch64.
 @end deftp
diff --git a/sysdeps/aarch64/multiarch/Makefile b/sysdeps/aarch64/multiarch/Makefile
index dc3efffb36..04c3f17121 100644
--- a/sysdeps/aarch64/multiarch/Makefile
+++ b/sysdeps/aarch64/multiarch/Makefile
@@ -1,6 +1,6 @@
 ifeq ($(subdir),string)
 sysdep_routines += memcpy_generic memcpy_advsimd memcpy_thunderx memcpy_thunderx2 \
-		   memcpy_falkor \
+		   memcpy_falkor memcpy_a64fx \
 		   memset_generic memset_falkor memset_emag memset_kunpeng \
 		   memchr_generic memchr_nosimd \
 		   strlen_mte strlen_asimd
diff --git a/sysdeps/aarch64/multiarch/ifunc-impl-list.c b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
index 99a8c68aac..911393565c 100644
--- a/sysdeps/aarch64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/aarch64/multiarch/ifunc-impl-list.c
@@ -25,7 +25,7 @@
 #include <stdio.h>
 
 /* Maximum number of IFUNC implementations.  */
-#define MAX_IFUNC	4
+#define MAX_IFUNC	7
 
 size_t
 __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
@@ -43,12 +43,18 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, memcpy, !bti, __memcpy_thunderx2)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_falkor)
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_simd)
+#if HAVE_AARCH64_SVE_ASM
+	      IFUNC_IMPL_ADD (array, i, memcpy, sve, __memcpy_a64fx)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_generic))
   IFUNC_IMPL (i, name, memmove,
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_thunderx)
 	      IFUNC_IMPL_ADD (array, i, memmove, !bti, __memmove_thunderx2)
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_falkor)
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_simd)
+#if HAVE_AARCH64_SVE_ASM
+	      IFUNC_IMPL_ADD (array, i, memmove, sve, __memmove_a64fx)
+#endif
 	      IFUNC_IMPL_ADD (array, i, memmove, 1, __memmove_generic))
   IFUNC_IMPL (i, name, memset,
 	      /* Enable this on non-falkor processors too so that other cores
diff --git a/sysdeps/aarch64/multiarch/init-arch.h b/sysdeps/aarch64/multiarch/init-arch.h
index a167699e74..6d92c1bcff 100644
--- a/sysdeps/aarch64/multiarch/init-arch.h
+++ b/sysdeps/aarch64/multiarch/init-arch.h
@@ -33,4 +33,6 @@
   bool __attribute__((unused)) bti =					      \
     HAVE_AARCH64_BTI && GLRO(dl_aarch64_cpu_features).bti;		      \
   bool __attribute__((unused)) mte =					      \
-    MTE_ENABLED ();
+    MTE_ENABLED ();							      \
+  bool __attribute__((unused)) sve =					      \
+    GLRO(dl_aarch64_cpu_features).sve;
diff --git a/sysdeps/aarch64/multiarch/memcpy.c b/sysdeps/aarch64/multiarch/memcpy.c
index 0e0a5cbcfb..d90ee51ffc 100644
--- a/sysdeps/aarch64/multiarch/memcpy.c
+++ b/sysdeps/aarch64/multiarch/memcpy.c
@@ -33,6 +33,9 @@ extern __typeof (__redirect_memcpy) __memcpy_simd attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_thunderx attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_thunderx2 attribute_hidden;
 extern __typeof (__redirect_memcpy) __memcpy_falkor attribute_hidden;
+#if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memcpy) __memcpy_a64fx attribute_hidden;
+#endif
 
 libc_ifunc (__libc_memcpy,
             (IS_THUNDERX (midr)
@@ -44,8 +47,13 @@ libc_ifunc (__libc_memcpy,
 		  : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
 		     || IS_NEOVERSE_V1 (midr)
 		     ? __memcpy_simd
-		     : __memcpy_generic)))));
-
+#if HAVE_AARCH64_SVE_ASM
+                     : (IS_A64FX (midr)
+                        ? __memcpy_a64fx
+                        : __memcpy_generic))))));
+#else
+                     : __memcpy_generic)))));
+#endif
 # undef memcpy
 strong_alias (__libc_memcpy, memcpy);
 #endif
diff --git a/sysdeps/aarch64/multiarch/memcpy_a64fx.S b/sysdeps/aarch64/multiarch/memcpy_a64fx.S
new file mode 100644
index 0000000000..e28afd708f
--- /dev/null
+++ b/sysdeps/aarch64/multiarch/memcpy_a64fx.S
@@ -0,0 +1,405 @@
+/* Optimized memcpy for Fujitsu A64FX processor.
+   Copyright (C) 2012-2021 Free Software Foundation, Inc.
+
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+
+#if HAVE_AARCH64_SVE_ASM
+#if IS_IN (libc)
+# define MEMCPY __memcpy_a64fx
+# define MEMMOVE __memmove_a64fx
+
+/* Assumptions:
+ *
+ * ARMv8.2-a, AArch64, unaligned accesses, sve
+ *
+ */
+
+#define L2_SIZE         (8*1024*1024)/2 // L2 8MB/2
+#define CACHE_LINE_SIZE 256
+#define ZF_DIST         (CACHE_LINE_SIZE * 21)  // Zerofill distance
+#define dest            x0
+#define src             x1
+#define n               x2      // size
+#define tmp1            x3
+#define tmp2            x4
+#define tmp3            x5
+#define rest            x6
+#define dest_ptr        x7
+#define src_ptr         x8
+#define vector_length   x9
+#define cl_remainder    x10     // CACHE_LINE_SIZE remainder
+
+    .arch armv8.2-a+sve
+
+    .macro dc_zva times
+    dc          zva, tmp1
+    add         tmp1, tmp1, CACHE_LINE_SIZE
+    .if \times-1
+    dc_zva "(\times-1)"
+    .endif
+    .endm
+
+    .macro ld1b_unroll8
+    ld1b        z0.b, p0/z, [src_ptr, #0, mul vl]
+    ld1b        z1.b, p0/z, [src_ptr, #1, mul vl]
+    ld1b        z2.b, p0/z, [src_ptr, #2, mul vl]
+    ld1b        z3.b, p0/z, [src_ptr, #3, mul vl]
+    ld1b        z4.b, p0/z, [src_ptr, #4, mul vl]
+    ld1b        z5.b, p0/z, [src_ptr, #5, mul vl]
+    ld1b        z6.b, p0/z, [src_ptr, #6, mul vl]
+    ld1b        z7.b, p0/z, [src_ptr, #7, mul vl]
+    .endm
+
+    .macro stld1b_unroll4a
+    st1b        z0.b, p0,   [dest_ptr, #0, mul vl]
+    st1b        z1.b, p0,   [dest_ptr, #1, mul vl]
+    ld1b        z0.b, p0/z, [src_ptr,  #0, mul vl]
+    ld1b        z1.b, p0/z, [src_ptr,  #1, mul vl]
+    st1b        z2.b, p0,   [dest_ptr, #2, mul vl]
+    st1b        z3.b, p0,   [dest_ptr, #3, mul vl]
+    ld1b        z2.b, p0/z, [src_ptr,  #2, mul vl]
+    ld1b        z3.b, p0/z, [src_ptr,  #3, mul vl]
+    .endm
+
+    .macro stld1b_unroll4b
+    st1b        z4.b, p0,   [dest_ptr, #4, mul vl]
+    st1b        z5.b, p0,   [dest_ptr, #5, mul vl]
+    ld1b        z4.b, p0/z, [src_ptr,  #4, mul vl]
+    ld1b        z5.b, p0/z, [src_ptr,  #5, mul vl]
+    st1b        z6.b, p0,   [dest_ptr, #6, mul vl]
+    st1b        z7.b, p0,   [dest_ptr, #7, mul vl]
+    ld1b        z6.b, p0/z, [src_ptr,  #6, mul vl]
+    ld1b        z7.b, p0/z, [src_ptr,  #7, mul vl]
+    .endm
+
+    .macro stld1b_unroll8
+    stld1b_unroll4a
+    stld1b_unroll4b
+    .endm
+
+    .macro st1b_unroll8
+    st1b        z0.b, p0,   [dest_ptr, #0, mul vl]
+    st1b        z1.b, p0,   [dest_ptr, #1, mul vl]
+    st1b        z2.b, p0,   [dest_ptr, #2, mul vl]
+    st1b        z3.b, p0,   [dest_ptr, #3, mul vl]
+    st1b        z4.b, p0,   [dest_ptr, #4, mul vl]
+    st1b        z5.b, p0,   [dest_ptr, #5, mul vl]
+    st1b        z6.b, p0,   [dest_ptr, #6, mul vl]
+    st1b        z7.b, p0,   [dest_ptr, #7, mul vl]
+    .endm
+
+    .macro shortcut_for_small_size exit
+    // if rest <= vector_length * 2
+    whilelo     p0.b, xzr, n
+    whilelo     p1.b, vector_length, n
+    b.last      1f
+    ld1b        z0.b, p0/z, [src, #0, mul vl]
+    ld1b        z1.b, p1/z, [src, #1, mul vl]
+    st1b        z0.b, p0, [dest, #0, mul vl]
+    st1b        z1.b, p1, [dest, #1, mul vl]
+    ret
+1:  // if rest > vector_length * 8
+    cmp         n, vector_length, lsl 3 // vector_length * 8
+    b.hi        \exit
+    // if rest <= vector_length * 4
+    lsl         tmp1, vector_length, 1  // vector_length * 2
+    whilelo     p2.b, tmp1, n
+    incb        tmp1
+    whilelo     p3.b, tmp1, n
+    b.last      1f
+    ld1b        z0.b, p0/z, [src, #0, mul vl]
+    ld1b        z1.b, p1/z, [src, #1, mul vl]
+    ld1b        z2.b, p2/z, [src, #2, mul vl]
+    ld1b        z3.b, p3/z, [src, #3, mul vl]
+    st1b        z0.b, p0, [dest, #0, mul vl]
+    st1b        z1.b, p1, [dest, #1, mul vl]
+    st1b        z2.b, p2, [dest, #2, mul vl]
+    st1b        z3.b, p3, [dest, #3, mul vl]
+    ret
+1:  // if rest <= vector_length * 8
+    lsl         tmp1, vector_length, 2  // vector_length * 4
+    whilelo     p4.b, tmp1, n
+    incb        tmp1
+    whilelo     p5.b, tmp1, n
+    b.last      1f
+    ld1b        z0.b, p0/z, [src, #0, mul vl]
+    ld1b        z1.b, p1/z, [src, #1, mul vl]
+    ld1b        z2.b, p2/z, [src, #2, mul vl]
+    ld1b        z3.b, p3/z, [src, #3, mul vl]
+    ld1b        z4.b, p4/z, [src, #4, mul vl]
+    ld1b        z5.b, p5/z, [src, #5, mul vl]
+    st1b        z0.b, p0, [dest, #0, mul vl]
+    st1b        z1.b, p1, [dest, #1, mul vl]
+    st1b        z2.b, p2, [dest, #2, mul vl]
+    st1b        z3.b, p3, [dest, #3, mul vl]
+    st1b        z4.b, p4, [dest, #4, mul vl]
+    st1b        z5.b, p5, [dest, #5, mul vl]
+    ret
+1:  lsl         tmp1, vector_length, 2  // vector_length * 4
+    incb        tmp1                    // vector_length * 5
+    incb        tmp1                    // vector_length * 6
+    whilelo     p6.b, tmp1, n
+    incb        tmp1
+    whilelo     p7.b, tmp1, n
+    ld1b        z0.b, p0/z, [src, #0, mul vl]
+    ld1b        z1.b, p1/z, [src, #1, mul vl]
+    ld1b        z2.b, p2/z, [src, #2, mul vl]
+    ld1b        z3.b, p3/z, [src, #3, mul vl]
+    ld1b        z4.b, p4/z, [src, #4, mul vl]
+    ld1b        z5.b, p5/z, [src, #5, mul vl]
+    ld1b        z6.b, p6/z, [src, #6, mul vl]
+    ld1b        z7.b, p7/z, [src, #7, mul vl]
+    st1b        z0.b, p0, [dest, #0, mul vl]
+    st1b        z1.b, p1, [dest, #1, mul vl]
+    st1b        z2.b, p2, [dest, #2, mul vl]
+    st1b        z3.b, p3, [dest, #3, mul vl]
+    st1b        z4.b, p4, [dest, #4, mul vl]
+    st1b        z5.b, p5, [dest, #5, mul vl]
+    st1b        z6.b, p6, [dest, #6, mul vl]
+    st1b        z7.b, p7, [dest, #7, mul vl]
+    ret
+    .endm
+
+ENTRY (MEMCPY)
+
+    PTR_ARG (0)
+    PTR_ARG (1)
+    SIZE_ARG (2)
+
+L(memcpy):
+    cntb        vector_length
+    // shortcut for less than vector_length * 8
+    // gives a free ptrue to p0.b for n >= vector_length
+    shortcut_for_small_size L(vl_agnostic)
+    // end of shortcut
+
+L(vl_agnostic): // VL Agnostic
+    mov         rest, n
+    mov         dest_ptr, dest
+    mov         src_ptr, src
+    // if rest >= L2_SIZE && vector_length == 64 then L(L2)
+    mov         tmp1, 64
+    cmp         rest, L2_SIZE
+    ccmp        vector_length, tmp1, 0, cs
+    b.eq        L(L2)
+
+L(unroll8): // unrolling and software pipeline
+    lsl         tmp1, vector_length, 3  // vector_length * 8
+    .p2align 3
+    cmp         rest, tmp1
+    b.cc        L(last)
+    ld1b_unroll8
+    add         src_ptr, src_ptr, tmp1
+    sub         rest, rest, tmp1
+    cmp         rest, tmp1
+    b.cc        2f
+    .p2align 3
+1:  stld1b_unroll8
+    add         dest_ptr, dest_ptr, tmp1
+    add         src_ptr, src_ptr, tmp1
+    sub         rest, rest, tmp1
+    cmp         rest, tmp1
+    b.ge        1b
+2:  st1b_unroll8
+    add         dest_ptr, dest_ptr, tmp1
+
+    .p2align 3
+L(last):
+    whilelo     p0.b, xzr, rest
+    whilelo     p1.b, vector_length, rest
+    b.last      1f
+    ld1b        z0.b, p0/z, [src_ptr, #0, mul vl]
+    ld1b        z1.b, p1/z, [src_ptr, #1, mul vl]
+    st1b        z0.b, p0, [dest_ptr, #0, mul vl]
+    st1b        z1.b, p1, [dest_ptr, #1, mul vl]
+    ret
+1:  lsl         tmp1, vector_length, 1  // vector_length * 2
+    whilelo     p2.b, tmp1, rest
+    incb        tmp1
+    whilelo     p3.b, tmp1, rest
+    b.last      1f
+    ld1b        z0.b, p0/z, [src_ptr, #0, mul vl]
+    ld1b        z1.b, p1/z, [src_ptr, #1, mul vl]
+    ld1b        z2.b, p2/z, [src_ptr, #2, mul vl]
+    ld1b        z3.b, p3/z, [src_ptr, #3, mul vl]
+    st1b        z0.b, p0, [dest_ptr, #0, mul vl]
+    st1b        z1.b, p1, [dest_ptr, #1, mul vl]
+    st1b        z2.b, p2, [dest_ptr, #2, mul vl]
+    st1b        z3.b, p3, [dest_ptr, #3, mul vl]
+    ret
+1:  lsl         tmp1, vector_length, 2  // vector_length * 4
+    whilelo     p4.b, tmp1, rest
+    incb        tmp1
+    whilelo     p5.b, tmp1, rest
+    incb        tmp1
+    whilelo     p6.b, tmp1, rest
+    incb        tmp1
+    whilelo     p7.b, tmp1, rest
+    ld1b        z0.b, p0/z, [src_ptr, #0, mul vl]
+    ld1b        z1.b, p1/z, [src_ptr, #1, mul vl]
+    ld1b        z2.b, p2/z, [src_ptr, #2, mul vl]
+    ld1b        z3.b, p3/z, [src_ptr, #3, mul vl]
+    ld1b        z4.b, p4/z, [src_ptr, #4, mul vl]
+    ld1b        z5.b, p5/z, [src_ptr, #5, mul vl]
+    ld1b        z6.b, p6/z, [src_ptr, #6, mul vl]
+    ld1b        z7.b, p7/z, [src_ptr, #7, mul vl]
+    st1b        z0.b, p0, [dest_ptr, #0, mul vl]
+    st1b        z1.b, p1, [dest_ptr, #1, mul vl]
+    st1b        z2.b, p2, [dest_ptr, #2, mul vl]
+    st1b        z3.b, p3, [dest_ptr, #3, mul vl]
+    st1b        z4.b, p4, [dest_ptr, #4, mul vl]
+    st1b        z5.b, p5, [dest_ptr, #5, mul vl]
+    st1b        z6.b, p6, [dest_ptr, #6, mul vl]
+    st1b        z7.b, p7, [dest_ptr, #7, mul vl]
+    ret
+
+L(L2):
+    // align dest address at CACHE_LINE_SIZE byte boundary
+    mov         tmp1, CACHE_LINE_SIZE
+    ands        tmp2, dest_ptr, CACHE_LINE_SIZE - 1
+    // if cl_remainder == 0
+    b.eq        L(L2_dc_zva)
+    sub         cl_remainder, tmp1, tmp2
+    // process remainder until the first CACHE_LINE_SIZE boundary
+    whilelo     p1.b, xzr, cl_remainder        // keep p0.b all true
+    whilelo     p2.b, vector_length, cl_remainder
+    b.last      1f
+    ld1b        z1.b, p1/z, [src_ptr, #0, mul vl]
+    ld1b        z2.b, p2/z, [src_ptr, #1, mul vl]
+    st1b        z1.b, p1, [dest_ptr, #0, mul vl]
+    st1b        z2.b, p2, [dest_ptr, #1, mul vl]
+    b           2f
+1:  lsl         tmp1, vector_length, 1  // vector_length * 2
+    whilelo     p3.b, tmp1, cl_remainder
+    incb        tmp1
+    whilelo     p4.b, tmp1, cl_remainder
+    ld1b        z1.b, p1/z, [src_ptr, #0, mul vl]
+    ld1b        z2.b, p2/z, [src_ptr, #1, mul vl]
+    ld1b        z3.b, p3/z, [src_ptr, #2, mul vl]
+    ld1b        z4.b, p4/z, [src_ptr, #3, mul vl]
+    st1b        z1.b, p1, [dest_ptr, #0, mul vl]
+    st1b        z2.b, p2, [dest_ptr, #1, mul vl]
+    st1b        z3.b, p3, [dest_ptr, #2, mul vl]
+    st1b        z4.b, p4, [dest_ptr, #3, mul vl]
+2:  add         dest_ptr, dest_ptr, cl_remainder
+    add         src_ptr, src_ptr, cl_remainder
+    sub         rest, rest, cl_remainder
+
+L(L2_dc_zva):
+    // zero fill
+    and         tmp1, dest, 0xffffffffffffff
+    and         tmp2, src, 0xffffffffffffff
+    subs        tmp1, tmp1, tmp2     // diff
+    b.ge        1f
+    neg         tmp1, tmp1
+1:  mov         tmp3, ZF_DIST + CACHE_LINE_SIZE * 2
+    cmp         tmp1, tmp3
+    b.lo        L(unroll8)
+    mov         tmp1, dest_ptr
+    dc_zva      (ZF_DIST / CACHE_LINE_SIZE) - 1
+    // unroll
+    ld1b_unroll8        // this line has to be after "b.lo L(unroll8)"
+    add         src_ptr, src_ptr, CACHE_LINE_SIZE * 2
+    sub         rest, rest, CACHE_LINE_SIZE * 2
+    mov         tmp1, ZF_DIST
+    .p2align 3
+1:  stld1b_unroll4a
+    add         tmp2, dest_ptr, tmp1    // dest_ptr + ZF_DIST
+    dc          zva, tmp2
+    stld1b_unroll4b
+    add         tmp2, tmp2, CACHE_LINE_SIZE
+    dc          zva, tmp2
+    add         dest_ptr, dest_ptr, CACHE_LINE_SIZE * 2
+    add         src_ptr, src_ptr, CACHE_LINE_SIZE * 2
+    sub         rest, rest, CACHE_LINE_SIZE * 2
+    cmp         rest, tmp3      // ZF_DIST + CACHE_LINE_SIZE * 2
+    b.ge        1b
+    st1b_unroll8
+    add         dest_ptr, dest_ptr, CACHE_LINE_SIZE * 2
+    b           L(unroll8)
+
+END (MEMCPY)
+libc_hidden_builtin_def (MEMCPY)
+
+
+ENTRY (MEMMOVE)
+
+    PTR_ARG (0)
+    PTR_ARG (1)
+    SIZE_ARG (2)
+
+    // remove tag address
+    // dest has to be immutable because it is the return value
+    // src has to be immutable because it is used in L(bwd_last)
+    and         tmp2, dest, 0xffffffffffffff    // save dest_notag into tmp2
+    and         tmp3, src, 0xffffffffffffff     // save src_notag into tmp3
+    cmp         n, 0
+    ccmp        tmp2, tmp3, 4, ne
+    b.ne        1f
+    ret
+1:  cntb        vector_length
+    // shortcut for less than vector_length * 8
+    // gives a free ptrue to p0.b for n >= vector_length
+    // tmp2 and tmp3 should not be used in this macro to keep notag addresses
+    shortcut_for_small_size L(dispatch)
+    // end of shortcut
+
+L(dispatch):
+    // tmp2 = dest_notag, tmp3 = src_notag
+    // diff = dest_notag - src_notag
+    sub         tmp1, tmp2, tmp3
+    // if diff <= 0 || diff >= n then memcpy
+    cmp         tmp1, 0
+    ccmp        tmp1, n, 2, gt
+    b.cs        L(vl_agnostic)
+
+L(bwd_start):
+    mov         rest, n
+    add         dest_ptr, dest, n       // dest_end
+    add         src_ptr, src, n         // src_end
+
+L(bwd_unroll8): // unrolling and software pipeline
+    lsl         tmp1, vector_length, 3  // vector_length * 8
+    .p2align 3
+    cmp         rest, tmp1
+    b.cc        L(bwd_last)
+    sub         src_ptr, src_ptr, tmp1
+    ld1b_unroll8
+    sub         rest, rest, tmp1
+    cmp         rest, tmp1
+    b.cc        2f
+    .p2align 3
+1:  sub         src_ptr, src_ptr, tmp1
+    sub         dest_ptr, dest_ptr, tmp1
+    stld1b_unroll8
+    sub         rest, rest, tmp1
+    cmp         rest, tmp1
+    b.ge        1b
+2:  sub         dest_ptr, dest_ptr, tmp1
+    st1b_unroll8
+
+L(bwd_last):
+    mov         dest_ptr, dest
+    mov         src_ptr, src
+    b           L(last)
+
+END (MEMMOVE)
+libc_hidden_builtin_def (MEMMOVE)
+#endif /* IS_IN (libc) */
+#endif /* HAVE_AARCH64_SVE_ASM */
diff --git a/sysdeps/aarch64/multiarch/memmove.c b/sysdeps/aarch64/multiarch/memmove.c
index 12d77818a9..be2d35a251 100644
--- a/sysdeps/aarch64/multiarch/memmove.c
+++ b/sysdeps/aarch64/multiarch/memmove.c
@@ -33,6 +33,9 @@ extern __typeof (__redirect_memmove) __memmove_simd attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_thunderx attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_thunderx2 attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_falkor attribute_hidden;
+#if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memmove) __memmove_a64fx attribute_hidden;
+#endif
 
 libc_ifunc (__libc_memmove,
             (IS_THUNDERX (midr)
@@ -44,8 +47,13 @@ libc_ifunc (__libc_memmove,
 		  : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
 		     || IS_NEOVERSE_V1 (midr)
 		     ? __memmove_simd
-		     : __memmove_generic)))));
-
+#if HAVE_AARCH64_SVE_ASM
+                     : (IS_A64FX (midr)
+                        ? __memmove_a64fx
+                        : __memmove_generic))))));
+#else
+                        : __memmove_generic)))));
+#endif
 # undef memmove
 strong_alias (__libc_memmove, memmove);
 #endif
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
index db6aa3516c..6206a2f618 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -46,6 +46,7 @@ static struct cpu_list cpu_list[] = {
       {"ares",		 0x411FD0C0},
       {"emag",		 0x503F0001},
       {"kunpeng920", 	 0x481FD010},
+      {"a64fx",		 0x460F0010},
       {"generic", 	 0x0}
 };
 
@@ -116,4 +117,7 @@ init_cpu_features (struct cpu_features *cpu_features)
 	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | MTE_ALLOWED_TAGS),
 	     0, 0, 0);
 #endif
+
+  /* Check if SVE is supported.  */
+  cpu_features->sve = GLRO (dl_hwcap) & HWCAP_SVE;
 }
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
index 3b9bfed134..2b322e5414 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
@@ -65,6 +65,9 @@
 #define IS_KUNPENG920(midr) (MIDR_IMPLEMENTOR(midr) == 'H'			   \
                         && MIDR_PARTNUM(midr) == 0xd01)
 
+#define IS_A64FX(midr) (MIDR_IMPLEMENTOR(midr) == 'F'			      \
+			&& MIDR_PARTNUM(midr) == 0x001)
+
 struct cpu_features
 {
   uint64_t midr_el1;
@@ -72,6 +75,7 @@ struct cpu_features
   bool bti;
   /* Currently, the GLIBC memory tagging tunable only defines 8 bits.  */
   uint8_t mte_state;
+  bool sve;
 };
 
 #endif /* _CPU_FEATURES_AARCH64_H  */
-- 
2.17.1


