unofficial mirror of libc-alpha@sourceware.org
From: Noah Goldstein via Libc-alpha <libc-alpha@sourceware.org>
To: libc-alpha@sourceware.org
Subject: [PATCH 5/5] x86_64: Add evex optimized bcmp implementation in bcmp-evex.S
Date: Mon, 13 Sep 2021 18:05:08 -0500	[thread overview]
Message-ID: <20210913230506.546749-5-goldstein.w.n@gmail.com> (raw)
In-Reply-To: <20210913230506.546749-1-goldstein.w.n@gmail.com>

No bug. This commit adds a new optimized bcmp implementation for evex.

The primary optimizations are 1) skipping the logic that locates the
first mismatched byte and computes its difference, since bcmp only has
to report equal/not-equal, and 2) not updating the src/dst addresses,
as the not-equal return logic does not need to be reused by the
different code paths.
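
To make the first point concrete, here is a rough scalar C sketch of
the difference in return contracts (illustrative only, not the vector
code in this patch; the function names are made up):

    #include <stddef.h>

    /* memcmp must identify the first differing byte and return its
       signed difference.  */
    int
    memcmp_sketch (const void *s1, const void *s2, size_t n)
    {
      const unsigned char *a = s1, *b = s2;
      for (size_t i = 0; i < n; i++)
        if (a[i] != b[i])
          return a[i] - b[i];
      return 0;
    }

    /* bcmp only has to report zero vs. nonzero, so any accumulated
       difference can be returned directly.  This is roughly what the
       vpxorq/vpternlogd accumulation in the patch does vector-wide.  */
    int
    bcmp_sketch (const void *s1, const void *s2, size_t n)
    {
      const unsigned char *a = s1, *b = s2;
      unsigned char acc = 0;
      for (size_t i = 0; i < n; i++)
        acc |= a[i] ^ b[i];
      return acc;
    }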

The entry alignment has been fixed at 64 bytes. In throughput-sensitive
functions, which bcmp can potentially be, frontend loop performance is
important to optimize for. This is impossible, or at least difficult,
to achieve and maintain with only a fixed 16-byte alignment.
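
As an aside (not part of the patch), the 64 comes from
ENTRY_P2ALIGN (BCMP, 6) below, i.e. a 2^6 = 64-byte boundary, matching
the 64-byte instruction cache line; with only 16-byte alignment the
entry could start 16, 32 or 48 bytes into a line and the layout of the
hot loop would shift between builds. A C-level analogy of the request,
with a hypothetical function name:

    #include <string.h>

    /* Analogy only: pin the entry point to a 64-byte boundary, the way
       ENTRY_P2ALIGN (BCMP, 6) does for the assembly entry.  */
    __attribute__ ((aligned (64)))
    int
    bcmp_entry_sketch (const void *s1, const void *s2, size_t n)
    {
      return memcmp (s1, s2, n) != 0;
    }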

test-memcmp, test-bcmp, and test-wmemcmp are all passing.
---
 sysdeps/x86_64/multiarch/bcmp-evex.S       | 305 ++++++++++++++++++++-
 sysdeps/x86_64/multiarch/ifunc-bcmp.h      |   3 +-
 sysdeps/x86_64/multiarch/ifunc-impl-list.c |   1 -
 3 files changed, 302 insertions(+), 7 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/bcmp-evex.S b/sysdeps/x86_64/multiarch/bcmp-evex.S
index ade52e8c68..1bfe824eb4 100644
--- a/sysdeps/x86_64/multiarch/bcmp-evex.S
+++ b/sysdeps/x86_64/multiarch/bcmp-evex.S
@@ -16,8 +16,305 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#ifndef MEMCMP
-# define MEMCMP	__bcmp_evex
-#endif
+#if IS_IN (libc)
+
+/* bcmp is implemented as:
+   1. Use ymm vector compares when possible. The only case where
+      vector compares are not possible is when size < VEC_SIZE
+      and loading from either s1 or s2 would cause a page cross.
+   2. Use xmm vector compare when size >= 8 bytes.
+   3. Optimistically compare up to the first 4 * VEC_SIZE one vector
+      at a time to check for early mismatches. Only do this if it is
+      guaranteed the work is not wasted.
+   4. If size is 8 * VEC_SIZE or less, unroll the loop.
+   5. Compare 4 * VEC_SIZE at a time with the aligned first memory
+      area.
+   6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
+   7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
+   8. Use 8 vector compares when size is 8 * VEC_SIZE or less.  */
+
+# include <sysdep.h>
+
+# ifndef BCMP
+#  define BCMP	__bcmp_evex
+# endif
+
+# define VMOVU	vmovdqu64
+# define VPCMP	vpcmpub
+# define VPTEST	vptestmb
+
+# define VEC_SIZE	32
+# define PAGE_SIZE	4096
+
+# define YMM0		ymm16
+# define YMM1		ymm17
+# define YMM2		ymm18
+# define YMM3		ymm19
+# define YMM4		ymm20
+# define YMM5		ymm21
+# define YMM6		ymm22
+
+
+	.section .text.evex, "ax", @progbits
+ENTRY_P2ALIGN (BCMP, 6)
+# ifdef __ILP32__
+	/* Clear the upper 32 bits.  */
+	movl	%edx, %edx
+# endif
+	cmp	$VEC_SIZE, %RDX_LP
+	jb	L(less_vec)
+
+	/* From VEC to 2 * VEC.  No branch when size == VEC_SIZE.  */
+	VMOVU	(%rsi), %YMM1
+	/* Use compare not equals to directly check for mismatch.  */
+	VPCMP	$4, (%rdi), %YMM1, %k1
+	kmovd	%k1, %eax
+	testl	%eax, %eax
+	jnz	L(return_neq0)
+
+	cmpq	$(VEC_SIZE * 2), %rdx
+	jbe	L(last_1x_vec)
+
+	/* Check second VEC no matter what.  */
+	VMOVU	VEC_SIZE(%rsi), %YMM2
+	VPCMP	$4, VEC_SIZE(%rdi), %YMM2, %k1
+	kmovd	%k1, %eax
+	testl	%eax, %eax
+	jnz	L(return_neq0)
+
+	/* Less than 4 * VEC.  */
+	cmpq	$(VEC_SIZE * 4), %rdx
+	jbe	L(last_2x_vec)
+
+	/* Check third and fourth VEC no matter what.  */
+	VMOVU	(VEC_SIZE * 2)(%rsi), %YMM3
+	VPCMP	$4, (VEC_SIZE * 2)(%rdi), %YMM3, %k1
+	kmovd	%k1, %eax
+	testl	%eax, %eax
+	jnz	L(return_neq0)
+
+	VMOVU	(VEC_SIZE * 3)(%rsi), %YMM4
+	VPCMP	$4, (VEC_SIZE * 3)(%rdi), %YMM4, %k1
+	kmovd	%k1, %eax
+	testl	%eax, %eax
+	jnz	L(return_neq0)
+
+	/* Go to 4x VEC loop.  */
+	cmpq	$(VEC_SIZE * 8), %rdx
+	ja	L(more_8x_vec)
+
+	/* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
+	   branches.  */
+
+	VMOVU	-(VEC_SIZE * 4)(%rsi, %rdx), %YMM1
+	VMOVU	-(VEC_SIZE * 3)(%rsi, %rdx), %YMM2
+	addq	%rdx, %rdi
+
+	/* Wait to load from s1 until the address has been adjusted, so the
+	   memory operands below use base + displacement addressing and
+	   avoid micro-op unlamination.  */
+
+	/* vpxor will be all 0s if s1 and s2 are equal. Otherwise it will
+	   have some 1s.  */
+	vpxorq	-(VEC_SIZE * 4)(%rdi), %YMM1, %YMM1
+	vpxorq	-(VEC_SIZE * 3)(%rdi), %YMM2, %YMM2
+
+	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %YMM3
+	vpxorq	-(VEC_SIZE * 2)(%rdi), %YMM3, %YMM3
+	/* Or together YMM1, YMM2, and YMM3 into YMM3.  */
+	vpternlogd $0xfe, %YMM1, %YMM2, %YMM3
 
-#include "memcmp-evex-movbe.S"
+	VMOVU	-(VEC_SIZE)(%rsi, %rdx), %YMM4
+	/* Ternary logic to xor -(VEC_SIZE)(%rdi) with YMM4 while oring
+	   with YMM3. Result is stored in YMM4.  */
+	vpternlogd $0xde, -(VEC_SIZE)(%rdi), %YMM3, %YMM4
+	/* Compare YMM4 with 0. If there are any 1s, s1 and s2 don't match.  */
+	VPTEST	%YMM4, %YMM4, %k1
+	kmovd	%k1, %eax
+L(return_neq0):
+	ret
+
+	/* Fits in the padding needed for the .p2align 5 before L(less_vec).  */
+L(last_1x_vec):
+	VMOVU	-(VEC_SIZE * 1)(%rsi, %rdx), %YMM1
+	VPCMP	$4, -(VEC_SIZE * 1)(%rdi, %rdx), %YMM1, %k1
+	kmovd	%k1, %eax
+	ret
+
+	/* NB: The p2align 5 here will ensure that L(loop_4x_vec) is also
+	   32-byte aligned.  */
+	.p2align 5
+L(less_vec):
+	/* Check if size is one or less. This is necessary for size = 0 but
+	   is also faster for size = 1.  */
+	cmpl	$1, %edx
+	jbe	L(one_or_less)
+
+	/* Check if loading one VEC from either s1 or s2 could cause a page
+	   cross. This can have false positives but is by far the fastest
+	   method.  */
+	movl	%edi, %eax
+	orl	%esi, %eax
+	andl	$(PAGE_SIZE - 1), %eax
+	cmpl	$(PAGE_SIZE - VEC_SIZE), %eax
+	jg	L(page_cross_less_vec)
+
+	/* No page cross possible.  */
+	VMOVU	(%rsi), %YMM2
+	VPCMP	$4, (%rdi), %YMM2, %k1
+	kmovd	%k1, %eax
+	/* Result will be zero if s1 and s2 match. Otherwise the first set
+	   bit will be the first mismatch.  */
+	bzhil	%edx, %eax, %eax
+	ret
+
+	/* Relatively cold but placed close to L(less_vec) for the 2-byte
+	   jump encoding.  */
+	.p2align 4
+L(one_or_less):
+	jb	L(zero)
+	movzbl	(%rsi), %ecx
+	movzbl	(%rdi), %eax
+	subl	%ecx, %eax
+	/* No ymm register was touched.  */
+	ret
+	/* Within the same 16-byte block as L(one_or_less).  */
+L(zero):
+	xorl	%eax, %eax
+	ret
+
+	.p2align 4
+L(last_2x_vec):
+	VMOVU	-(VEC_SIZE * 2)(%rsi, %rdx), %YMM1
+	vpxorq	-(VEC_SIZE * 2)(%rdi, %rdx), %YMM1, %YMM1
+	VMOVU	-(VEC_SIZE * 1)(%rsi, %rdx), %YMM2
+	vpternlogd $0xde, -(VEC_SIZE * 1)(%rdi, %rdx), %YMM1, %YMM2
+	VPTEST	%YMM2, %YMM2, %k1
+	kmovd	%k1, %eax
+	ret
+
+	.p2align 4
+L(more_8x_vec):
+	/* Set rdx to the end of s1 minus 4 * VEC_SIZE (the loop bound).  */
+	leaq	-(VEC_SIZE * 4)(%rdi, %rdx), %rdx
+	/* rsi stores s2 - s1. This allows the loop to update only one
+	   pointer.  */
+	subq	%rdi, %rsi
+	/* Align s1 pointer.  */
+	andq	$-VEC_SIZE, %rdi
+	/* Adjust because the first 4x VEC were already checked.  */
+	subq	$-(VEC_SIZE * 4), %rdi
+	.p2align 4
+L(loop_4x_vec):
+	VMOVU	(%rsi, %rdi), %YMM1
+	vpxorq	(%rdi), %YMM1, %YMM1
+
+	VMOVU	VEC_SIZE(%rsi, %rdi), %YMM2
+	vpxorq	VEC_SIZE(%rdi), %YMM2, %YMM2
+
+	VMOVU	(VEC_SIZE * 2)(%rsi, %rdi), %YMM3
+	vpxorq	(VEC_SIZE * 2)(%rdi), %YMM3, %YMM3
+	vpternlogd $0xfe, %YMM1, %YMM2, %YMM3
+
+	VMOVU	(VEC_SIZE * 3)(%rsi, %rdi), %YMM4
+	vpternlogd $0xde, (VEC_SIZE * 3)(%rdi), %YMM3, %YMM4
+	VPTEST	%YMM4, %YMM4, %k1
+	kmovd	%k1, %eax
+	testl	%eax, %eax
+	jnz	L(return_neq2)
+	subq	$-(VEC_SIZE * 4), %rdi
+	cmpq	%rdx, %rdi
+	jb	L(loop_4x_vec)
+
+	subq	%rdx, %rdi
+	VMOVU	(VEC_SIZE * 3)(%rsi, %rdx), %YMM4
+	vpxorq	(VEC_SIZE * 3)(%rdx), %YMM4, %YMM4
+	/* rdi has 4 * VEC_SIZE - remaining length.  */
+	cmpl	$(VEC_SIZE * 3), %edi
+	jae	L(8x_last_1x_vec)
+	/* Load regardless of branch.  */
+	VMOVU	(VEC_SIZE * 2)(%rsi, %rdx), %YMM3
+	/* Ternary logic to xor (VEC_SIZE * 2)(%rdx) with YMM3 while oring
+	   with YMM4. Result is stored in YMM4.  */
+	vpternlogd $0xf6, (VEC_SIZE * 2)(%rdx), %YMM3, %YMM4
+	cmpl	$(VEC_SIZE * 2), %edi
+	jae	L(8x_last_2x_vec)
+
+	VMOVU	VEC_SIZE(%rsi, %rdx), %YMM2
+	vpxorq	VEC_SIZE(%rdx), %YMM2, %YMM2
+
+	VMOVU	(%rsi, %rdx), %YMM1
+	vpxorq	(%rdx), %YMM1, %YMM1
+
+	vpternlogd $0xfe, %YMM1, %YMM2, %YMM4
+L(8x_last_1x_vec):
+L(8x_last_2x_vec):
+	VPTEST	%YMM4, %YMM4, %k1
+	kmovd	%k1, %eax
+L(return_neq2):
+	ret
+
+	/* Relatively cold case as page crosses are unexpected.  */
+	.p2align 4
+L(page_cross_less_vec):
+	cmpl	$16, %edx
+	jae	L(between_16_31)
+	cmpl	$8, %edx
+	ja	L(between_9_15)
+	cmpl	$4, %edx
+	jb	L(between_2_3)
+	/* From 4 to 8 bytes.  No branch when size == 4.  */
+	movl	(%rdi), %eax
+	movl	(%rsi), %ecx
+	subl	%ecx, %eax
+	movl	-4(%rdi, %rdx), %ecx
+	movl	-4(%rsi, %rdx), %esi
+	subl	%esi, %ecx
+	orl	%ecx, %eax
+	ret
+
+	.p2align 4,, 8
+L(between_9_15):
+	/* Safe to use xmm[0, 15] as no vzeroupper is needed so RTM safe.
+	 */
+	vmovq	(%rdi), %xmm1
+	vmovq	(%rsi), %xmm2
+	vpcmpeqb %xmm1, %xmm2, %xmm3
+	vmovq	-8(%rdi, %rdx), %xmm1
+	vmovq	-8(%rsi, %rdx), %xmm2
+	vpcmpeqb %xmm1, %xmm2, %xmm2
+	vpand	%xmm2, %xmm3, %xmm3
+	vpmovmskb %xmm3, %eax
+	subl	$0xffff, %eax
+	/* No ymm register was touched.  */
+	ret
+
+	.p2align 4,, 8
+L(between_16_31):
+	/* From 16 to 31 bytes.  No branch when size == 16.  */
+
+	/* Safe to use xmm[0, 15] as no vzeroupper is needed so RTM safe.
+	 */
+	vmovdqu	(%rsi), %xmm1
+	vpcmpeqb (%rdi), %xmm1, %xmm1
+	vmovdqu	-16(%rsi, %rdx), %xmm2
+	vpcmpeqb -16(%rdi, %rdx), %xmm2, %xmm2
+	vpand	%xmm1, %xmm2, %xmm2
+	vpmovmskb %xmm2, %eax
+	subl	$0xffff, %eax
+	/* No ymm register was touched.  */
+	ret
+
+	.p2align 4,, 8
+L(between_2_3):
+	/* From 2 to 3 bytes.  No branch when size == 2.  */
+	movzwl	(%rdi), %eax
+	movzwl	(%rsi), %ecx
+	subl	%ecx, %eax
+	movzbl	-1(%rdi, %rdx), %edi
+	movzbl	-1(%rsi, %rdx), %esi
+	subl	%edi, %esi
+	orl	%esi, %eax
+	/* No ymm register was touched.  */
+	ret
+END (BCMP)
+#endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-bcmp.h b/sysdeps/x86_64/multiarch/ifunc-bcmp.h
index f94516e5ee..51f251d0c9 100644
--- a/sysdeps/x86_64/multiarch/ifunc-bcmp.h
+++ b/sysdeps/x86_64/multiarch/ifunc-bcmp.h
@@ -35,8 +35,7 @@ IFUNC_SELECTOR (void)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
       if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-	  && CPU_FEATURE_USABLE_P (cpu_features, MOVBE))
+	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
 	return OPTIMIZE (evex);
 
       if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index cda0316928..abbb4e407f 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -52,7 +52,6 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 	      IFUNC_IMPL_ADD (array, i, bcmp,
 			      (CPU_FEATURE_USABLE (AVX512VL)
 			       && CPU_FEATURE_USABLE (AVX512BW)
-                   && CPU_FEATURE_USABLE (MOVBE)
 			       && CPU_FEATURE_USABLE (BMI2)),
 			      __bcmp_evex)
 	      IFUNC_IMPL_ADD (array, i, bcmp, CPU_FEATURE_USABLE (SSE4_1),
-- 
2.25.1



Thread overview: 51+ messages
2021-09-13 23:05 [PATCH 1/5] x86_64: Add support for bcmp using sse2, sse4_1, avx2, and evex Noah Goldstein via Libc-alpha
2021-09-13 23:05 ` [PATCH 2/5] x86_64: Add sse2 optimized bcmp implementation in memcmp.S Noah Goldstein via Libc-alpha
2021-09-13 23:05 ` [PATCH 3/5] x86_64: Add sse4_1 optimized bcmp implementation in memcmp-sse4.S Noah Goldstein via Libc-alpha
2021-09-13 23:05 ` [PATCH 4/5] x86_64: Add avx2 optimized bcmp implementation in bcmp-avx2.S Noah Goldstein via Libc-alpha
2021-09-13 23:05 ` Noah Goldstein via Libc-alpha [this message]
2021-09-14  1:18   ` [PATCH 5/5] x86_64: Add evex optimized bcmp implementation in bcmp-evex.S Carlos O'Donell via Libc-alpha
2021-09-14  2:05     ` Noah Goldstein via Libc-alpha
2021-09-14  2:35       ` Carlos O'Donell via Libc-alpha
2021-09-14  2:55         ` DJ Delorie via Libc-alpha
2021-09-14  3:24           ` Noah Goldstein via Libc-alpha
2021-09-14  3:40         ` Noah Goldstein via Libc-alpha
2021-09-14  4:21           ` DJ Delorie via Libc-alpha
2021-09-14  5:29             ` Noah Goldstein via Libc-alpha
2021-09-14  5:42               ` DJ Delorie via Libc-alpha
2021-09-14  5:55                 ` Noah Goldstein via Libc-alpha
2021-09-13 23:22 ` [PATCH 1/5] x86_64: Add support for bcmp using sse2, sse4_1, avx2, and evex Noah Goldstein via Libc-alpha
2021-09-14  6:30 ` [PATCH v2 " Noah Goldstein via Libc-alpha
2021-09-14  6:30   ` [PATCH v2 2/5] x86_64: Add sse2 optimized bcmp implementation in memcmp.S Noah Goldstein via Libc-alpha
2021-09-14  6:30   ` [PATCH v2 3/5] x86_64: Add sse4_1 optimized bcmp implementation in memcmp-sse4.S Noah Goldstein via Libc-alpha
2021-09-14  6:30   ` [PATCH v2 4/5] x86_64: Add avx2 optimized bcmp implementation in bcmp-avx2.S Noah Goldstein via Libc-alpha
2021-09-14  6:30   ` [PATCH v2 5/5] x86_64: Add evex optimized bcmp implementation in bcmp-evex.S Noah Goldstein via Libc-alpha
2021-09-14 14:40   ` [PATCH v2 1/5] x86_64: Add support for bcmp using sse2, sse4_1, avx2, and evex H.J. Lu via Libc-alpha
2021-09-14 19:23     ` Noah Goldstein via Libc-alpha
2021-09-14 20:30     ` Florian Weimer via Libc-alpha
2021-09-15  0:00 ` [PATCH " Joseph Myers
2021-09-15 13:37   ` Zack Weinberg via Libc-alpha
2021-09-15 14:01     ` Re: [PATCH 1/5] x86_64: Add support for bcmp using sse2, sse 4_1, " Florian Weimer via Libc-alpha
2021-09-15 18:06       ` Noah Goldstein via Libc-alpha
2021-09-15 18:30         ` Joseph Myers
2021-09-27  1:35           ` Noah Goldstein via Libc-alpha
2021-09-27  7:29             ` Florian Weimer via Libc-alpha
2021-09-27 16:49               ` Noah Goldstein via Libc-alpha
2021-09-27 16:54                 ` Florian Weimer via Libc-alpha
2021-09-27 17:54                   ` Noah Goldstein via Libc-alpha
2021-09-27 17:56                     ` Florian Weimer via Libc-alpha
2021-09-27 18:05                       ` Noah Goldstein via Libc-alpha
2021-09-27 18:10                         ` Florian Weimer via Libc-alpha
2021-09-27 18:15                           ` Noah Goldstein via Libc-alpha
2021-09-27 18:22                             ` Florian Weimer via Libc-alpha
2021-09-27 18:34                               ` Noah Goldstein via Libc-alpha
2021-09-27 18:56                                 ` Florian Weimer via Libc-alpha
2021-09-27 19:20                                   ` Noah Goldstein via Libc-alpha
2021-09-27 19:34                                     ` Florian Weimer via Libc-alpha
2021-09-27 19:43                                       ` Noah Goldstein via Libc-alpha
2021-09-27 19:59                                         ` Florian Weimer via Libc-alpha
2021-09-27 20:22                                           ` Noah Goldstein via Libc-alpha
2021-09-27 20:24                                             ` Florian Weimer via Libc-alpha
2021-09-27 20:38                                               ` Noah Goldstein via Libc-alpha
2021-09-28  0:07                                                 ` Noah Goldstein via Libc-alpha
2021-09-27 17:42               ` Joseph Myers
2021-09-27 17:48                 ` Noah Goldstein via Libc-alpha
