* [PATCH v2 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h.S
From: Noah Goldstein via Libc-alpha @ 2022-11-18 19:08 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
1. Remove unnecessary spills.
2. Split the shared avx/avx512 wrappers to a new file.
3. Fix some small missed optimizations.
All math and mathvec tests pass on x86.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 253 ++--------------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 309 ++++----------------
sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S | 190 ++++++++++++
3 files changed, 266 insertions(+), 486 deletions(-)
create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S
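
Background for review: each of these macros implements a vector math entry point by calling a scalar (or narrower-vector) callee once per lane and repacking the results. A minimal C sketch of the one-argument double case, with illustrative names that are not part of the patch:

#include <emmintrin.h>

typedef double (*scalar_fn) (double);

/* Illustrative equivalent of WRAPPER_IMPL_SSE2 for doubles: apply the
   scalar callee to both lanes, then repack.  */
static __m128d
wrapper_sse2_sketch (scalar_fn callee, __m128d x)
{
  double lo = callee (_mm_cvtsd_f64 (x));                      /* lane 0 */
  double hi = callee (_mm_cvtsd_f64 (_mm_unpackhi_pd (x, x))); /* lane 1 */
  return _mm_set_pd (hi, lo);   /* _mm_set_pd takes (high, low).  */
}

The assembly below has to do the lane extraction and repacking through stack spills, which is where the removed "unnecessary spills" live.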
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index b03a2122b9..e54c16ea6e 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,39 +18,38 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
ret
.endm
+
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
movsd 24(%rsp), %xmm1
call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
ret
.endm
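
The detail worth calling out in the rewritten one- and two-argument wrappers above: once the first scalar call returns, the lane-0 inputs at (%rsp) are dead, so the partial result is spilled into that same slot instead of a fresh one, shrinking the frame. A hedged C sketch of the two-argument semantics (names illustrative):

#include <emmintrin.h>

typedef double (*scalar_ff) (double, double);

/* Illustrative equivalent of WRAPPER_IMPL_SSE2_ff: apply a two-argument
   scalar callee (pow-like) lane by lane.  */
static __m128d
wrapper_sse2_ff_sketch (scalar_ff callee, __m128d x, __m128d y)
{
  double lo = callee (_mm_cvtsd_f64 (x), _mm_cvtsd_f64 (y));
  double hi = callee (_mm_cvtsd_f64 (_mm_unpackhi_pd (x, x)),
                      _mm_cvtsd_f64 (_mm_unpackhi_pd (y, y)));
  return _mm_set_pd (hi, lo);
}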
@@ -62,229 +61,25 @@
pushq %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
+ movaps %xmm0, (%rsp)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
+ movsd 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
- popq %rbx
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
+ popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
+ popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h.S"
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index cecf6c8384..958d1be243 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,61 +18,66 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $16, %rsp
+ cfi_adjust_cfa_offset (16)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
+ movss %xmm0, (%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
+ movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
+ movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $16, %rsp
+ cfi_adjust_cfa_offset (-16)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset (32)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
movss 20(%rsp), %xmm1
+ movss %xmm0, 0(%rsp)
+ movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
movss 24(%rsp), %xmm1
+ movss %xmm0, 4(%rsp)
+ movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
movss 28(%rsp), %xmm1
+ movd %xmm0, %ebx
+ movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ /* merge 4x results into xmm0. */
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $32, %rsp
+ cfi_adjust_cfa_offset (-32)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
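
One more trick in the float wrappers above: only lanes 0 and 1 get stack slots. Lane 2's result is parked in the call-preserved %ebx via movd, and lane 3's result never leaves %xmm0, so two of the four result spills disappear. The lane-wise semantics, as a C sketch with illustrative names:

#include <xmmintrin.h>

typedef float (*scalar_fnf) (float);

/* Illustrative equivalent of the float WRAPPER_IMPL_SSE2: apply the
   scalar callee to all four lanes.  */
static __m128
wrapper_sse2f_sketch (scalar_fnf callee, __m128 x)
{
  float in[4], out[4];
  _mm_storeu_ps (in, x);
  for (int i = 0; i < 4; i++)
    out[i] = callee (in[i]);
  return _mm_loadu_ps (out);
}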
@@ -86,48 +91,24 @@
cfi_rel_offset (%rbx, 0)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
+ movss 4(%rsp), %xmm0
+ leaq 4(%rbp), %rdi
+ leaq 4(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
+ movss 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
+ movss 12(%rsp), %xmm0
+ leaq 12(%rbp), %rdi
+ leaq 12(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
@@ -137,190 +118,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h.S"
diff --git a/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S
new file mode 100644
index 0000000000..bd934ad578
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S
@@ -0,0 +1,190 @@
+/* Common float/double wrapper implementations of vector math
+ functions.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
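
The strategy here, in intrinsics form: the low half of the ymm input is already in xmm0, so the first SSE call needs no load at all; only the high half goes through the stack. A sketch with assumed names (not part of the patch), shown for the float case:

#include <immintrin.h>

typedef __m128 (*sse_kernel) (__m128);

/* Illustrative equivalent of WRAPPER_IMPL_AVX: run the 128-bit kernel
   on each half and reassemble with vinsertf128.  */
static __m256
wrapper_avx_sketch (sse_kernel kernel, __m256 x)
{
  __m128 lo = kernel (_mm256_castps256_ps128 (x));   /* low 128 bits */
  __m128 hi = kernel (_mm256_extractf128_ps (x, 1)); /* high 128 bits */
  return _mm256_insertf128_ps (_mm256_castps128_ps256 (lo), hi, 1);
}

The vzeroupper before the first call avoids AVX-to-SSE transition stalls in the SSE-built callee.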
+
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512 callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovups %zmm0, (%rsp)
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, (%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
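
Same split-and-recombine idea one level up, on 256-bit halves of the zmm input; the assembly reassembles with the integer form vinserti64x4, which is bit-equivalent for float and double data. An intrinsics sketch (assumed names), double case:

#include <immintrin.h>

typedef __m256d (*avx2_kernel) (__m256d);

/* Illustrative equivalent of WRAPPER_IMPL_AVX512: run the 256-bit
   kernel on each half of the 512-bit input.  */
static __m512d
wrapper_avx512_sketch (avx2_kernel kernel, __m512d x)
{
  __m256d lo = kernel (_mm512_castpd512_pd256 (x));    /* low 256 bits */
  __m256d hi = kernel (_mm512_extractf64x4_pd (x, 1)); /* high 256 bits */
  return _mm512_insertf64x4 (_mm512_castpd256_pd512 (lo), hi, 1);
}

No vzeroupper is needed here since the callee is itself AVX2 code.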
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ addq $-128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ /* ymm0 and ymm1 are already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
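
And the flow behind the "ymm0 is already set" comments in the fFF variant: the narrower kernel's input is just the low half of zmm0, and the second call gets both destination pointers bumped by 32 bytes (four doubles) via the leaq 32(%rbx)/32(%r14) pair above. A C sketch with illustrative names, double case:

#include <immintrin.h>

typedef void (*avx2_fFF) (__m256d, double *, double *);

/* Illustrative equivalent of WRAPPER_IMPL_AVX512_fFF.  */
static void
wrapper_avx512_fFF_sketch (avx2_fFF kernel, __m512d x,
                           double *r0, double *r1)
{
  kernel (_mm512_castpd512_pd256 (x), r0, r1);            /* low 4 lanes */
  kernel (_mm512_extractf64x4_pd (x, 1), r0 + 4, r1 + 4); /* high 4 lanes */
}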
--
2.34.1
* Re: [PATCH v2 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h.S
From: H.J. Lu via Libc-alpha @ 2022-11-18 19:56 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 11:08 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Remove unnecessary spills.
> 2. Split the shared avx/avx512 wrappers to a new file.
> 3. Fix some small missed optimizations.
>
> All math and mathvec tests pass on x86.
> ---
> sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 253 ++--------------
> sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 309 ++++----------------
> sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S | 190 ++++++++++++
> 3 files changed, 266 insertions(+), 486 deletions(-)
> create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S
Please use svml_sd_wrapper_impl.h to be consistent with other .h files.
Thanks.
--
H.J.
* Re: [PATCH v2 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h.S
From: Noah Goldstein via Libc-alpha @ 2022-11-18 20:36 UTC (permalink / raw)
To: H.J. Lu; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 11:56 AM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Fri, Nov 18, 2022 at 11:08 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > 1. Remove unnecessary spills.
> > 2. Split the shared avx/avx512 wrappers to a new file.
> > 3. Fix some small missed optimizations.
> >
> > All math and mathvec tests pass on x86.
> > ---
> > sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 253 ++--------------
> > sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 309 ++++----------------
> > sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S | 190 ++++++++++++
> > 3 files changed, 266 insertions(+), 486 deletions(-)
> > create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h.S
>
> Please use svml_sd_wrapper_impl.h to be consistent with other .h files.
Done. V3 is in a new thread because the commit message changed (I realized it was still using .h.S there).
>
> Thanks.
>
> --
> H.J.
* [PATCH v3 1/2] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
From: Noah Goldstein via Libc-alpha @ 2022-11-18 20:35 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
Just reformat with the style convention used in other x86 assembler
files. This doesn't change libm.so or libmvec.so.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
2 files changed, 510 insertions(+), 510 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 2334713015..b03a2122b9 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,273 +18,273 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
- movsd 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 16(%rsp)
+ movsd 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movsd 16(%rsp), %xmm1
+ movsd %xmm0, 24(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
- movsd 8(%rsp), %xmm0
- movsd 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 32(%rsp)
+ movsd 8(%rsp), %xmm0
+ movsd 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movsd 32(%rsp), %xmm1
+ movsd %xmm0, 40(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movsd 24(%rsp), %xmm0
+ movapd (%rsp), %xmm1
+ movsd %xmm0, 0(%rbp)
+ unpckhpd %xmm1, %xmm1
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, (%rbx)
+ movapd %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movsd 24(%rsp), %xmm0
+ movsd %xmm0, 8(%rbp)
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, 8(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, %xmm1
+ vmovapd 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ movq %rdi, %r13
+ vextractf128 $1, %ymm0, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd (%rsp), %xmm0
+ vmovapd 16(%rsp), %xmm1
+ vmovapd %xmm0, 16(%r13)
+ vmovapd %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovupd (%rsp), %ymm0
+ vmovupd 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 128(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovups %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 64(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ vmovupd %ymm0, 32(%r12)
+ vmovupd %ymm1, 32(%r13)
+ vzeroupper
+ addq $176, %rsp
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ popq %r12
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r12)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index c23da7ec83..cecf6c8384 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,309 +18,309 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
- movss 4(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
- movss 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
- movss 12(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 16(%rsp)
+ movss 4(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 20(%rsp)
+ movss 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 24(%rsp)
+ movss 12(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss 16(%rsp), %xmm3
+ movss 20(%rsp), %xmm2
+ movss 24(%rsp), %xmm1
+ movss %xmm0, 28(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
- movss 20(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
- movss 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
- movss 28(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 32(%rsp)
+ movss 4(%rsp), %xmm0
+ movss 20(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 36(%rsp)
+ movss 8(%rsp), %xmm0
+ movss 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 40(%rsp)
+ movss 12(%rsp), %xmm0
+ movss 28(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss 32(%rsp), %xmm3
+ movss 36(%rsp), %xmm2
+ movss 40(%rsp), %xmm1
+ movss %xmm0, 44(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 0(%rbp)
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, (%rbx)
+ movaps %xmm1, %xmm0
+ shufps $85, %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ leaq 24(%rsp), %rsi
+ movss %xmm0, 4(%rbp)
+ leaq 28(%rsp), %rdi
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 4(%rbx)
+ movaps %xmm1, %xmm0
+ unpckhps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movaps (%rsp), %xmm1
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ shufps $255, %xmm1, %xmm1
+ movss %xmm0, 8(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 8(%rbx)
+ movaps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 12(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 12(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ vmovaps %ymm0, (%rsp)
+ movq %rdi, %r13
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm1, 32(%rsp)
+ vzeroupper
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps (%rsp), %xmm0
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm0, 16(%r13)
+ vmovaps %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovups (%rsp), %ymm0
+ vmovups 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 128(%rsp)
+ vmovups 32(%rsp), %ymm0
+ vmovups 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ pushq %r13
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovaps %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovaps (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 64(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ vmovaps %ymm0, 32(%r12)
+ vmovaps %ymm1, 32(%r13)
+ addq $176, %rsp
+ popq %r13
+ popq %r12
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
--
2.34.1
* [PATCH v3 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
From: Noah Goldstein via Libc-alpha @ 2022-11-18 20:35 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
1. Remove unnecessary spills.
2. Split the shared avx/avx512 wrappers to a new file.
3. Fix some small missed optimizations.
All math and mathvec tests pass on x86.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 253 ++----------------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 309 ++++------------------
sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h | 190 +++++++++++++
3 files changed, 266 insertions(+), 486 deletions(-)
create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index b03a2122b9..52407da8ed 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,39 +18,38 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
ret
.endm
+
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
movsd 24(%rsp), %xmm1
call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
ret
.endm
@@ -62,229 +61,25 @@
pushq %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
+ movaps %xmm0, (%rsp)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
+ movsd 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
- popq %rbx
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
+ popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
+ popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
.endm
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
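The 24-byte frames used by the new SSE2 wrappers fall out of the
x86-64 psABI alignment rules. A minimal sketch of the arithmetic, with
scalar_callee standing in for JUMPTARGET(\callee) (the name is
illustrative only):

	/* On entry %rsp == 8 (mod 16) because of the pushed return
	   address, so a 24-byte frame realigns it to 16 bytes: 16
	   bytes hold the spilled vector, 8 bytes are padding. */
	subq	$24, %rsp
	cfi_adjust_cfa_offset (24)
	movaps	%xmm0, (%rsp)	/* aligned store; movaps faults on a
				   misaligned address */
	call	scalar_callee
	/* ... second scalar call and the unpcklpd merge go here, as
	   in the hunk above ... */
	addq	$24, %rsp
	cfi_adjust_cfa_offset (-24)
	ret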
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index cecf6c8384..d9266563ef 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,61 +18,66 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $16, %rsp
+ cfi_adjust_cfa_offset (16)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
+ movss %xmm0, (%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
+ movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
+ movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $16, %rsp
+ cfi_adjust_cfa_offset (-16)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset (32)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
movss 20(%rsp), %xmm1
+ movss %xmm0, 0(%rsp)
+ movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
movss 24(%rsp), %xmm1
+ movss %xmm0, 4(%rsp)
+ movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
movss 28(%rsp), %xmm1
+ movd %xmm0, %ebx
+ movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ /* merge 4x results into xmm0. */
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $32, %rsp
+ cfi_adjust_cfa_offset (-32)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
@@ -86,48 +91,24 @@
cfi_rel_offset (%rbx, 0)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
+ movss 4(%rsp), %xmm0
+ leaq 4(%rbp), %rdi
+ leaq 4(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
+ movss 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
+ movss 12(%rsp), %xmm0
+ leaq 12(%rbp), %rdi
+ leaq 12(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
@@ -137,190 +118,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
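The %ebx trick above is what lets the float wrappers shrink their
frames: results r0/r1 overwrite the spent input slots at (%rsp) and
4(%rsp), r2 is parked in a general-purpose register across the fourth
call, and r3 arrives in %xmm0. The merge at the end, annotated per
32-bit lane (a sketch of the sequence in the hunk above):

	movd	%ebx, %xmm1	/* xmm1 = [r2, 0, 0, 0] */
	unpcklps %xmm0, %xmm1	/* xmm1 = [r2, r3, 0, 0] */
	movsd	(%rsp), %xmm0	/* xmm0 = [r0, r1, 0, 0] */
	unpcklpd %xmm1, %xmm0	/* xmm0 = [r0, r1, r2, r3] */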
diff --git a/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
new file mode 100644
index 0000000000..bd934ad578
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
@@ -0,0 +1,190 @@
+/* Common float/double wrapper implementations of vector math
+ functions.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
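+ /* Spill the whole ymm input once: the low half is already in xmm0
+    for the first call and the high half is reloaded from 16(%rsp)
+    for the second. */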
+ vmovaps %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
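+ /* The callee stores its results through %rdi/%rsi itself, so
+    nothing needs to be spilled for the outputs; only the two
+    pointers are advanced for the second half. */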
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
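+ /* 32 = 16 (offset of the input's high half) + 16 for the two
+    pushes above. */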
+ vmovaps 32(%rsp), %xmm0
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512 callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovups %zmm0, (%rsp)
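+ /* ymm0 (the low half of zmm0) is already set up for the first
+    call. */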
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, (%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
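+ /* addq $-128 rather than subq $128: -128 still fits in a
+    sign-extended imm8, giving a shorter encoding. */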
+ addq $-128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ /* ymm0 and ymm1 are already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
+ call HIDDEN_JUMPTARGET(\callee)
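+ /* 48 = 32 (offset of the input's high half) + 16 for the two
+    pushes above. */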
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
--
2.34.1
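For context on how these macros are consumed: each per-function core
file instantiates one wrapper around the next-narrower ISA's
implementation. A representative sketch (the symbol names follow the
_ZGV vector-ABI mangling used in sysdeps/x86_64/fpu and are shown for
illustration only):

	.text
ENTRY (_ZGVdN4v_sin)
WRAPPER_IMPL_AVX _ZGVbN2v_sin
END (_ZGVdN4v_sin)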
* Re: [PATCH v3 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-18 20:35 ` [PATCH v3 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-18 20:49 ` H.J. Lu via Libc-alpha
2022-11-18 21:23 ` Noah Goldstein via Libc-alpha
0 siblings, 1 reply; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-18 20:49 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Remove unnecessary spills.
> 2. Split the shared avx/avx512 wrappers to a new file.
Please make #2 a separate patch.
> 3. Fix some small missed optimizations.
>
Thanks.
--
H.J.
* Re: [PATCH v3 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-18 20:49 ` H.J. Lu via Libc-alpha
@ 2022-11-18 21:23 ` Noah Goldstein via Libc-alpha
0 siblings, 0 replies; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-18 21:23 UTC (permalink / raw)
To: H.J. Lu; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 12:49 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Fri, Nov 18, 2022 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > 1. Remove unnecessary spills.
> > 2. Split the shared avx/avx512 wrappers to a new file.
>
> Please make #2 a separate patch.
Done in V4.
>
> > 3. Fix some small missed optimizations.
> >
>
> Thanks.
>
> --
> H.J.
* [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
2022-11-18 19:08 [PATCH v2 1/2] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h.S Noah Goldstein via Libc-alpha
2022-11-18 19:08 ` [PATCH v2 2/2] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h.S Noah Goldstein via Libc-alpha
2022-11-18 20:35 ` [PATCH v3 1/2] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-18 21:22 ` Noah Goldstein via Libc-alpha
2022-11-18 21:22 ` [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
` (2 more replies)
2022-11-19 0:13 ` [PATCH v5 " Noah Goldstein via Libc-alpha
3 siblings, 3 replies; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-18 21:22 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
Just reformat to match the style convention used in other x86
assembler files. This doesn't change libm.so or libmvec.so.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
2 files changed, 510 insertions(+), 510 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 2334713015..b03a2122b9 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,273 +18,273 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
- movsd 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 16(%rsp)
+ movsd 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movsd 16(%rsp), %xmm1
+ movsd %xmm0, 24(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
- movsd 8(%rsp), %xmm0
- movsd 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 32(%rsp)
+ movsd 8(%rsp), %xmm0
+ movsd 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movsd 32(%rsp), %xmm1
+ movsd %xmm0, 40(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movsd 24(%rsp), %xmm0
+ movapd (%rsp), %xmm1
+ movsd %xmm0, 0(%rbp)
+ unpckhpd %xmm1, %xmm1
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, (%rbx)
+ movapd %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movsd 24(%rsp), %xmm0
+ movsd %xmm0, 8(%rbp)
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, 8(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, %xmm1
+ vmovapd 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ movq %rdi, %r13
+ vextractf128 $1, %ymm0, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd (%rsp), %xmm0
+ vmovapd 16(%rsp), %xmm1
+ vmovapd %xmm0, 16(%r13)
+ vmovapd %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovupd (%rsp), %ymm0
+ vmovupd 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 128(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovups %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 64(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ vmovupd %ymm0, 32(%r12)
+ vmovupd %ymm1, 32(%r13)
+ vzeroupper
+ addq $176, %rsp
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ popq %r12
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r12)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index c23da7ec83..cecf6c8384 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,309 +18,309 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
- movss 4(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
- movss 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
- movss 12(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 16(%rsp)
+ movss 4(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 20(%rsp)
+ movss 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 24(%rsp)
+ movss 12(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss 16(%rsp), %xmm3
+ movss 20(%rsp), %xmm2
+ movss 24(%rsp), %xmm1
+ movss %xmm0, 28(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
- movss 20(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
- movss 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
- movss 28(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 32(%rsp)
+ movss 4(%rsp), %xmm0
+ movss 20(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 36(%rsp)
+ movss 8(%rsp), %xmm0
+ movss 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 40(%rsp)
+ movss 12(%rsp), %xmm0
+ movss 28(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss 32(%rsp), %xmm3
+ movss 36(%rsp), %xmm2
+ movss 40(%rsp), %xmm1
+ movss %xmm0, 44(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 0(%rbp)
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, (%rbx)
+ movaps %xmm1, %xmm0
+ shufps $85, %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ leaq 24(%rsp), %rsi
+ movss %xmm0, 4(%rbp)
+ leaq 28(%rsp), %rdi
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 4(%rbx)
+ movaps %xmm1, %xmm0
+ unpckhps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movaps (%rsp), %xmm1
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ shufps $255, %xmm1, %xmm1
+ movss %xmm0, 8(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 8(%rbx)
+ movaps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 12(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 12(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ vmovaps %ymm0, (%rsp)
+ movq %rdi, %r13
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm1, 32(%rsp)
+ vzeroupper
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps (%rsp), %xmm0
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm0, 16(%r13)
+ vmovaps %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovups (%rsp), %ymm0
+ vmovups 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 128(%rsp)
+ vmovups 32(%rsp), %ymm0
+ vmovups 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ pushq %r13
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovaps %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovaps (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 64(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ vmovaps %ymm0, 32(%r12)
+ vmovaps %ymm1, 32(%r13)
+ addq $176, %rsp
+ popq %r13
+ popq %r12
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
--
2.34.1
* [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:22 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-18 21:22 ` Noah Goldstein via Libc-alpha
2022-11-19 0:06 ` H.J. Lu via Libc-alpha
2022-11-18 21:22 ` [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 " Noah Goldstein via Libc-alpha
2022-11-18 23:25 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h H.J. Lu via Libc-alpha
2 siblings, 1 reply; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-18 21:22 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
1. Remove unnecessary spills.
2. Fix some small missed optimizations.
All math and mathvec tests pass on x86.
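To make point 1 concrete, here is the pattern removed from the
one-argument double wrapper, condensed from the first hunk below
(\callee as in the macro):

	/* Before: 40-byte frame with a separate scratch slot, plus a
	   dead store of the second result. */
	movsd	%xmm0, 16(%rsp)
	movsd	8(%rsp), %xmm0
	call	JUMPTARGET(\callee)
	movsd	16(%rsp), %xmm1
	movsd	%xmm0, 24(%rsp)	/* never reloaded */

	/* After: 24-byte frame; lane 0's input slot is dead once the
	   first call returns, so its result reuses it. */
	movsd	%xmm0, (%rsp)
	movsd	8(%rsp), %xmm0
	call	JUMPTARGET(\callee)
	movsd	(%rsp), %xmm1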
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 183 +++++++-----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++-------------
2 files changed, 174 insertions(+), 244 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index b03a2122b9..78c30c56cb 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,39 +18,38 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
ret
.endm
+
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
movsd 24(%rsp), %xmm1
call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
ret
.endm
@@ -62,34 +61,22 @@
pushq %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
+ movaps %xmm0, (%rsp)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
+ movsd 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
- popq %rbx
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
+ popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
- popq %rbp
+ popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
@@ -104,15 +91,17 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
+ vmovaps %ymm0, (%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -130,17 +119,19 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -155,35 +146,21 @@
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
+ movq %rdi, %rbx
movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -200,15 +177,16 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $128, %rsp
+ subq $64, %rsp
vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
+ vmovupd %ymm0, (%rsp)
vmovupd 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -225,18 +203,19 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $192, %rsp
+ addq $-128, %rsp
vmovups %zmm0, (%rsp)
vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
+ /* ymm0 and ymm1 are already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -253,34 +232,20 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
+ popq %r14
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index cecf6c8384..43f2b91f32 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,61 +18,66 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $16, %rsp
+ cfi_adjust_cfa_offset (16)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
+ movss %xmm0, (%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
+ movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
+ movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $16, %rsp
+ cfi_adjust_cfa_offset (-16)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset (32)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
movss 20(%rsp), %xmm1
+ movss %xmm0, 0(%rsp)
+ movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
movss 24(%rsp), %xmm1
+ movss %xmm0, 4(%rsp)
+ movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
movss 28(%rsp), %xmm1
+ movd %xmm0, %ebx
+ movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ /* merge 4x results into xmm0. */
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $32, %rsp
+ cfi_adjust_cfa_offset (-32)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
@@ -86,48 +91,24 @@
cfi_rel_offset (%rbx, 0)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
+ movss 4(%rsp), %xmm0
+ leaq 4(%rbp), %rdi
+ leaq 4(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
+ movss 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
+ movss 12(%rsp), %xmm0
+ leaq 12(%rbp), %rdi
+ leaq 12(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
@@ -146,15 +127,17 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
+ vmovaps %ymm0, (%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -172,17 +155,19 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -197,38 +182,21 @@
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
+ movq %rdi, %rbx
movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
vzeroupper
- vmovaps (%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -245,15 +213,16 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $128, %rsp
+ subq $64, %rsp
vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
+ vmovupd %ymm0, (%rsp)
vmovupd 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -270,18 +239,19 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $192, %rsp
+ addq $-128, %rsp
vmovups %zmm0, (%rsp)
vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
+ /* ymm0 and ymm1 are already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -298,25 +268,20 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
+ subq $64, %rsp
vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
+ popq %r14
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
--
2.34.1
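The merge idiom at the tail of the new WRAPPER_IMPL_SSE2/WRAPPER_IMPL_SSE2_ff above is the heart of the "remove unnecessary spills" change: result 2 is parked in %ebx with movd (%rbx is preserved across the scalar call), results 0 and 1 reuse the argument's spill slots, and the vector is rebuilt with unpcklps/unpcklpd instead of giving every result its own stack slot. The C intrinsics sketch below shows the same dataflow for the one-argument flavor (the _ff variant is identical except each call also reloads its second operand from the other spill slot). It is illustrative only: wrapper_sse2_sketch and scalar_cb are invented names standing in for the macro and JUMPTARGET(\callee), and the compiler may schedule it differently from the hand-written assembly.

#include <immintrin.h>
#include <math.h>
#include <stdio.h>

/* Toy scalar callee; the real wrappers reach the scalar routine via
   JUMPTARGET(\callee).  */
static float scalar_cb (float x) { return sinf (x); }

static __m128
wrapper_sse2_sketch (__m128 x)
{
  float in[4];
  _mm_storeu_ps (in, x);        /* movaps %xmm0, (%rsp): one spill of the argument.  */
  float r0 = scalar_cb (in[0]); /* movss %xmm0, (%rsp) reuses the slot.  */
  float r1 = scalar_cb (in[1]); /* movss %xmm0, 4(%rsp) likewise.  */
  float r2 = scalar_cb (in[2]); /* movd %xmm0, %ebx: kept in a register.  */
  float r3 = scalar_cb (in[3]); /* Still live in %xmm0.  */
  /* movd %ebx, %xmm1; unpcklps %xmm0, %xmm1 -> {r2, r3, 0, 0}.  */
  __m128 hi = _mm_unpacklo_ps (_mm_set_ss (r2), _mm_set_ss (r3));
  /* movsd (%rsp), %xmm0 -> {r0, r1, 0, 0}.  */
  __m128 lo = _mm_unpacklo_ps (_mm_set_ss (r0), _mm_set_ss (r1));
  /* unpcklpd %xmm1, %xmm0 -> {r0, r1, r2, r3} (movelh is the ps spelling).  */
  return _mm_movelh_ps (lo, hi);
}

int
main (void)
{
  float out[4];
  _mm_storeu_ps (out, wrapper_sse2_sketch (_mm_setr_ps (0.0f, 0.5f, 1.0f, 2.0f)));
  printf ("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  return 0;
}

Relative to the old sequence, the round trips for r0/r1 remain but reuse the argument's spill slots, r2's round trip goes through %ebx instead of memory, and the _ff frame shrinks from 56 bytes to 32 plus the saved %rbx.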
* Re: [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:22 ` [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-19 0:06 ` H.J. Lu via Libc-alpha
2022-11-19 0:13 ` Noah Goldstein via Libc-alpha
0 siblings, 1 reply; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-19 0:06 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Remove unnecessary spills.
> 2. Fix some small missed optimizations.
>
> All math and mathvec tests pass on x86.
> ---
> sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 183 +++++++-----------
> sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++-------------
> 2 files changed, 174 insertions(+), 244 deletions(-)
>
> diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> index b03a2122b9..78c30c56cb 100644
> --- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> +++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> @@ -18,39 +18,38 @@
>
> /* SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2 callee
> - subq $40, %rsp
> - cfi_adjust_cfa_offset (40)
> + subq $24, %rsp
> + cfi_adjust_cfa_offset (24)
> movaps %xmm0, (%rsp)
> call JUMPTARGET(\callee)
> - movsd %xmm0, 16(%rsp)
> + movsd %xmm0, (%rsp)
> movsd 8(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movsd 16(%rsp), %xmm1
> - movsd %xmm0, 24(%rsp)
> + movsd (%rsp), %xmm1
> unpcklpd %xmm0, %xmm1
> movaps %xmm1, %xmm0
> - addq $40, %rsp
> - cfi_adjust_cfa_offset (-40)
> + addq $24, %rsp
> + cfi_adjust_cfa_offset (-24)
> ret
> .endm
>
> +
> /* 2 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_ff callee
> - subq $56, %rsp
> - cfi_adjust_cfa_offset (56)
> + subq $40, %rsp
> + cfi_adjust_cfa_offset (40)
> movaps %xmm0, (%rsp)
> movaps %xmm1, 16(%rsp)
> call JUMPTARGET(\callee)
> - movsd %xmm0, 32(%rsp)
> + movsd %xmm0, (%rsp)
> movsd 8(%rsp), %xmm0
> movsd 24(%rsp), %xmm1
> call JUMPTARGET(\callee)
> - movsd 32(%rsp), %xmm1
> - movsd %xmm0, 40(%rsp)
> + movsd (%rsp), %xmm1
> unpcklpd %xmm0, %xmm1
> movaps %xmm1, %xmm0
> - addq $56, %rsp
> - cfi_adjust_cfa_offset (-56)
> + addq $40, %rsp
> + cfi_adjust_cfa_offset (-40)
> ret
> .endm
>
> @@ -62,34 +61,22 @@
> pushq %rbx
> cfi_adjust_cfa_offset (8)
> cfi_rel_offset (%rbx, 0)
> + subq $24, %rsp
> + cfi_adjust_cfa_offset (24)
> + movaps %xmm0, (%rsp)
> movq %rdi, %rbp
> movq %rsi, %rbx
> - subq $40, %rsp
> - cfi_adjust_cfa_offset (40)
> - leaq 16(%rsp), %rsi
> - leaq 24(%rsp), %rdi
> - movaps %xmm0, (%rsp)
> call JUMPTARGET(\callee)
> - leaq 16(%rsp), %rsi
> - leaq 24(%rsp), %rdi
> - movsd 24(%rsp), %xmm0
> - movapd (%rsp), %xmm1
> - movsd %xmm0, 0(%rbp)
> - unpckhpd %xmm1, %xmm1
> - movsd 16(%rsp), %xmm0
> - movsd %xmm0, (%rbx)
> - movapd %xmm1, %xmm0
> + movsd 8(%rsp), %xmm0
> + leaq 8(%rbp), %rdi
> + leaq 8(%rbx), %rsi
> call JUMPTARGET(\callee)
> - movsd 24(%rsp), %xmm0
> - movsd %xmm0, 8(%rbp)
> - movsd 16(%rsp), %xmm0
> - movsd %xmm0, 8(%rbx)
> - addq $40, %rsp
> - cfi_adjust_cfa_offset (-40)
> - popq %rbx
> + addq $24, %rsp
> + cfi_adjust_cfa_offset (-24)
> + pop %rbx
popq
> cfi_adjust_cfa_offset (-8)
> cfi_restore (%rbx)
> - popq %rbp
> + pop %rbp
Why this change?
> cfi_adjust_cfa_offset (-8)
> cfi_restore (%rbp)
> ret
> @@ -104,15 +91,17 @@
> cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> subq $32, %rsp
> - vextractf128 $1, %ymm0, (%rsp)
> + vmovaps %ymm0, (%rsp)
> vzeroupper
> call HIDDEN_JUMPTARGET(\callee)
> - vmovapd %xmm0, 16(%rsp)
> - vmovaps (%rsp), %xmm0
> + vmovaps %xmm0, (%rsp)
> + vmovaps 16(%rsp), %xmm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovapd %xmm0, %xmm1
> - vmovapd 16(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + /* combine xmm0 (return of second call) with result of first
> + call (saved on stack). Might be worth exploring logic that
> + uses `vpblend` and reads in ymm1 using -16(rsp). */
> + vmovaps (%rsp), %xmm1
> + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -130,17 +119,19 @@
> cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> subq $64, %rsp
> - vextractf128 $1, %ymm0, 16(%rsp)
> - vextractf128 $1, %ymm1, (%rsp)
> + vmovaps %ymm0, (%rsp)
> + vmovaps %ymm1, 32(%rsp)
> vzeroupper
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 32(%rsp)
> + vmovaps 48(%rsp), %xmm1
> + vmovaps %xmm0, (%rsp)
> vmovaps 16(%rsp), %xmm0
> - vmovaps (%rsp), %xmm1
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> - vmovaps 32(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + /* combine xmm0 (return of second call) with result of first
> + call (saved on stack). Might be worth exploring logic that
> + uses `vpblend` and reads in ymm1 using -16(rsp). */
> + vmovaps (%rsp), %xmm1
> + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -155,35 +146,21 @@
> cfi_adjust_cfa_offset (8)
> cfi_rel_offset (%rbp, 0)
> movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> + subq $32, %rsp
> + vmovaps %ymm0, (%rsp)
> + pushq %rbx
> pushq %r14
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r14, 0)
> - subq $48, %rsp
> + movq %rdi, %rbx
> movq %rsi, %r14
> - movq %rdi, %r13
> - vextractf128 $1, %ymm0, 32(%rsp)
> vzeroupper
> call HIDDEN_JUMPTARGET(\callee)
> vmovaps 32(%rsp), %xmm0
> - lea (%rsp), %rdi
> - lea 16(%rsp), %rsi
> + leaq 16(%rbx), %rdi
> + leaq 16(%r14), %rsi
> call HIDDEN_JUMPTARGET(\callee)
> - vmovapd (%rsp), %xmm0
> - vmovapd 16(%rsp), %xmm1
> - vmovapd %xmm0, 16(%r13)
> - vmovapd %xmm1, 16(%r14)
> - addq $48, %rsp
> popq %r14
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r14)
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> + popq %rbx
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -200,15 +177,16 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - subq $128, %rsp
> + subq $64, %rsp
> vmovups %zmm0, (%rsp)
> - vmovupd (%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 64(%rsp)
> + vmovupd %ymm0, (%rsp)
> vmovupd 32(%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 96(%rsp)
> - vmovups 64(%rsp), %zmm0
> + /* combine ymm0 (return of second call) with result of first
> + call (saved on stack). */
> + vmovaps (%rsp), %ymm1
> + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -225,18 +203,19 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - subq $192, %rsp
> + addq $-128, %rsp
> vmovups %zmm0, (%rsp)
> vmovups %zmm1, 64(%rsp)
> - vmovupd (%rsp), %ymm0
> - vmovupd 64(%rsp), %ymm1
> + /* ymm0 and ymm1 are already set. */
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 128(%rsp)
> - vmovupd 32(%rsp), %ymm0
> - vmovupd 96(%rsp), %ymm1
> + vmovups 96(%rsp), %ymm1
> + vmovaps %ymm0, (%rsp)
> + vmovups 32(%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 160(%rsp)
> - vmovups 128(%rsp), %zmm0
> + /* combine ymm0 (return of second call) with result of first
> + call (saved on stack). */
> + vmovaps (%rsp), %ymm1
> + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -253,34 +232,20 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - pushq %r12
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r12, 0)
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> - subq $176, %rsp
> - movq %rsi, %r13
> - vmovups %zmm0, (%rsp)
> - movq %rdi, %r12
> - vmovupd (%rsp), %ymm0
> + subq $64, %rsp
> + vmovaps %zmm0, (%rsp)
> + pushq %rbx
> + pushq %r14
> + movq %rdi, %rbx
> + movq %rsi, %r14
> + /* ymm0 is already set. */
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd 32(%rsp), %ymm0
> - lea 64(%rsp), %rdi
> - lea 96(%rsp), %rsi
> + vmovaps 48(%rsp), %ymm0
> + leaq 32(%rbx), %rdi
> + leaq 32(%r14), %rsi
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd 64(%rsp), %ymm0
> - vmovupd 96(%rsp), %ymm1
> - vmovupd %ymm0, 32(%r12)
> - vmovupd %ymm1, 32(%r13)
> - vzeroupper
> - addq $176, %rsp
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> - popq %r12
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r12)
> + popq %r14
> + popq %rbx
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> index cecf6c8384..43f2b91f32 100644
> --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> @@ -18,61 +18,66 @@
>
> /* SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2 callee
> - subq $40, %rsp
> - cfi_adjust_cfa_offset (40)
> + push %rbx
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbx, 0)
> + subq $16, %rsp
> + cfi_adjust_cfa_offset (16)
> movaps %xmm0, (%rsp)
> call JUMPTARGET(\callee)
> - movss %xmm0, 16(%rsp)
> + movss %xmm0, (%rsp)
> movss 4(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss %xmm0, 20(%rsp)
> + movss %xmm0, 4(%rsp)
> movss 8(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss %xmm0, 24(%rsp)
> + movd %xmm0, %ebx
> movss 12(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss 16(%rsp), %xmm3
> - movss 20(%rsp), %xmm2
> - movss 24(%rsp), %xmm1
> - movss %xmm0, 28(%rsp)
> - unpcklps %xmm1, %xmm3
> - unpcklps %xmm0, %xmm2
> - unpcklps %xmm2, %xmm3
> - movaps %xmm3, %xmm0
> - addq $40, %rsp
> - cfi_adjust_cfa_offset (-40)
> + movd %ebx, %xmm1
> + unpcklps %xmm0, %xmm1
> + movsd (%rsp), %xmm0
> + unpcklpd %xmm1, %xmm0
> + addq $16, %rsp
> + cfi_adjust_cfa_offset (-16)
> + pop %rbx
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbx)
> ret
> .endm
>
> /* 2 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_ff callee
> - subq $56, %rsp
> - cfi_adjust_cfa_offset (56)
> + push %rbx
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbx, 0)
> + subq $32, %rsp
> > + cfi_adjust_cfa_offset (32)
> movaps %xmm0, (%rsp)
> movaps %xmm1, 16(%rsp)
> call JUMPTARGET(\callee)
> - movss %xmm0, 32(%rsp)
> - movss 4(%rsp), %xmm0
> movss 20(%rsp), %xmm1
> + movss %xmm0, 0(%rsp)
> + movss 4(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss %xmm0, 36(%rsp)
> - movss 8(%rsp), %xmm0
> movss 24(%rsp), %xmm1
> + movss %xmm0, 4(%rsp)
> + movss 8(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss %xmm0, 40(%rsp)
> - movss 12(%rsp), %xmm0
> movss 28(%rsp), %xmm1
> + movd %xmm0, %ebx
> + movss 12(%rsp), %xmm0
> call JUMPTARGET(\callee)
> - movss 32(%rsp), %xmm3
> - movss 36(%rsp), %xmm2
> - movss 40(%rsp), %xmm1
> - movss %xmm0, 44(%rsp)
> - unpcklps %xmm1, %xmm3
> - unpcklps %xmm0, %xmm2
> - unpcklps %xmm2, %xmm3
> - movaps %xmm3, %xmm0
> - addq $56, %rsp
> - cfi_adjust_cfa_offset (-56)
> + /* merge 4x results into xmm0. */
> + movd %ebx, %xmm1
> + unpcklps %xmm0, %xmm1
> + movsd (%rsp), %xmm0
> + unpcklpd %xmm1, %xmm0
> + addq $32, %rsp
> + cfi_adjust_cfa_offset (-32)
> + pop %rbx
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbx)
> ret
> .endm
>
> @@ -86,48 +91,24 @@
> cfi_rel_offset (%rbx, 0)
> movq %rdi, %rbp
> movq %rsi, %rbx
> - subq $40, %rsp
> - cfi_adjust_cfa_offset (40)
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> + subq $24, %rsp
> + cfi_adjust_cfa_offset (24)
> movaps %xmm0, (%rsp)
> call JUMPTARGET(\callee)
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> - movss 28(%rsp), %xmm0
> - movss %xmm0, 0(%rbp)
> - movaps (%rsp), %xmm1
> - movss 24(%rsp), %xmm0
> - movss %xmm0, (%rbx)
> - movaps %xmm1, %xmm0
> - shufps $85, %xmm1, %xmm0
> + movss 4(%rsp), %xmm0
> + leaq 4(%rbp), %rdi
> + leaq 4(%rbx), %rsi
> call JUMPTARGET(\callee)
> - movss 28(%rsp), %xmm0
> - leaq 24(%rsp), %rsi
> - movss %xmm0, 4(%rbp)
> - leaq 28(%rsp), %rdi
> - movaps (%rsp), %xmm1
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 4(%rbx)
> - movaps %xmm1, %xmm0
> - unpckhps %xmm1, %xmm0
> + movss 8(%rsp), %xmm0
> + leaq 8(%rbp), %rdi
> + leaq 8(%rbx), %rsi
> call JUMPTARGET(\callee)
> - movaps (%rsp), %xmm1
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> - movss 28(%rsp), %xmm0
> - shufps $255, %xmm1, %xmm1
> - movss %xmm0, 8(%rbp)
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 8(%rbx)
> - movaps %xmm1, %xmm0
> + movss 12(%rsp), %xmm0
> + leaq 12(%rbp), %rdi
> + leaq 12(%rbx), %rsi
> call JUMPTARGET(\callee)
> - movss 28(%rsp), %xmm0
> - movss %xmm0, 12(%rbp)
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 12(%rbx)
> - addq $40, %rsp
> - cfi_adjust_cfa_offset (-40)
> + addq $24, %rsp
> + cfi_adjust_cfa_offset (-24)
> popq %rbx
> cfi_adjust_cfa_offset (-8)
> cfi_restore (%rbx)
> @@ -146,15 +127,17 @@
> cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> subq $32, %rsp
> - vextractf128 $1, %ymm0, (%rsp)
> + vmovaps %ymm0, (%rsp)
> vzeroupper
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 16(%rsp)
> - vmovaps (%rsp), %xmm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> + vmovaps %xmm0, (%rsp)
> vmovaps 16(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + /* combine xmm0 (return of second call) with result of first
> + call (saved on stack). Might be worth exploring logic that
> + uses `vpblend` and reads in ymm1 using -16(rsp). */
> + vmovaps (%rsp), %xmm1
> + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -172,17 +155,19 @@
> cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> subq $64, %rsp
> - vextractf128 $1, %ymm0, 16(%rsp)
> - vextractf128 $1, %ymm1, (%rsp)
> + vmovaps %ymm0, (%rsp)
> + vmovaps %ymm1, 32(%rsp)
> vzeroupper
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 32(%rsp)
> + vmovaps 48(%rsp), %xmm1
> + vmovaps %xmm0, (%rsp)
> vmovaps 16(%rsp), %xmm0
> - vmovaps (%rsp), %xmm1
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> - vmovaps 32(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + /* combine xmm0 (return of second call) with result of first
> + call (saved on stack). Might be worth exploring logic that
> + uses `vpblend` and reads in ymm1 using -16(rsp). */
> + vmovaps (%rsp), %xmm1
> + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -197,38 +182,21 @@
> cfi_adjust_cfa_offset (8)
> cfi_rel_offset (%rbp, 0)
> movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> andq $-32, %rsp
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> + subq $32, %rsp
> + vmovaps %ymm0, (%rsp)
> + pushq %rbx
> pushq %r14
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r14, 0)
> - subq $48, %rsp
> + movq %rdi, %rbx
> movq %rsi, %r14
> - vmovaps %ymm0, (%rsp)
> - movq %rdi, %r13
> - vmovaps 16(%rsp), %xmm1
> - vmovaps %xmm1, 32(%rsp)
> vzeroupper
> - vmovaps (%rsp), %xmm0
> call HIDDEN_JUMPTARGET(\callee)
> vmovaps 32(%rsp), %xmm0
> - lea (%rsp), %rdi
> - lea 16(%rsp), %rsi
> + leaq 16(%rbx), %rdi
> + leaq 16(%r14), %rsi
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps (%rsp), %xmm0
> - vmovaps 16(%rsp), %xmm1
> - vmovaps %xmm0, 16(%r13)
> - vmovaps %xmm1, 16(%r14)
> - addq $48, %rsp
> popq %r14
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r14)
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> + popq %rbx
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -245,15 +213,16 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - subq $128, %rsp
> + subq $64, %rsp
> vmovups %zmm0, (%rsp)
> - vmovupd (%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 64(%rsp)
> + vmovupd %ymm0, (%rsp)
> vmovupd 32(%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 96(%rsp)
> - vmovups 64(%rsp), %zmm0
> + /* combine ymm0 (return of second call) with result of first
> + call (saved on stack). */
> + vmovaps (%rsp), %ymm1
> + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -270,18 +239,19 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - subq $192, %rsp
> + addq $-128, %rsp
> vmovups %zmm0, (%rsp)
> vmovups %zmm1, 64(%rsp)
> - vmovups (%rsp), %ymm0
> - vmovups 64(%rsp), %ymm1
> + /* ymm0 and ymm1 are already set. */
> call HIDDEN_JUMPTARGET(\callee)
> - vmovups %ymm0, 128(%rsp)
> - vmovups 32(%rsp), %ymm0
> vmovups 96(%rsp), %ymm1
> + vmovaps %ymm0, (%rsp)
> + vmovups 32(%rsp), %ymm0
> call HIDDEN_JUMPTARGET(\callee)
> - vmovups %ymm0, 160(%rsp)
> - vmovups 128(%rsp), %zmm0
> + /* combine ymm0 (return of second call) with result of first
> + call (saved on stack). */
> + vmovaps (%rsp), %ymm1
> + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> @@ -298,25 +268,20 @@
> movq %rsp, %rbp
> cfi_def_cfa_register (%rbp)
> andq $-64, %rsp
> - pushq %r12
> - pushq %r13
> - subq $176, %rsp
> - movq %rsi, %r13
> + subq $64, %rsp
> vmovaps %zmm0, (%rsp)
> - movq %rdi, %r12
> - vmovaps (%rsp), %ymm0
> + pushq %rbx
> + pushq %r14
> + movq %rdi, %rbx
> + movq %rsi, %r14
> + /* ymm0 is already set. */
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 32(%rsp), %ymm0
> - lea 64(%rsp), %rdi
> - lea 96(%rsp), %rsi
> + vmovaps 48(%rsp), %ymm0
> + leaq 32(%rbx), %rdi
> + leaq 32(%r14), %rsi
> call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 64(%rsp), %ymm0
> - vmovaps 96(%rsp), %ymm1
> - vmovaps %ymm0, 32(%r12)
> - vmovaps %ymm1, 32(%r13)
> - addq $176, %rsp
> - popq %r13
> - popq %r12
> + popq %r14
> + popq %rbx
> movq %rbp, %rsp
> cfi_def_cfa_register (%rsp)
> popq %rbp
> --
> 2.34.1
>
--
H.J.
* Re: [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-19 0:06 ` H.J. Lu via Libc-alpha
@ 2022-11-19 0:13 ` Noah Goldstein via Libc-alpha
0 siblings, 0 replies; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-19 0:13 UTC (permalink / raw)
To: H.J. Lu; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 4:07 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > 1. Remove unnecessary spills.
> > 2. Fix some small missed optimizations.
> >
> > All math and mathvec tests pass on x86.
> > ---
> > sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 183 +++++++-----------
> > sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++-------------
> > 2 files changed, 174 insertions(+), 244 deletions(-)
> >
> > diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> > index b03a2122b9..78c30c56cb 100644
> > --- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> > +++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> > @@ -18,39 +18,38 @@
> >
> > /* SSE2 ISA version as wrapper to scalar. */
> > .macro WRAPPER_IMPL_SSE2 callee
> > - subq $40, %rsp
> > - cfi_adjust_cfa_offset (40)
> > + subq $24, %rsp
> > + cfi_adjust_cfa_offset (24)
> > movaps %xmm0, (%rsp)
> > call JUMPTARGET(\callee)
> > - movsd %xmm0, 16(%rsp)
> > + movsd %xmm0, (%rsp)
> > movsd 8(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movsd 16(%rsp), %xmm1
> > - movsd %xmm0, 24(%rsp)
> > + movsd (%rsp), %xmm1
> > unpcklpd %xmm0, %xmm1
> > movaps %xmm1, %xmm0
> > - addq $40, %rsp
> > - cfi_adjust_cfa_offset (-40)
> > + addq $24, %rsp
> > + cfi_adjust_cfa_offset (-24)
> > ret
> > .endm
> >
> > +
> > /* 2 argument SSE2 ISA version as wrapper to scalar. */
> > .macro WRAPPER_IMPL_SSE2_ff callee
> > - subq $56, %rsp
> > - cfi_adjust_cfa_offset (56)
> > + subq $40, %rsp
> > + cfi_adjust_cfa_offset (40)
> > movaps %xmm0, (%rsp)
> > movaps %xmm1, 16(%rsp)
> > call JUMPTARGET(\callee)
> > - movsd %xmm0, 32(%rsp)
> > + movsd %xmm0, (%rsp)
> > movsd 8(%rsp), %xmm0
> > movsd 24(%rsp), %xmm1
> > call JUMPTARGET(\callee)
> > - movsd 32(%rsp), %xmm1
> > - movsd %xmm0, 40(%rsp)
> > + movsd (%rsp), %xmm1
> > unpcklpd %xmm0, %xmm1
> > movaps %xmm1, %xmm0
> > - addq $56, %rsp
> > - cfi_adjust_cfa_offset (-56)
> > + addq $40, %rsp
> > + cfi_adjust_cfa_offset (-40)
> > ret
> > .endm
> >
> > @@ -62,34 +61,22 @@
> > pushq %rbx
> > cfi_adjust_cfa_offset (8)
> > cfi_rel_offset (%rbx, 0)
> > + subq $24, %rsp
> > + cfi_adjust_cfa_offset (24)
> > + movaps %xmm0, (%rsp)
> > movq %rdi, %rbp
> > movq %rsi, %rbx
> > - subq $40, %rsp
> > - cfi_adjust_cfa_offset (40)
> > - leaq 16(%rsp), %rsi
> > - leaq 24(%rsp), %rdi
> > - movaps %xmm0, (%rsp)
> > call JUMPTARGET(\callee)
> > - leaq 16(%rsp), %rsi
> > - leaq 24(%rsp), %rdi
> > - movsd 24(%rsp), %xmm0
> > - movapd (%rsp), %xmm1
> > - movsd %xmm0, 0(%rbp)
> > - unpckhpd %xmm1, %xmm1
> > - movsd 16(%rsp), %xmm0
> > - movsd %xmm0, (%rbx)
> > - movapd %xmm1, %xmm0
> > + movsd 8(%rsp), %xmm0
> > + leaq 8(%rbp), %rdi
> > + leaq 8(%rbx), %rsi
> > call JUMPTARGET(\callee)
> > - movsd 24(%rsp), %xmm0
> > - movsd %xmm0, 8(%rbp)
> > - movsd 16(%rsp), %xmm0
> > - movsd %xmm0, 8(%rbx)
> > - addq $40, %rsp
> > - cfi_adjust_cfa_offset (-40)
> > - popq %rbx
> > + addq $24, %rsp
> > + cfi_adjust_cfa_offset (-24)
> > + pop %rbx
>
> popq
Fixed in V5.
>
> > cfi_adjust_cfa_offset (-8)
> > cfi_restore (%rbx)
> > - popq %rbp
> > + pop %rbp
>
> Why this change?
No reason. Fixed in V5.
>
> > cfi_adjust_cfa_offset (-8)
> > cfi_restore (%rbp)
> > ret
> > @@ -104,15 +91,17 @@
> > cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > subq $32, %rsp
> > - vextractf128 $1, %ymm0, (%rsp)
> > + vmovaps %ymm0, (%rsp)
> > vzeroupper
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovapd %xmm0, 16(%rsp)
> > - vmovaps (%rsp), %xmm0
> > + vmovaps %xmm0, (%rsp)
> > + vmovaps 16(%rsp), %xmm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovapd %xmm0, %xmm1
> > - vmovapd 16(%rsp), %xmm0
> > - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> > + /* combine xmm0 (return of second call) with result of first
> > + call (saved on stack). Might be worth exploring logic that
> > + uses `vpblend` and reads in ymm1 using -16(rsp). */
> > + vmovaps (%rsp), %xmm1
> > + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -130,17 +119,19 @@
> > cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > subq $64, %rsp
> > - vextractf128 $1, %ymm0, 16(%rsp)
> > - vextractf128 $1, %ymm1, (%rsp)
> > + vmovaps %ymm0, (%rsp)
> > + vmovaps %ymm1, 32(%rsp)
> > vzeroupper
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, 32(%rsp)
> > + vmovaps 48(%rsp), %xmm1
> > + vmovaps %xmm0, (%rsp)
> > vmovaps 16(%rsp), %xmm0
> > - vmovaps (%rsp), %xmm1
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, %xmm1
> > - vmovaps 32(%rsp), %xmm0
> > - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> > + /* combine xmm0 (return of second call) with result of first
> > + call (saved on stack). Might be worth exploring logic that
> > + uses `vpblend` and reads in ymm1 using -16(rsp). */
> > + vmovaps (%rsp), %xmm1
> > + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -155,35 +146,21 @@
> > cfi_adjust_cfa_offset (8)
> > cfi_rel_offset (%rbp, 0)
> > movq %rsp, %rbp
> > - cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > - pushq %r13
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r13, 0)
> > + subq $32, %rsp
> > + vmovaps %ymm0, (%rsp)
> > + pushq %rbx
> > pushq %r14
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r14, 0)
> > - subq $48, %rsp
> > + movq %rdi, %rbx
> > movq %rsi, %r14
> > - movq %rdi, %r13
> > - vextractf128 $1, %ymm0, 32(%rsp)
> > vzeroupper
> > call HIDDEN_JUMPTARGET(\callee)
> > vmovaps 32(%rsp), %xmm0
> > - lea (%rsp), %rdi
> > - lea 16(%rsp), %rsi
> > + leaq 16(%rbx), %rdi
> > + leaq 16(%r14), %rsi
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovapd (%rsp), %xmm0
> > - vmovapd 16(%rsp), %xmm1
> > - vmovapd %xmm0, 16(%r13)
> > - vmovapd %xmm1, 16(%r14)
> > - addq $48, %rsp
> > popq %r14
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r14)
> > - popq %r13
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r13)
> > + popq %rbx
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -200,15 +177,16 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - subq $128, %rsp
> > + subq $64, %rsp
> > vmovups %zmm0, (%rsp)
> > - vmovupd (%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 64(%rsp)
> > + vmovupd %ymm0, (%rsp)
> > vmovupd 32(%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 96(%rsp)
> > - vmovups 64(%rsp), %zmm0
> > + /* combine ymm0 (return of second call) with result of first
> > + call (saved on stack). */
> > + vmovaps (%rsp), %ymm1
> > + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -225,18 +203,19 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - subq $192, %rsp
> > + addq $-128, %rsp
> > vmovups %zmm0, (%rsp)
> > vmovups %zmm1, 64(%rsp)
> > - vmovupd (%rsp), %ymm0
> > - vmovupd 64(%rsp), %ymm1
> > + /* ymm0 and ymm1 are already set. */
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 128(%rsp)
> > - vmovupd 32(%rsp), %ymm0
> > - vmovupd 96(%rsp), %ymm1
> > + vmovups 96(%rsp), %ymm1
> > + vmovaps %ymm0, (%rsp)
> > + vmovups 32(%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 160(%rsp)
> > - vmovups 128(%rsp), %zmm0
> > + /* combine ymm0 (return of second call) with result of first
> > + call (saved on stack). */
> > + vmovaps (%rsp), %ymm1
> > + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -253,34 +232,20 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - pushq %r12
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r12, 0)
> > - pushq %r13
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r13, 0)
> > - subq $176, %rsp
> > - movq %rsi, %r13
> > - vmovups %zmm0, (%rsp)
> > - movq %rdi, %r12
> > - vmovupd (%rsp), %ymm0
> > + subq $64, %rsp
> > + vmovaps %zmm0, (%rsp)
> > + pushq %rbx
> > + pushq %r14
> > + movq %rdi, %rbx
> > + movq %rsi, %r14
> > + /* ymm0 is already set. */
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd 32(%rsp), %ymm0
> > - lea 64(%rsp), %rdi
> > - lea 96(%rsp), %rsi
> > + vmovaps 48(%rsp), %ymm0
> > + leaq 32(%rbx), %rdi
> > + leaq 32(%r14), %rsi
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd 64(%rsp), %ymm0
> > - vmovupd 96(%rsp), %ymm1
> > - vmovupd %ymm0, 32(%r12)
> > - vmovupd %ymm1, 32(%r13)
> > - vzeroupper
> > - addq $176, %rsp
> > - popq %r13
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r13)
> > - popq %r12
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r12)
> > + popq %r14
> > + popq %rbx
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> > index cecf6c8384..43f2b91f32 100644
> > --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> > +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> > @@ -18,61 +18,66 @@
> >
> > /* SSE2 ISA version as wrapper to scalar. */
> > .macro WRAPPER_IMPL_SSE2 callee
> > - subq $40, %rsp
> > - cfi_adjust_cfa_offset (40)
> > + push %rbx
> > + cfi_adjust_cfa_offset (8)
> > + cfi_rel_offset (%rbx, 0)
> > + subq $16, %rsp
> > + cfi_adjust_cfa_offset (16)
> > movaps %xmm0, (%rsp)
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 16(%rsp)
> > + movss %xmm0, (%rsp)
> > movss 4(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 20(%rsp)
> > + movss %xmm0, 4(%rsp)
> > movss 8(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 24(%rsp)
> > + movd %xmm0, %ebx
> > movss 12(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss 16(%rsp), %xmm3
> > - movss 20(%rsp), %xmm2
> > - movss 24(%rsp), %xmm1
> > - movss %xmm0, 28(%rsp)
> > - unpcklps %xmm1, %xmm3
> > - unpcklps %xmm0, %xmm2
> > - unpcklps %xmm2, %xmm3
> > - movaps %xmm3, %xmm0
> > - addq $40, %rsp
> > - cfi_adjust_cfa_offset (-40)
> > + movd %ebx, %xmm1
> > + unpcklps %xmm0, %xmm1
> > + movsd (%rsp), %xmm0
> > + unpcklpd %xmm1, %xmm0
> > + addq $16, %rsp
> > + cfi_adjust_cfa_offset (-16)
> > + pop %rbx
> > + cfi_adjust_cfa_offset (-8)
> > + cfi_restore (%rbx)
> > ret
> > .endm
> >
> > /* 2 argument SSE2 ISA version as wrapper to scalar. */
> > .macro WRAPPER_IMPL_SSE2_ff callee
> > - subq $56, %rsp
> > - cfi_adjust_cfa_offset (56)
> > + push %rbx
> > + cfi_adjust_cfa_offset (8)
> > + cfi_rel_offset (%rbx, 0)
> > + subq $32, %rsp
> > > + cfi_adjust_cfa_offset (32)
> > movaps %xmm0, (%rsp)
> > movaps %xmm1, 16(%rsp)
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 32(%rsp)
> > - movss 4(%rsp), %xmm0
> > movss 20(%rsp), %xmm1
> > + movss %xmm0, 0(%rsp)
> > + movss 4(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 36(%rsp)
> > - movss 8(%rsp), %xmm0
> > movss 24(%rsp), %xmm1
> > + movss %xmm0, 4(%rsp)
> > + movss 8(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss %xmm0, 40(%rsp)
> > - movss 12(%rsp), %xmm0
> > movss 28(%rsp), %xmm1
> > + movd %xmm0, %ebx
> > + movss 12(%rsp), %xmm0
> > call JUMPTARGET(\callee)
> > - movss 32(%rsp), %xmm3
> > - movss 36(%rsp), %xmm2
> > - movss 40(%rsp), %xmm1
> > - movss %xmm0, 44(%rsp)
> > - unpcklps %xmm1, %xmm3
> > - unpcklps %xmm0, %xmm2
> > - unpcklps %xmm2, %xmm3
> > - movaps %xmm3, %xmm0
> > - addq $56, %rsp
> > - cfi_adjust_cfa_offset (-56)
> > + /* merge 4x results into xmm0. */
> > + movd %ebx, %xmm1
> > + unpcklps %xmm0, %xmm1
> > + movsd (%rsp), %xmm0
> > + unpcklpd %xmm1, %xmm0
> > + addq $32, %rsp
> > + cfi_adjust_cfa_offset (-32)
> > + pop %rbx
> > + cfi_adjust_cfa_offset (-8)
> > + cfi_restore (%rbx)
> > ret
> > .endm
> >
> > @@ -86,48 +91,24 @@
> > cfi_rel_offset (%rbx, 0)
> > movq %rdi, %rbp
> > movq %rsi, %rbx
> > - subq $40, %rsp
> > - cfi_adjust_cfa_offset (40)
> > - leaq 24(%rsp), %rsi
> > - leaq 28(%rsp), %rdi
> > + subq $24, %rsp
> > + cfi_adjust_cfa_offset (24)
> > movaps %xmm0, (%rsp)
> > call JUMPTARGET(\callee)
> > - leaq 24(%rsp), %rsi
> > - leaq 28(%rsp), %rdi
> > - movss 28(%rsp), %xmm0
> > - movss %xmm0, 0(%rbp)
> > - movaps (%rsp), %xmm1
> > - movss 24(%rsp), %xmm0
> > - movss %xmm0, (%rbx)
> > - movaps %xmm1, %xmm0
> > - shufps $85, %xmm1, %xmm0
> > + movss 4(%rsp), %xmm0
> > + leaq 4(%rbp), %rdi
> > + leaq 4(%rbx), %rsi
> > call JUMPTARGET(\callee)
> > - movss 28(%rsp), %xmm0
> > - leaq 24(%rsp), %rsi
> > - movss %xmm0, 4(%rbp)
> > - leaq 28(%rsp), %rdi
> > - movaps (%rsp), %xmm1
> > - movss 24(%rsp), %xmm0
> > - movss %xmm0, 4(%rbx)
> > - movaps %xmm1, %xmm0
> > - unpckhps %xmm1, %xmm0
> > + movss 8(%rsp), %xmm0
> > + leaq 8(%rbp), %rdi
> > + leaq 8(%rbx), %rsi
> > call JUMPTARGET(\callee)
> > - movaps (%rsp), %xmm1
> > - leaq 24(%rsp), %rsi
> > - leaq 28(%rsp), %rdi
> > - movss 28(%rsp), %xmm0
> > - shufps $255, %xmm1, %xmm1
> > - movss %xmm0, 8(%rbp)
> > - movss 24(%rsp), %xmm0
> > - movss %xmm0, 8(%rbx)
> > - movaps %xmm1, %xmm0
> > + movss 12(%rsp), %xmm0
> > + leaq 12(%rbp), %rdi
> > + leaq 12(%rbx), %rsi
> > call JUMPTARGET(\callee)
> > - movss 28(%rsp), %xmm0
> > - movss %xmm0, 12(%rbp)
> > - movss 24(%rsp), %xmm0
> > - movss %xmm0, 12(%rbx)
> > - addq $40, %rsp
> > - cfi_adjust_cfa_offset (-40)
> > + addq $24, %rsp
> > + cfi_adjust_cfa_offset (-24)
> > popq %rbx
> > cfi_adjust_cfa_offset (-8)
> > cfi_restore (%rbx)
> > @@ -146,15 +127,17 @@
> > cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > subq $32, %rsp
> > - vextractf128 $1, %ymm0, (%rsp)
> > + vmovaps %ymm0, (%rsp)
> > vzeroupper
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, 16(%rsp)
> > - vmovaps (%rsp), %xmm0
> > - call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, %xmm1
> > + vmovaps %xmm0, (%rsp)
> > vmovaps 16(%rsp), %xmm0
> > - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> > + call HIDDEN_JUMPTARGET(\callee)
> > + /* combine xmm0 (return of second call) with result of first
> > + call (saved on stack). Might be worth exploring logic that
> > + uses `vpblend` and reads in ymm1 using -16(rsp). */
> > + vmovaps (%rsp), %xmm1
> > + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -172,17 +155,19 @@
> > cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > subq $64, %rsp
> > - vextractf128 $1, %ymm0, 16(%rsp)
> > - vextractf128 $1, %ymm1, (%rsp)
> > + vmovaps %ymm0, (%rsp)
> > + vmovaps %ymm1, 32(%rsp)
> > vzeroupper
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, 32(%rsp)
> > + vmovaps 48(%rsp), %xmm1
> > + vmovaps %xmm0, (%rsp)
> > vmovaps 16(%rsp), %xmm0
> > - vmovaps (%rsp), %xmm1
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps %xmm0, %xmm1
> > - vmovaps 32(%rsp), %xmm0
> > - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> > + /* combine xmm0 (return of second call) with result of first
> > + call (saved on stack). Might be worth exploring logic that
> > + uses `vpblend` and reads in ymm1 using -16(rsp). */
> > + vmovaps (%rsp), %xmm1
> > + vinsertf128 $1, %xmm0, %ymm1, %ymm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -197,38 +182,21 @@
> > cfi_adjust_cfa_offset (8)
> > cfi_rel_offset (%rbp, 0)
> > movq %rsp, %rbp
> > - cfi_def_cfa_register (%rbp)
> > andq $-32, %rsp
> > - pushq %r13
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r13, 0)
> > + subq $32, %rsp
> > + vmovaps %ymm0, (%rsp)
> > + pushq %rbx
> > pushq %r14
> > - cfi_adjust_cfa_offset (8)
> > - cfi_rel_offset (%r14, 0)
> > - subq $48, %rsp
> > + movq %rdi, %rbx
> > movq %rsi, %r14
> > - vmovaps %ymm0, (%rsp)
> > - movq %rdi, %r13
> > - vmovaps 16(%rsp), %xmm1
> > - vmovaps %xmm1, 32(%rsp)
> > vzeroupper
> > - vmovaps (%rsp), %xmm0
> > call HIDDEN_JUMPTARGET(\callee)
> > vmovaps 32(%rsp), %xmm0
> > - lea (%rsp), %rdi
> > - lea 16(%rsp), %rsi
> > + leaq 16(%rbx), %rdi
> > + leaq 16(%r14), %rsi
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps (%rsp), %xmm0
> > - vmovaps 16(%rsp), %xmm1
> > - vmovaps %xmm0, 16(%r13)
> > - vmovaps %xmm1, 16(%r14)
> > - addq $48, %rsp
> > popq %r14
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r14)
> > - popq %r13
> > - cfi_adjust_cfa_offset (-8)
> > - cfi_restore (%r13)
> > + popq %rbx
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -245,15 +213,16 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - subq $128, %rsp
> > + subq $64, %rsp
> > vmovups %zmm0, (%rsp)
> > - vmovupd (%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 64(%rsp)
> > + vmovupd %ymm0, (%rsp)
> > vmovupd 32(%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovupd %ymm0, 96(%rsp)
> > - vmovups 64(%rsp), %zmm0
> > + /* combine ymm0 (return of second call) with result of first
> > + call (saved on stack). */
> > + vmovaps (%rsp), %ymm1
> > + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -270,18 +239,19 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - subq $192, %rsp
> > + addq $-128, %rsp
> > vmovups %zmm0, (%rsp)
> > vmovups %zmm1, 64(%rsp)
> > - vmovups (%rsp), %ymm0
> > - vmovups 64(%rsp), %ymm1
> > + /* ymm0 and ymm1 are already set. */
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovups %ymm0, 128(%rsp)
> > - vmovups 32(%rsp), %ymm0
> > vmovups 96(%rsp), %ymm1
> > + vmovaps %ymm0, (%rsp)
> > + vmovups 32(%rsp), %ymm0
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovups %ymm0, 160(%rsp)
> > - vmovups 128(%rsp), %zmm0
> > + /* combine ymm0 (return of second call) with result of first
> > + call (saved on stack). */
> > + vmovaps (%rsp), %ymm1
> > + vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > @@ -298,25 +268,20 @@
> > movq %rsp, %rbp
> > cfi_def_cfa_register (%rbp)
> > andq $-64, %rsp
> > - pushq %r12
> > - pushq %r13
> > - subq $176, %rsp
> > - movq %rsi, %r13
> > + subq $64, %rsp
> > vmovaps %zmm0, (%rsp)
> > - movq %rdi, %r12
> > - vmovaps (%rsp), %ymm0
> > + pushq %rbx
> > + pushq %r14
> > + movq %rdi, %rbx
> > + movq %rsi, %r14
> > + /* ymm0 is already set. */
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps 32(%rsp), %ymm0
> > - lea 64(%rsp), %rdi
> > - lea 96(%rsp), %rsi
> > + vmovaps 48(%rsp), %ymm0
> > + leaq 32(%rbx), %rdi
> > + leaq 32(%r14), %rsi
> > call HIDDEN_JUMPTARGET(\callee)
> > - vmovaps 64(%rsp), %ymm0
> > - vmovaps 96(%rsp), %ymm1
> > - vmovaps %ymm0, 32(%r12)
> > - vmovaps %ymm1, 32(%r13)
> > - addq $176, %rsp
> > - popq %r13
> > - popq %r12
> > + popq %r14
> > + popq %rbx
> > movq %rbp, %rsp
> > cfi_def_cfa_register (%rsp)
> > popq %rbp
> > --
> > 2.34.1
> >
>
>
> --
> H.J.
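The ymm-wrapper pattern that recurs through the hunks quoted above is worth restating compactly: spill the full ymm argument once, run the SSE-width callee on each 128-bit half, then vinsertf128 the second result above the first. The intrinsics sketch below is illustrative only: sse_core is an invented stand-in for the routine reached via HIDDEN_JUMPTARGET, and intrinsics hide the stack traffic and the vzeroupper the real wrapper needs because no vector register survives the calls. Build with -mavx.

#include <immintrin.h>
#include <stdio.h>

/* Toy SSE-width callee standing in for HIDDEN_JUMPTARGET(\callee).  */
static __m128
sse_core (__m128 x)
{
  return _mm_add_ps (x, _mm_set1_ps (1.0f));
}

static __m256
wrapper_avx_sketch (__m256 x)
{
  /* vmovaps %ymm0, (%rsp); the first call consumes the low half.  */
  __m128 lo = sse_core (_mm256_castps256_ps128 (x));
  /* vmovaps 16(%rsp), %xmm0; the second call consumes the high half.  */
  __m128 hi = sse_core (_mm256_extractf128_ps (x, 1));
  /* vmovaps (%rsp), %xmm1; vinsertf128 $1, %xmm0, %ymm1, %ymm0.  */
  return _mm256_insertf128_ps (_mm256_castps128_ps256 (lo), hi, 1);
}

int
main (void)
{
  float out[8];
  _mm256_storeu_ps (out, wrapper_avx_sketch (_mm256_set1_ps (2.0f)));
  printf ("%f %f\n", out[0], out[7]);
  return 0;
}

The old sequence extracted only the high half to the stack and needed a trailing vmovaps %xmm0, %xmm1 shuffle before the insert; spilling the whole ymm up front makes both reloads plain loads and drops the shuffle.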
* [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:22 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
2022-11-18 21:22 ` [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-18 21:22 ` Noah Goldstein via Libc-alpha
2022-11-18 21:27 ` H.J. Lu via Libc-alpha
2022-11-18 23:25 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h H.J. Lu via Libc-alpha
2 siblings, 1 reply; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-18 21:22 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
The code is exactly the same for the two, so it is better to maintain
only one version.
All math and mathvec tests pass on x86.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 172 +-------------------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 172 +-------------------
sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h | 190 ++++++++++++++++++++++
3 files changed, 192 insertions(+), 342 deletions(-)
create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 78c30c56cb..52407da8ed 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -82,174 +82,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vmovaps %ymm0, (%rsp)
- vmovaps %ymm1, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %xmm1
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- leaq 16(%rbx), %rdi
- leaq 16(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovups %zmm0, (%rsp)
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, (%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- addq $-128, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- /* ymm0 and ymm1 are already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovups 96(%rsp), %ymm1
- vmovaps %ymm0, (%rsp)
- vmovups 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovaps %zmm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- /* ymm0 is already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %ymm0
- leaq 32(%rbx), %rdi
- leaq 32(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index 43f2b91f32..d9266563ef 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -118,174 +118,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vmovaps %ymm0, (%rsp)
- vmovaps %ymm1, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %xmm1
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- leaq 16(%rbx), %rdi
- leaq 16(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovups %zmm0, (%rsp)
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, (%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- addq $-128, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- /* ymm0 and ymm1 are already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovups 96(%rsp), %ymm1
- vmovaps %ymm0, (%rsp)
- vmovups 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovaps %zmm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- /* ymm0 is already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %ymm0
- leaq 32(%rbx), %rdi
- leaq 32(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
diff --git a/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
new file mode 100644
index 0000000000..bd934ad578
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
@@ -0,0 +1,190 @@
+/* Common float/double wrapper implementations of vector math
+ functions.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512 callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovups %zmm0, (%rsp)
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, (%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ addq $-128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ /* ymm0 and ymm1 are already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
--
2.34.1
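The 3-argument (fFF, sincos-style) wrappers in the new file are where the spill removal shows up most clearly: the half-width callee writes its two results through the caller's pointers directly, and the second call simply advances both pointers by half a vector, which is why the old copy-back block (vmovaps ... 32(%r12)/32(%r13)) disappears. Below is a sketch of the AVX512 double flavor in intrinsics, illustrative only: avx2_core_fFF is an invented stand-in for the AVX2 ISA callee, the toy body is not the real implementation, and it needs -mavx512f plus AVX512 hardware to run.

#include <immintrin.h>
#include <stdio.h>

/* Toy two-output callee standing in for HIDDEN_JUMPTARGET(\callee).  */
static void
avx2_core_fFF (__m256d x, double *out1, double *out2)
{
  _mm256_storeu_pd (out1, x);
  _mm256_storeu_pd (out2, _mm256_sub_pd (_mm256_setzero_pd (), x));
}

static void
wrapper_avx512_fFF_sketch (__m512d x, double *out1, double *out2)
{
  /* First call: low 4 doubles, results through the original pointers
     (%rdi/%rsi, saved in %rbx/%r14 across the call).  */
  avx2_core_fFF (_mm512_castpd512_pd256 (x), out1, out2);
  /* leaq 32(%rbx), %rdi; leaq 32(%r14), %rsi: bump both output
     pointers by 32 bytes (4 doubles); vmovaps 48(%rsp), %ymm0 reloads
     the high half.  Nothing is copied back afterwards.  */
  avx2_core_fFF (_mm512_extractf64x4_pd (x, 1), out1 + 4, out2 + 4);
}

int
main (void)
{
  double s[8], c[8];
  wrapper_avx512_fFF_sketch (_mm512_set1_pd (3.0), s, c);
  printf ("%f %f\n", s[0], c[7]);
  return 0;
}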
* Re: [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:22 ` [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 " Noah Goldstein via Libc-alpha
@ 2022-11-18 21:27 ` H.J. Lu via Libc-alpha
2022-11-18 21:59 ` Noah Goldstein via Libc-alpha
0 siblings, 1 reply; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-18 21:27 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> The code is exactly the same for the two, so it is better to maintain
> only one version.
>
> All math and mathvec tests pass on x86.
Is the output of "objdump -dw" on libmvec.so the same before and after
the patch?
--
H.J.
* Re: [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:27 ` H.J. Lu via Libc-alpha
@ 2022-11-18 21:59 ` Noah Goldstein via Libc-alpha
2022-11-19 0:07 ` H.J. Lu via Libc-alpha
0 siblings, 1 reply; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-18 21:59 UTC (permalink / raw)
To: H.J. Lu; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 1:28 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > Code is exactly the same for the two so better to only maintain one
> > version.
> >
> > All math and mathvec tests pass on x86.
>
> Is the output of "objdump -dw" on libmvec.so the same before and after
> the patch?
>
Yes, for both libmvec.so and libm.so.
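(A comparison along those lines, for anyone reproducing it: build the tree before and after the series, run "objdump -dw" on each build's libmvec.so and libm.so, and diff the two dumps — identical output shows the refactor changes no generated code. The exact paths depend on the build directory.)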
> --
> H.J.
* Re: [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
2022-11-18 21:59 ` Noah Goldstein via Libc-alpha
@ 2022-11-19 0:07 ` H.J. Lu via Libc-alpha
0 siblings, 0 replies; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-19 0:07 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 1:59 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Fri, Nov 18, 2022 at 1:28 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > Code is exactly the same for the two so better to only maintain one
> > > version.
> > >
> > > All math and mathvec tests pass on x86.
> >
> > Is the output of "objdump -dw" on libmvec.so the same before and after
> > the patch?
> >
>
> Yes for both libmvec.so and libm.so.
Please mention this in the commit log. OK with the change.
Thanks.
--
H.J.
* Re: [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
2022-11-18 21:22 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
2022-11-18 21:22 ` [PATCH v4 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
2022-11-18 21:22 ` [PATCH v4 3/3] x86/fpu: Factor out shared avx2/avx512 " Noah Goldstein via Libc-alpha
@ 2022-11-18 23:25 ` H.J. Lu via Libc-alpha
2 siblings, 0 replies; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-18 23:25 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 1:23 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> Just reformat with the style convention used in other x86 assembler
> files. This doesn't change libm.so or libmvec.so.
> ---
> sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
> sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
> 2 files changed, 510 insertions(+), 510 deletions(-)
>
> diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> index 2334713015..b03a2122b9 100644
> --- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> +++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
> @@ -18,273 +18,273 @@
>
> /* SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2 callee
> - subq $40, %rsp
> - cfi_adjust_cfa_offset(40)
> - movaps %xmm0, (%rsp)
> - call JUMPTARGET(\callee)
> - movsd %xmm0, 16(%rsp)
> - movsd 8(%rsp), %xmm0
> - call JUMPTARGET(\callee)
> - movsd 16(%rsp), %xmm1
> - movsd %xmm0, 24(%rsp)
> - unpcklpd %xmm0, %xmm1
> - movaps %xmm1, %xmm0
> - addq $40, %rsp
> - cfi_adjust_cfa_offset(-40)
> - ret
> + subq $40, %rsp
> + cfi_adjust_cfa_offset (40)
> + movaps %xmm0, (%rsp)
> + call JUMPTARGET(\callee)
> + movsd %xmm0, 16(%rsp)
> + movsd 8(%rsp), %xmm0
> + call JUMPTARGET(\callee)
> + movsd 16(%rsp), %xmm1
> + movsd %xmm0, 24(%rsp)
> + unpcklpd %xmm0, %xmm1
> + movaps %xmm1, %xmm0
> + addq $40, %rsp
> + cfi_adjust_cfa_offset (-40)
> + ret
> .endm
>
> /* 2 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_ff callee
> - subq $56, %rsp
> - cfi_adjust_cfa_offset(56)
> - movaps %xmm0, (%rsp)
> - movaps %xmm1, 16(%rsp)
> - call JUMPTARGET(\callee)
> - movsd %xmm0, 32(%rsp)
> - movsd 8(%rsp), %xmm0
> - movsd 24(%rsp), %xmm1
> - call JUMPTARGET(\callee)
> - movsd 32(%rsp), %xmm1
> - movsd %xmm0, 40(%rsp)
> - unpcklpd %xmm0, %xmm1
> - movaps %xmm1, %xmm0
> - addq $56, %rsp
> - cfi_adjust_cfa_offset(-56)
> - ret
> + subq $56, %rsp
> + cfi_adjust_cfa_offset (56)
> + movaps %xmm0, (%rsp)
> + movaps %xmm1, 16(%rsp)
> + call JUMPTARGET(\callee)
> + movsd %xmm0, 32(%rsp)
> + movsd 8(%rsp), %xmm0
> + movsd 24(%rsp), %xmm1
> + call JUMPTARGET(\callee)
> + movsd 32(%rsp), %xmm1
> + movsd %xmm0, 40(%rsp)
> + unpcklpd %xmm0, %xmm1
> + movaps %xmm1, %xmm0
> + addq $56, %rsp
> + cfi_adjust_cfa_offset (-56)
> + ret
> .endm
>
> /* 3 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - pushq %rbx
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbx, 0)
> - movq %rdi, %rbp
> - movq %rsi, %rbx
> - subq $40, %rsp
> - cfi_adjust_cfa_offset(40)
> - leaq 16(%rsp), %rsi
> - leaq 24(%rsp), %rdi
> - movaps %xmm0, (%rsp)
> - call JUMPTARGET(\callee)
> - leaq 16(%rsp), %rsi
> - leaq 24(%rsp), %rdi
> - movsd 24(%rsp), %xmm0
> - movapd (%rsp), %xmm1
> - movsd %xmm0, 0(%rbp)
> - unpckhpd %xmm1, %xmm1
> - movsd 16(%rsp), %xmm0
> - movsd %xmm0, (%rbx)
> - movapd %xmm1, %xmm0
> - call JUMPTARGET(\callee)
> - movsd 24(%rsp), %xmm0
> - movsd %xmm0, 8(%rbp)
> - movsd 16(%rsp), %xmm0
> - movsd %xmm0, 8(%rbx)
> - addq $40, %rsp
> - cfi_adjust_cfa_offset(-40)
> - popq %rbx
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbx)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + pushq %rbx
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbx, 0)
> + movq %rdi, %rbp
> + movq %rsi, %rbx
> + subq $40, %rsp
> + cfi_adjust_cfa_offset (40)
> + leaq 16(%rsp), %rsi
> + leaq 24(%rsp), %rdi
> + movaps %xmm0, (%rsp)
> + call JUMPTARGET(\callee)
> + leaq 16(%rsp), %rsi
> + leaq 24(%rsp), %rdi
> + movsd 24(%rsp), %xmm0
> + movapd (%rsp), %xmm1
> + movsd %xmm0, 0(%rbp)
> + unpckhpd %xmm1, %xmm1
> + movsd 16(%rsp), %xmm0
> + movsd %xmm0, (%rbx)
> + movapd %xmm1, %xmm0
> + call JUMPTARGET(\callee)
> + movsd 24(%rsp), %xmm0
> + movsd %xmm0, 8(%rbp)
> + movsd 16(%rsp), %xmm0
> + movsd %xmm0, 8(%rbx)
> + addq $40, %rsp
> + cfi_adjust_cfa_offset (-40)
> + popq %rbx
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbx)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - subq $32, %rsp
> - vextractf128 $1, %ymm0, (%rsp)
> - vzeroupper
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovapd %xmm0, 16(%rsp)
> - vmovaps (%rsp), %xmm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovapd %xmm0, %xmm1
> - vmovapd 16(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + subq $32, %rsp
> + vextractf128 $1, %ymm0, (%rsp)
> + vzeroupper
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovapd %xmm0, 16(%rsp)
> + vmovaps (%rsp), %xmm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovapd %xmm0, %xmm1
> + vmovapd 16(%rsp), %xmm0
> + vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX_ff callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - subq $64, %rsp
> - vextractf128 $1, %ymm0, 16(%rsp)
> - vextractf128 $1, %ymm1, (%rsp)
> - vzeroupper
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 32(%rsp)
> - vmovaps 16(%rsp), %xmm0
> - vmovaps (%rsp), %xmm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> - vmovaps 32(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + subq $64, %rsp
> + vextractf128 $1, %ymm0, 16(%rsp)
> + vextractf128 $1, %ymm1, (%rsp)
> + vzeroupper
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, 32(%rsp)
> + vmovaps 16(%rsp), %xmm0
> + vmovaps (%rsp), %xmm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, %xmm1
> + vmovaps 32(%rsp), %xmm0
> + vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> - pushq %r14
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r14, 0)
> - subq $48, %rsp
> - movq %rsi, %r14
> - movq %rdi, %r13
> - vextractf128 $1, %ymm0, 32(%rsp)
> - vzeroupper
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 32(%rsp), %xmm0
> - lea (%rsp), %rdi
> - lea 16(%rsp), %rsi
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovapd (%rsp), %xmm0
> - vmovapd 16(%rsp), %xmm1
> - vmovapd %xmm0, 16(%r13)
> - vmovapd %xmm1, 16(%r14)
> - addq $48, %rsp
> - popq %r14
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r14)
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + pushq %r13
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r13, 0)
> + pushq %r14
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r14, 0)
> + subq $48, %rsp
> + movq %rsi, %r14
> + movq %rdi, %r13
> + vextractf128 $1, %ymm0, 32(%rsp)
> + vzeroupper
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps 32(%rsp), %xmm0
> + lea (%rsp), %rdi
> + lea 16(%rsp), %rsi
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovapd (%rsp), %xmm0
> + vmovapd 16(%rsp), %xmm1
> + vmovapd %xmm0, 16(%r13)
> + vmovapd %xmm1, 16(%r14)
> + addq $48, %rsp
> + popq %r14
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r14)
> + popq %r13
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r13)
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512 callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - subq $128, %rsp
> - vmovups %zmm0, (%rsp)
> - vmovupd (%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 64(%rsp)
> - vmovupd 32(%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 96(%rsp)
> - vmovups 64(%rsp), %zmm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + subq $128, %rsp
> + vmovups %zmm0, (%rsp)
> + vmovupd (%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 64(%rsp)
> + vmovupd 32(%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 96(%rsp)
> + vmovups 64(%rsp), %zmm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512_ff callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - subq $192, %rsp
> - vmovups %zmm0, (%rsp)
> - vmovups %zmm1, 64(%rsp)
> - vmovupd (%rsp), %ymm0
> - vmovupd 64(%rsp), %ymm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 128(%rsp)
> - vmovupd 32(%rsp), %ymm0
> - vmovupd 96(%rsp), %ymm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 160(%rsp)
> - vmovups 128(%rsp), %zmm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + subq $192, %rsp
> + vmovups %zmm0, (%rsp)
> + vmovups %zmm1, 64(%rsp)
> + vmovupd (%rsp), %ymm0
> + vmovupd 64(%rsp), %ymm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 128(%rsp)
> + vmovupd 32(%rsp), %ymm0
> + vmovupd 96(%rsp), %ymm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 160(%rsp)
> + vmovups 128(%rsp), %zmm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - pushq %r12
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r12, 0)
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> - subq $176, %rsp
> - movq %rsi, %r13
> - vmovups %zmm0, (%rsp)
> - movq %rdi, %r12
> - vmovupd (%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd 32(%rsp), %ymm0
> - lea 64(%rsp), %rdi
> - lea 96(%rsp), %rsi
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd 64(%rsp), %ymm0
> - vmovupd 96(%rsp), %ymm1
> - vmovupd %ymm0, 32(%r12)
> - vmovupd %ymm1, 32(%r13)
> - vzeroupper
> - addq $176, %rsp
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> - popq %r12
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r12)
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + pushq %r12
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r12, 0)
> + pushq %r13
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r13, 0)
> + subq $176, %rsp
> + movq %rsi, %r13
> + vmovups %zmm0, (%rsp)
> + movq %rdi, %r12
> + vmovupd (%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd 32(%rsp), %ymm0
> + lea 64(%rsp), %rdi
> + lea 96(%rsp), %rsi
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd 64(%rsp), %ymm0
> + vmovupd 96(%rsp), %ymm1
> + vmovupd %ymm0, 32(%r12)
> + vmovupd %ymm1, 32(%r13)
> + vzeroupper
> + addq $176, %rsp
> + popq %r13
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r13)
> + popq %r12
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r12)
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
> diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> index c23da7ec83..cecf6c8384 100644
> --- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> +++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
> @@ -18,309 +18,309 @@
>
> /* SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2 callee
> - subq $40, %rsp
> - cfi_adjust_cfa_offset(40)
> - movaps %xmm0, (%rsp)
> - call JUMPTARGET(\callee)
> - movss %xmm0, 16(%rsp)
> - movss 4(%rsp), %xmm0
> - call JUMPTARGET(\callee)
> - movss %xmm0, 20(%rsp)
> - movss 8(%rsp), %xmm0
> - call JUMPTARGET(\callee)
> - movss %xmm0, 24(%rsp)
> - movss 12(%rsp), %xmm0
> - call JUMPTARGET(\callee)
> - movss 16(%rsp), %xmm3
> - movss 20(%rsp), %xmm2
> - movss 24(%rsp), %xmm1
> - movss %xmm0, 28(%rsp)
> - unpcklps %xmm1, %xmm3
> - unpcklps %xmm0, %xmm2
> - unpcklps %xmm2, %xmm3
> - movaps %xmm3, %xmm0
> - addq $40, %rsp
> - cfi_adjust_cfa_offset(-40)
> - ret
> + subq $40, %rsp
> + cfi_adjust_cfa_offset (40)
> + movaps %xmm0, (%rsp)
> + call JUMPTARGET(\callee)
> + movss %xmm0, 16(%rsp)
> + movss 4(%rsp), %xmm0
> + call JUMPTARGET(\callee)
> + movss %xmm0, 20(%rsp)
> + movss 8(%rsp), %xmm0
> + call JUMPTARGET(\callee)
> + movss %xmm0, 24(%rsp)
> + movss 12(%rsp), %xmm0
> + call JUMPTARGET(\callee)
> + movss 16(%rsp), %xmm3
> + movss 20(%rsp), %xmm2
> + movss 24(%rsp), %xmm1
> + movss %xmm0, 28(%rsp)
> + unpcklps %xmm1, %xmm3
> + unpcklps %xmm0, %xmm2
> + unpcklps %xmm2, %xmm3
> + movaps %xmm3, %xmm0
> + addq $40, %rsp
> + cfi_adjust_cfa_offset (-40)
> + ret
> .endm
>
> /* 2 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_ff callee
> - subq $56, %rsp
> - cfi_adjust_cfa_offset(56)
> - movaps %xmm0, (%rsp)
> - movaps %xmm1, 16(%rsp)
> - call JUMPTARGET(\callee)
> - movss %xmm0, 32(%rsp)
> - movss 4(%rsp), %xmm0
> - movss 20(%rsp), %xmm1
> - call JUMPTARGET(\callee)
> - movss %xmm0, 36(%rsp)
> - movss 8(%rsp), %xmm0
> - movss 24(%rsp), %xmm1
> - call JUMPTARGET(\callee)
> - movss %xmm0, 40(%rsp)
> - movss 12(%rsp), %xmm0
> - movss 28(%rsp), %xmm1
> - call JUMPTARGET(\callee)
> - movss 32(%rsp), %xmm3
> - movss 36(%rsp), %xmm2
> - movss 40(%rsp), %xmm1
> - movss %xmm0, 44(%rsp)
> - unpcklps %xmm1, %xmm3
> - unpcklps %xmm0, %xmm2
> - unpcklps %xmm2, %xmm3
> - movaps %xmm3, %xmm0
> - addq $56, %rsp
> - cfi_adjust_cfa_offset(-56)
> - ret
> + subq $56, %rsp
> + cfi_adjust_cfa_offset (56)
> + movaps %xmm0, (%rsp)
> + movaps %xmm1, 16(%rsp)
> + call JUMPTARGET(\callee)
> + movss %xmm0, 32(%rsp)
> + movss 4(%rsp), %xmm0
> + movss 20(%rsp), %xmm1
> + call JUMPTARGET(\callee)
> + movss %xmm0, 36(%rsp)
> + movss 8(%rsp), %xmm0
> + movss 24(%rsp), %xmm1
> + call JUMPTARGET(\callee)
> + movss %xmm0, 40(%rsp)
> + movss 12(%rsp), %xmm0
> + movss 28(%rsp), %xmm1
> + call JUMPTARGET(\callee)
> + movss 32(%rsp), %xmm3
> + movss 36(%rsp), %xmm2
> + movss 40(%rsp), %xmm1
> + movss %xmm0, 44(%rsp)
> + unpcklps %xmm1, %xmm3
> + unpcklps %xmm0, %xmm2
> + unpcklps %xmm2, %xmm3
> + movaps %xmm3, %xmm0
> + addq $56, %rsp
> + cfi_adjust_cfa_offset (-56)
> + ret
> .endm
>
> /* 3 argument SSE2 ISA version as wrapper to scalar. */
> .macro WRAPPER_IMPL_SSE2_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - pushq %rbx
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbx, 0)
> - movq %rdi, %rbp
> - movq %rsi, %rbx
> - subq $40, %rsp
> - cfi_adjust_cfa_offset(40)
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> - movaps %xmm0, (%rsp)
> - call JUMPTARGET(\callee)
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> - movss 28(%rsp), %xmm0
> - movss %xmm0, 0(%rbp)
> - movaps (%rsp), %xmm1
> - movss 24(%rsp), %xmm0
> - movss %xmm0, (%rbx)
> - movaps %xmm1, %xmm0
> - shufps $85, %xmm1, %xmm0
> - call JUMPTARGET(\callee)
> - movss 28(%rsp), %xmm0
> - leaq 24(%rsp), %rsi
> - movss %xmm0, 4(%rbp)
> - leaq 28(%rsp), %rdi
> - movaps (%rsp), %xmm1
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 4(%rbx)
> - movaps %xmm1, %xmm0
> - unpckhps %xmm1, %xmm0
> - call JUMPTARGET(\callee)
> - movaps (%rsp), %xmm1
> - leaq 24(%rsp), %rsi
> - leaq 28(%rsp), %rdi
> - movss 28(%rsp), %xmm0
> - shufps $255, %xmm1, %xmm1
> - movss %xmm0, 8(%rbp)
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 8(%rbx)
> - movaps %xmm1, %xmm0
> - call JUMPTARGET(\callee)
> - movss 28(%rsp), %xmm0
> - movss %xmm0, 12(%rbp)
> - movss 24(%rsp), %xmm0
> - movss %xmm0, 12(%rbx)
> - addq $40, %rsp
> - cfi_adjust_cfa_offset(-40)
> - popq %rbx
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbx)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + pushq %rbx
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbx, 0)
> + movq %rdi, %rbp
> + movq %rsi, %rbx
> + subq $40, %rsp
> + cfi_adjust_cfa_offset (40)
> + leaq 24(%rsp), %rsi
> + leaq 28(%rsp), %rdi
> + movaps %xmm0, (%rsp)
> + call JUMPTARGET(\callee)
> + leaq 24(%rsp), %rsi
> + leaq 28(%rsp), %rdi
> + movss 28(%rsp), %xmm0
> + movss %xmm0, 0(%rbp)
> + movaps (%rsp), %xmm1
> + movss 24(%rsp), %xmm0
> + movss %xmm0, (%rbx)
> + movaps %xmm1, %xmm0
> + shufps $85, %xmm1, %xmm0
> + call JUMPTARGET(\callee)
> + movss 28(%rsp), %xmm0
> + leaq 24(%rsp), %rsi
> + movss %xmm0, 4(%rbp)
> + leaq 28(%rsp), %rdi
> + movaps (%rsp), %xmm1
> + movss 24(%rsp), %xmm0
> + movss %xmm0, 4(%rbx)
> + movaps %xmm1, %xmm0
> + unpckhps %xmm1, %xmm0
> + call JUMPTARGET(\callee)
> + movaps (%rsp), %xmm1
> + leaq 24(%rsp), %rsi
> + leaq 28(%rsp), %rdi
> + movss 28(%rsp), %xmm0
> + shufps $255, %xmm1, %xmm1
> + movss %xmm0, 8(%rbp)
> + movss 24(%rsp), %xmm0
> + movss %xmm0, 8(%rbx)
> + movaps %xmm1, %xmm0
> + call JUMPTARGET(\callee)
> + movss 28(%rsp), %xmm0
> + movss %xmm0, 12(%rbp)
> + movss 24(%rsp), %xmm0
> + movss %xmm0, 12(%rbx)
> + addq $40, %rsp
> + cfi_adjust_cfa_offset (-40)
> + popq %rbx
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbx)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - subq $32, %rsp
> - vextractf128 $1, %ymm0, (%rsp)
> - vzeroupper
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 16(%rsp)
> - vmovaps (%rsp), %xmm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> - vmovaps 16(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + subq $32, %rsp
> + vextractf128 $1, %ymm0, (%rsp)
> + vzeroupper
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, 16(%rsp)
> + vmovaps (%rsp), %xmm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, %xmm1
> + vmovaps 16(%rsp), %xmm0
> + vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX_ff callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - subq $64, %rsp
> - vextractf128 $1, %ymm0, 16(%rsp)
> - vextractf128 $1, %ymm1, (%rsp)
> - vzeroupper
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, 32(%rsp)
> - vmovaps 16(%rsp), %xmm0
> - vmovaps (%rsp), %xmm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps %xmm0, %xmm1
> - vmovaps 32(%rsp), %xmm0
> - vinsertf128 $1, %xmm1, %ymm0, %ymm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + subq $64, %rsp
> + vextractf128 $1, %ymm0, 16(%rsp)
> + vextractf128 $1, %ymm1, (%rsp)
> + vzeroupper
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, 32(%rsp)
> + vmovaps 16(%rsp), %xmm0
> + vmovaps (%rsp), %xmm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps %xmm0, %xmm1
> + vmovaps 32(%rsp), %xmm0
> + vinsertf128 $1, %xmm1, %ymm0, %ymm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
> .macro WRAPPER_IMPL_AVX_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-32, %rsp
> - pushq %r13
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r13, 0)
> - pushq %r14
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%r14, 0)
> - subq $48, %rsp
> - movq %rsi, %r14
> - vmovaps %ymm0, (%rsp)
> - movq %rdi, %r13
> - vmovaps 16(%rsp), %xmm1
> - vmovaps %xmm1, 32(%rsp)
> - vzeroupper
> - vmovaps (%rsp), %xmm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 32(%rsp), %xmm0
> - lea (%rsp), %rdi
> - lea 16(%rsp), %rsi
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps (%rsp), %xmm0
> - vmovaps 16(%rsp), %xmm1
> - vmovaps %xmm0, 16(%r13)
> - vmovaps %xmm1, 16(%r14)
> - addq $48, %rsp
> - popq %r14
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r14)
> - popq %r13
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%r13)
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-32, %rsp
> + pushq %r13
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r13, 0)
> + pushq %r14
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%r14, 0)
> + subq $48, %rsp
> + movq %rsi, %r14
> + vmovaps %ymm0, (%rsp)
> + movq %rdi, %r13
> + vmovaps 16(%rsp), %xmm1
> + vmovaps %xmm1, 32(%rsp)
> + vzeroupper
> + vmovaps (%rsp), %xmm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps 32(%rsp), %xmm0
> + lea (%rsp), %rdi
> + lea 16(%rsp), %rsi
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps (%rsp), %xmm0
> + vmovaps 16(%rsp), %xmm1
> + vmovaps %xmm0, 16(%r13)
> + vmovaps %xmm1, 16(%r14)
> + addq $48, %rsp
> + popq %r14
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r14)
> + popq %r13
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%r13)
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512 callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - subq $128, %rsp
> - vmovups %zmm0, (%rsp)
> - vmovupd (%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 64(%rsp)
> - vmovupd 32(%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovupd %ymm0, 96(%rsp)
> - vmovups 64(%rsp), %zmm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + subq $128, %rsp
> + vmovups %zmm0, (%rsp)
> + vmovupd (%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 64(%rsp)
> + vmovupd 32(%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovupd %ymm0, 96(%rsp)
> + vmovups 64(%rsp), %zmm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512_ff callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - subq $192, %rsp
> - vmovups %zmm0, (%rsp)
> - vmovups %zmm1, 64(%rsp)
> - vmovups (%rsp), %ymm0
> - vmovups 64(%rsp), %ymm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovups %ymm0, 128(%rsp)
> - vmovups 32(%rsp), %ymm0
> - vmovups 96(%rsp), %ymm1
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovups %ymm0, 160(%rsp)
> - vmovups 128(%rsp), %zmm0
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + subq $192, %rsp
> + vmovups %zmm0, (%rsp)
> + vmovups %zmm1, 64(%rsp)
> + vmovups (%rsp), %ymm0
> + vmovups 64(%rsp), %ymm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovups %ymm0, 128(%rsp)
> + vmovups 32(%rsp), %ymm0
> + vmovups 96(%rsp), %ymm1
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovups %ymm0, 160(%rsp)
> + vmovups 128(%rsp), %zmm0
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
>
> /* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
> .macro WRAPPER_IMPL_AVX512_fFF callee
> - pushq %rbp
> - cfi_adjust_cfa_offset (8)
> - cfi_rel_offset (%rbp, 0)
> - movq %rsp, %rbp
> - cfi_def_cfa_register (%rbp)
> - andq $-64, %rsp
> - pushq %r12
> - pushq %r13
> - subq $176, %rsp
> - movq %rsi, %r13
> - vmovaps %zmm0, (%rsp)
> - movq %rdi, %r12
> - vmovaps (%rsp), %ymm0
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 32(%rsp), %ymm0
> - lea 64(%rsp), %rdi
> - lea 96(%rsp), %rsi
> - call HIDDEN_JUMPTARGET(\callee)
> - vmovaps 64(%rsp), %ymm0
> - vmovaps 96(%rsp), %ymm1
> - vmovaps %ymm0, 32(%r12)
> - vmovaps %ymm1, 32(%r13)
> - addq $176, %rsp
> - popq %r13
> - popq %r12
> - movq %rbp, %rsp
> - cfi_def_cfa_register (%rsp)
> - popq %rbp
> - cfi_adjust_cfa_offset (-8)
> - cfi_restore (%rbp)
> - ret
> + pushq %rbp
> + cfi_adjust_cfa_offset (8)
> + cfi_rel_offset (%rbp, 0)
> + movq %rsp, %rbp
> + cfi_def_cfa_register (%rbp)
> + andq $-64, %rsp
> + pushq %r12
> + pushq %r13
> + subq $176, %rsp
> + movq %rsi, %r13
> + vmovaps %zmm0, (%rsp)
> + movq %rdi, %r12
> + vmovaps (%rsp), %ymm0
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps 32(%rsp), %ymm0
> + lea 64(%rsp), %rdi
> + lea 96(%rsp), %rsi
> + call HIDDEN_JUMPTARGET(\callee)
> + vmovaps 64(%rsp), %ymm0
> + vmovaps 96(%rsp), %ymm1
> + vmovaps %ymm0, 32(%r12)
> + vmovaps %ymm1, 32(%r13)
> + addq $176, %rsp
> + popq %r13
> + popq %r12
> + movq %rbp, %rsp
> + cfi_def_cfa_register (%rsp)
> + popq %rbp
> + cfi_adjust_cfa_offset (-8)
> + cfi_restore (%rbp)
> + ret
> .endm
> --
> 2.34.1
>
LGTM.
Thanks.
--
H.J.
* [PATCH v5 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
2022-11-18 19:08 [PATCH v2 1/2] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h.S Noah Goldstein via Libc-alpha
` (2 preceding siblings ...)
2022-11-18 21:22 ` [PATCH v4 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-19 0:13 ` Noah Goldstein via Libc-alpha
2022-11-19 0:13 ` [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
2022-11-19 0:13 ` [PATCH v5 3/3] x86/fpu: Factor out shared avx2/avx512 " Noah Goldstein via Libc-alpha
3 siblings, 2 replies; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-19 0:13 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
Just reformat with the style convention used in other x86 assembler
files. This doesn't change libm.so or libmvec.so.
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
2 files changed, 510 insertions(+), 510 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 2334713015..b03a2122b9 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,273 +18,273 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
- movsd 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 16(%rsp)
+ movsd 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movsd 16(%rsp), %xmm1
+ movsd %xmm0, 24(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
- movsd 8(%rsp), %xmm0
- movsd 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 32(%rsp)
+ movsd 8(%rsp), %xmm0
+ movsd 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movsd 32(%rsp), %xmm1
+ movsd %xmm0, 40(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movsd 24(%rsp), %xmm0
+ movapd (%rsp), %xmm1
+ movsd %xmm0, 0(%rbp)
+ unpckhpd %xmm1, %xmm1
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, (%rbx)
+ movapd %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movsd 24(%rsp), %xmm0
+ movsd %xmm0, 8(%rbp)
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, 8(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, %xmm1
+ vmovapd 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ movq %rdi, %r13
+ vextractf128 $1, %ymm0, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd (%rsp), %xmm0
+ vmovapd 16(%rsp), %xmm1
+ vmovapd %xmm0, 16(%r13)
+ vmovapd %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovupd (%rsp), %ymm0
+ vmovupd 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 128(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovups %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 64(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ vmovupd %ymm0, 32(%r12)
+ vmovupd %ymm1, 32(%r13)
+ vzeroupper
+ addq $176, %rsp
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ popq %r12
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r12)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index c23da7ec83..cecf6c8384 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,309 +18,309 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
- movss 4(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
- movss 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
- movss 12(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 16(%rsp)
+ movss 4(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 20(%rsp)
+ movss 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 24(%rsp)
+ movss 12(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss 16(%rsp), %xmm3
+ movss 20(%rsp), %xmm2
+ movss 24(%rsp), %xmm1
+ movss %xmm0, 28(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
- movss 20(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
- movss 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
- movss 28(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 32(%rsp)
+ movss 4(%rsp), %xmm0
+ movss 20(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 36(%rsp)
+ movss 8(%rsp), %xmm0
+ movss 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 40(%rsp)
+ movss 12(%rsp), %xmm0
+ movss 28(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss 32(%rsp), %xmm3
+ movss 36(%rsp), %xmm2
+ movss 40(%rsp), %xmm1
+ movss %xmm0, 44(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 0(%rbp)
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, (%rbx)
+ movaps %xmm1, %xmm0
+ shufps $85, %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ leaq 24(%rsp), %rsi
+ movss %xmm0, 4(%rbp)
+ leaq 28(%rsp), %rdi
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 4(%rbx)
+ movaps %xmm1, %xmm0
+ unpckhps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movaps (%rsp), %xmm1
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ shufps $255, %xmm1, %xmm1
+ movss %xmm0, 8(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 8(%rbx)
+ movaps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 12(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 12(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ vmovaps %ymm0, (%rsp)
+ movq %rdi, %r13
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm1, 32(%rsp)
+ vzeroupper
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps (%rsp), %xmm0
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm0, 16(%r13)
+ vmovaps %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovups (%rsp), %ymm0
+ vmovups 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 128(%rsp)
+ vmovups 32(%rsp), %ymm0
+ vmovups 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ pushq %r13
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovaps %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovaps (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 64(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ vmovaps %ymm0, 32(%r12)
+ vmovaps %ymm1, 32(%r13)
+ addq $176, %rsp
+ popq %r13
+ popq %r12
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
--
2.34.1
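The AVX512 wrappers above all follow one halving pattern: spill the
512-bit input, run the AVX2 callee on each 256-bit half, and recombine
the two results. A minimal standalone sketch of that pattern, assuming
a hypothetical AVX2 routine `callee_ymm` that takes and returns four
doubles in %ymm0 (CFI annotations omitted for brevity):
	.text
	.globl	zmm_halving_sketch
	.type	zmm_halving_sketch, @function
zmm_halving_sketch:
	pushq	%rbp
	movq	%rsp, %rbp
	andq	$-64, %rsp	/* Align the zmm spill slot; calls stay
				   16-byte aligned as a side effect.  */
	subq	$64, %rsp
	vmovups	%zmm0, (%rsp)	/* Spill all 8 doubles.  */
	call	callee_ymm	/* Low half is already in %ymm0.  */
	vmovupd	%ymm0, (%rsp)	/* Reuse the low-half slot for its result.  */
	vmovupd	32(%rsp), %ymm0	/* Load the high half.  */
	call	callee_ymm
	vmovaps	(%rsp), %ymm1	/* First result back from the stack.  */
	vinserti64x4 $1, %ymm0, %zmm1, %zmm0	/* zmm0 = {lo, hi}.  */
	movq	%rbp, %rsp
	popq	%rbp
	ret
	.size	zmm_halving_sketch, .-zmm_halving_sketch
The v5 revision later in this thread reaches this shape directly, with
the first result spilled over the no-longer-needed input slot.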
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-19 0:13 ` [PATCH v5 " Noah Goldstein via Libc-alpha
@ 2022-11-19 0:13 ` Noah Goldstein via Libc-alpha
2022-11-19 0:36 ` H.J. Lu via Libc-alpha
2022-11-19 0:13 ` [PATCH v5 3/3] x86/fpu: Factor out shared avx2/avx512 " Noah Goldstein via Libc-alpha
1 sibling, 1 reply; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-19 0:13 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
1. Remove unnecessary spills.
2. Fix some small missed optimizations.
All math and mathvec tests pass on x86.
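As a reading aid for the diff below: the reworked single-argument SSE2
double wrapper keeps only the 16-byte input slot and overwrites lane 0
in place with its result, which is what lets the old 40-byte frame
shrink to 24 bytes. A minimal standalone sketch of the resulting shape,
assuming a hypothetical scalar routine `callee_sd` that takes and
returns one double in %xmm0 (CFI annotations omitted for brevity):
	.text
	.globl	sse2_wrapper_sketch
	.type	sse2_wrapper_sketch, @function
sse2_wrapper_sketch:
	subq	$24, %rsp	/* 16-byte spill slot; $24 keeps %rsp
				   16-byte aligned at the calls.  */
	movaps	%xmm0, (%rsp)	/* Spill both input lanes.  */
	call	callee_sd	/* Lane 0 is already in %xmm0.  */
	movsd	%xmm0, (%rsp)	/* Overwrite lane 0 with its result.  */
	movsd	8(%rsp), %xmm0	/* Load lane 1.  */
	call	callee_sd
	movsd	(%rsp), %xmm1	/* xmm1 = {res0, --}.  */
	unpcklpd %xmm0, %xmm1	/* xmm1 = {res0, res1}.  */
	movaps	%xmm1, %xmm0
	addq	$24, %rsp
	ret
	.size	sse2_wrapper_sketch, .-sse2_wrapper_sketch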
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 179 +++++++----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++-------------
2 files changed, 172 insertions(+), 242 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index b03a2122b9..9900f85a55 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,39 +18,38 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
ret
.endm
+
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
+ movsd %xmm0, (%rsp)
movsd 8(%rsp), %xmm0
movsd 24(%rsp), %xmm1
call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
+ movsd (%rsp), %xmm1
unpcklpd %xmm0, %xmm1
movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
ret
.endm
@@ -62,30 +61,18 @@
pushq %rbx
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbx, 0)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
+ movaps %xmm0, (%rsp)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
+ movsd 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
@@ -104,15 +91,17 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
+ vmovaps %ymm0, (%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -130,17 +119,19 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -155,35 +146,21 @@
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
+ movq %rdi, %rbx
movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -200,15 +177,16 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $128, %rsp
+ subq $64, %rsp
vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
+ vmovupd %ymm0, (%rsp)
vmovupd 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -225,18 +203,19 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $192, %rsp
+ addq $-128, %rsp
vmovups %zmm0, (%rsp)
vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
+ /* ymm0 and ymm1 are already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -253,34 +232,20 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
+ popq %r14
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index cecf6c8384..fd9b363045 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,61 +18,66 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
+ push %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $16, %rsp
+ cfi_adjust_cfa_offset (16)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
+ movss %xmm0, (%rsp)
movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
+ movss %xmm0, 4(%rsp)
movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
+ movd %xmm0, %ebx
movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $16, %rsp
+ cfi_adjust_cfa_offset (-16)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset (56)
+ push %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset (32)
movaps %xmm0, (%rsp)
movaps %xmm1, 16(%rsp)
call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
movss 20(%rsp), %xmm1
+ movss %xmm0, 0(%rsp)
+ movss 4(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
movss 24(%rsp), %xmm1
+ movss %xmm0, 4(%rsp)
+ movss 8(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
movss 28(%rsp), %xmm1
+ movd %xmm0, %ebx
+ movss 12(%rsp), %xmm0
call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset (-56)
+ /* merge 4x results into xmm0. */
+ movd %ebx, %xmm1
+ unpcklps %xmm0, %xmm1
+ movsd (%rsp), %xmm0
+ unpcklpd %xmm1, %xmm0
+ addq $32, %rsp
+ cfi_adjust_cfa_offset (-32)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
ret
.endm
@@ -86,48 +91,24 @@
cfi_rel_offset (%rbx, 0)
movq %rdi, %rbp
movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset (40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
+ subq $24, %rsp
+ cfi_adjust_cfa_offset (24)
movaps %xmm0, (%rsp)
call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
+ movss 4(%rsp), %xmm0
+ leaq 4(%rbp), %rdi
+ leaq 4(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
+ movss 8(%rsp), %xmm0
+ leaq 8(%rbp), %rdi
+ leaq 8(%rbx), %rsi
call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
+ movss 12(%rsp), %xmm0
+ leaq 12(%rbp), %rdi
+ leaq 12(%rbx), %rsi
call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset (-40)
+ addq $24, %rsp
+ cfi_adjust_cfa_offset (-24)
popq %rbx
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbx)
@@ -146,15 +127,17 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
+ vmovaps %ymm0, (%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -172,17 +155,19 @@
cfi_def_cfa_register (%rbp)
andq $-32, %rsp
subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
vzeroupper
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -197,38 +182,21 @@
cfi_adjust_cfa_offset (8)
cfi_rel_offset (%rbp, 0)
movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
+ movq %rdi, %rbx
movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
vzeroupper
- vmovaps (%rsp), %xmm0
call HIDDEN_JUMPTARGET(\callee)
vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -245,15 +213,16 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $128, %rsp
+ subq $64, %rsp
vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
+ vmovupd %ymm0, (%rsp)
vmovupd 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -270,18 +239,19 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- subq $192, %rsp
+ addq $-128, %rsp
vmovups %zmm0, (%rsp)
vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
+ /* ymm0 and ymm1 are already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
@@ -298,25 +268,20 @@
movq %rsp, %rbp
cfi_def_cfa_register (%rbp)
andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
+ subq $64, %rsp
vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
+ popq %r14
+ popq %rbx
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
popq %rbp
--
2.34.1
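The same halving idea drives the AVX wrappers in this patch, one level
down: spill %ymm0, call the SSE kernel on each 16-byte half, and
recombine with vinsertf128. A standalone sketch, again assuming a
hypothetical callee (`callee_xmm`, one xmm in and out; CFI omitted):
	.text
	.globl	avx_wrapper_sketch
	.type	avx_wrapper_sketch, @function
avx_wrapper_sketch:
	pushq	%rbp
	movq	%rsp, %rbp
	andq	$-32, %rsp	/* Align the ymm spill slot.  */
	subq	$32, %rsp
	vmovaps	%ymm0, (%rsp)	/* Spill the full ymm input.  */
	vzeroupper		/* Avoid AVX->SSE transition penalties.  */
	call	callee_xmm	/* Low half is already in %xmm0.  */
	vmovaps	%xmm0, (%rsp)	/* Reuse the low-half slot for its result.  */
	vmovaps	16(%rsp), %xmm0	/* Load the high half.  */
	call	callee_xmm
	vmovaps	(%rsp), %xmm1
	vinsertf128 $1, %xmm0, %ymm1, %ymm0	/* ymm0 = {res_lo, res_hi}.  */
	movq	%rbp, %rsp
	popq	%rbp
	ret
	.size	avx_wrapper_sketch, .-avx_wrapper_sketch
The comments in the patch note that a blend-based combine might be
worth exploring; the patch itself keeps the vinsertf128 form.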
^ permalink raw reply related [flat|nested] 21+ messages in thread
* Re: [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h
2022-11-19 0:13 ` [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-19 0:36 ` H.J. Lu via Libc-alpha
0 siblings, 0 replies; 21+ messages in thread
From: H.J. Lu via Libc-alpha @ 2022-11-19 0:36 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, andrey.kolesov, carlos
On Fri, Nov 18, 2022 at 4:13 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> 1. Remove unnecessary spills.
> 2. Fix some small missed optimizations.
>
> All math and mathvec tests pass on x86.
> ---
> sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 179 +++++++----------
> sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 235 ++++++++++-------------
> 2 files changed, 172 insertions(+), 242 deletions(-)
>
> [... quoted patch trimmed; identical to the patch posted above ...]
>
LGTM.
Thanks.
--
H.J.
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH v5 3/3] x86/fpu: Factor out shared avx2/avx512 code in svml_{s|d}_wrapper_impl.h
2022-11-19 0:13 ` [PATCH v5 " Noah Goldstein via Libc-alpha
2022-11-19 0:13 ` [PATCH v5 2/3] x86/fpu: Cleanup code in svml_{s|d}_wrapper_impl.h Noah Goldstein via Libc-alpha
@ 2022-11-19 0:13 ` Noah Goldstein via Libc-alpha
1 sibling, 0 replies; 21+ messages in thread
From: Noah Goldstein via Libc-alpha @ 2022-11-19 0:13 UTC (permalink / raw)
To: libc-alpha; +Cc: goldstein.w.n, hjl.tools, andrey.kolesov, carlos
The code is exactly the same for the two, so it is better to maintain
only one version.
All math and mathvec tests pass on x86.
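For context on where these macros land, each per-function core file
simply expands one of them; a sketch of a typical AVX user, modeled on
sysdeps/x86_64/fpu/svml_d_cos4_core.S (the exact contents of that file
may differ):
	/* The AVX2 4-double cos entry wraps the SSE 2-double kernel.  */
#include <sysdep.h>
#include "svml_d_wrapper_impl.h"
	.text
ENTRY (_ZGVdN4v_cos)
WRAPPER_IMPL_AVX _ZGVbN2v_cos
END (_ZGVdN4v_cos)
Since both the float and double headers now pull in the same shared
file, this call pattern is unchanged by the refactor.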
---
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 172 +-------------------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 172 +-------------------
sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h | 190 ++++++++++++++++++++++
3 files changed, 192 insertions(+), 342 deletions(-)
create mode 100644 sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 9900f85a55..f63b49f4b8 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -82,174 +82,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vmovaps %ymm0, (%rsp)
- vmovaps %ymm1, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %xmm1
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- leaq 16(%rbx), %rdi
- leaq 16(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovups %zmm0, (%rsp)
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, (%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- addq $-128, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- /* ymm0 and ymm1 are already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovups 96(%rsp), %ymm1
- vmovaps %ymm0, (%rsp)
- vmovups 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovaps %zmm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- /* ymm0 is already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %ymm0
- leaq 32(%rbx), %rdi
- leaq 32(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index fd9b363045..8d8e5ef7ec 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -118,174 +118,4 @@
ret
.endm
-/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vmovaps %ymm0, (%rsp)
- vmovaps %ymm1, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %xmm1
- vmovaps %xmm0, (%rsp)
- vmovaps 16(%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine xmm0 (return of second call) with result of first
- call (saved on stack). Might be worth exploring logic that
- uses `vpblend` and reads in ymm1 using -16(rsp). */
- vmovaps (%rsp), %xmm1
- vinsertf128 $1, %xmm0, %ymm1, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
-.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- andq $-32, %rsp
- subq $32, %rsp
- vmovaps %ymm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- leaq 16(%rbx), %rdi
- leaq 16(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovups %zmm0, (%rsp)
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, (%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- addq $-128, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- /* ymm0 and ymm1 are already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovups 96(%rsp), %ymm1
- vmovaps %ymm0, (%rsp)
- vmovups 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- /* combine ymm0 (return of second call) with result of first
- call (saved on stack). */
- vmovaps (%rsp), %ymm1
- vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
-
-/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
-.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $64, %rsp
- vmovaps %zmm0, (%rsp)
- pushq %rbx
- pushq %r14
- movq %rdi, %rbx
- movq %rsi, %r14
- /* ymm0 is already set. */
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 48(%rsp), %ymm0
- leaq 32(%rbx), %rdi
- leaq 32(%r14), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- popq %r14
- popq %rbx
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
-.endm
+#include "svml_sd_wrapper_impl.h"
diff --git a/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
new file mode 100644
index 0000000000..bd934ad578
--- /dev/null
+++ b/sysdeps/x86_64/fpu/svml_sd_wrapper_impl.h
@@ -0,0 +1,190 @@
+/* Common float/double wrapper implementations of vector math
+ functions.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vmovaps %ymm0, (%rsp)
+ vmovaps %ymm1, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %xmm1
+ vmovaps %xmm0, (%rsp)
+ vmovaps 16(%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine xmm0 (return of second call) with result of first
+ call (saved on stack). Might be worth exploring logic that
+ uses `vpblend` and reads in ymm1 using -16(rsp). */
+ vmovaps (%rsp), %xmm1
+ vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ andq $-32, %rsp
+ subq $32, %rsp
+ vmovaps %ymm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ leaq 16(%rbx), %rdi
+ leaq 16(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512 callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovups %zmm0, (%rsp)
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, (%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_ff callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ addq $-128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ /* ymm0 and ymm1 are already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups 96(%rsp), %ymm1
+ vmovaps %ymm0, (%rsp)
+ vmovups 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ /* combine ymm0 (return of second call) with result of first
+ call (saved on stack). */
+ vmovaps (%rsp), %ymm1
+ vinserti64x4 $0x1, %ymm0, %zmm1, %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $64, %rsp
+ vmovaps %zmm0, (%rsp)
+ pushq %rbx
+ pushq %r14
+ movq %rdi, %rbx
+ movq %rsi, %r14
+ /* ymm0 is already set. */
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 48(%rsp), %ymm0
+ leaq 32(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ popq %r14
+ popq %rbx
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
--
2.34.1
^ permalink raw reply related [flat|nested] 21+ messages in thread