aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNoah Goldstein <goldstein.w.n@gmail.com>2022-06-24 09:42:12 -0700
committerAndreas K. Hüttel <dilfridge@gentoo.org>2022-09-03 22:21:25 +0200
commit5ffe2198bab1249c6084d3b8c2f87f7719182786 (patch)
tree4a82bb0fd81e77d580030e136b2753d3b62710c9 /sysdeps
parentx86: Add BMI1/BMI2 checks for ISA_V3 check (diff)
downloadglibc-5ffe2198bab1249c6084d3b8c2f87f7719182786.tar.gz
glibc-5ffe2198bab1249c6084d3b8c2f87f7719182786.tar.bz2
glibc-5ffe2198bab1249c6084d3b8c2f87f7719182786.zip
x86: Align entry for memrchr to 64-bytes.
The function was tuned around 64-byte entry alignment and performs better for all sizes with it. As well, different code paths were explicitly written to touch the minimum number of cache lines, i.e. sizes <= 32 touch only the entry cache line. (cherry picked from commit 227afaa67213efcdce6a870ef5086200f1076438) (cherry picked from commit 3eb17048c4aa5672d2d3e0473b83d0790206214c)
Diffstat (limited to 'sysdeps')
-rw-r--r--sysdeps/x86_64/multiarch/memrchr-avx2.S2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sysdeps/x86_64/multiarch/memrchr-avx2.S b/sysdeps/x86_64/multiarch/memrchr-avx2.S
index 9c83c76d3c..f300d7daf4 100644
--- a/sysdeps/x86_64/multiarch/memrchr-avx2.S
+++ b/sysdeps/x86_64/multiarch/memrchr-avx2.S
@@ -35,7 +35,7 @@
# define VEC_SIZE 32
# define PAGE_SIZE 4096
.section SECTION(.text), "ax", @progbits
-ENTRY(MEMRCHR)
+ENTRY_P2ALIGN(MEMRCHR, 6)
# ifdef __ILP32__
/* Clear upper bits. */
and %RDX_LP, %RDX_LP