919fb7f2e0
This patch includes handwritten AVX2 assembly for 64-bit memset, using
non-temporal stores for very large sizes, and adds dynamic dispatch for
APIs that have multiple implementations. Convincing benchmark improvements
for sizes above 512 bytes, and although the slight regression for small
sizes is unfortunate, it's probably small enough to be okay?

Before:
BM_string_memset/8/0          3.06 ns    3.04 ns   222703428  bytes_per_second=2.45261G/s
BM_string_memset/16/0         3.50 ns    3.47 ns   202569932  bytes_per_second=4.29686G/s
BM_string_memset/32/0         3.50 ns    3.48 ns   200064955  bytes_per_second=8.57386G/s
BM_string_memset/64/0         3.49 ns    3.46 ns   201928186  bytes_per_second=17.2184G/s
BM_string_memset/512/0        14.8 ns    14.7 ns    47776178  bytes_per_second=32.3887G/s
BM_string_memset/1024/0       27.3 ns    27.1 ns    25884933  bytes_per_second=35.2515G/s
BM_string_memset/8192/0        203 ns     201 ns     3476903  bytes_per_second=37.9311G/s
BM_string_memset/16384/0       402 ns     399 ns     1750471  bytes_per_second=38.2725G/s
BM_string_memset/32768/0       932 ns     925 ns      755750  bytes_per_second=33.0071G/s
BM_string_memset/65536/0      2038 ns    2014 ns      347060  bytes_per_second=30.3057G/s
BM_string_memset/131072/0     4012 ns    3980 ns      175186  bytes_per_second=30.6682G/s

After:
BM_string_memset/8/0          3.32 ns    3.23 ns   208939089  bytes_per_second=2.3051G/s
BM_string_memset/16/0         4.07 ns    3.98 ns   173479615  bytes_per_second=3.74822G/s
BM_string_memset/32/0         4.07 ns    3.95 ns   177208119  bytes_per_second=7.54344G/s
BM_string_memset/64/0         4.09 ns    4.00 ns   174729144  bytes_per_second=14.8878G/s
BM_string_memset/512/0        10.7 ns    10.4 ns    65922763  bytes_per_second=45.6611G/s
BM_string_memset/1024/0       18.0 ns    17.6 ns    40489136  bytes_per_second=54.3166G/s
BM_string_memset/8192/0        109 ns     106 ns     6577711  bytes_per_second=71.7667G/s
BM_string_memset/16384/0       221 ns     210 ns     3343800  bytes_per_second=72.684G/s
BM_string_memset/32768/0       655 ns     623 ns     1153501  bytes_per_second=48.9781G/s
BM_string_memset/65536/0      1547 ns    1495 ns      461702  bytes_per_second=40.8154G/s
BM_string_memset/131072/0     2991 ns    2924 ns      240189  bytes_per_second=41.7438G/s

This patch drops the wmemset() code because we don't even have a
microbenchmark for it, and we have as many implementations checked in as
we have non-test call sites (!), so at this point it seems like we've
spent more time maintaining wmemset() than running it!

Test: bionic/tests/run-on-host.sh 64
Signed-off-by: ahs <amrita.h.s@intel.com>
Change-Id: Ie5047df5300638c1e4c69f8285d33d034f79c83b
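The "dynamic dispatch" mentioned above can be pictured as a resolver that
runs once per process and binds a symbol to the best implementation for the
current CPU. The sketch below is illustrative only, not bionic's actual
mechanism; the names (my_memset, the *_impl stand-ins) and the GNU ifunc
wiring are assumptions for the example:

    #include <stddef.h>
    #include <cpuid.h>

    /* Stand-ins for the real SSE2/AVX2 assembly implementations
     * (hypothetical; the real bodies live in the .S files). */
    static void *memset_generic_impl(void *dst, int c, size_t n) {
        unsigned char *p = (unsigned char *)dst;
        while (n--) *p++ = (unsigned char)c;
        return dst;
    }
    static void *memset_avx2_impl(void *dst, int c, size_t n) {
        return memset_generic_impl(dst, c, n);  /* placeholder body */
    }

    /* Resolver: CPUID leaf 7, subleaf 0, EBX bit 5 reports AVX2. */
    static void *(*resolve_my_memset(void))(void *, int, size_t) {
        unsigned a, b, cx, d;
        if (__get_cpuid_count(7, 0, &a, &b, &cx, &d) && (b & (1u << 5)))
            return memset_avx2_impl;
        return memset_generic_impl;
    }

    /* GNU ifunc: the dynamic linker calls the resolver once, at
     * relocation time, and binds my_memset to the returned pointer. */
    void *my_memset(void *dst, int c, size_t n)
        __attribute__((ifunc("resolve_my_memset")));

Bionic's own dispatch differs in detail, but the shape is the same: one
CPU-feature check up front, then an indirect call with no per-call cost.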
149 lines
3.5 KiB
ArmAsm
/*
Copyright (c) 2014, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
    * this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
    * this list of conditions and the following disclaimer in the documentation
    * and/or other materials provided with the distribution.

    * Neither the name of Intel Corporation nor the names of its contributors
    * may be used to endorse or promote products derived from this software
    * without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <private/bionic_asm.h>
#include "cache.h"

#ifndef L
# define L(label)	.L##label
#endif

#ifndef ALIGN
# define ALIGN(n)	.p2align n
#endif

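// __memset_chk is the _FORTIFY_SOURCE entry point: when the compiler can
// see the destination's size, it rewrites memset(dst, byte, n) to pass that
// size as a fourth argument, and we abort via __memset_chk_fail instead of
// overflowing when n > dst_len.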
ENTRY(__memset_chk_generic)
	# %rdi = dst, %rsi = byte, %rdx = n, %rcx = dst_len
	cmp	%rcx, %rdx
	ja	__memset_chk_fail
	// Fall through to memset...
END(__memset_chk_generic)

.section .text.sse2,"ax",@progbits
ENTRY(memset_generic)
	movq	%rdi, %rax
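	// Broadcast the fill byte into all 8 bytes of %rcx: masking to
	// 8 bits and multiplying by 0x0101010101010101 replicates the
	// byte into every byte lane of the register.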
	and	$0xff, %rsi
	mov	$0x0101010101010101, %rcx
	imul	%rsi, %rcx
	cmpq	$16, %rdx
	jae	L(16bytesormore)
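	// n < 16: branch on the individual bits of the length byte; each
	// size class is finished with at most two possibly-overlapping
	// stores, so no loop is needed.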
	testb	$8, %dl
	jnz	L(8_15bytes)
	testb	$4, %dl
	jnz	L(4_7bytes)
	testb	$2, %dl
	jnz	L(2_3bytes)
	testb	$1, %dl
	jz	L(return)
	movb	%cl, (%rdi)
L(return):
	ret

L(8_15bytes):
	movq	%rcx, (%rdi)
	movq	%rcx, -8(%rdi, %rdx)
	ret

L(4_7bytes):
	movl	%ecx, (%rdi)
	movl	%ecx, -4(%rdi, %rdx)
	ret

L(2_3bytes):
	movw	%cx, (%rdi)
	movw	%cx, -2(%rdi, %rdx)
	ret

	ALIGN (4)
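	// 16..128 bytes: fill from both ends with unaligned 16-byte
	// stores that may overlap in the middle, covering up to 128
	// bytes without a loop or any alignment fixup.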
L(16bytesormore):
	movd	%rcx, %xmm0
	pshufd	$0, %xmm0, %xmm0
	movdqu	%xmm0, (%rdi)
	movdqu	%xmm0, -16(%rdi, %rdx)
	cmpq	$32, %rdx
	jbe	L(32bytesless)
	movdqu	%xmm0, 16(%rdi)
	movdqu	%xmm0, -32(%rdi, %rdx)
	cmpq	$64, %rdx
	jbe	L(64bytesless)
	movdqu	%xmm0, 32(%rdi)
	movdqu	%xmm0, 48(%rdi)
	movdqu	%xmm0, -64(%rdi, %rdx)
	movdqu	%xmm0, -48(%rdi, %rdx)
	cmpq	$128, %rdx
	ja	L(128bytesmore)
L(32bytesless):
L(64bytesless):
	ret

	ALIGN (4)
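	// More than 128 bytes: the unaligned head and tail (64 bytes
	// each) were already written above. Round dst up to the next
	// 64-byte boundary in %rcx and the end down in %rdx; the loops
	// below fill the aligned middle in 64-byte chunks.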
L(128bytesmore):
	leaq	64(%rdi), %rcx
	andq	$-64, %rcx
	movq	%rdx, %r8
	addq	%rdi, %rdx
	andq	$-64, %rdx
	cmpq	%rcx, %rdx
	je	L(return)

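	// Choose the store strategy by total size: below the shared
	// cache size, regular stores keep the data cache-resident; above
	// it, non-temporal stores avoid evicting the whole cache.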
#ifdef SHARED_CACHE_SIZE
	cmp	$SHARED_CACHE_SIZE, %r8
#else
	cmp	__x86_64_shared_cache_size(%rip), %r8
#endif
	ja	L(128bytesmore_nt)

	ALIGN (4)
L(128bytesmore_normal):
	movdqa	%xmm0, (%rcx)
	movaps	%xmm0, 0x10(%rcx)
	movaps	%xmm0, 0x20(%rcx)
	movaps	%xmm0, 0x30(%rcx)
	addq	$64, %rcx
	cmpq	%rcx, %rdx
	jne	L(128bytesmore_normal)
	ret

	ALIGN (4)
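	// Non-temporal path: movntdq stores bypass the cache and are
	// weakly ordered, so the trailing sfence is required to order
	// them ahead of any later stores before memset returns.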
L(128bytesmore_nt):
	movntdq	%xmm0, (%rcx)
	movntdq	%xmm0, 0x10(%rcx)
	movntdq	%xmm0, 0x20(%rcx)
	movntdq	%xmm0, 0x30(%rcx)
	leaq	64(%rcx), %rcx
	cmpq	%rcx, %rdx
	jne	L(128bytesmore_nt)
	sfence
	ret

END(memset_generic)