platform_bionic/libc/arch-x86/generic/string/strncmp.S
Varvara Rainchik 5a92284167 Add 32-bit Silvermont-optimized string/memory functions.
Add following functions:
bcopy, memcpy, memmove, memset, bzero, memcmp, wmemcmp, strlen,
strcpy, strncpy, stpcpy, stpncpy.
Create new directories inside arch-x86 to specify architecture: atom,
silvermont and generic (non-atom, non-silvermont architectures are treated as generic).
Due to the introduction of optimized versions of stpcpy and stpncpy,
the C implementations of these functions are moved from the makefile
common to all architectures into the arm- and mips-specific makefiles.

Change-Id: I990f8061c3e9bca1f154119303da9e781c5d086e
Signed-off-by: Varvara Rainchik <varvara.rainchik@intel.com>
2014-05-12 13:56:59 -07:00

114 lines
1.5 KiB
ArmAsm

/* $OpenBSD: strncmp.S,v 1.3 2005/08/07 11:30:38 espie Exp $ */
/*
* Written by J.T. Conklin <jtc@netbsd.org>.
* Public domain.
*/
#include <private/bionic_asm.h>
/*
* NOTE: I've unrolled the loop eight times: large enough to make a
* significant difference, and small enough not to totally trash the
* cache.
*/
/*
 * int strncmp(const char *s1, const char *s2, size_t n)
 *
 * ABI:   i386 cdecl — args on the stack, result in %eax.
 * In:    8(%esp) = s1, 12(%esp) = s2, 16(%esp) = n
 *        (offsets are relative to %esp AFTER the pushl %ebx below).
 * Out:   %eax = 0 if the first n bytes are equal (or n == 0);
 *        otherwise (unsigned char)*s1 - (unsigned char)*s2 at the
 *        first difference.
 * Clobbers: %ecx, %edx, flags. %ebx is callee-saved, hence saved/restored.
 *
 * Register roles throughout the loop:
 *   %eax = current position in s1
 *   %ecx = current position in s2
 *   %edx = bytes remaining
 *   %bl  = byte loaded from s1
 *
 * The loop body is unrolled 8x; correctness relies on ZF being carried
 * from testl/decl across instructions that preserve flags (jmp, movb).
 */
ENTRY(strncmp)
pushl %ebx
movl 8(%esp),%eax
movl 12(%esp),%ecx
movl 16(%esp),%edx
testl %edx,%edx /* sets ZF iff n == 0 */
jmp L2 /* jmp preserves flags, so L2's jz sees testl's ZF — handles n == 0 */
.align 2,0x90
/* Top of the 8x-unrolled loop: advance both pointers, consume one count. */
L1: incl %eax
incl %ecx
decl %edx /* decl sets ZF when the remaining count reaches 0 */
L2: jz L4 /* count exhausted: first n bytes matched — return 0 */
movb (%eax),%bl /* %bl = *s1 (movb does not alter flags) */
testb %bl,%bl
jz L3 /* *s1 == NUL: both strings end here or they differ — settle at L3 */
cmpb %bl,(%ecx)
jne L3 /* *s1 != *s2: compute the difference at L3 */
/* --- unrolled copy 2 (identical to the sequence above) --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 3 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 4 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 5 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 6 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 7 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
jne L3
/* --- unrolled copy 8: on match, loop back to L1 --- */
incl %eax
incl %ecx
decl %edx
jz L4
movb (%eax),%bl
testb %bl,%bl
jz L3
cmpb %bl,(%ecx)
je L1 /* bytes equal and nonzero: continue with the next 8 bytes */
.align 2,0x90
/* Mismatch or NUL reached: C requires comparison as unsigned char. */
L3: movzbl (%eax),%eax /* unsigned comparison */
movzbl (%ecx),%ecx
subl %ecx,%eax /* return *s1 - *s2 (both zero-extended) */
popl %ebx
ret
.align 2,0x90
/* First n bytes equal (or n == 0): return 0. */
L4: xorl %eax,%eax
popl %ebx
ret
END(strncmp)