diff --git a/config.guess b/config.guess index 1972fda8e..7f9b7f42e 100755 --- a/config.guess +++ b/config.guess @@ -2,7 +2,7 @@ # Attempt to guess a canonical system name. # Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2021-01-25' +timestamp='2024-05-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -949,7 +949,7 @@ EOF if test "$?" = 0 ; then LIBC=gnulibc1 ; fi echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; - arc:Linux:*:* | arceb:Linux:*:*) + arc:Linux:*:* | arceb:Linux:*:* | arc64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) diff --git a/config.sub b/config.sub index 63c1f1c8b..9def97c14 100755 --- a/config.sub +++ b/config.sub @@ -2,7 +2,7 @@ # Configuration validation subroutine script. # Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2021-01-08' +timestamp='2024-05-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -1165,7 +1165,7 @@ case $cpu-$vendor in | alphapca5[67] | alpha64pca5[67] \ | am33_2.0 \ | amdgcn \ - | arc | arceb \ + | arc | arceb | arc64 | arc32 \ | arm | arm[lb]e | arme[lb] | armv* \ | avr | avr32 \ | asmjs \ diff --git a/libgloss/Makefile.am b/libgloss/Makefile.am index 77a199e96..c3bae6eb4 100644 --- a/libgloss/Makefile.am +++ b/libgloss/Makefile.am @@ -86,6 +86,9 @@ endif if CONFIG_ARC include arc/Makefile.inc endif +if CONFIG_ARC64 +include arc64/Makefile.inc +endif if CONFIG_ARM include arm/Makefile.inc endif diff --git a/libgloss/arc64/Makefile.inc b/libgloss/arc64/Makefile.inc new file mode 100644 index 000000000..2ab1b7836 --- /dev/null +++ b/libgloss/arc64/Makefile.inc @@ -0,0 +1,44 @@ +multilibtool_LIBRARIES += %D%/libnsim.a +%C%_libnsim_a_CPPFLAGS = -I$(srcdir)/arc +%C%_libnsim_a_SOURCES = \ + arc/libcfunc.c \ + arc/nsim-syscalls.c \ + arc/sbrk.c + 
+multilibtool_LIBRARIES += %D%/libhl.a +%C%_libhl_a_CFLAGS = -mvolatile-di +%C%_libhl_a_CPPFLAGS = -I$(srcdir)/arc +%C%_libhl_a_SOURCES = \ + arc/arc-timer.c \ + arc/hl-stub.c \ + arc/hl-setup.c \ + arc/libcfunc.c \ + arc/sbrk.c \ + arc/hl/hl_gw.c \ + arc/hl/hl_api.c \ + arc/hl/hl_open.c \ + arc/hl/hl_close.c \ + arc/hl/hl_read.c \ + arc/hl/hl_write.c \ + arc/hl/hl_lseek.c \ + arc/hl/hl_unlink.c \ + arc/hl/hl_isatty.c \ + arc/hl/hl_clock.c \ + arc/hl/hl_gettimeofday.c \ + arc/hl/hl_argc.c \ + arc/hl/hl_argv.c \ + arc/hl/hl_exit.c + +multilibtool_DATA += \ + %D%/crt0.o \ + %D%/crtn.o \ + %D%/crti.o \ + arc/arc-main-helper.o \ + arc/nsim.specs \ + arc/hl.specs + +libobjs_a_SOURCES += \ + %D%/crt0.S \ + %D%/crtn.S \ + %D%/crti.S \ + arc/arc-main-helper.c diff --git a/libgloss/arc64/asm.h b/libgloss/arc64/asm.h new file mode 100644 index 000000000..b68c0b404 --- /dev/null +++ b/libgloss/arc64/asm.h @@ -0,0 +1,123 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _ARC64_ASM_H +#define _ARC64_ASM_H + +#if defined (__ARC64_ARCH32__) + +/* Define 32-bit word. */ +.macro WORD w + .word \w +.endm + +/* Move register immediate (short): r:reg, i:immediate */ +.macro MOVRI_S r, i + mov_s \r, \i +.endm + +/* Move register immediate (short): r:reg, ri:reg/immediate */ +.macro MOVR_S r, ri + mov_s \r, \ri +.endm + +/* Move register immediate: r:reg, ri:reg/immediate */ +.macro MOVR r, ri + mov \r, \ri +.endm + +/* Push register: r:reg */ +.macro PUSHR r + push \r +.endm + +/* Pop register: r:reg */ +.macro POPR r + pop \r +.endm + +/* Subtract register: r1(reg), r2(reg), r3(reg) */ +.macro SUBR r1, r2, r3 + sub \r1, \r2, \r3 +.endm + +/* Add PCL-rel: r:reg, symb: symbol */ +.macro ADDPCL r,symb + add \r, pcl, \symb +.endm + +#elif defined (__ARC64_ARCH64__) + +/* Define 64-bit word. 
*/ +.macro WORD w + .xword \w +.endm + +/* Move immediate (short): r:reg, i:immediate */ +.macro MOVRI_S r, i + movhl_s \r, \i + orl_s \r, \r, \i +.endm + +/* Move register immediate (short): r:reg, ri:reg/immediate */ +.macro MOVR_S r, ri + movl_s \r, \ri +.endm + +/* Move register immediate: r:reg, ri:reg/immediate */ +.macro MOVR r, ri + movl \r, \ri +.endm + +/* Push register: r:reg */ +.macro PUSHR r + pushl_s \r +.endm + +/* Pop register: r:reg */ +.macro POPR r + popl_s \r +.endm + +/* Subtract register: r1(reg), r2(reg), r3(reg) */ +.macro SUBR r1, r2, r3 + subl \r1, \r2, \r3 +.endm + +/* Add PCL-rel: r:reg, symb: symbol */ +.macro ADDPCL r, symb + addl \r, pcl, \symb@pcl +.endm + +#else /* !__ARC64_ARC32__ && !__ARC64_ARC64__ */ +# error Please use either 32-bit or 64-bit version of arc64 compiler +#endif + +#endif /* _ARC64_ASM_H */ diff --git a/libgloss/arc64/crt0.S b/libgloss/arc64/crt0.S new file mode 100644 index 000000000..c5fe57b0d --- /dev/null +++ b/libgloss/arc64/crt0.S @@ -0,0 +1,160 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +/* + The startup code for the ARC family of processors does the following before + transferring control to user defined main label: + 1. Set sp to __stack_top (link time variable) + 2. Zero out the bss section (for uninitialized globals) + After returning from main, the processor is halted and the pipeline is + flushed out. +*/ + +#include "asm.h" +#include "../arc/arc-symbols.h" + + .macro initfp from=0, to=31 + fmvi2s f0+\from,r0 + .if \to-\from + initfp "(\from+1)",\to + .endif + .endm + + .extern main + + .section .text.__startup, "ax", @progbits + .global __start + .type __start, @function + .align 4 +__start: + MOVRI_S sp, STACK_TOP ; Stack address + + ; Allow unaligned accesses. + lr r2, [0xA] + bset r2, r2, 19 + flag r2 + +;;; Clear the bss segment. + MOVRI_S r0, SMALL_DATA_BSS_START ; r0 = start of the bss section + MOVRI_S r2, SMALL_DATA_BSS_END + SUBR r2, r2, r0 ; r2 = size of the bss section in bytes + MOVR_S r1, 0 ; r1 = bytes to fill in + bl memset + +;;; Clear the registers. 
+ MOVR_S r0, 0 + MOVR_S r1, 0 + MOVR_S r2, 0 + MOVR_S r3, 0 + MOVR_S r4, 0 + MOVR_S r5, 0 + MOVR_S r6, 0 + MOVR_S r7, 0 + MOVR_S r8, 0 + MOVR_S r9, 0 + MOVR_S r10, 0 + MOVR_S r11, 0 + MOVR_S r12, 0 + MOVR_S r13, 0 + MOVR_S r14, 0 + MOVR_S r15, 0 + MOVR_S r16, 0 + MOVR_S r17, 0 + MOVR_S r18, 0 + MOVR_S r19, 0 + MOVR_S r20, 0 + MOVR_S r21, 0 + MOVR_S r22, 0 + MOVR_S r23, 0 + MOVR_S r24, 0 + MOVR_S r25, 0 + MOVR_S r26, 0 + MOVR_S r27, 0 + ;; SP (r28) is initialized + MOVR_S ilink, 0 + MOVR r30, 0 + +#if defined(__ARC_FPU_DP__) || defined (__ARC_FPU_SP__) + initfp 0,31 +#endif + + ;; Call constructors. + jl _init + +;;; Setup fini routines to be called from exit + MOVRI_S r0, _fini + jl atexit + + jl __setup_argv_and_call_main + + j exit + .size __start, .-__start + +;;; arc-main-helper.o object can be used to replace this function +;;; and properly set up arguments and/or other low-level stuff. + .section .text.__setup_argv_and_call_main,"ax",@progbits + .weak __setup_argv_and_call_main + .type __setup_argv_and_call_main, @function + .align 4 + +__setup_argv_and_call_main: + PUSHR blink +;;; Call main() with argv[0] set to "baremetal", argv[1] to NULL and empty envp + MOVR_S r0, 1 ; Set argc to 1 + ADDPCL r1,.argv_data ; Set argv to the appropriate pointer + MOVR_S r2, 0 + MOVR_S r3, 0 + + MOVRI_S blink, main + jl [blink] + + POPR blink + j_s [blink] + + .align 4 + ;; Dummy program name +.prog_name: + .string "baremetal" +;; argv data +.argv_data: + WORD @.prog_name + WORD 0x00 + .size __setup_argv_and_call_main, .-__setup_argv_and_call_main + + .section .text._exit_halt,"ax",@progbits + .global _exit_halt + .type _exit_halt, @function + .align 4 +_exit_halt: + ; r0 contains exit code + mov r0, r0 + flag 1 + b _exit_halt + .size _exit_halt, .-_exit_halt diff --git a/libgloss/arc64/crti.S b/libgloss/arc64/crti.S new file mode 100644 index 000000000..4921cdf66 --- /dev/null +++ b/libgloss/arc64/crti.S @@ -0,0 +1,50 @@ +/* + Copyright (c) 2024, Synopsys, Inc. 
All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +/* + This file contains the stack frame setup for contents of the .fini and + .init sections. 
+*/ + +#include "asm.h" + + .section .init + .global _init + WORD 0 + .type _init,@function +_init: + PUSHR blink + + .section .fini + .global _fini + WORD 0 + .type _fini,@function +_fini: + PUSHR blink diff --git a/libgloss/arc64/crtn.S b/libgloss/arc64/crtn.S new file mode 100644 index 000000000..baa31d4d2 --- /dev/null +++ b/libgloss/arc64/crtn.S @@ -0,0 +1,45 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +/* + This file just makes sure that the .fini and .init sections do in + fact return. 
This file is the last thing linked into any executable. +*/ + +#include "asm.h" + + .section .init + POPR blink + j_s [blink] + + + .section .fini + POPR blink + j_s [blink] diff --git a/libgloss/configure.ac b/libgloss/configure.ac index be7cb1212..628f173d6 100644 --- a/libgloss/configure.ac +++ b/libgloss/configure.ac @@ -51,9 +51,12 @@ case "${target}" in config_aarch64=true config_testsuite=true ;; - arc*-*-*) + arc-*-* | arceb-*-*) config_arc=true ;; + arc[[6432]]*-*-*) + config_arc64=true + ;; csky*-*-*) config_csky=true ;; @@ -217,7 +220,7 @@ AC_SUBST(subdirs) dnl These subdirs have converted to non-recursive make. Hopefully someday all dnl the ports above will too! m4_foreach_w([SUBDIR], [ - aarch64 arc arm bfin cr16 csky d30v epiphany fr30 frv ft32 i386 i960 iq2000 + aarch64 arc arc64 arm bfin cr16 csky d30v epiphany fr30 frv ft32 i386 i960 iq2000 libnosys lm32 m32r mcore microblaze mn10200 mn10300 moxie msp430 nds32 nios2 or1k pru riscv rl78 rx sparc sparc_leon tic6x v850 visium diff --git a/libgloss/libnosys/acinclude.m4 b/libgloss/libnosys/acinclude.m4 index df7433e70..abb456782 100644 --- a/libgloss/libnosys/acinclude.m4 +++ b/libgloss/libnosys/acinclude.m4 @@ -3,7 +3,7 @@ case "${target}" in *-*-cygwin*) ;; a29k-amd-udi) ;; aarch64*-*-*) ;; - arc-*-*) ;; + arc*-*-*) ;; arm*-*-*) ;; bfin-*-*) ;; cris-*-* | crisv32-*-*) ;; diff --git a/newlib/configure.host b/newlib/configure.host index 386183466..a8a73f9bf 100644 --- a/newlib/configure.host +++ b/newlib/configure.host @@ -122,9 +122,12 @@ case "${host_cpu}" in libm_machine_dir=amdgcn newlib_cv_initfinit_array=yes ;; - arc*) + arc | arceb) machine_dir=arc ;; + arc64 | arc32) + machine_dir=arc64 + ;; arm*) machine_dir=arm libm_machine_dir=arm diff --git a/newlib/libc/acinclude.m4 b/newlib/libc/acinclude.m4 index 24148b13f..48a7dee51 100644 --- a/newlib/libc/acinclude.m4 +++ b/newlib/libc/acinclude.m4 @@ -42,7 +42,7 @@ m4_include([libc/machine/spu/acinclude.m4]) 
m4_include([libc/machine/xtensa/acinclude.m4]) m4_foreach_w([MACHINE], [ - aarch64 amdgcn arc arm + aarch64 amdgcn arc arc64 arm bfin cr16 cris crx csky d10v d30v diff --git a/newlib/libc/include/machine/ieeefp.h b/newlib/libc/include/machine/ieeefp.h index ede75e1aa..f99577bea 100644 --- a/newlib/libc/include/machine/ieeefp.h +++ b/newlib/libc/include/machine/ieeefp.h @@ -337,6 +337,10 @@ #endif #endif +#ifdef __ARC64__ +#define __IEEE_LITTLE_ENDIAN +#endif + #ifdef __CRX__ #define __IEEE_LITTLE_ENDIAN #endif diff --git a/newlib/libc/include/machine/setjmp.h b/newlib/libc/include/machine/setjmp.h index 515e84c6d..102582c8e 100644 --- a/newlib/libc/include/machine/setjmp.h +++ b/newlib/libc/include/machine/setjmp.h @@ -284,6 +284,16 @@ _BEGIN_STD_C #define _JBLEN 25 /* r13-r30,blink,lp_count,lp_start,lp_end,status32,r58,r59 */ #endif +#ifdef __ARC64__ +/* r14-r27,sp,ilink,r30,blink */ +#define _JBLEN 18 +#ifdef __ARC64_ARCH64__ +#define _JBTYPE long long +#else /* __ARC64_ARCH32__ */ +#define _JBTYPE long +#endif +#endif /* __ARC64__ */ + #ifdef __MMIX__ /* Using a layout compatible with GCC's built-in. 
*/ #define _JBLEN 5 diff --git a/newlib/libc/machine/Makefile.inc b/newlib/libc/machine/Makefile.inc index a53cf9c10..162be4f66 100644 --- a/newlib/libc/machine/Makefile.inc +++ b/newlib/libc/machine/Makefile.inc @@ -7,6 +7,9 @@ endif if HAVE_LIBC_MACHINE_ARC include %D%/arc/Makefile.inc endif +if HAVE_LIBC_MACHINE_ARC64 +include %D%/arc64/Makefile.inc +endif if HAVE_LIBC_MACHINE_ARM include %D%/arm/Makefile.inc endif diff --git a/newlib/libc/machine/arc64/Makefile.inc b/newlib/libc/machine/arc64/Makefile.inc new file mode 100644 index 000000000..65158573e --- /dev/null +++ b/newlib/libc/machine/arc64/Makefile.inc @@ -0,0 +1,13 @@ +libc_a_SOURCES += \ + %D%/memcmp.S \ + %D%/memcmp-stub.c \ + %D%/memcpy.S \ + %D%/memcpy-stub.c \ + %D%/memset.S \ + %D%/memset-stub.c \ + %D%/setjmp.S \ + %D%/strlen.S \ + %D%/strcat.S \ + %D%/memmove.S\ + %D%/strcmp.S \ + %D%/memchr.S diff --git a/newlib/libc/machine/arc64/memchr.S b/newlib/libc/machine/arc64/memchr.S new file mode 100644 index 000000000..3545c3720 --- /dev/null +++ b/newlib/libc/machine/arc64/memchr.S @@ -0,0 +1,371 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +; r0 void* ptr +; r1 int ch +; r2 size_t count + +#if defined (__ARC64_ARCH32__) + +ENTRY (memchr) + LSRP.f 0, r2, 4 ; counter for 16-byte chunks + beq.d @.L_start_1_byte_search + + ; Filter for 1 byte + bmsk r1, r1, 7 + lsl8 r9, r1 + + or r9, r9, r1 + vpack2hl r1, r9, r9 + + ; r1 is now setup with the special 4 byte repetition of the target byte + ; We use r1 because we dont have any more registers free inside the main loop + ; r9 can be repurposed + mov r8, NULL_32DT_1 + ror r9, r8 + + xor r3, r3, r3 + +.L_search_16_bytes: + +#if defined (__ARC64_LL64__) + + ldd.ab r4r5, [r0, +8] + ldd.ab r6r7, [r0, +8] + +#else + + ld.ab r4, [r0, +4] + ld.ab r5, [r0, +4] + ld.ab r6, [r0, +4] + ld.ab r7, [r0, +4] + +#endif + + xor r4, r4, r1 + xor r5, r5, r1 + xor r6, r6, r1 + xor r7, r7, r1 + + sub r10, r4, r8 + sub r11, r5, r8 + sub r12, r6, r8 + sub r13, r7, r8 + + bic r10, r10, r4 + bic r11, r11, r5 + bic r12, r12, r6 + bic r13, r13, r7 + + tst r10, r9 + bset.ne r3, r3, 4 + + tst r11, r9 + bset.ne r3, r3, 3 + + tst r12, r9 + bset.ne r3, r3, 2 + + tst r13, r9 + bset.ne r3, r3, 1 + + ; Break if found + brne.d r3, 0, @.L_found_in_16B + + ; Keep going we have more 16 byte chunks + sub r2, r2, 16 
+
+	brge	r2, 16, @.L_search_16_bytes
+
+	; Reset byte repetition of r1 to 1 single byte
+	bmsk	r1, r1, 7
+
+.L_start_1_byte_search:
+	; Check if r2 is 0
+	breq.d	r2, 0, @.L_byte_not_found
+	ldb.ab	r10, [r0, +1]
+
+.L_search_1_byte:
+
+	breq	r10, r1, @.L_found_byte
+
+	dbnz.d	r2, @.L_search_1_byte
+	ldb.ab	r10, [r0, +1]
+
+; Byte not found
+.L_byte_not_found:
+	j.d	[blink]
+	MOVP	r0, 0
+
+.L_found_byte:
+	j_s.d	[blink]
+	SUBP	r0, r0, 1
+
+.L_found_in_16B:
+
+	fls	r5, r3			; [2]
+
+; Select appropriate register to analyze [4]
+	mov	r2, r13
+
+; Point r0 to the double word containing the first matching byte [3]
+	sub2	r0, r0, r5
+
+
+	asr.f	r3, r3, 3
+	mov.c	r2, r12
+
+	asr.f	r3, r3, 1
+	mov.c	r2, r11
+
+	asr.f	r3, r3, 1
+	mov.c	r2, r10
+
+	and	r2, r2, r9		; [5]
+
+	ffs	r2, r2			; [6]
+
+	xbfu	r2, r2, 0b0111000011	; [7]
+
+	j.d	[blink]
+	add	r0, r0, r2		; [8]
+
+ENDFUNC (memchr)
+
+#else
+
+ENTRY (memchr)
+	lsrl.f	0, r2, 5		; counter for 32-byte chunks
+	beq.d	@.L_start_1_byte_search
+
+	; Filter for 1 byte
+	bmsk	r1, r1, 7
+	lsl8	r9, r1
+
+	or	r9, r9, r1
+
+	vpack2hl	r1, r9, r9
+	vpack2wl	r1, r1, r1
+
+	; r1 is now setup with the special 8 byte repetition of the target byte
+	; We use r1 because we don't have any more registers free inside the main loop
+	; r9 can be repurposed
+	vpack2wl	r8, NULL_32DT_1, NULL_32DT_1
+	asll	r9, r8, 7
+
+	xorl	r3, r3, r3
+
+.L_search_32_bytes:
+
+; Using 128-bit memory operations
+#if defined (__ARC64_M128__)
+
+	lddl.ab	r4r5, [r0, +16]
+	lddl.ab	r6r7, [r0, +16]
+
+; The 64-bit crunching implementation.
+#elif defined (__ARC64_ARCH64__)
+
+	ldl.ab	r4, [r0, +8]
+	ldl.ab	r5, [r0, +8]
+	ldl.ab	r6, [r0, +8]
+	ldl.ab	r7, [r0, +8]
+
+#else
+	# error Unknown configuration
+#endif
+
+	xorl	r4, r4, r1
+	xorl	r5, r5, r1
+	xorl	r6, r6, r1
+	xorl	r7, r7, r1
+
+	subl	r10, r4, r8
+	subl	r11, r5, r8
+	subl	r12, r6, r8
+	subl	r13, r7, r8
+
+	bicl	r10, r10, r4
+	bicl	r11, r11, r5
+	bicl	r12, r12, r6
+	bicl	r13, r13, r7
+
+	tstl	r10, r9
+	bset.ne	r3, r3, 4
+
+	tstl	r11, r9
+	bset.ne	r3, r3, 3
+
+	tstl	r12, r9
+	bset.ne	r3, r3, 2
+
+	tstl	r13, r9
+	bset.ne	r3, r3, 1
+
+	; Break if found
+	brne.d	r3, 0, @.L_found_in_32B
+
+	; Keep going we have more 32 byte chunks
+	subl	r2, r2, 32
+	brge	r2, 32, @.L_search_32_bytes
+
+	; Reset byte repetition of r1 to 1 single byte
+	bmskl	r1, r1, 7
+
+.L_start_1_byte_search:
+	; Check if r2 is 0
+	breq.d	r2, 0, @.L_byte_not_found
+	ldb.ab	r10, [r0, +1]
+
+.L_search_1_byte:
+
+	breq	r10, r1, @.L_found_byte
+
+	dbnz.d	r2, @.L_search_1_byte
+	ldb.ab	r10, [r0, +1]
+
+; Byte not found
+.L_byte_not_found:
+	j.d	[blink]
+	movl	r0, 0
+
+.L_found_byte:
+	j_s.d	[blink]
+	subl	r0, r0, 1
+
+.L_found_in_32B:
+
+	fls	r5, r3			; [2]
+
+; Select appropriate register to analyze [4]
+	movl	r2, r13
+
+; Point r0 to the double word containing the first matching byte [3]
+	sub3l	r0, r0, r5
+
+	asr.f	r3, r3, 3
+	movl.c	r2, r12
+
+	asr.f	r3, r3, 1
+	movl.c	r2, r11
+
+	asr.f	r3, r3, 1
+	movl.c	r2, r10
+
+	andl	r2, r2, r9		; [5]
+
+	ffsl	r2, r2			; [6]
+
+	xbful	r2, r2, 0b0111000011	; [7]
+
+	j.d	[blink]
+	addl	r0, r0, r2		; [8]
+
+ENDFUNC (memchr)
+#endif
+
+;; This code uses a common technique for NULL byte detection inside a word.
+;; Details on this technique can be found in: +;; (https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord) +; +; In sum, this technique allows for detecting a NULL byte inside any given +; amount of bits by performing the following operation +; DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) [0] +; +; The code above implements this by setting r8 to a 0x01010101... sequence and +; r9 to a 0x80808080... sequence of appropriate length +; As LIMM are 32 bit only, we need to perform MOVHL and ORL [1] operations to +; have the appropriate 64 bit values in place +; +; As we want a specific byte and not a NULL byte, we create in r1 a constant +; that is made up of the target byte, on each byte position, that we xor with +; the loaded data to force a NULL byte only if the target byte is present. +; After that we can use the technique directly +; +;; Search is done 32 bytes at a time, either with 64 bit loads or 128 bit loads +;; If the target byte is detected, the position of the double word is encoded +;; in r3, which is eventually used to adjust r0 +; +; r3 is set via bset, which means we can simply use a fls to obtain the first +; match (or ffs depending on the values in bset) [2]. +; The reason for starting at 1 and not 0 is so r3 encodes how many double +; words to go back, and it wouldnt make sense to go back 0 (the byte would be +; in the next loop iteration). +; +; The first step to take is point r0 to the appropriate double word. +; As the chosen encoded information is how many double words to go back, +; we can simply multiply r3 by 8 and reduce r0 by that amount [3] +; +; Then, we need to place the loaded double word containing the first target byte +; found, into a "common" register we can operate on later [4]. +; +; To do this without any jumps, we can shift r3 and perform a conditional mov +; based on the carry flag value. 
+; The order is very important because the byte can appear in several double +; words, so we want to analyze from last to first. +; +; We can ignore the first asr (which would be asr.f 2, as we started r3 on 1) +; because if r13 isnt the target byte, r2 will always be overwritten so we can +; just decide to start at r7, and overwrite it if needed. +; +; Now comes the tricky part. In order to obtain the first target byte, we need +; to understand the NULL byte detection operation. It is explained in depth in +; the link above but in short, it works by first setting the highest bit of each +; byte to 1, if the corresponding byte is either 0 or more than 0x80 +; Then, separately, it makes the highest bit of each byte 1, if the byte is +; less than 0x80. The last step is to AND these two values (this operation is +; simplified with the SUB, BIC and TST instructions). +; +; This means that the evaluated equation result value [5] has zeros for all non +; zero bytes, except for the NULL bytes (which are the target bytes after the +; xor). Therefore, we can simply find the first non zero bit (counting from bit +; 0) which will be inside the position of the first NULL byte. +; +; One thing to note, is that ffs oddly returns 31 if no bit is found, setting +; the zero flag. As r9 is never all 0s at this stage (would mean there is no +; NULL byte and we wouldnt be here) we dont need to worry about that. [6] +; +; We can then convert the bit position into the last byte position by looking +; into bits 3 to 5, and shifting 3 bits to the right. This can be combined into +; a single xbful operation. The bottom 000011 represent shift by 3 and the top +; 0111 represents the mask (3 to 5 shifted by 3 is 0 to 2). 
We dont need to +; worry about the case where ffs does not find a bit, because we know for sure +; there is at least one NULL byte, and therefore one of the highest bits is set +; to 1 [7] +; +; Finally, we can add the NULL/target byte position inside the loaded double +; word to r0 to obtain the bytes absolute position [8] +; +; +; Some operations are re-ordered such that register dependency is reduced, +; allowing the CPU to run more instructions in parallel +; diff --git a/newlib/libc/machine/arc64/memcmp-stub.c b/newlib/libc/machine/arc64/memcmp-stub.c new file mode 100644 index 000000000..3fa06d57d --- /dev/null +++ b/newlib/libc/machine/arc64/memcmp-stub.c @@ -0,0 +1,35 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined (__ARC64_ARCH32__) +# include "../../string/memcmp.c" +#else +/* See memcpy.S. */ +#endif diff --git a/newlib/libc/machine/arc64/memcmp.S b/newlib/libc/machine/arc64/memcmp.S new file mode 100644 index 000000000..5defd0cbd --- /dev/null +++ b/newlib/libc/machine/arc64/memcmp.S @@ -0,0 +1,269 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#if defined (__ARC64_ARCH64__) + +; R0: lhs +; R1: rhs +; R2: count +; ret (R0): +; - lhs < rhs: <0 +; - lhs = rhs: 0 +; - lhs > rhs: >0 +ENTRY (memcmp) + cmpl r2, 64 + bls.d @.L_compare_1_bytes + movl r3, r0 ; "r0" will be used as return value + ; If one is curious why the code below looks like the way it does, + ; there is a documentation at the end of this file. + lsrl r12, r2, 5 ; counter for 32-byte chunks + xor r13, r13, r13 ; the mask showing inequal registers + ldl.ab r4, [r3, +8] + ldl.ab r5, [r1, +8] +.L_compare_32_bytes: + ldl.ab r6, [r3, +8] + ldl.ab r7, [r1, +8] + ldl.ab r8, [r3, +8] + ldl.ab r9, [r1, +8] + ldl.ab r10, [r3, +8] + ldl.ab r11, [r1, +8] + xorl.f 0, r4, r5 + xor.ne r13, r13, 0b0001 + xorl.f 0, r6, r7 + xor.ne r13, r13, 0b0010 + xorl.f 0, r8, r9 + xor.ne r13, r13, 0b0100 + xorl.f 0, r10, r11 + xor.ne r13, r13, 0b1000 + brne r13, 0, @.L_unequal_find + ldl.ab r4, [r3, +8] + dbnz.d r12, @.L_compare_32_bytes + ldl.ab r5, [r1, +8] + ; Adjusting the pointers because of the extra loads in the end + subl r1, r1, 8 + subl r3, r3, 8 + bmsk_s r2, r2, 4 ; any remaining bytes to compare +.L_compare_1_bytes: + cmp r2, 0 + jeq.d [blink] + xor_s r0, r0, r0 + ldb.ab r4, [r3, +1] + ldb.ab r5, [r1, +1] +2: + sub.f r0, r4, r5 + jne.d [blink] + ldb.ab r4, [r3, +1] + dbnz.d r2, @2b + ldb.ab r5, [r1, +1] ; this load may read beyond the "count". 
+ j_s [blink] +; At this point, we want to find the _first_ comparison that marked the +; inequality of "lhs" and "rhs". The rest acts like a multiplexer: +; +; if r4 was not equal to r5 --> r1=r4, r2=r5 +; if r6 was not equal to r7 --> r1=r6, r2=r7 +; if r8 was not equal to r9 --> r1=r8, r2=r9 +; if r10 was not equal to r11 --> r1=r10, r2=r11 +; find_different_byte(r1, r2) +; +; About the "bi [n]" (branch index) instruction: This instruction alters +; next PC (program counter): +; +; next_pc = current_pc + n*4 n*4 is the same as n<<2 +; +; In other words, it tells the processor to execute the n'th instruction +; from where we are (assuming all the next instructions are 4 bytes long). +; +; We used this to our benefit. We made each "case" (unequal_r4r5, +; unequal_r5r6, ...) 16 bytes long (power of 2) and fed "bi" an index +; that is already multiplied by 4 (asl r13, r13, 2). This translates +; into "bi [n]" jumping to 16-bytes slots. The last slot we did not +; make 16 bytes long with "nop" because we don't need to address after +; it. +.L_unequal_find: + ffs r13, r13 + asl r13, r13, 2 + bi [r13] +.L_unequal_r4r5: + movl r1, r4 + b.d @.L_diff_byte_in_regs + movl r2, r5 + nop +.L_unequal_r6r7: + movl r1, r6 + b.d @.L_diff_byte_in_regs + movl r2, r7 + nop +.L_unequal_r8r9: + movl r1, r8 + b.d @.L_diff_byte_in_regs + movl r2, r9 + nop +.L_unequal_r10r11: + movl r1, r10 + movl r2, r11 + ; fall-through +; If we're here, that means the two operands are not equal. +; 1) First we have to get a mask of their inequality through "xor" +; 2) Then, find the first bit position that they're different: "ffs" +; 3) Depending on the bit position, we want the whole byte containing +; that bit, in both operands, to become the very first byte (least +; significant byte), so that we can subtract one from another. 
+; Below is an illustration of bit positions and how much we should
+; shift the numbers right:
+; bit position range : (in binary) | shift right by : (in binary)
+; -------------------+-------------------+----------------+------------
+; [ 0, 7] : (000000 - 000111) | lsr 0 : 000000
+; [ 8,15] : (001000 - 001111) | lsr 8 : 001000
+; [16,23] : (010000 - 010111) | lsr 16 : 010000
+; [24,31] : (011000 - 011111) | lsr 24 : 011000
+; ... : ... | ... : ...
+; [56,63] : (111000 - 111111) | lsr 56 : 111000
+; We need to ignore the least 3 bits of "position" to get "shift right"
+; amount: "and 0x38, ..."
+; 4) When the bytes are positioned at byte #0, mask out the rest of the
+; bytes and subtract the two operands: lhs - rhs
+.L_diff_byte_in_regs:
+ xorl r0, r1, r2 ; (1)
+ ffsl r0, r0 ; (2)
+ and r0, r0, 0x38 ; (3)
+ lsrl r1, r1, r0 ; (3)
+ lsrl r2, r2, r0 ; (3)
+ bmsk_s r1, r1, 7 ; (4)
+ bmsk_s r2, r2, 7 ; (4)
+ j_s.d [blink]
+ subl r0, r1, r2 ; (4)
+ENDFUNC (memcmp)
+
+; __ARC64_ARCH64__
+#endif
+
+; The loop at the heart of the "memcmp" function follows some specific
+; logic and has gone through a few optimisation filters. Knowing them
+; will help understand the code better.
+;
+; The comparison logic
+; --------------------
+; In each loop, we compare 32 bytes of data from "lhs" and "rhs". Those
+; comparisons takes place by using 8 sets of registers:
+;
+; r4 == r5 xor.f 0, r4, r5 lhs[i+0] == rhs[i+0]
+; r6 == r7 xor.f 0, r6, r7 lhs[i+8] == rhs[i+8]
+; r8 == r9 xor.f 0, r8, r9 lhs[i+16] == rhs[i+16]
+; r10 == r11 xor.f 0, r10, r11 lhs[i+24] == rhs[i+24]
+;
+; The idea is to set a corresponding bit in r13 register for each
+; comparison that fails. The relation between the bits and the
+; comparisons are:
+;
+; r13[0..63] = 0
+; r13[0] = 1 if r4 != r5
+; r13[1] = 1 if r6 != r7
+; r13[2] = 1 if r8 != r9
+; r13[3] = 1 if r10 != r11
+;
+; If r13 remains 0, the next possible iteration of the loop begins.
+; If it is not 0 anymore, the algorithm will be interested in the +; lowest bit that is set to 1. That is achieved by the "ffs" +; (find first set) instruction. +; +; The loop transformation +; ----------------------- +; 1) At first, the loop looks like below: +; +; .loop +; ldl.ab r4, [r3, +8] +; ldl.ab r5, [r1, +8] +; ... +; ldl.ab r10, [r3, +8] +; ldl.ab r11, [r1, +8] +; xorl.f 0, r4, r5 +; xor.ne r13, r13, 0b0001 +; ... +; xorl.f 0, r10, r11 +; xor.ne r13, r13, 0b1000 +; brne r13, 0, @.unequal_find +; dbnz r12, @.loop +; +; 2) "dbnz" instruction has a delay slot. To make the code more +; efficient, we can bring the first 2 instructions of the loop +; to the end (they will be executed just before the next iteration +; begins). To make the logic of the program sound, those 2 +; instructions need to be duplicated before the loop start as well: +; +; ldl.ab r4, [r3, +8] +; ldl.ab r5, [r1, +8] +; .loop +; ldl.ab r6, [r3, +8] +; ldl.ab r7, [r1, +8] +; ... +; ldl.ab r10, [r3, +8] +; ldl.ab r11, [r1, +8] +; xorl.f 0, r4, r5 +; xor.ne r13, r13, 0b0001 +; ... +; xorl.f 0, r10, r11 +; xor.ne r13, r13, 0b1000 +; brne r13, 0, @.unequal_find +; ldl.ab r4, [r3, +8] +; dbnz.d r12, @.loop +; ldl.ab r5, [r1, +8] +; +; There is one more loose end to take care of: At the last iteration +; of the loop, there is an extra load into r4 and r5 registers while +; incrementing the pointers (r3 and r1). We have to correct for that +; after the loop: +; +; .loop: +; .. +; brne r13, 0, @.unequal_find +; ldl.ab r4, [r3, +8] +; dbnz.d r12, @.loop +; ldl.ab r5, [r1, +8] +; subl r1, r1, 8 +; subl r3, r3, 8 +; +; One last remark about NOT filling the delay slot of "brne" with +; "ldl.ab r4, ...". If the branch is taken, the rest of code that +; is responsible for finding the differentiating bytes relies that +; all 8 registers hold the comparison data of the loop. Putting +; "ldl.ab r4, ..." into the delay slot of "brne ..." would clobber +; the "r4" register: +; +; .loop: +; .. 
+; brne.d r13, 0, @.unequal_find --> this branch might be taken +; ldl.ab r4, [r3, +8] --> clobbers r4 +; dbnz.d r12, @.loop +; ldl.ab r5, [r1, +8] +; +; Having "ldl.ab r4, ..." between "brne" and "dbnz" as two control flow +; altering instructions is good enough. diff --git a/newlib/libc/machine/arc64/memcpy-stub.c b/newlib/libc/machine/arc64/memcpy-stub.c new file mode 100644 index 000000000..06d7305d3 --- /dev/null +++ b/newlib/libc/machine/arc64/memcpy-stub.c @@ -0,0 +1,35 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#if !defined (__ARC64_ARCH32__) && !defined (__ARC64_ARCH64__) +# include "../../string/memcpy.c" +#else +/* See memcpy.S. */ +#endif diff --git a/newlib/libc/machine/arc64/memcpy.S b/newlib/libc/machine/arc64/memcpy.S new file mode 100644 index 000000000..77cf307a7 --- /dev/null +++ b/newlib/libc/machine/arc64/memcpy.S @@ -0,0 +1,236 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +; This file contains variants of the same function with different +; instructions. The generic one, the implementation that comes the +; last after the #else macro, is the most commented. + +; Using 128-bit memory operations +#if defined (__ARC64_M128__) + +ENTRY (memcpy) + lsrl.f r12, r2, 6 ; Check size < 64bytes + beq.d @.L_write_1_bytes + movl r3, r0 +.L_write_64_bytes: + lddl.ab r4r5, [r1, +16] + lddl.ab r6r7, [r1, +16] + lddl.ab r8r9, [r1, +16] + lddl.ab r10r11, [r1, +16] + stdl.ab r4r5, [r3, +16] + stdl.ab r6r7, [r3, +16] + stdl.ab r8r9, [r3, +16] + dbnz.d r12, @.L_write_64_bytes + stdl.ab r10r11, [r3, +16] +.L_write_1_bytes: + ;; Handle anything between 15bytes < size < 64bytes + ;; The algorithm has two phases: + ;; - copy 16, 32, or 48 bytes of data using 128bit ops + ;; - copy the remaining 15 bytes of data using a single stdl/lddl pair + bmsk.f r2, r2, 5 ; Check size == 0 + jeq.d [blink] + lsr.f r12, r2, 4 ; Check size < 16bytes + beq.d @1f + xor r12, r12, 3 + ;; R12 can be 3,2, or 1, which are indicating how much data we should + ;; copy: 3 -> 48bytes, 2 -> 32bytes, 1 -> 16bytes. + ;; Zero case shouldn't happen as we check for it above. + ;; Then I use the BI instructions to implement the following code + ;; switch ($R12) + ;; case 3: + ;; lddl RA, ... + ;; stdl RA, ... + ;; case 2: + ;; lddl RA, ... + ;; stdl RA, ... + ;; case 1: + ;; lddl RA, ... + ;; stdl RA, ... 
+ ;; case 0: + ;; break + ;; N.B the BI instruction works the other way than I expected, namely + ;; BI's entry 0 is the closest to instruction, hence I need to bit + ;; invert R12 to get the desired behaviour (done by above XOR). + asl r12,r12,1 + bi [r12] + lddl.ab r4r5, [r1, +16] + stdl.ab r4r5, [r3, +16] + lddl.ab r6r7, [r1, +16] + stdl.ab r6r7, [r3, +16] + lddl.ab r8r9, [r1, +16] + stdl.ab r8r9, [r3, +16] + bmsk.f r2, r2, 3 ; Check size == 0 + jeq.d [blink] + subl r2, r2, 16 + ;; We are still having 15 bytes top to transfer, exactly like in the + ;; case of below byte-by-byte transfer. However, we already transfered + ;; at least 16bytes before, thus, we can create a new 16byte load which + ;; re-reads parts of the already transfer data AND the remaining up to + ;; 15 bytes of data still to be transfered. + ;; The position of the window is controlled by the $r12 which is the + ;; complement of the number of remaining bytes. + addl r3, r3, r2 + lddl r4r5, [r1, r2] + j_s.d [blink] + stdl r4r5, [r3] +1: + ;; Anything size < 16 we go byte by byte. + ldb.ab r4, [r1, +1] + dbnz.d r2, @1b + stb.ab r4, [r3, +1] + j_s [blink] +ENDFUNC (memcpy) + +; The 64-bit crunching implementation. 
+#elif defined (__ARC64_ARCH64__) \ + || (defined (__ARC64_ARCH32__) && defined (__ARC64_LL64__)) + +; R0: dest +; R1: source +; R2: count +; ret (R0): dest +; clobber: r1, r3, r4r5, r6r7, r8r9, r10r11, r12 +ENTRY (memcpy) + LSRP.f r12, r2, 5 ; counter for 32-byte chunks + beq.d @.L_write_31_bytes + MOVP r3, r0 ; do not clobber the "dest" +.L_write_32_bytes: ; Take care of 32 byte chunks + LD64.ab r4, [r1, +8] + LD64.ab r6, [r1, +8] + LD64.ab r8, [r1, +8] + LD64.ab r10,[r1, +8] + ST64.ab r4, [r3, +8] + ST64.ab r6, [r3, +8] + ST64.ab r8, [r3, +8] + dbnz.d r12, @.L_write_32_bytes + ST64.ab r10, [r3, +8] ; Shove store in delay slot + bmsk_s r2, r2, 4 ; From now on, we only care for the remainder % 32 + + +; The remainder bits indicating how many more bytes to copy +; .------------------------. +; | b4 | b3 | b2 | b1 | b0 | +; `------------------------' +; 16 8 4 2 1 +.L_write_31_bytes: + bbit0.d r2, 2, @1f ; is b2 set? then copy 4 bytes + lsr r12, r2, 3 ; see the notes below + ld.ab r4, [r1, 4] + st.ab r4, [r3, 4] +1: + bbit0.d r2, 1, @1f ; is b1 set? then copy 2 bytes + xor r12, r12, 3 + ldh.ab r4, [r1, 2] + sth.ab r4, [r3, 2] +1: + bbit0.d r2, 0, @1f ; is b0 set? then copy 1 byte + asl r12, r12, 1 + ldb.ab r4, [r1, 1] + stb.ab r4, [r3, 1] + +; Interpreting bits (b4,b3) [1] and how they correlate to branch index: +; +; (b4,b3) | bytes to copy | branch index +; --------+---------------+------------- +; 00b | 0 | 3 (11b) +; 01b | 8 | 2 (10b) +; 10b | 16 | 1 (01b) +; 11b | 24 | 0 (00b) +; +; To go from (b4,b3) to branch index, the bits must be flipped. +; In other words, they must be XORed with 11b [2]. +; +; Last but not least, "bi" jumps at boundaries of 4. We need to double +; the index to jump 8 bytes [3]. 
+; +; Hence, the 3 operations for calculating the branch index that are spread +; in "bbit0" delay slots: +; +; lsr r12, r2, 3 [1] +; xor r12, r12, 3 [2] +; asl r12, r12, 1 [3] +1: + bi [r12] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + + j_s [blink] +ENDFUNC (memcpy) + +#elif defined (__ARC64_ARCH32__) + +ENTRY (memcpy) + lsr.f r11, r2, 4 ; counter for 16-byte chunks + beq.d @.L_write_15_bytes + mov r3, r0 ; work on a copy of "r0" +.L_write_16_bytes: + ld.ab r4, [r1, 4] + ld.ab r5, [r1, 4] + ld.ab r6, [r1, 4] + ld.ab r7, [r1, 4] + st.ab r4, [r3, 4] + st.ab r5, [r3, 4] + st.ab r6, [r3, 4] + dbnz.d r11, @.L_write_16_bytes + st.ab r7, [r3, 4] + bmsk_s r2, r2, 3 + +.L_write_15_bytes: + bbit0.d r2, 1, @1f + lsr r11, r2, 2 + ldh.ab r4, [r1, 2] + sth.ab r4, [r3, 2] +1: + bbit0.d r2, 0, @1f + xor r11, r11, 3 + ldb.ab r4, [r1, 1] + stb.ab r4, [r3, 1] +1: + asl r11, r11, 1 + bi [r11] + ld.ab r4,[r1, 4] + st.ab r4,[r3, 4] + ld.ab r4,[r1, 4] + st.ab r4,[r3, 4] + ld r4,[r1] + st r4,[r3] + + j_s [blink] +ENDFUNC (memcpy) + +#else +# error Unknown configuration +#endif diff --git a/newlib/libc/machine/arc64/memmove.S b/newlib/libc/machine/arc64/memmove.S new file mode 100644 index 000000000..5458dd012 --- /dev/null +++ b/newlib/libc/machine/arc64/memmove.S @@ -0,0 +1,312 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +; r0 void* dest +; r1 const void* src +; r2 size_t count + +; The 64-bit crunching implementation. + +#if defined (__ARC64_ARCH32__) && !defined(__ARC64_LL64__) + +ENTRY (memmove) + +; If the destination is greater than the source + cmp r0, r1 + ADDP r4, r1, r2 +; or if the source plus count is smaller than the destination + cmp.eq r4, r0 + +; We can safely perform a normal memcpy. 
Otherwise, we need to perform it +; backwards + blo.d @.L_normal_memcpy + lsr.f r11, r2, 4 ; counter for 16-byte chunks + + ADDP r3, r0, r2 + +; Backwards search +; The only thing that changes between memcpy and memmove is copy direction +; in case the dest and src address memory locations overlap +; More detailed information is in the forwards copy and at the end of +; this document + + ADDP r1, r1, r2 + bmsk_s r2, r2, 3 + + bbit0.d r2, 1, @1f + lsr r5, r2, 2 + ldh.aw r4, [r1, -2] + sth.aw r4, [r3, -2] +1: + bbit0.d r2, 0, @1f + xor r5, r5, 3 + ldb.aw r4, [r1, -1] + stb.aw r4, [r3, -1] +1: + asl r5, r5, 1 + bi [r5] + ld.aw r4,[r1, -4] + st.aw r4,[r3, -4] + ld.aw r4,[r1, -4] + st.aw r4,[r3, -4] + ld.aw r4,[r1, -4] + st.aw r4,[r3, -4] + +; Return if there are no 16 byte chunks + jeq [blink] + +.L_write_backwards_16_bytes: + ld.aw r4, [r1, -4] + ld.aw r5, [r1, -4] + ld.aw r6, [r1, -4] + ld.aw r7, [r1, -4] + st.aw r4, [r3, -4] + st.aw r5, [r3, -4] + st.aw r6, [r3, -4] + dbnz.d r11, @.L_write_backwards_16_bytes + st.aw r7, [r3, -4] + + j_s [blink] + +.L_normal_memcpy: + beq.d @.L_write_forwards_15_bytes + mov r3, r0 ; work on a copy of "r0" + +.L_write_forwards_16_bytes: + ld.ab r4, [r1, 4] + ld.ab r5, [r1, 4] + ld.ab r6, [r1, 4] + ld.ab r7, [r1, 4] + st.ab r4, [r3, 4] + st.ab r5, [r3, 4] + st.ab r6, [r3, 4] + dbnz.d r11, @.L_write_forwards_16_bytes + st.ab r7, [r3, 4] + bmsk_s r2, r2, 3 + +.L_write_forwards_15_bytes: + bbit0.d r2, 1, @1f + lsr r11, r2, 2 + ldh.ab r4, [r1, 2] + sth.ab r4, [r3, 2] +1: + bbit0.d r2, 0, @1f + xor r11, r11, 3 + ldb.ab r4, [r1, 1] + stb.ab r4, [r3, 1] +1: + asl r11, r11, 1 + bi [r11] + ld.ab r4,[r1, 4] + st.ab r4,[r3, 4] + ld.ab r4,[r1, 4] + st.ab r4,[r3, 4] + ld r4,[r1] + st r4,[r3] + + j_s [blink] + +ENDFUNC (memmove) + +#else + +ENTRY (memmove) +; If the destination is greater than the source + cmp r0, r1 + ADDP r4, r1, r2 +; or if the source plus count is smaller than the destination + cmp.eq r4, r0 + +; We can safely perform a normal 
memcpy. Otherwise, we need to perform it +; backwards + blo.d @.L_normal_memcpy + LSRP.f r12, r2, 5 ; counter for 32-byte chunks + + ADDP r3, r0, r2 + +; Backwards search +; The only thing that changes between memcpy and memmove is copy direction +; in case the dest and src address memory locations overlap +; More detailed information is in the forwards copy and at the end of +; this document + +; Set both r0 and r1 to point to the end of each memory location + ADDP r1, r1, r2 + bmsk_s r2, r2, 4 + + bbit0.d r2, 0, @1f + lsr r11, r2, 3 + ldb.aw r4, [r1, -1] + stb.aw r4, [r3, -1] +1: + bbit0.d r2, 1, @1f + xor r11, r11, 3 + ldh.aw r4, [r1, -2] + sth.aw r4, [r3, -2] +1: + bbit0.d r2, 2, @1f + asl r11, r11, 1 + ld.aw r4, [r1, -4] + st.aw r4, [r3, -4] +1: + bi [r11] + LD64.aw r4, [r1, -8] + ST64.aw r4, [r3, -8] + LD64.aw r4, [r1, -8] + ST64.aw r4, [r3, -8] + LD64.aw r4, [r1, -8] + ST64.aw r4, [r3, -8] + +; Jump if there are no 32 byte chunks + jeq [blink] + +.L_write_backwards_32_bytes: ; Take care of 32 byte chunks +#if defined (__ARC64_M128__) + + lddl.aw r4r5, [r1, -16] + lddl.aw r6r7, [r1, -16] + + stdl.aw r4r5, [r3, -16] + stdl.aw r6r7, [r3, -16] + dbnz r12, @.L_write_backwards_32_bytes + +#elif defined (__ARC64_ARCH64__) || ( defined (__ARC64_ARCH32__) && defined (__ARC64_LL64__) ) + + LD64.aw r4, [r1, -8] + LD64.aw r6, [r1, -8] + LD64.aw r8, [r1, -8] + LD64.aw r10,[r1, -8] + + ST64.aw r4, [r3, -8] + ST64.aw r6, [r3, -8] + ST64.aw r8, [r3, -8] + dbnz.d r12, @.L_write_backwards_32_bytes + ST64.aw r10, [r3, -8] + +#else +# error Unknown configuration +#endif + + j_s [blink] + +; Normal memcpy +.L_normal_memcpy: + ;LSRP.f r12, r2, 5 ; Moved up + + beq.d @.L_write_forwards_31_bytes + MOVP r3, r0 ; do not clobber the "dest" + +.L_write_forwards_32_bytes: ; Take care of 32 byte chunks +#if defined (__ARC64_M128__) + + lddl.ab r4r5, [r1, +16] + lddl.ab r6r7, [r1, +16] + + stdl.ab r4r5, [r3, +16] + stdl.ab r6r7, [r3, +16] + dbnz r12, @.L_write_forwards_32_bytes + +#elif 
defined (__ARC64_ARCH64__) || ( defined (__ARC64_ARCH32__) && defined (__ARC64_LL64__) ) + + LD64.ab r4, [r1, +8] + LD64.ab r6, [r1, +8] + LD64.ab r8, [r1, +8] + LD64.ab r10,[r1, +8] + ST64.ab r4, [r3, +8] + ST64.ab r6, [r3, +8] + ST64.ab r8, [r3, +8] + dbnz.d r12, @.L_write_forwards_32_bytes + ST64.ab r10, [r3, +8] ; Shove store in delay slot + +#else +# error Unknown configuration +#endif + + bmsk_s r2, r2, 4 ; From now on, we only care for the remainder % 32 + + +; The remainder bits indicating how many more bytes to copy +; .------------------------. +; | b4 | b3 | b2 | b1 | b0 | +; `------------------------' +; 16 8 4 2 1 +.L_write_forwards_31_bytes: + bbit0.d r2, 2, @1f ; is b2 set? then copy 4 bytes + lsr r12, r2, 3 ; see the notes below + ld.ab r4, [r1, 4] + st.ab r4, [r3, 4] +1: + bbit0.d r2, 1, @1f ; is b1 set? then copy 2 bytes + xor r12, r12, 3 + ldh.ab r4, [r1, 2] + sth.ab r4, [r3, 2] +1: + bbit0.d r2, 0, @1f ; is b0 set? then copy 1 byte + asl r12, r12, 1 + ldb.ab r4, [r1, 1] + stb.ab r4, [r3, 1] + +; Interpreting bits (b4,b3) [1] and how they correlate to branch index: +; +; (b4,b3) | bytes to copy | branch index +; --------+---------------+------------- +; 00b | 0 | 3 (11b) +; 01b | 8 | 2 (10b) +; 10b | 16 | 1 (01b) +; 11b | 24 | 0 (00b) +; +; To go from (b4,b3) to branch index, the bits must be flipped. +; In other words, they must be XORed with 11b [2]. +; +; Last but not least, "bi" jumps at boundaries of 4. We need to double +; the index to jump 8 bytes [3]. 
+; +; Hence, the 3 operations for calculating the branch index that are spread +; in "bbit0" delay slots: +; +; lsr r12, r2, 3 [1] +; xor r12, r12, 3 [2] +; asl r12, r12, 1 [3] +1: + bi [r12] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + LD64.ab r4, [r1, 8] + ST64.ab r4, [r3, 8] + + j_s [blink] + +ENDFUNC (memmove) + +#endif diff --git a/newlib/libc/machine/arc64/memset-stub.c b/newlib/libc/machine/arc64/memset-stub.c new file mode 100644 index 000000000..e7f81eeca --- /dev/null +++ b/newlib/libc/machine/arc64/memset-stub.c @@ -0,0 +1,35 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
 IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#if !defined (__ARC64_ARCH32__) && !defined (__ARC64_ARCH64__)
+# include "../../string/memset.c"
+#else
+/* See memset.S. */
+#endif
diff --git a/newlib/libc/machine/arc64/memset.S b/newlib/libc/machine/arc64/memset.S
new file mode 100644
index 000000000..88c8e09fc
--- /dev/null
+++ b/newlib/libc/machine/arc64/memset.S
@@ -0,0 +1,184 @@
+/*
+ Copyright (c) 2024, Synopsys, Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1) Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2) Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3) Neither the name of the Synopsys, Inc., nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +; This file contains variants of the same function with different +; instructions. The generic one, the implementation that comes the +; last after the #else macro, is the most commented. + +; Using 128-bit memory operations +#if defined (__ARC64_M128__) + +ENTRY (memset) + ;; Assemble 128b token + bmsk_s r1, r1, 7 + lsl8 r3, r1 + or_s r1, r1, r3 + lsl16 r3, r1 + or r6, r1, r3 + addhl r6, r6, r6 + movl r7, r6 + + lsrl.f r5, r2, 6 + beq.d @.L_write_63_bytes + movl r4, r0 +.L_write_64_bytes: + stdl.ab r6r7, [r4, +16] + stdl.ab r6r7, [r4, +16] + stdl.ab r6r7, [r4, +16] + dbnz.d r5, @.L_write_64_bytes + stdl.ab r6r7, [r4, +16] + bmsk_s r2, r2, 5 + +.L_write_63_bytes: + bbit0.d r2, 3, @1f + lsr r3, r2, 4 + stl.ab r6, [r4, 8] +1: + bbit0.d r2, 2, @1f + xor r3, r3, 3 + st.ab r6, [r4, 4] +1: + bbit0 r2, 1, @1f + sth.ab r6, [r4, 2] +1: + bbit0 r2, 0, @1f + stb.ab r6, [r4, 1] +1: + bi [r3] + stdl.ab r6r7,[r4, 16] + stdl.ab r6r7,[r4, 16] + stdl.ab r6r7,[r4, 16] + + j_s [blink] + +.L_write_1_bytes: + breq r2, 0, @.L_return + dbnz.d r2, @. + stb.ab r1, [r4, +1] +.L_return: + j_s [blink] +ENDFUNC (memset) + +; The generic 64-bit implementation without any frills. 
+#elif defined (__ARC64_ARCH64__) || defined (__ARC64_LL64__) + +#if defined (__ARC64_ARCH32__) +# define MOVH mov r7,r6 +#elif defined (__ARC64_ARCH64__) +# define MOVH addhl r6,r6,r6 +#else +# error Please use either 32-bit or 64-bit version of arc64 compiler +#endif + +; R0: dest +; R1: ch +; R2: count +; ret (R0): dest +ENTRY (memset) + ;; Assemble the bytes to 64bit words + bmsk_s r1, r1, 7 ; treat it like unsigned char + lsl8 r3, r1 + or_s r1, r1, r3 + lsl16 r3, r1 + or r6, r1, r3 + MOVH + + LSRP.f r5, r2, 5 ; counter for 32-byte chunks + beq.d @.L_write_31_bytes + MOVP r4, r0 ; work on a copy of "r0" +.L_write_32_bytes: + ST64.ab r6, [r4, +8] + ST64.ab r6, [r4, +8] + ST64.ab r6, [r4, +8] + dbnz.d r5, @.L_write_32_bytes + ST64.ab r6, [r4, +8] + bmsk_s r2, r2, 4 + +.L_write_31_bytes: + bbit0.d r2, 2, @1f + lsr r3, r2, 3 + st.ab r6, [r4, 4] +1: + bbit0.d r2, 1, @1f + xor r3, r3, 3 + sth.ab r6, [r4, 2] +1: + bbit0 r2, 0, @1f + stb.ab r6, [r4, 1] +1: + bi [r3] + ST64.ab r6,[r4, 8] + ST64.ab r6,[r4, 8] + ST64.ab r6,[r4, 8] + + j_s [blink] +ENDFUNC (memset) + +#elif defined (__ARC64_ARCH32__) +ENTRY (memset) + ;; Assemble the bytes to 32bit words + bmsk_s r1, r1, 7 ; treat it like unsigned char + lsl8 r3, r1 + or_s r1, r1, r3 + lsl16 r3, r1 + or r6, r1, r3 + + lsr.f r5, r2, 4 ; counter for 16-byte chunks + beq.d @.L_write_15_bytes + mov r4, r0 ; work on a copy of "r0" +.L_write_16_bytes: + st.ab r6, [r4, 4] + st.ab r6, [r4, 4] + st.ab r6, [r4, 4] + dbnz.d r5, @.L_write_16_bytes + st.ab r6, [r4, 4] + bmsk_s r2, r2, 3 + +.L_write_15_bytes: + bbit0.d r2, 1, @1f + lsr r3, r2, 2 + sth.ab r6, [r4, 2] +1: + bbit0.d r2, 0, @1f + xor r3, r3, 3 + stb.ab r6, [r4, 1] +1: + bi [r3] + st.ab r6,[r4, 4] + st.ab r6,[r4, 4] + st.ab r6,[r4, 4] + + j_s [blink] +ENDFUNC (memset) +#else +# error Unknown configuration +#endif diff --git a/newlib/libc/machine/arc64/setjmp.S b/newlib/libc/machine/arc64/setjmp.S new file mode 100644 index 000000000..a4c9cf1be --- /dev/null +++ 
b/newlib/libc/machine/arc64/setjmp.S @@ -0,0 +1,106 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +/* ABI interface file + these are the stack mappings for the registers + as stored in the ABI for ARC */ + +ABIr14 = 0 +ABIr15 = ABIr14 + REG_SZ +ABIr16 = ABIr15 + REG_SZ +ABIr17 = ABIr16 + REG_SZ +ABIr18 = ABIr17 + REG_SZ +ABIr19 = ABIr18 + REG_SZ +ABIr20 = ABIr19 + REG_SZ +ABIr21 = ABIr20 + REG_SZ +ABIr22 = ABIr21 + REG_SZ +ABIr23 = ABIr22 + REG_SZ +ABIr24 = ABIr23 + REG_SZ +ABIr25 = ABIr24 + REG_SZ +ABIr26 = ABIr25 + REG_SZ +ABIr27 = ABIr26 + REG_SZ +ABIr28 = ABIr27 + REG_SZ +ABIr29 = ABIr28 + REG_SZ +ABIr30 = ABIr29 + REG_SZ +ABIr31 = ABIr30 + REG_SZ + + +ENTRY (setjmp) + REG_ST r14, [r0, ABIr14] + REG_ST r15, [r0, ABIr15] + REG_ST r16, [r0, ABIr16] + REG_ST r17, [r0, ABIr17] + REG_ST r18, [r0, ABIr18] + REG_ST r19, [r0, ABIr19] + REG_ST r20, [r0, ABIr20] + REG_ST r21, [r0, ABIr21] + REG_ST r22, [r0, ABIr22] + REG_ST r23, [r0, ABIr23] + REG_ST r24, [r0, ABIr24] + REG_ST r25, [r0, ABIr25] + REG_ST r26, [r0, ABIr26] + REG_ST r27, [r0, ABIr27] + REG_ST r28, [r0, ABIr28] + REG_ST r29, [r0, ABIr29] + REG_ST r30, [r0, ABIr30] + REG_ST blink, [r0, ABIr31] + + j.d [blink] + mov r0,0 + .size setjmp,.-setjmp + +ENTRY (longjmp) + ; load registers + REG_LD r14, [r0, ABIr14] + REG_LD r15, [r0, ABIr15] + REG_LD r16, [r0, ABIr16] + REG_LD r17, [r0, ABIr17] + REG_LD r18, [r0, ABIr18] + REG_LD r19, [r0, ABIr19] + REG_LD r20, [r0, ABIr20] + REG_LD r21, [r0, ABIr21] + REG_LD r22, [r0, ABIr22] + REG_LD r23, [r0, ABIr23] + REG_LD r24, [r0, ABIr24] + REG_LD r25, [r0, ABIr25] + REG_LD r26, [r0, ABIr26] + REG_LD r27, [r0, ABIr27] + REG_LD r28, [r0, ABIr28] + REG_LD r29, [r0, ABIr29] + REG_LD r30, [r0, ABIr30] + REG_LD blink, [r0, ABIr31] + + mov.f r1, r1 ; to avoid return 0 from longjmp + mov.z r1, 1 + j.d [blink] + mov r0,r1 + .size longjmp,.-longjmp diff --git a/newlib/libc/machine/arc64/strcat.S b/newlib/libc/machine/arc64/strcat.S new file mode 100644 index 000000000..48e6ce1f2 --- /dev/null +++ b/newlib/libc/machine/arc64/strcat.S @@ -0,0 +1,592 @@ +/* + 
Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+*/
+
+#include <asm.h>
+
+
+; r0 char* dest
+; r1 const char* src
+
+; dest and src MUST NOT overlap
+
+; Brief:
+; Perform the same operation as strlen for finding the end of r0 string
+; If r0 and r1 have
+; If 4 byte aligned
+; Do 4 byte search until there are no more 4 byte chunks
+; Then, do 1 byte search
+; Otherwise, 1 byte search until alignment
+; Then, do 4 byte search as previously specified
+;
+;; More in depth description at the end
+;
+; R0 char* dest (destination string)
+; R1 const char* src (source string)
+; ret (R0):
+; - char* (destination string)
+;
+
+#if defined (__ARC64_ARCH32__)
+
+ENTRY (strcat)
+; Find end of r0 string
+; ========================== STRLEN CODE START ==========================
+
+; Preserve r0 for size calculation when returning
+	mov	r13, r0
+	xor	r6, r6, r6
+
+; Setup byte detector (more information below) [1]
+	mov	r8, NULL_32DT_1
+	asl	r9, r8, 7
+
+.L_4_4B_search:
+
+#if defined (__ARC64_LL64__)
+
+	ldd.ab	r2r3, [r13, +8]
+	ldd.ab	r4r5, [r13, +8]
+
+#else
+
+	ld.ab	r2, [r13, +4]
+	ld.ab	r3, [r13, +4]
+	ld.ab	r4, [r13, +4]
+	ld.ab	r5, [r13, +4]
+
+#endif
+
+; NULL byte position is detected and encoded in r6 [0] [9]
+	sub	r10, r2, r8
+	sub	r11, r3, r8
+	sub	r12, r4, r8
+	sub	r7, r5, r8
+
+	bic	r10, r10, r2
+	bic	r11, r11, r3
+	bic	r12, r12, r4
+	bic	r7, r7, r5
+
+	tst	r10, r9
+	bset.ne	r6, r6, 4
+
+	tst	r11, r9
+	bset.ne	r6, r6, 3
+
+	tst	r12, r9
+	bset.ne	r6, r6, 2
+
+	tst	r7, r9
+	bset.ne	r6, r6, 1
+
+	breq.d	r6, 0, @.L_4_4B_search
+
+	fls	r5, r6	; [2]
+
+; Point r13 to first NULL byte containing double word [3]
+	sub2	r13, r13, r5
+
+	; Select appropriate register to analyze [4]
+	mov	r2, r7
+
+	asr.f	r6, r6, 3
+	mov.c	r2, r12
+
+	asr.f	r6, r6, 1
+	mov.c	r2, r11
+
+	asr.f	r6, r6, 1
+	mov.c	r2, r10
+
+; Point r13 to first NULL byte in selected double word
+	and	r2, r2, r9	; [5]
+
+	ffs	r2, r2	; [6]
+
+	xbfu	r2, r2, 0b0111000011	; [7]
+
+	add	r13, r13, r2	; [8]
+
+
+; ========================== STRLEN CODE END >|<
========================== + + xor r6, r6, r6 + +.L_4_4B_search_src: + +#if defined (__ARC64_LL64__) + + ldd.ab r2r3, [r1, +8] + ldd.ab r4r5, [r1, +8] + +#else + + ld.ab r2, [r1, +4] + ld.ab r3, [r1, +4] + ld.ab r4, [r1, +4] + ld.ab r5, [r1, +4] + +#endif + +; NULL byte position is detected and encoded in r6 [0] [9] + sub r10, r2, r8 + sub r11, r3, r8 + sub r12, r4, r8 + sub r7, r5, r8 + + bic r10, r10, r2 + bic r11, r11, r3 + bic r12, r12, r4 + bic r7, r7, r5 + + tst r10, r9 + bset.ne r6, r6, 4 + + tst r11, r9 + bset.ne r6, r6, 3 + + tst r12, r9 + bset.ne r6, r6, 2 + + tst r7, r9 + bset.ne r6, r6, 1 + + brne r6, 0, @.L_found_in_32B + +#if defined (__ARC64_LL64__) + + std.ab r2r3, [r13, +8] + std.ab r4r5, [r13, +8] + +#else + + st.ab r2, [r13, +4] + st.ab r3, [r13, +4] + st.ab r4, [r13, +4] + st.ab r5, [r13, +4] + +#endif + + b @.L_4_4B_search_src + +.L_found_in_32B: + + fls r6, r6 ; [2] + +; Point r1 to first NULL byte containing double word [3] + sub2 r1, r1, r6 + +;; Store the already loaded data + + ; 4 -> 1 to 3 -> 0 + ;subl r6, r6, 1 + +; Invert so the biggest branch is at the end, and we dont need to increase +; block size + ; 3 -> 0 to 0 -> 3 + ;subl r6, 3, r6 + + ; Condense the two subs here + rsub r6, r6, 4 + + asl r6, r6, 2 + +; Store double words + bi [r6] + + b.d @.L_store_lastL32bits + mov r11, r2 + nop + nop + + st.ab r2, [r13, +4] + b.d @.L_store_lastL32bits + mov r11, r3 + nop + + st.ab r2, [r13, +4] + st.ab r3, [r13, +4] + b.d @.L_store_lastL32bits + mov r11, r4 + + st.ab r2, [r13, +4] + st.ab r3, [r13, +4] + st.ab r4, [r13, +4] + mov r11, r5 + +; r11 now contains the data to write +.L_store_lastL32bits: + sub r10, r11, r8 + bic r10, r10, r11 + and r10, r10, r9 ; [5] + + ffs r2, r10 ; [6] + add r2, r2, 1 + + xbfu r2, r2, 0b0111000011 ; [7] + + mov r3, -1; Bitmask setup + + ; If the NULL byte is in byte 3 (starting from the right) + ; we want to store 8-3 bytes + rsub r2, r2, 8 + asl r2, r2, 3 + + ; According to the target byte, setup masks + lsr 
r3, r3, r2 + not r4, r3 + + ; Obtain relevant data from destination + ld r10, [r13] + + ; Get which data from dest is not to be overwritten and OR it + ; with the relevant data to write + and r3, r3, r11 + and r4, r4, r10 + + or r3, r3, r4 + + j_s.d [blink] + st.ab r3, [r13, +4] + + + +ENDFUNC (strcat) + +#else + +ENTRY (strcat) +; Find end of r0 string +; ========================== STRLEN CODE START ========================== + +; Preserve r0 for size calculation when returning + movl r13, r0 + xorl r6, r6, r6 + +; Setup byte detector (more information below) [1] + vpack2wl r8, NULL_32DT_1, NULL_32DT_1 + asll r9, r8, 7 + +.L_4_8B_search: + +; Using 128-bit memory operations +#if defined (__ARC64_M128__) + + lddl.ab r2r3, [r13, +16] + lddl.ab r4r5, [r13, +16] + +; The 64-bit crunching implementation. +#elif defined (__ARC64_ARCH64__) + + ldl.ab r2, [r13, +8] + ldl.ab r3, [r13, +8] + ldl.ab r4, [r13, +8] + ldl.ab r5, [r13, +8] + +#else +# error Unknown configuration +#endif + +; NULL byte position is detected and encoded in r6 [0] [9] + subl r10, r2, r8 + subl r11, r3, r8 + subl r12, r4, r8 + subl r7, r5, r8 + + bicl r10, r10, r2 + bicl r11, r11, r3 + bicl r12, r12, r4 + bicl r7, r7, r5 + + tstl r10, r9 + bset.ne r6, r6, 4 + + tstl r11, r9 + bset.ne r6, r6, 3 + + tstl r12, r9 + bset.ne r6, r6, 2 + + tstl r7, r9 + bset.ne r6, r6, 1 + + breq.d r6, 0, @.L_4_8B_search + + fls r5, r6 ; [2] + +; Point r13 to first NULL byte containing double word [3] + sub3l r13, r13, r5 + + ; Select appropriate register to analyze [4] + MOVP r2, r7 + + asr.f r6, r6, 3 + MOVP.c r2, r12 + + asr.f r6, r6, 1 + MOVP.c r2, r11 + + asr.f r6, r6, 1 + MOVP.c r2, r10 + +; Point r13 to first NULL byte in selected double word + andl r2, r2, r9 ; [5] + + ffsl r2, r2 ; [6] + + xbful r2, r2, 0b0111000011 ; [7] + + addl r13, r13, r2 ; [8] + + +; ========================== STRLEN CODE END >|< ========================== + + xorl r6, r6, r6 + +.L_4_8B_search_src: +#if defined (__ARC64_M128__) + + lddl.ab 
r2r3, [r1, +16] + lddl.ab r4r5, [r1, +16] + +#elif defined (__ARC64_ARCH64__) + + ldl.ab r2, [r1, +8] + ldl.ab r3, [r1, +8] + ldl.ab r4, [r1, +8] + ldl.ab r5, [r1, +8] + +#else + # error Unknown configuration +#endif + +; NULL byte position is detected and encoded in r6 [0] [9] + subl r10, r2, r8 + subl r11, r3, r8 + subl r12, r4, r8 + subl r7, r5, r8 + + bicl r10, r10, r2 + bicl r11, r11, r3 + bicl r12, r12, r4 + bicl r7, r7, r5 + + tstl r10, r9 + bset.ne r6, r6, 4 + + tstl r11, r9 + bset.ne r6, r6, 3 + + tstl r12, r9 + bset.ne r6, r6, 2 + + tstl r7, r9 + bset.ne r6, r6, 1 + + brne r6, 0, @.L_found_in_32B + +#if defined (__ARC64_M128__) + + stdl.ab r2r3, [r13, +16] + stdl.ab r4r5, [r13, +16] + +#elif defined (__ARC64_ARCH64__) + + stl.ab r2, [r13, +8] + stl.ab r3, [r13, +8] + stl.ab r4, [r13, +8] + stl.ab r5, [r13, +8] + +#else +# error Unknown configuration +#endif + + b @.L_4_8B_search_src + +.L_found_in_32B: + + fls r6, r6 ; [2] + +; Point r1 to first NULL byte containing double word [3] + sub3l r1, r1, r6 + +;; Store the already loaded data + + ; 4 -> 1 to 3 -> 0 + ;subl r6, r6, 1 + +; Invert so the biggest branch is at the end, and we dont need to increase +; block size + ; 3 -> 0 to 0 -> 3 + ;subl r6, 3, r6 + + ; Condense the two subs here + rsubl r6, r6, 4 + + asll r6, r6, 2 + +; Store double words + bi [r6] + + b.d @.L_store_lastL64bits + MOVP r11, r2 + nop + nop + + stl.ab r2, [r13, +8] + b.d @.L_store_lastL64bits + MOVP r11, r3 + nop + + stl.ab r2, [r13, +8] + stl.ab r3, [r13, +8] + b.d @.L_store_lastL64bits + MOVP r11, r4 + + stl.ab r2, [r13, +8] + stl.ab r3, [r13, +8] + stl.ab r4, [r13, +8] + MOVP r11, r5 + +; r11 now contains the data to write +.L_store_lastL64bits: + subl r10, r11, r8 + bicl r10, r10, r11 + + andl r10, r10, r9 ; [5] + + ffsl r2, r10 ; [6] + addl r2, r2, 1 + + xbful r2, r2, 0b0111000011 ; [7] + + movl r3, -1; Bitmask setup + + ; If the NULL byte is in byte 3 (starting from the right) + ; we want to store 8-3 bytes + rsubl r2, r2, 8 + 
asl r2, r2, 3 + + ; According to the target byte, setup masks + lsrl r3, r3, r2 + notl r4, r3 + + ; Obtain relevant data from destination + ldl r10, [r13] + + ; Get which data from dest is not to be overwritten and OR it + ; with the relevant data to write + andl r3, r3, r11 + andl r4, r4, r10 + + orl r3, r3, r4 + + j_s.d [blink] + stl.ab r3, [r13, +8] + + +ENDFUNC (strcat) + +#endif + +;; This code uses a common technique for NULL byte detection inside a word. +;; Details on this technique can be found in: +;; (https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord) +; +; In sum, this technique allows for detecting a NULL byte inside any given +; amount of bits by performing the following operation +; DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) [0] +; +; The code above implements this by setting r8 to a 0x01010101... sequence and +; r9 to a 0x80808080... sequence of appropriate length +; As LIMM are 32 bit only, we need to perform MOVHL and ORL [1] operations to +; have the appropriate 64 bit values in place +; +;; Search is done 32 bytes at a time, either with 64 bit loads or 128 bit loads +;; If a NULL byte is detected, the position of the double word is encoded +;; in r6, which is then used to adjust r13 to the exact byte +; +; r6 is set via bset, which means we can simply use a fls to obtain the first +; match (or ffs depending on the values in bset) [2]. +; The reason for starting at 1 and not 0 is so r6 encodes how many double +; words to go back, and it wouldnt make sense to go back 0 (the NULL would be +; in the next loop iteration). +; +; The first step to take is point r13 to the appropriate double word. +; As the chosen encoded information is how many double words to go back, +; we can simply multiply r6 by 8 and reduce r13 by that amount [3] +; +; Then, we need to place the loaded double word containing the first NULL byte +; into a "common" register we can operate on later [4]. 
+; +; To do this without any jumps, we can shift r6 and perform a conditional mov +; based on the carry flag value. +; The order is very important because the NULL byte can appear in several +; double words, so we want to analyze from last to first. +; +; We can ignore the first asr (which would be asr.f 2, as we started r6 on 1) +; because if r7 isnt the NULL byte, r2 will always be overwritten so we can +; just decide to start at r7, and overwrite it if needed. +; +; Now comes the tricky part. In order to obtain the first NULL byte, we need to +; understand the NULL byte detection operation. It is explained in depth in the +; link above but in short, it works by first setting the highest bit of each +; byte to 1, if the corresponding byte is either 0 or less than 0x80 +; Then, separately, it makes the highest bit of each byte 1, if the byte is +; less than 0x80. The last step is to and these two values (this operation is +; simplified with the subl, bicl and tst instructions). +; +; This means that the evaluated equation result value [5] has zeros for all non +; zero bytes, except for the NULL bytes. Therefore, we can simply find the +; first non zero bit (counting from bit 0) which will be inside the position of +; the first NULL byte. +; +; One thing to note, is that ffs oddly returns 31 if no bit is found, setting +; the zero flag. As r9 is never all 0s at this stage (would mean there is no +; NULL byte and we wouldnt be here) we dont need to worry about that. [6] +; +; We can then convert the bit position into the last byte position by looking +; into bits 3 to 5, and shifting 3 bits to the right. This can be combined into +; a single xbful operation. The bottom 000011 represent shift by 3 and the top +; 0111 represents the mask (3 to 5 shifted by 3 is 0 to 2). 
We dont need to worry +; about the case where ffs does not find a bit, because we know for sure there is +; at least one NULL byte, and therefore one of the highest bits is set to 1 [7] +; +; Finally, we can add the NULL byte position inside the loaded double word to +; r13 and subtract r0 from r13 to obtain the string size [8] +; +; Some operations are re-ordered such that register dependency is reduced, +; allowing the CPU to run more instructions in parallel [9] +; +; +; Some data was already read, and needs to be stored following the same read +; order. To do this, we need to make the +; +; diff --git a/newlib/libc/machine/arc64/strcmp.S b/newlib/libc/machine/arc64/strcmp.S new file mode 100644 index 000000000..0cad55b07 --- /dev/null +++ b/newlib/libc/machine/arc64/strcmp.S @@ -0,0 +1,343 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <asm.h>
+
+#if defined (__ARC64_ARCH32__)
+
+; 64 bit version has the same working principles, with slightly different
+; instructions, so it is more commented
+
+ENTRY (strcmp)
+	xor	r12, r12, r12
+
+	mov	r8, NULL_32DT_1
+
+	asl	r9, r8, 7
+
+.L_3_4B_comparison:
+
+	ld.ab	r6, [r0, +4]
+
+	ld.ab	r7, [r1, +4]
+
+#if defined (__ARC64_LL64__)
+
+	ldd.ab	r2r3, [r0, +8]
+
+	ldd.ab	r4r5, [r1, +8]
+
+#else
+
+	ld.ab	r2, [r0, +4]
+	ld.ab	r3, [r0, +4]
+
+	ld.ab	r4, [r1, +4]
+	ld.ab	r5, [r1, +4]
+
+#endif
+
+	sub	r13, r6, r8
+	sub	r10, r2, r8
+	sub	r11, r3, r8
+
+	bic	r13, r13, r6
+	bic	r10, r10, r2
+	bic	r11, r11, r3
+
+	; Look for difference
+	sub.f	0, r6, r7
+	bset.ne	r12, r12, 3
+
+	sub.f	0, r2, r4
+	bset.ne	r12, r12, 2
+
+	sub.f	0, r3, r5
+	bset.ne	r12, r12, 1
+
+
+	; Look for NULL byte
+	and.f	r13, r13, r9
+	bset.ne	r12, r12, 3
+
+	and.f	r10, r10, r9
+	bset.ne	r12, r12, 2
+
+	and.f	r11, r11, r9
+	bset.ne	r12, r12, 1
+
+	breq	r12, 0, @.L_3_4B_comparison
+
+; Setup r0, r3 and r5 with the relevant loaded and intermediate values
+	mov	r0, r11
+	mov	r3, r3
+	mov	r5, r5
+
+	asr.f	r12, r12, 3
+
+	mov.c	r0, r10
+	mov.c	r3, r2
+	mov.c	r5, r4
+
+	asr.f	r12, r12, 1
+
+	mov.c	r0, r13
+	mov.c	r3, r6
+	mov.c	r5, r7
+
+
+	ffs.f	r10, r0
+	xor	r12, r3, r5
+
+	mov.z	r10, 32
+	ffs	r12, r12
+
+	xbfu	r10, r10, 0b0111000011
+	xbfu	r12, r12, 0b0111000011
+
+
+	sub.f	0, r10, r12
+
+	asl.ge	r12, r12, 3
+
+; Difference is first
+	lsr.ge	r3, r3, r12
+	lsr.ge	r5, r5, r12
+ + bmsk r3, r3, 7 + bmsk r5, r5, 7 + + j_s.d [blink] + sub r0, r3, r5 + + +ENDFUNC(strcmp) + +#else + +ENTRY (strcmp) + + xorl r12, r12, r12 + +; Setup byte detector (more information bellow) [1] + vpack2wl r8, NULL_32DT_1, NULL_32DT_1 +; Set r9 as a copy of r8 for vectorized sub + asll r9, r8, 7 + +.L_3_8B_comparison: + + ldl.ab r6, [r0, +8] + + ldl.ab r7, [r1, +8] + +; Using 128-bit memory operations +#if defined (__ARC64_M128__) + + lddl.ab r2r3, [r0, +16] + + lddl.ab r4r5, [r1, +16] + +; The 64-bit crunching implementation. +#elif defined (__ARC64_ARCH64__) + + ldl.ab r2, [r0, +8] + ldl.ab r3, [r0, +8] + + ldl.ab r4, [r1, +8] + ldl.ab r5, [r1, +8] + +#else + # error Unknown configuration +#endif + + subl r13, r6, r8 + subl r10, r2, r8 + subl r11, r3, r8 + + bicl r13, r13, r6 + bicl r10, r10, r2 + bicl r11, r11, r3 + +; Look for difference + subl.f 0, r6, r7 + bset.ne r12, r12, 3 + + subl.f 0, r2, r4 + bset.ne r12, r12, 2 + + subl.f 0, r3, r5 + bset.ne r12, r12, 1 + +; Look for NULL byte + andl.f r13, r13, r9 + bset.ne r12, r12, 3 + + andl.f r10, r10, r9 + bset.ne r12, r12, 2 + + andl.f r11, r11, r9 + bset.ne r12, r12, 1 + + breq r12, 0, @.L_3_8B_comparison + +; Setup r0, r3 and r5 with the relevant loaded and intermediate values [2] + ; [3] + movl r0, r11 + movl r3, r3 + movl r5, r5 + + asr.f r12, r12, 3 + + movl.c r0, r10 + movl.c r3, r2 + movl.c r5, r4 + + asr.f r12, r12, 1 + + movl.c r0, r13 + movl.c r3, r6 + movl.c r5, r7 + + ffsl.f r10, r0 ; [5] + xorl r12, r3, r5 + + movl.z r10, 64 ; [6] + ffsl r12, r12 ; [8] + + xbful r10, r10, 0b0111000011 ; [7] + xbful r12, r12, 0b0111000011 + +; r12 contains position of difference and r10 the position of a NULL byte +; r3 and r5 contain the differing 8 bytes + +; Is there a difference? + subl.f 0, r10, r12 +; Multiply the byte position by 8 to get bit shift + asll.ge r12, r12, 3 + + lsrl.ge r3, r3, r12 + lsrl.ge r5, r5, r12 + +; There is no difference. 
Up until the NULL byte which must be + + bmskl r3, r3, 7 + bmskl r5, r5, 7 + + j_s.d [blink] + subl r0, r3, r5 + + +ENDFUNC (strcmp) + +#endif + +;; One important thing to note, is that we look for the first byte difference on +;; both strings but we only look for the NULL byte in one string. +;; This is because if a NULL byte appears first, it will be the first different +;; byte. If it doesnt, the difference is what matters either way. If there is no +;; difference, the NULL bytes will coincide! +; +; +;; This code uses a common technique for NULL byte detection inside a word. +;; Details on this technique can be found in: +;; (https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord) +; +; In sum, this technique allows for detecting a NULL byte inside any given +; amount of bits by performing the following operation +; DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) [0] +; +; The code above implements this by setting r8 to a 0x01010101... sequence and +; r1 to a 0x80808080... sequence of appropriate length +; As LIMM are 32 bit only, we need to perform MOVHL and ORL [1] operations to +; have the appropriate 64 bit values in place +; +;; Comparison is done 24 bytes at a time, either with 3 64 bit loads or 1 128 bit +;; load and 1 64 bit. +;; If either a NULL byte or a difference between the strings is found, r12 is +;; used to know in which word the NULL/difference is found +; +; With the carry bit from r12, we can use mov.c to only move the appropriate +; registers into the ones we will operate on [2]. We can safely directly move +; the last set of registers without looking at r12, because if they aren't the +; appropriate ones, they will be rewritten afterwards. [3] +; +;; Knowing the registers that contain the relevant information, we only need to +;; look into where the difference and one of the zeros is. 
+;; This is because, if the zeros are in different places, the difference will +;; either be an earlier difference, or the first zero, so the actual zeros are +;; irrelevant. +;; Zero position is only relevant if there is no difference. And if there is no +;; difference, the zeros have the same position. +; +; So now comes the tricky part. In order to obtain the position of a "first +; NULL byte", we need to understand the NULL byte detection operation. +; It is explained in depth in the link above but in short, it works by first +; setting the highest bit of each byte to 1, if the corresponding byte is either +; 0 or more than 0x80 +; Then, it makes the highest bit of each byte 1, if the byte is less than 0x80. +; The last step is to AND these two values (this operation is simplified with +; the SUB, BIC and TST instructions). +; +; This means that the evaluated equation result value has zeros for all non +; zero bytes, except for the NULL bytes. Therefore, we can simply find the +; first non zero bit (counting from bit 0) which will be inside the position of +; the first NULL byte. [5] +; +; One thing to note, is that ffs oddly returns 31/63 if no bit is found, setting +; the zero flag. As there can be that no NULL byte is present on one or both +; strings at this point, we must set r10 and r11 to 32/64 when appropriate. [6] +; +; We can then convert the bit position into the last byte position by looking +; into bits 3 to 5, and shifting 3 bits to the right. This can be combined into +; a single xbful operation. The bottom 000011 represent shift by 3 and the top +; 0111 represents the mask (3 to 5 shifted by 3 is 0 to 2). [7] +; +; To obtain the position of the difference, all we need to do is xor the two +; registers. This way, every equal byte cancels out and all we are left with +; is gibberish in the differing bytes. We can use the same ffs and xbuf +; operations to get the differing byte position. 
+; +; Note that the order of the operations isnt the same as in this explanation, +; to reduce register dependency between instructions +; +; +; Unlike with r10, we dont need to check the zero flag for r12s' ffs because if +; it is 0, it means there is no difference in the loaded data so any subtraction +; operation will return 0 [8] +; +; There is one optimization that is being overlooked, which is returning 0 if +; there is no difference, but there are NULL bytes anywhere, right after the +; main loop. The reason for this is because the only way this can happen is if +; the strings have the same length AND either are a multiple of 16/8 bytes, or +; the bytes that follow the NULL bytes also match. As this is extremely +; unlikely, it isnt worth it to perform this optimization since it would require +; an extra branch in all runs +; + diff --git a/newlib/libc/machine/arc64/strlen.S b/newlib/libc/machine/arc64/strlen.S new file mode 100644 index 000000000..2f1a96aba --- /dev/null +++ b/newlib/libc/machine/arc64/strlen.S @@ -0,0 +1,301 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +; Code Brief (more info at the bottom): +; Searches the provided string, 32 bytes at a time, using 128 bit loads +; Finds the NULL bytes inside the loaded data +; Analyzes the first NULL byte containing double word and calculates +; size appropriately +; +; R0 const char* ptr (string to measure) +; ret (R0): +; - unsigned (string size) +; + +#if defined (__ARC64_ARCH32__) + +ENTRY (strlen) + +; Preserve r0 for size calculation when returning + mov r13, r0 + xor r12, r12, r12 + +; Setup byte detector (more information bellow) [1] + mov r8, NULL_32DT_1 +; Set r9 as a copy of r8 for vectorized sub + mov r9, r8 + + asl r1, r8, 7 + +.L_4_4B_search: + +#if defined (__ARC64_LL64__) + + ldd.ab r2r3, [r13, +8] + ldd.ab r4r5, [r13, +8] + +#else + + ld.ab r2, [r13, +4] + ld.ab r3, [r13, +4] + ld.ab r4, [r13, +4] + ld.ab r5, [r13, +4] + +#endif + +; NULL byte position is detected and encoded in r12 [0] [9] + + vsub2 r10, r2, r8 + vsub2 r6, r4, r8 + + bic r10, r10, r2 + bic r11, r11, r3 + bic r6, r6, r4 + bic r7, r7, r5 + + tst r10, r1 + bset.ne r12, r12, 4 + + tst r11, r1 + bset.ne r12, r12, 3 + + tst r6, r1 + bset.ne r12, r12, 2 + + tst r7, r1 + bset.ne r12, r12, 1 + + breq.d r12, 0, 
@.L_4_4B_search
+
+ fls r5, r12 ; [2]
+
+; Point r13 to first NULL byte containing double word [3]
+ sub2 r13, r13, r5
+
+; Select appropriate register to analyze [4]
+ mov r2, r7
+
+ asr.f r12, r12, 3
+ mov.c r2, r6
+
+ asr.f r12, r12, 1
+ mov.c r2, r11
+
+ asr.f r12, r12, 1
+ mov.c r2, r10
+
+; Point r13 to first NULL byte in selected double word
+.L_fix_r13:
+ and r1, r2, r1 ; [5]
+
+ ffs r1, r1 ; [6]
+
+ xbfu r1, r1, 0b0111000011 ; [7]
+
+ add r13, r13, r1 ; [8]
+
+ j_s.d [blink]
+ sub r0, r13, r0
+
+
+ENDFUNC (strlen)
+
+#else
+
+ENTRY (strlen)
+
+; Preserve r0 for size calculation when returning
+ movl r13, r0
+ xor r12, r12, r12
+
+; Setup byte detector (more information below) [1]
+ vpack2wl r8, NULL_32DT_1, NULL_32DT_1
+
+ asll r1, r8, 7
+
+.L_4_8B_search:
+
+; Using 128-bit memory operations
+#if defined (__ARC64_M128__)
+
+ lddl.ab r2r3, [r13, +16]
+ lddl.ab r4r5, [r13, +16]
+
+; The 64-bit crunching implementation.
+#elif defined (__ARC64_ARCH64__)
+
+ ldl.ab r2, [r13, +8]
+ ldl.ab r3, [r13, +8]
+ ldl.ab r4, [r13, +8]
+ ldl.ab r5, [r13, +8]
+
+#else
+ # error Unknown configuration
+#endif
+
+; NULL byte position is detected and encoded in r12 [0] [9]
+ subl r10, r2, r8
+ subl r11, r3, r8
+ subl r6, r4, r8
+ subl r7, r5, r8
+
+ bicl r10, r10, r2
+ bicl r11, r11, r3
+ bicl r6, r6, r4
+ bicl r7, r7, r5
+
+ tstl r10, r1
+ bset.ne r12, r12, 4
+
+ tstl r11, r1
+ bset.ne r12, r12, 3
+
+ tstl r6, r1
+ bset.ne r12, r12, 2
+
+ tstl r7, r1
+ bset.ne r12, r12, 1
+
+ breq.d r12, 0, @.L_4_8B_search
+
+ flsl r5, r12 ; [2]
+
+; Point r13 to first NULL byte containing double word [3]
+ sub3l r13, r13, r5
+
+; Select appropriate register to analyze [4]
+ movl r2, r7
+
+ asr.f r12, r12, 3
+ movl.c r2, r6
+
+ asr.f r12, r12, 1
+ movl.c r2, r11
+
+ asr.f r12, r12, 1
+ movl.c r2, r10
+
+; Point r13 to first NULL byte in selected double word
+.L_fix_r13:
+ andl r1, r2, r1 ; [5]
+
+ ffsl r1, r1 ; [6]
+
+ xbful r1, r1, 0b0111000011 ; [7]
+
+ addl r13, r13, r1 ; [8]
+
+ j_s.d 
[blink] + subl r0, r13, r0 + + +ENDFUNC (strlen) + +#endif + +;; This code uses a common technique for NULL byte detection inside a word. +;; Details on this technique can be found in: +;; (https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord) +; +; In sum, this technique allows for detecting a NULL byte inside any given +; amount of bits by performing the following operation +; DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) [0] +; +; The code above implements this by setting r8 to a +; 0x01010101... sequence and r1 to a 0x80808080... sequence of +; appropriate length As LIMM are 32 bit only, we need to perform MOVHL +; and ORL [1] operations to have the appropriate 64 bit values in +; place +; +;; Search is done 32 bytes at a time, either with 64 bit loads or 128 +;; bit loads If a NULL byte is detected, the position of the double +;; word is encoded in r12, which is then used to adjust r13 +; +; r12 is set via bset, which means we can simply use a fls to obtain +; the first match (or ffs depending on the values in bset) [2]. The +; reason for starting at 1 and not 0 is so r12 encodes how many double +; words to go back, and it wouldnt make sense to go back 0 (the NULL +; would be in the next loop iteration). +; +; The first step to take is point r13 to the appropriate double word. +; As the chosen encoded information is how many double words to go +; back, we can simply multiply r12 by 8 and reduce r13 by that amount +; [3] +; +; Then, we need to place the loaded double word containing the first +; NULL byte into a "common" register we can operate on later [4]. +; +; To do this without any jumps, we can shift r12 and perform a +; conditional mov based on the carry flag value. The order is very +; important because the NULL byte can appear in several double words, +; so we want to analyze from last to first. 
+;
+; We can ignore the first asr (which would be asr.f 2, as we started
+; r12 on 1) because if r7 isn't the NULL byte, r2 will always be
+; overwritten so we can just decide to start at r7, and overwrite it
+; if needed.
+;
+; Now comes the tricky part. In order to obtain the first NULL byte,
+; we need to understand the NULL byte detection operation. It is
+; explained in depth in the link above but in short, it works by first
+; setting the highest bit of each byte to 1, if the corresponding byte
+; is either 0 or more than 0x80. Then, separately, it makes the highest
+; bit of each byte 1, if the byte is less than 0x80. The last step is
+; to AND these two values (this operation is simplified with the SUB,
+; BIC and TST instructions).
+;
+; This means that the evaluated equation result value [5] has zeros
+; for all non zero bytes, except for the NULL bytes. Therefore, we can
+; simply find the first non zero bit (counting from bit 0) which will
+; be inside the position of the first NULL byte.
+;
+; One thing to note, is that ffs oddly returns 31 if no bit is found,
+; setting the zero flag. As r1 is never all 0s at this stage (would
+; mean there is no NULL byte and we wouldn't be here) we don't need to
+; worry about that. [6]
+;
+; We can then convert the bit position into the last byte position by
+; looking into bits 3 to 5, and shifting 3 bits to the right. This can
+; be combined into a single xbful operation. The bottom 000011
+; represent shift by 3 and the top 0111 represents the mask (3 to 5
+; shifted by 3 is 0 to 2). 
We dont need to worry about the case where +; ffs does not find a bit, because we know for sure there is at least +; one NULL byte, and therefore one of the highest bits is set to 1 [7] +; +; Finally, we can add the NULL byte position inside the loaded double +; word to r13 and subtract r0 from r13 to obtain the string size [8] +; +; +; Some operations are re-ordered such that register dependency is +; reduced, allowing the CPU to run more instructions in parallel [9] +; +; diff --git a/newlib/libc/machine/arc64/sys/asm.h b/newlib/libc/machine/arc64/sys/asm.h new file mode 100644 index 000000000..d1fbb75e8 --- /dev/null +++ b/newlib/libc/machine/arc64/sys/asm.h @@ -0,0 +1,70 @@ +/* + Copyright (c) 2024, Synopsys, Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1) Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2) Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3) Neither the name of the Synopsys, Inc., nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _SYS_ASM_H +#define _SYS_ASM_H + +/* + * Macros to handle different pointer/register sizes for 32/64-bit code + */ +#if defined (__ARC64_ARCH32__) +# define ST64 std +# define LD64 ldd +# define MOVP mov +# define LSRP lsr +# define ADDP add +# define SUBP sub +# define REG_SZ 4 +# define REG_ST st +# define REG_LD ld +#elif defined (__ARC64_ARCH64__) +# define ST64 stl +# define LD64 ldl +# define MOVP movl +# define LSRP lsrl +# define ADDP addl +# define SUBP subl +# define REG_SZ 8 +# define REG_ST stl +# define REG_LD ldl +#else +# error Please use either 32-bit or 64-bit version of arc64 compiler +#endif + +# define NULL_32DT_1 0x01010101 +# define NULL_32DT_2 0x80808080 + +#define _ENTRY(name) .text ` .balign 4 ` .globl name ` name: +#define FUNC(name) .type name,@function +#define ENDFUNC0(name) .Lfe_##name: .size name,.Lfe_##name-name +#define ENDFUNC(name) ENDFUNC0 (name) +#define ENTRY(name) _ENTRY (name) ` FUNC (name) + +#endif /* _SYS_ASM_H */