/*
 * arch/xtensa/lib/strncpy_user.S
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 *
 * Copyright (C) 2002 Tensilica Inc.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asmmacro.h>
#include <asm/core.h>

/*
 * char *__strncpy_user(char *dst, const char *src, size_t len)
 */
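
/*
 * Caller-side sketch (illustrative only, not the kernel's actual
 * wrapper): how the value returned in a2 is interpreted, following
 * the "Returns:" rules above.  The -E2BIG policy is a hypothetical
 * choice made by this example's caller, not something this routine
 * mandates.
 *
 *	long n = __strncpy_user(dst, src, len);
 *	if (n < 0)
 *		return -EFAULT;		// faulted before the terminator
 *	if (n == len)
 *		return -E2BIG;		// buffer filled, not terminated
 *	// else n == strlen(dst) and dst is NUL-terminated
 */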

#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
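
/*
 * MASKn isolates the byte that came from source offset n within a
 * 32-bit word loaded by l32i: on big-endian (__XTENSA_EB__) the
 * lowest-addressed byte lands in the most significant byte of the
 * register, on little-endian in the least significant.  A bnone test
 * against the corresponding mask register therefore branches when
 * byte n of the loaded word is zero, i.e. when the terminator has
 * been reached.
 */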

# Register use
#   a0/ return address
#   a1/ stack pointer
#   a2/ return value
#   a3/ src
#   a4/ len
#   a5/ mask0
#   a6/ mask1
#   a7/ mask2
#   a8/ mask3
#   a9/ tmp
#   a10/ tmp
#   a11/ dst
#   a12/ tmp
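#
# Note: a2 doubles as the dst argument on entry and as the return
# value on exit; dst is therefore copied to a11 right away, and the
# result is computed at the end as a11 - a2 (bytes stored before the
# terminator, or len if the buffer was filled).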

	.text
ENTRY(__strncpy_user)

	abi_entry_default
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2		# leave dst in return value register
	beqz	a4, .Lret	# if len is zero
	movi	a5, MASK0	# mask for byte 0
	movi	a6, MASK1	# mask for byte 1
	movi	a7, MASK2	# mask for byte 2
	movi	a8, MASK3	# mask for byte 3
	bbsi.l	a3, 0, .Lsrc1mod2	# if only  8-bit aligned
	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
.Lsrcaligned:	# return here when src is word-aligned
	srli	a12, a4, 2	# number of loop iterations with 4B per loop
	movi	a9, 3
	bnone	a11, a9, .Laligned
	j	.Ldstunaligned

.Lsrc1mod2:	# src address is odd
	EX(11f)	l8ui	a9, a3, 0	# get byte 0
	addi	a3, a3, 1		# advance src pointer
	EX(10f)	s8i	a9, a11, 0	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned

.Lsrc2mod4:	# src address is 2 mod 4
	EX(11f)	l8ui	a9, a3, 0	# get byte 0
	/* 1-cycle interlock */
	EX(10f)	s8i	a9, a11, 0	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	EX(11f)	l8ui	a9, a3, 1	# get byte 1
	addi	a3, a3, 2		# advance src pointer
	EX(10f)	s8i	a9, a11, 0	# store byte 1
	beqz	a9, .Lret		# if byte 1 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	bnez	a4, .Lsrcaligned	# if len is nonzero
.Lret:
	sub	a2, a11, a2	# compute strlen
	abi_ret_default

/*
 * dst is word-aligned, src is word-aligned
 */
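/*
 * Main loop note: with both pointers word-aligned, one 32-bit word is
 * copied per iteration.  Each loaded word is tested against the four
 * byte masks with bnone; the first zero byte ends the loop via the
 * matching .Lz0-.Lz3 tail, which makes sure the bytes up to and
 * including the terminator have been stored before returning the
 * string length.
 */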
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a12, .Loop1done
#else
	beqz	a12, .Loop1done
	slli	a12, a12, 2
	add	a12, a12, a11	# a12 = end of last 4B chunk
#endif
.Loop1:
	EX(11f)	l32i	a9, a3, 0	# get word from src
	addi	a3, a3, 4		# advance src pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	EX(10f)	s32i	a9, a11, 0	# store word to dst
	bnone	a9, a8, .Lz3		# if byte 3 is zero
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Loop1
#endif

.Loop1done:
	bbci.l	a4, 1, .L100
	# copy 2 bytes
	EX(11f)	l16ui	a9, a3, 0
	addi	a3, a3, 2		# advance src pointer
#ifdef __XTENSA_EB__
	bnone	a9, a7, .Lz0		# if byte 2 is zero
	bnone	a9, a8, .Lz1		# if byte 3 is zero
#else
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
#endif
	EX(10f)	s16i	a9, a11, 0
	addi	a11, a11, 2		# advance dst pointer
.L100:
	bbci.l	a4, 0, .Lret
	EX(11f)	l8ui	a9, a3, 0
	/* slot */
	EX(10f)	s8i	a9, a11, 0
	beqz	a9, .Lret		# if byte is zero
	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
					# the effect of adding 3 in .Lz3 code
	/* fall thru to .Lz3 and "retw" */

.Lz3:	# byte 3 is zero
	addi	a11, a11, 3		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	abi_ret_default
.Lz0:	# byte 0 is zero
#ifdef __XTENSA_EB__
	movi	a9, 0
#endif /* __XTENSA_EB__ */
	EX(10f)	s8i	a9, a11, 0
	sub	a2, a11, a2		# compute strlen
	abi_ret_default
.Lz1:	# byte 1 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(10f)	s16i	a9, a11, 0
	addi	a11, a11, 1		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	abi_ret_default
.Lz2:	# byte 2 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(10f)	s16i	a9, a11, 0
	movi	a9, 0
	EX(10f)	s8i	a9, a11, 2
	addi	a11, a11, 2		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	abi_ret_default

	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Ldstunaligned:
/*
 * for now just use byte copy loop
 */
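/*
 * Design note: the byte-at-a-time loop avoids any shifting or merging
 * of partial words when dst is not word-aligned, at the cost of speed.
 * A faster variant would have to assemble aligned words from two
 * adjacent source words before each store, as word-copy routines
 * typically do.
 */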
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lunalignedend
#else
	beqz	a4, .Lunalignedend
	add	a12, a11, a4		# a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(11f)	l8ui	a9, a3, 0
	addi	a3, a3, 1
	EX(10f)	s8i	a9, a11, 0
	beqz	a9, .Lunalignedend
	addi	a11, a11, 1
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Lnextbyte
#endif

.Lunalignedend:
	sub	a2, a11, a2		# compute strlen
	abi_ret_default

ENDPROC(__strncpy_user)

	.section .fixup, "ax"
	.align	4

/* For now, just return -EFAULT.  Future implementations might
 * like to clear remaining kernel space, like the fixup
 * implementation in memset().  Thus, we differentiate between
 * load/store fixups. */
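
/* Descriptive note: each EX(10f)/EX(11f) above records the wrapped
 * user-memory access in the kernel exception table, so a fault in a
 * store resumes at label 10 and a fault in a load at label 11; both
 * currently just return -EFAULT. */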

10:
11:
	movi	a2, -EFAULT
	abi_ret_default