; SPDX-License-Identifier: GPL-2.0-only
;
; Port to the Texas Instruments TMS320C6x architecture
;
; Copyright (C) 2004-2011 Texas Instruments Incorporated
; Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
; Updated for 2.6.34: Mark Salter <msalter@redhat.com>
;

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>

; Register naming
#define DP B14
#define SP B15

#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#endif

        .altmacro

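;;
;; MASK_INT / UNMASK_INT clear and set the GIE bit (bit 0) of CSR,
;; disabling or re-enabling interrupts globally. The scratch register
;; passed as "reg" is clobbered.
;;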
        .macro MASK_INT reg
        MVC .S2 CSR,reg
        CLR .S2 reg,0,0,reg
        MVC .S2 reg,CSR
        .endm

        .macro UNMASK_INT reg
        MVC .S2 CSR,reg
        SET .S2 reg,0,0,reg
        MVC .S2 reg,CSR
        .endm

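;;
;; GET_THREAD_INFO rounds the current stack pointer down to the
;; THREAD_SIZE boundary, which is where the thread_info structure of
;; the current task lives.
;;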
        .macro GET_THREAD_INFO reg
        SHR .S1X SP,THREAD_SHIFT,reg
        SHL .S1 reg,THREAD_SHIFT,reg
        .endm

;;
;; This defines the normal kernel pt_regs layout.
;;
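;; SAVE_ALL builds the pt_regs frame on the kernel stack. __rp and
;; __tsr name the return-pointer and saved-TSR control registers of
;; the entry path: IRP/ITSR for interrupts, NRP/NTSR for NMI,
;; exceptions and syscalls. When entered from user mode, the macro
;; also switches from the user stack to the kernel stack recorded in
;; current_ksp.
;;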
        .macro SAVE_ALL __rp __tsr
        STW .D2T2 B0,*SP--[2] ; save original B0
        MVKL .S2 current_ksp,B0
        MVKH .S2 current_ksp,B0
        LDW .D2T2 *B0,B1 ; KSP

        NOP 3
        STW .D2T2 B1,*+SP[1] ; save original B1
        XOR .D2 SP,B1,B0 ; (SP ^ KSP)
        LDW .D2T2 *+SP[1],B1 ; restore B0/B1
        LDW .D2T2 *++SP[2],B0
        SHR .S2 B0,THREAD_SHIFT,B0 ; 0 if already using kstack
[B0]    STDW .D2T2 SP:DP,*--B1[1] ; user: save user sp/dp kstack
[B0]    MV .S2 B1,SP ; and switch to kstack
||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack

        SUBAW .D2 SP,2,SP

        ADD .D1X SP,-8,A15
||      STDW .D2T1 A15:A14,*SP--[16] ; save A15:A14

        STDW .D2T2 B13:B12,*SP--[1]
||      STDW .D1T1 A13:A12,*A15--[1]
||      MVC .S2 __rp,B13

        STDW .D2T2 B11:B10,*SP--[1]
||      STDW .D1T1 A11:A10,*A15--[1]
||      MVC .S2 CSR,B12

        STDW .D2T2 B9:B8,*SP--[1]
||      STDW .D1T1 A9:A8,*A15--[1]
||      MVC .S2 RILC,B11
        STDW .D2T2 B7:B6,*SP--[1]
||      STDW .D1T1 A7:A6,*A15--[1]
||      MVC .S2 ILC,B10

        STDW .D2T2 B5:B4,*SP--[1]
||      STDW .D1T1 A5:A4,*A15--[1]

        STDW .D2T2 B3:B2,*SP--[1]
||      STDW .D1T1 A3:A2,*A15--[1]
||      MVC .S2 __tsr,B5

        STDW .D2T2 B1:B0,*SP--[1]
||      STDW .D1T1 A1:A0,*A15--[1]
||      MV .S1X B5,A5

        STDW .D2T2 B31:B30,*SP--[1]
||      STDW .D1T1 A31:A30,*A15--[1]
        STDW .D2T2 B29:B28,*SP--[1]
||      STDW .D1T1 A29:A28,*A15--[1]
        STDW .D2T2 B27:B26,*SP--[1]
||      STDW .D1T1 A27:A26,*A15--[1]
        STDW .D2T2 B25:B24,*SP--[1]
||      STDW .D1T1 A25:A24,*A15--[1]
        STDW .D2T2 B23:B22,*SP--[1]
||      STDW .D1T1 A23:A22,*A15--[1]
        STDW .D2T2 B21:B20,*SP--[1]
||      STDW .D1T1 A21:A20,*A15--[1]
        STDW .D2T2 B19:B18,*SP--[1]
||      STDW .D1T1 A19:A18,*A15--[1]
        STDW .D2T2 B17:B16,*SP--[1]
||      STDW .D1T1 A17:A16,*A15--[1]

        STDW .D2T2 B13:B12,*SP--[1] ; save PC and CSR

        STDW .D2T2 B11:B10,*SP--[1] ; save RILC and ILC
        STDW .D2T1 A5:A4,*SP--[1] ; save TSR and orig A4

        ;; We left an unused word on the stack just above pt_regs.
        ;; It is used to save whether or not this frame is due to
        ;; a syscall. It is cleared here, but the syscall handler
        ;; sets it to a non-zero value.
        MVK .L2 0,B1
        STW .D2T2 B1,*+SP(REGS__END+8) ; clear syscall flag
        .endm

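;;
;; RESTORE_ALL unwinds the pt_regs frame built by SAVE_ALL: it reloads
;; all registers, writes the saved PC and TSR back into __rp and __tsr,
;; records the kernel stack pointer in current_ksp and finally returns
;; to the interrupted context with a branch through __rp.
;;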
        .macro RESTORE_ALL __rp __tsr
        LDDW .D2T2 *++SP[1],B9:B8 ; get TSR (B9)
        LDDW .D2T2 *++SP[1],B11:B10 ; get RILC (B11) and ILC (B10)
        LDDW .D2T2 *++SP[1],B13:B12 ; get PC (B13) and CSR (B12)

        ADDAW .D1X SP,30,A15

        LDDW .D1T1 *++A15[1],A17:A16
||      LDDW .D2T2 *++SP[1],B17:B16
        LDDW .D1T1 *++A15[1],A19:A18
||      LDDW .D2T2 *++SP[1],B19:B18
        LDDW .D1T1 *++A15[1],A21:A20
||      LDDW .D2T2 *++SP[1],B21:B20
        LDDW .D1T1 *++A15[1],A23:A22
||      LDDW .D2T2 *++SP[1],B23:B22
        LDDW .D1T1 *++A15[1],A25:A24
||      LDDW .D2T2 *++SP[1],B25:B24
        LDDW .D1T1 *++A15[1],A27:A26
||      LDDW .D2T2 *++SP[1],B27:B26
        LDDW .D1T1 *++A15[1],A29:A28
||      LDDW .D2T2 *++SP[1],B29:B28
        LDDW .D1T1 *++A15[1],A31:A30
||      LDDW .D2T2 *++SP[1],B31:B30

        LDDW .D1T1 *++A15[1],A1:A0
||      LDDW .D2T2 *++SP[1],B1:B0

        LDDW .D1T1 *++A15[1],A3:A2
||      LDDW .D2T2 *++SP[1],B3:B2
||      MVC .S2 B9,__tsr
        LDDW .D1T1 *++A15[1],A5:A4
||      LDDW .D2T2 *++SP[1],B5:B4
||      MVC .S2 B11,RILC
        LDDW .D1T1 *++A15[1],A7:A6
||      LDDW .D2T2 *++SP[1],B7:B6
||      MVC .S2 B10,ILC

        LDDW .D1T1 *++A15[1],A9:A8
||      LDDW .D2T2 *++SP[1],B9:B8
||      MVC .S2 B13,__rp

        LDDW .D1T1 *++A15[1],A11:A10
||      LDDW .D2T2 *++SP[1],B11:B10
||      MVC .S2 B12,CSR

        LDDW .D1T1 *++A15[1],A13:A12
||      LDDW .D2T2 *++SP[1],B13:B12

        MV .D2X A15,SP
||      MVKL .S1 current_ksp,A15
        MVKH .S1 current_ksp,A15
||      ADDAW .D1X SP,6,A14
        STW .D1T1 A14,*A15 ; save kernel stack pointer

        LDDW .D2T1 *++SP[1],A15:A14

        B .S2 __rp ; return to the interrupted context
        LDDW .D2T2 *+SP[1],SP:DP
        NOP 4
        .endm

        .section .text

;;
;; Jump to schedule() then return to ret_from_exception
;;
_reschedule:
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 schedule,A0
        MVKH .S1 schedule,A0
        B .S2X A0
#else
        B .S1 schedule
#endif
        ADDKPC .S2 ret_from_exception,B3,4

;;
;; Called before the syscall handler when the process is being traced
;;
tracesys_on:
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 syscall_trace_entry,A0
        MVKH .S1 syscall_trace_entry,A0
        B .S2X A0
#else
        B .S1 syscall_trace_entry
#endif
        ADDKPC .S2 ret_from_syscall_trace,B3,3
        ADD .S1X 8,SP,A4

ret_from_syscall_trace:
        ;; tracing returns (possibly new) syscall number
        MV .D2X A4,B0
||      MVK .S2 __NR_syscalls,B1
        CMPLTU .L2 B0,B1,B1

[!B1]   BNOP .S2 ret_from_syscall_function,5
||      MVK .S1 -ENOSYS,A4

        ;; reload syscall args from (possibly modified) stack frame
        ;; and get syscall handler addr from sys_call_table:
        LDW .D2T2 *+SP(REGS_B4+8),B4
||      MVKL .S2 sys_call_table,B1
        LDW .D2T1 *+SP(REGS_A6+8),A6
||      MVKH .S2 sys_call_table,B1
        LDW .D2T2 *+B1[B0],B0
||      MVKL .S2 ret_from_syscall_function,B3
        LDW .D2T2 *+SP(REGS_B6+8),B6
||      MVKH .S2 ret_from_syscall_function,B3
        LDW .D2T1 *+SP(REGS_A8+8),A8
        LDW .D2T2 *+SP(REGS_B8+8),B8
        NOP
        ; B0 = sys_call_table[__NR_*]
        BNOP .S2 B0,5 ; branch to syscall handler
||      LDW .D2T1 *+SP(REGS_ORIG_A4+8),A4

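;;
;; Syscall exit with pending work: if the task is being traced, report
;; syscall exit to the tracer (with interrupts re-enabled) and come
;; back through resume_userspace; otherwise branch to the generic
;; pending-work handling below.
;;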
syscall_exit_work:
        AND .D1 _TIF_SYSCALL_TRACE,A2,A0
[!A0]   BNOP .S1 work_pending,5
[A0]    B .S2 syscall_trace_exit
        ADDKPC .S2 resume_userspace,B3,1
        MVC .S2 CSR,B1
        SET .S2 B1,0,0,B1
        MVC .S2 B1,CSR ; enable ints

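;;
;; Generic pending-work handling on the way back to user space:
;; reschedule while TIF_NEED_RESCHED is set, then deliver signals and
;; other notifications via do_notify_resume().
;;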
work_pending:
        AND .D1 _TIF_NEED_RESCHED,A2,A0
[!A0]   BNOP .S1 work_notifysig,5

work_resched:
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 schedule,A1
        MVKH .S1 schedule,A1
        B .S2X A1
#else
        B .S2 schedule
#endif
        ADDKPC .S2 work_rescheduled,B3,4
work_rescheduled:
        ;; make sure we don't miss an interrupt setting need_resched or
        ;; sigpending between sampling and the rti
        MASK_INT B2
        GET_THREAD_INFO A12
        LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
        MVK .S1 _TIF_WORK_MASK,A1
        MVK .S1 _TIF_NEED_RESCHED,A3
        NOP 2
        AND .D1 A1,A2,A0
||      AND .S1 A3,A2,A1
[!A0]   BNOP .S1 restore_all,5
[A1]    BNOP .S1 work_resched,5

work_notifysig:
        ;; enable interrupts for do_notify_resume()
        UNMASK_INT B2
        B .S2 do_notify_resume
        LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
        ADDKPC .S2 resume_userspace,B3,1
        ADD .S1X 8,SP,A4 ; pt_regs pointer is first arg
        MV .D2X A2,B4 ; thread_info flags is second arg

;;
;; On C64x+, the return path from an exception differs slightly from
;; the return path from an interrupt.
;;
ENTRY(ret_from_exception)
#ifdef CONFIG_PREEMPT
        MASK_INT B2
#endif

ENTRY(ret_from_interrupt)
        ;;
        ;; Check if we are coming from user mode.
        ;;
        LDW .D2T2 *+SP(REGS_TSR+8),B0
        MVK .S2 0x40,B1
        NOP 3
        AND .D2 B0,B1,B0
[!B0]   BNOP .S2 resume_kernel,5

resume_userspace:
        ;; make sure we don't miss an interrupt setting need_resched or
        ;; sigpending between sampling and the rti
        MASK_INT B2
        GET_THREAD_INFO A12
        LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
        MVK .S1 _TIF_WORK_MASK,A1
        MVK .S1 _TIF_NEED_RESCHED,A3
        NOP 2
        AND .D1 A1,A2,A0
[A0]    BNOP .S1 work_pending,5
        BNOP .S1 restore_all,5

;;
;; System call handling
;; B0 = syscall number (in sys_call_table)
;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
;; A4 is the return value register
;;
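;;
;; Entered from _nmi_handler when the trap was raised by the software
;; exception (SWE) used for system calls. system_call_saved_noack is
;; the same path minus the ECR acknowledgment; ret_from_trap also
;; branches there with reloaded arguments.
;;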
system_call_saved:
        MVK .L2 1,B2
        STW .D2T2 B2,*+SP(REGS__END+8) ; set syscall flag
        MVC .S2 B2,ECR ; ack the software exception

        UNMASK_INT B2 ; re-enable global IT

system_call_saved_noack:
        ;; Check system call number
        MVK .S2 __NR_syscalls,B1
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 sys_ni_syscall,A0
#endif
        CMPLTU .L2 B0,B1,B1
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKH .S1 sys_ni_syscall,A0
#endif

        ;; Check for ptrace
        GET_THREAD_INFO A12

#ifdef CONFIG_C6X_BIG_KERNEL
[!B1]   B .S2X A0
#else
[!B1]   B .S2 sys_ni_syscall
#endif
[!B1]   ADDKPC .S2 ret_from_syscall_function,B3,4

        ;; Get syscall handler addr from sys_call_table
        ;; call tracesys_on or call syscall handler
        LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
||      MVKL .S2 sys_call_table,B1
        MVKH .S2 sys_call_table,B1
        LDW .D2T2 *+B1[B0],B0
        NOP 2
        ; A2 = thread_info flags
        AND .D1 _TIF_SYSCALL_TRACE,A2,A2
[A2]    BNOP .S1 tracesys_on,5
        ;; B0 = _sys_call_table[__NR_*]
        B .S2 B0
        ADDKPC .S2 ret_from_syscall_function,B3,4

ret_from_syscall_function:
        STW .D2T1 A4,*+SP(REGS_A4+8) ; save return value in A4
                                     ; original A4 is in orig_A4
syscall_exit:
        ;; make sure we don't miss an interrupt setting need_resched or
        ;; sigpending between sampling and the rti
        MASK_INT B2
        LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
        MVK .S1 _TIF_ALLWORK_MASK,A1
        NOP 3
        AND .D1 A1,A2,A2 ; check for work to do
[A2]    BNOP .S1 syscall_exit_work,5

restore_all:
        RESTORE_ALL NRP,NTSR

;;
;; After a fork we jump here directly from resume,
;; so that A4 contains the previous task structure.
;;
ENTRY(ret_from_fork)
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 schedule_tail,A0
        MVKH .S1 schedule_tail,A0
        B .S2X A0
#else
        B .S2 schedule_tail
#endif
        ADDKPC .S2 ret_from_fork_2,B3,4
ret_from_fork_2:
        ;; return 0 in A4 for child process
        GET_THREAD_INFO A12
        BNOP .S2 syscall_exit,3
        MVK .L2 0,B0
        STW .D2T2 B0,*+SP(REGS_A4+8)
ENDPROC(ret_from_fork)

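;;
;; Kernel threads: call schedule_tail(), then invoke the thread
;; function saved in pt_regs A0 with the argument saved in pt_regs A1,
;; and exit through the common child return path.
;;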
ENTRY(ret_from_kernel_thread)
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 schedule_tail,A0
        MVKH .S1 schedule_tail,A0
        B .S2X A0
#else
        B .S2 schedule_tail
#endif
        LDW .D2T2 *+SP(REGS_A0+8),B10 /* get fn */
        ADDKPC .S2 0f,B3,3
0:
        B .S2 B10 /* call fn */
        LDW .D2T1 *+SP(REGS_A1+8),A4 /* get arg */
        ADDKPC .S2 ret_from_fork_2,B3,3
ENDPROC(ret_from_kernel_thread)

;;
;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
;;
        .macro SAVE_ALL_INT
        SAVE_ALL IRP,ITSR
        .endm

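;;
;; CALL_INT invokes c6x_do_IRQ() with the interrupt number in A4 and a
;; pointer to the pt_regs frame (SP + 8) in B4, then returns through
;; ret_from_interrupt.
;;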
        .macro CALL_INT int
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 c6x_do_IRQ,A0
        MVKH .S1 c6x_do_IRQ,A0
        BNOP .S2X A0,1
        MVK .S1 int,A4
        ADDAW .D2 SP,2,B4
        MVKL .S2 ret_from_interrupt,B3
        MVKH .S2 ret_from_interrupt,B3
#else
        CALLP .S2 c6x_do_IRQ,B3
||      MVK .S1 int,A4
||      ADDAW .D2 SP,2,B4
        B .S1 ret_from_interrupt
        NOP 5
#endif
        .endm

ENTRY(_int4_handler)
        SAVE_ALL_INT
        CALL_INT 4
ENDPROC(_int4_handler)

ENTRY(_int5_handler)
        SAVE_ALL_INT
        CALL_INT 5
ENDPROC(_int5_handler)

ENTRY(_int6_handler)
        SAVE_ALL_INT
        CALL_INT 6
ENDPROC(_int6_handler)

ENTRY(_int7_handler)
        SAVE_ALL_INT
        CALL_INT 7
ENDPROC(_int7_handler)

ENTRY(_int8_handler)
        SAVE_ALL_INT
        CALL_INT 8
ENDPROC(_int8_handler)

ENTRY(_int9_handler)
        SAVE_ALL_INT
        CALL_INT 9
ENDPROC(_int9_handler)

ENTRY(_int10_handler)
        SAVE_ALL_INT
        CALL_INT 10
ENDPROC(_int10_handler)

ENTRY(_int11_handler)
        SAVE_ALL_INT
        CALL_INT 11
ENDPROC(_int11_handler)

ENTRY(_int12_handler)
        SAVE_ALL_INT
        CALL_INT 12
ENDPROC(_int12_handler)

ENTRY(_int13_handler)
        SAVE_ALL_INT
        CALL_INT 13
ENDPROC(_int13_handler)

ENTRY(_int14_handler)
        SAVE_ALL_INT
        CALL_INT 14
ENDPROC(_int14_handler)

ENTRY(_int15_handler)
        SAVE_ALL_INT
        CALL_INT 15
ENDPROC(_int15_handler)

;;
;; Handler for uninitialized and spurious interrupts
;;
ENTRY(_bad_interrupt)
        B .S2 IRP
        NOP 5
ENDPROC(_bad_interrupt)

;;
;; Entry for NMI/exceptions/syscall
;;
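;; _nmi_handler distinguishes the entry cause via EFR: a value of 1
;; identifies the software exception used for system calls and goes to
;; system_call_saved; any other exception is handed to
;; process_exception() with the pt_regs pointer in A4, returning
;; through ret_from_trap.
;;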
ENTRY(_nmi_handler)
        SAVE_ALL NRP,NTSR

        MVC .S2 EFR,B2
        CMPEQ .L2 1,B2,B2
||      MVC .S2 TSR,B1
        CLR .S2 B1,10,10,B1
        MVC .S2 B1,TSR
#ifdef CONFIG_C6X_BIG_KERNEL
[!B2]   MVKL .S1 process_exception,A0
[!B2]   MVKH .S1 process_exception,A0
[!B2]   B .S2X A0
#else
[!B2]   B .S2 process_exception
#endif
[B2]    B .S2 system_call_saved
[!B2]   ADDAW .D2 SP,2,B1
[!B2]   MV .D1X B1,A4
        ADDKPC .S2 ret_from_trap,B3,2

ret_from_trap:
        MV .D2X A4,B0
[!B0]   BNOP .S2 ret_from_exception,5

#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S2 system_call_saved_noack,B3
        MVKH .S2 system_call_saved_noack,B3
#endif
        LDW .D2T2 *+SP(REGS_B0+8),B0
        LDW .D2T1 *+SP(REGS_A4+8),A4
        LDW .D2T2 *+SP(REGS_B4+8),B4
        LDW .D2T1 *+SP(REGS_A6+8),A6
        LDW .D2T2 *+SP(REGS_B6+8),B6
        LDW .D2T1 *+SP(REGS_A8+8),A8
#ifdef CONFIG_C6X_BIG_KERNEL
||      B .S2 B3
#else
||      B .S2 system_call_saved_noack
#endif
        LDW .D2T2 *+SP(REGS_B8+8),B8
        NOP 4
ENDPROC(_nmi_handler)

;;
;; Kernel preemption: if the preempt count is zero and rescheduling is
;; needed, call preempt_schedule_irq(), looping until TIF_NEED_RESCHED
;; is clear.
;;
#ifdef CONFIG_PREEMPT
resume_kernel:
        GET_THREAD_INFO A12
        LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
        NOP 4
[A1]    BNOP .S2 restore_all,5

preempt_schedule:
        GET_THREAD_INFO A2
        LDW .D1T1 *+A2(THREAD_INFO_FLAGS),A1
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S2 preempt_schedule_irq,B0
        MVKH .S2 preempt_schedule_irq,B0
        NOP 2
#else
        NOP 4
#endif
        AND .D1 _TIF_NEED_RESCHED,A1,A1
[!A1]   BNOP .S2 restore_all,5
#ifdef CONFIG_C6X_BIG_KERNEL
        B .S2 B0
#else
        B .S2 preempt_schedule_irq
#endif
        ADDKPC .S2 preempt_schedule,B3,4
#endif /* CONFIG_PREEMPT */

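;;
;; enable_exception sets the GEE and XEN bits in TSR to enable
;; exception processing; the return address from B3 is moved to NRP
;; and the routine returns with a branch through NRP.
;;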
ENTRY(enable_exception)
        DINT
        MVC .S2 TSR,B0
        MVC .S2 B3,NRP
        MVK .L2 0xc,B1
        OR .D2 B0,B1,B0
        MVC .S2 B0,TSR ; Set GEE and XEN in TSR
        B .S2 NRP
        NOP 5
ENDPROC(enable_exception)

;;
;; Special system calls
;; return address is in B3
;;
ENTRY(sys_rt_sigreturn)
        ADD .D1X SP,8,A4
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 do_rt_sigreturn,A0
        MVKH .S1 do_rt_sigreturn,A0
        BNOP .S2X A0,5
#else
||      B .S2 do_rt_sigreturn
        NOP 5
#endif
ENDPROC(sys_rt_sigreturn)

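;;
;; sys_pread_c6x / sys_pwrite_c6x re-pack the 64-bit file offset into
;; the register pair expected by the generic sys_pread64/sys_pwrite64
;; before tail-calling them; the wrappers below do the same for other
;; syscalls that take 64-bit arguments.
;;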
ENTRY(sys_pread_c6x)
        MV .D2X A8,B7
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 sys_pread64,A0
        MVKH .S1 sys_pread64,A0
        BNOP .S2X A0,5
#else
||      B .S2 sys_pread64
        NOP 5
#endif
ENDPROC(sys_pread_c6x)

ENTRY(sys_pwrite_c6x)
        MV .D2X A8,B7
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 sys_pwrite64,A0
        MVKH .S1 sys_pwrite64,A0
        BNOP .S2X A0,5
#else
||      B .S2 sys_pwrite64
        NOP 5
#endif
ENDPROC(sys_pwrite_c6x)

;; On Entry
;;   A4 - path
;;   B4 - offset_lo (LE), offset_hi (BE)
;;   A6 - offset_lo (BE), offset_hi (LE)
ENTRY(sys_truncate64_c6x)
#ifdef CONFIG_CPU_BIG_ENDIAN
        MV .S2 B4,B5
        MV .D2X A6,B4
#else
        MV .D2X A6,B5
#endif
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 sys_truncate64,A0
        MVKH .S1 sys_truncate64,A0
        BNOP .S2X A0,5
#else
||      B .S2 sys_truncate64
        NOP 5
#endif
ENDPROC(sys_truncate64_c6x)

;; On Entry
;;   A4 - fd
;;   B4 - offset_lo (LE), offset_hi (BE)
;;   A6 - offset_lo (BE), offset_hi (LE)
ENTRY(sys_ftruncate64_c6x)
#ifdef CONFIG_CPU_BIG_ENDIAN
        MV .S2 B4,B5
        MV .D2X A6,B4
#else
        MV .D2X A6,B5
#endif
#ifdef CONFIG_C6X_BIG_KERNEL
||      MVKL .S1 sys_ftruncate64,A0
        MVKH .S1 sys_ftruncate64,A0
        BNOP .S2X A0,5
#else
||      B .S2 sys_ftruncate64
        NOP 5
#endif
ENDPROC(sys_ftruncate64_c6x)

;; On Entry
;;   A4 - fd
;;   B4 - offset_lo (LE), offset_hi (BE)
;;   A6 - offset_lo (BE), offset_hi (LE)
;;   B6 - len_lo (LE), len_hi (BE)
;;   A8 - len_lo (BE), len_hi (LE)
;;   B8 - advice
ENTRY(sys_fadvise64_64_c6x)
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 sys_fadvise64_64,A0
        MVKH .S1 sys_fadvise64_64,A0
        BNOP .S2X A0,2
#else
        B .S2 sys_fadvise64_64
        NOP 2
#endif
#ifdef CONFIG_CPU_BIG_ENDIAN
        MV .L2 B4,B5
||      MV .D2X A6,B4
        MV .L1 A8,A6
||      MV .D1X B6,A7
#else
        MV .D2X A6,B5
        MV .L1 A8,A7
||      MV .D1X B6,A6
#endif
        MV .L2 B8,B6
ENDPROC(sys_fadvise64_64_c6x)

;; On Entry
;;   A4 - fd
;;   B4 - mode
;;   A6 - offset_hi
;;   B6 - offset_lo
;;   A8 - len_hi
;;   B8 - len_lo
ENTRY(sys_fallocate_c6x)
#ifdef CONFIG_C6X_BIG_KERNEL
        MVKL .S1 sys_fallocate,A0
        MVKH .S1 sys_fallocate,A0
        BNOP .S2X A0,1
#else
        B .S2 sys_fallocate
        NOP
#endif
        MV .D1 A6,A7
        MV .D1X B6,A6
        MV .D2X A8,B7
        MV .D2 B8,B6
ENDPROC(sys_fallocate_c6x)

;; put this in .neardata for faster access when using DSBT mode
        .section .neardata,"aw",@progbits
        .global current_ksp
        .hidden current_ksp
current_ksp:
        .word init_thread_union + THREAD_START_SP