/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values... Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
	.macro	irq_handler, from_user:req
	mov	r1, sp
	ldr_this_cpu r2, irq_stack_ptr, r2, r3
	.if	\from_user == 0
	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
#ifdef CONFIG_VMAP_STACK
	ldr_va	r3, high_memory, cc	@ End of the linear region
	cmpcc	r3, r1			@ Stack pointer was below it?
#endif
	bcc	0f			@ If not, switch to the IRQ stack
	mov	r0, r1
	bl	generic_handle_arch_irq
	b	1f
0:
	.endif

	mov_l	r0, generic_handle_arch_irq
	bl	call_with_stack
1:
	.endm
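
/*
 * Roughly, the stack selection above corresponds to this C sketch (purely
 * illustrative, not built; on_irq_stack/in_linear_map are made-up names
 * for exposition):
 *
 *	bool on_irq_stack = (unsigned long)(irq_stack_ptr - sp) <= THREAD_SIZE;
 *	bool in_linear_map = IS_ENABLED(CONFIG_VMAP_STACK) &&
 *			     sp < (unsigned long)high_memory;
 *	// with VMAP_STACK, an SP in the linear map means the overflow stack
 *
 *	if (!from_user && (on_irq_stack || in_linear_map))
 *		generic_handle_arch_irq(regs);	// stay on the current stack
 *	else
 *		call_with_stack(generic_handle_arch_irq, regs, irq_stack_ptr);
 */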

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC
	bl_r	ip
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
	bl_r	ip
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
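
/*
 * For reference, the C-level fault handlers that the per-CPU abort hooks
 * eventually feed take the same (address, status, regs) triple; a sketch
 * of the shape, per arch/arm/mm/fault.c:
 *
 *	asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
 *				     struct pt_regs *regs);
 *	asmlinkage void do_PrefetchAbort(unsigned long addr, unsigned int ifsr,
 *					 struct pt_regs *regs);
 */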

	.section	.entry.text, "ax", %progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
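
/*
 * SPFIX() keeps the SVC stack 8-byte aligned, as the EABI requires at
 * public interfaces; on OABI/pre-v5 builds it expands to nothing.  In C
 * terms, the two SPFIX() instructions in svc_entry below amount to
 * (sketch only):
 *
 *	if (sp & 4)		// tst sp, #4
 *		sp -= 4;	// subne sp, sp, #4
 */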

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
 UNWIND(.fnstart		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 THUMB(	add	sp, r1		)	@ get SP in a GPR without
 THUMB(	sub	r1, sp, r1	)	@ using a temp register

	.if	\overflow_check
 UNWIND(.save	{r0 - pc}	)
	do_overflow_check (SVC_REGS_SIZE + \stack_hole)
	.endif

#ifdef CONFIG_THUMB2_KERNEL
	tst	r1, #4			@ test stack pointer alignment
	sub	r1, sp, r1		@ restore original R1
	sub	sp, r1			@ restore original SP
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subne	sp, sp, #4	)

 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP		@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 SPFIX(	addne	r2, r2, #4	)
	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	get_thread_info tsk
	uaccess_entry tsk, r0, r1, r2, \uaccess

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg
#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, and we have
	@ to subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
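
/*
 * In C terms the fixup above is simply (sketch; r0 holds the pt_regs
 * pointer and r1 the correction chosen by the caller):
 *
 *	regs->ARM_pc -= correction;	// 4 for ARM, 2 for Thumb
 *	do_undefinstr(regs);
 */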

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align	5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE should
 * be a multiple of 8 as well (enforced below).
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr_va	r8, cr_alignment)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^	)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	reload_current r7, r8

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	ldr	r0, =TASK_SIZE
	cmp	r4, r0
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler from_user=1
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r5, #PSR_T_BIT			@ Thumb mode?
	mov	r1, #2				@ set insn size to 2 for Thumb
	bne	0f				@ handle as Thumb undef exception
#ifdef CONFIG_FPE_NWFPE
	adr	r9, ret_from_exception
	bl	call_fpe			@ returns via R9 on success
#endif
	mov	r1, #4				@ set insn size to 4 for ARM
0:	mov	r0, sp
	uaccess_disable ip
	bl	__und_fault
	b	ret_from_exception
 UNWIND(.fnend)
ENDPROC(__und_usr)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
	.else
	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
	.endif
#endif
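/*
 * Note on the .if above: an ARM LDR immediate offset is limited to 12 bits
 * (IMM12_MASK == 0xfff), so when TSK_STACK_CANARY does not fit, the offset
 * is split: ADD the bits above 12 first, then LDR with the low 12 bits.
 * Sketch of the equivalent address arithmetic ('next' is the value in r2):
 *
 *	canary = *(unsigned long *)((char *)next + TSK_STACK_CANARY);
 */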
	mov	r7, r2				@ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	str	r9, [r8]
#endif
	mov	r0, r5
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
	set_current r7, r8
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
#else
	mov	r1, r7
	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
	@
	@ Do a dummy read from the new stack while running from the old one so
	@ that we can rely on do_translation_fault() to fix up any stale PMD
	@ entries covering the vmalloc region.
	@
	ldr	r2, [ip]
#endif

	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
	@ effectuates the task switch, as that is what causes the observable
	@ values of current and current_thread_info to change. When
	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
	@ current_thread_info) is done explicitly, and the update of SP just
	@ switches us to another stack, with few other side effects. In order
	@ to prevent this distinction from causing any inconsistencies, let's
	@ keep the 'set_current' call as close as we can to the update of SP.
	set_current r1, r2
	mov	sp, ip
	ret	lr
#endif
 UNWIND(.fnend		)
ENDPROC(__switch_to)

#ifdef CONFIG_VMAP_STACK
	.text
	.align	2
__bad_stack:
	@
	@ We've just detected an overflow. We need to load the address of this
	@ CPU's overflow stack into the stack pointer register. We have only one
	@ scratch register so let's use a sequence of ADDs including one
	@ involving the PC, and decorate them with PC-relative group
	@ relocations. As these are ARM only, switch to ARM mode first.
	@
	@ We enter here with IP clobbered and its value stashed on the mode
	@ stack.
	@
THUMB(	bx	pc		)
THUMB(	nop			)
THUMB(	.arm			)
	ldr_this_cpu_armv6 ip, overflow_stack_ptr

	str	sp, [ip, #-4]!			@ Preserve original SP value
	mov	sp, ip				@ Switch to overflow stack
	pop	{ip}				@ Original SP in IP

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	mov	ip, ip				@ mov expected by unwinder
	push	{fp, ip, lr, pc}		@ GCC flavor frame record
#else
	str	ip, [sp, #-8]!			@ store original SP
	push	{fpreg, lr}			@ Clang flavor frame record
#endif
UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
	ldr	ip, [r0, #12]			@ reload IP

	@ Store the original GPRs to the new stack.
	svc_entry uaccess=0, overflow_check=0

UNWIND( .save	{sp, pc}	)
UNWIND( .save	{fpreg, lr}	)
UNWIND( .setfp	fpreg, sp	)

	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
						@ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	ldr	r1, [fp, #4]			@ reload SP at entry
	add	fp, fp, #12
#else
	ldr	r1, [fpreg, #8]
#endif
	str	r1, [sp, #S_SP]			@ store in pt_regs

	@ Stash the regs for handle_bad_stack
	mov	r0, sp

	@ Time to die
	bl	handle_bad_stack
	nop
UNWIND( .fnend		)
ENDPROC(__bad_stack)
#endif

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
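
/*
 * kuser_pad fills the unused tail of each helper slot with the word
 * 0xe7fddef1, an encoding from the architecturally undefined instruction
 * space, presumably so that a stray jump into the padding faults
 * immediately rather than executing silently.
 */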

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
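
/*
 * Userspace calling convention for the helper above, as described in
 * Documentation/arch/arm/kernel_user_helpers.rst (illustrative C, not
 * compiled here):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * It returns 0 with C set if *ptr was atomically changed to *newval,
 * and non-zero with C clear otherwise.
 */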

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32
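
/*
 * Userspace view of the barrier helper, per the same document (sketch):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 */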

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
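
/*
 * Userspace calling convention for __kuser_cmpxchg, per the same
 * document (sketch):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * It returns 0 with C set if *ptr was atomically changed from oldval to
 * newval, and non-zero with C clear otherwise.
 */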

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
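
/*
 * Userspace view of the TLS helper, per the same document (sketch):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 */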

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
#endif

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}		@ save r0, lr

	@ Save spsr_<exception> (parent CPSR)
.Lvec_\name:
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0
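	@ The EOR above exploits x ^ (x ^ y) == y: the mode field of the
	@ CPSR currently reads \mode, so XORing with (\mode ^ SVC_MODE)
	@ rewrites just that field to SVC_MODE, while PSR_ISETSTATE
	@ (PSR_T_BIT on CONFIG_THUMB2_KERNEL builds, 0 otherwise) flips the
	@ recorded Thumb bit to match the kernel's instruction set.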

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.subsection 1
	.align 5
vector_bhb_loop8_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}

	@ bhb workaround
	mov	r0, #8
3:	W(b)	. + 4
	subs	r0, r0, #1
	bne	3b
	dsb	nsh
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
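	@ (The eight discarded taken branches above, followed by the DSB,
	@ deliberately fill the branch history buffer; this is the
	@ Spectre-BHB style mitigation, paired with the BPIALL variant
	@ earlier in this macro.)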
	b	.Lvec_\name
ENDPROC(vector_bhb_loop8_\name)
	.previous
#endif

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.section .stubs, "ax", %progbits
	@ These need to remain at the start of the section so that
	@ they are in range of the 'SWI' entries in the vector tables
	@ located 4k down.
.L__vector_swi:
	.word	vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
	.word	vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
	.word	vector_bhb_bpiall_swi
#endif
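
/*
 * The SWI entry in each vector table below is a "W(ldr) pc, ." tagged
 * with a .reloc against one of the literals above: the linker resolves
 * the load's PC-relative offset to that .word, which the runtime copy
 * keeps 4 KiB above the vector page, within an LDR literal's reach.
 * The net effect is an indirect branch to vector_swi without placing a
 * literal in the vector page itself.
 */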

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical; they aren't supposed to happen, and won't
 * happen in 32-bit data mode.
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing a FIQ to act like an NMI on
 * x86 systems.  This must be the last vector stub, so let's place it in
 * its own subsection.
 */
	.subsection 2
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

	.section .vectors, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
	W(ldr)	pc, .
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.section .vectors.bhb.loop8, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_loop8_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_loop8_pabt
	W(b)	vector_bhb_loop8_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_loop8_irq
	W(b)	vector_bhb_loop8_fiq

	.section .vectors.bhb.bpiall, "ax", %progbits
	W(b)	vector_rst
	W(b)	vector_bhb_bpiall_und
ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_bpiall_pabt
	W(b)	vector_bhb_bpiall_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_bpiall_irq
	W(b)	vector_bhb_bpiall_fiq
#endif

	.data
	.align	2

	.globl	cr_alignment
cr_alignment:
	.space	4
