/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * TLB Exception Handling for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * Vineetg: April 2011 :
 *  -MMU v1: moved out legacy code into a separate file
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * Vineetg: July 2009
 *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
 *   entry, so that it doesn't knock out its I-TLB entry
 *  -Some more fine tuning:
 *   bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
 *
 * Vineetg: July 2009
 *  -Practically rewrote the I/D TLB Miss handlers
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
 *   Hence leaner by 1.5K
 *   Used Conditional arithmetic to replace excessive branching
 *   Also used short instructions wherever possible
 *
 * Vineetg: Aug 13th 2008
 *  -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
 *   more information in case of a Fatality
 *
 * Vineetg: March 25th Bug #92690
 *  -Added Debug Code to check if sw-ASID == hw-ASID
 *
 * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>

#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
; ARC700 Exception Handling doesn't auto-switch stack and it only provides
; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
;
; For Non-SMP, the scratch AUX reg is repurposed to cache the task PGD, so a
; "global" is used to free up the FIRST core reg to be able to code the rest
; of the exception prologue (IRQs are auto-disabled on Exceptions, so this is
; IRQ-safe). Since the Fast Path TLB Miss handler is coded with 4 regs, the
; remaining 3 need to be saved as well, by extending the "global" to 4 words.
; Hence ".size ex_saved_reg1, 16"
; [All of this dance is to avoid stack switching for each TLB Miss, since we
; only need to save a handful of regs, as opposed to the complete reg file]
;
; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
; core reg as it would not be SMP safe.
; Thus the scratch AUX reg is used (and no longer caches the task PGD).
; To save the remaining 3 regs per cpu, the global is made "per-cpu".
; The Epilogue thus has to locate the "per-cpu" storage for the regs.
; To avoid cache line bouncing, the per-cpu global is aligned/sized per
; L1_CACHE_SHIFT, despite fundamentally needing only 12 bytes. Hence
; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"

; As simple as that....
;--------------------------------------------------------------------------

; scratch memory to save [r0-r3] used to code TLB refill Handler
ARCFP_DATA ex_saved_reg1
	.align 1 << L1_CACHE_SHIFT
	.type   ex_saved_reg1, @object
#ifdef CONFIG_SMP
	.size   ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
ex_saved_reg1:
	.zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
#else
	.size   ex_saved_reg1, 16
ex_saved_reg1:
	.zero 16
#endif

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_SMP
	sr  r0, [ARC_REG_SCRATCH_DATA0]	; freeup r0 to code with
	GET_CPU_ID  r0			; get to per cpu scratch mem,
	asl r0, r0, L1_CACHE_SHIFT	; cache line wide per cpu
	add r0, @ex_saved_reg1, r0
#else
	st    r0, [@ex_saved_reg1]
	mov_s r0, @ex_saved_reg1
#endif
	st_s  r1, [r0, 4]
	st_s  r2, [r0, 8]
	st_s  r3, [r0, 12]
.endm
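
; A loose C model of the save step above (SMP flavour; names as in this
; file): the per-cpu slot is just "ex_saved_reg1 + cpu_id * one cache line":
;
;	u32 *slot = (u32 *)((char *)ex_saved_reg1 + (cpu_id << L1_CACHE_SHIFT));
;	slot[1] = r1; slot[2] = r2; slot[3] = r3;  /* r0 parked in AUX scratch */
;
; On UP, slot[0] holds r0 instead, since ARC_REG_SCRATCH_DATA0 is busy
; caching the task PGD.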

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
	GET_CPU_ID  r0			; get to per cpu scratch mem
	asl r0, r0, L1_CACHE_SHIFT	; each is cache line wide
	add r0, @ex_saved_reg1, r0
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	lr    r0, [ARC_REG_SCRATCH_DATA0]
#else
	mov_s r0, @ex_saved_reg1
	ld_s  r3, [r0, 12]
	ld_s  r2, [r0, 8]
	ld_s  r1, [r0, 4]
	ld_s  r0, [r0]
#endif
.endm

#else	/* ARCv2 */

.macro TLBMISS_FREEUP_REGS
#ifdef CONFIG_ARC_HAS_LL64
	std   r0, [sp, -16]
	std   r2, [sp, -8]
#else
	PUSH  r0
	PUSH  r1
	PUSH  r2
	PUSH  r3
#endif
.endm
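
; Rough picture of the LL64 path above: each "std" stores an even/odd
; register pair just below the current sp, without moving sp:
;
;	[sp - 16] = {r0, r1}
;	[sp -  8] = {r2, r3}
;
; Leaving sp untouched is presumably safe here: this runs with interrupts
; disabled, and nothing else uses the area below sp before the "ldd"s in
; TLBMISS_RESTORE_REGS reload the pairs.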

.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_ARC_HAS_LL64
	ldd   r0, [sp, -16]
	ldd   r2, [sp, -8]
#else
	POP   r3
	POP   r2
	POP   r1
	POP   r0
#endif
.endm

#endif

;============================================================================
; TLB Miss handling Code
;============================================================================

#ifndef PMD_SHIFT
#define PMD_SHIFT PUD_SHIFT
#endif

#ifndef PUD_SHIFT
#define PUD_SHIFT PGDIR_SHIFT
#endif
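
; With fewer than 4 paging levels, the fallbacks above fold the walk.
; e.g. for a 2-level config (no PUD, no PMD), conceptually:
;
;	PUD_SHIFT == PGDIR_SHIFT, and thus PMD_SHIFT == PGDIR_SHIFT too
;
; so the "LEVELS > 3" / "LEVELS > 2" steps in LOAD_FAULT_PTE below shift
; by the same amount and effectively drop out, leaving the PGD entry to
; be used directly as the page-table pointer.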

;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
.macro LOAD_FAULT_PTE

	lr  r2, [efa]

#ifdef CONFIG_ISA_ARCV2
	lr  r1, [ARC_REG_SCRATCH_DATA0]	; current pgd
#else
	GET_CURR_TASK_ON_CPU  r1
	ld  r1, [r1, TASK_ACT_MM]
	ld  r1, [r1, MM_PGD]
#endif

	lsr     r0, r2, PGDIR_SHIFT	; Bits for indexing into PGD
	ld.as   r3, [r1, r0]		; PGD entry corresp to faulting addr
	tst     r3, r3
	bz      do_slow_path_pf		; if no Page Table, do page fault

#if CONFIG_PGTABLE_LEVELS > 3
	lsr     r0, r2, PUD_SHIFT	; Bits for indexing into PUD
	and     r0, r0, (PTRS_PER_PUD - 1)
	ld.as   r1, [r3, r0]		; PUD entry
	tst     r1, r1
	bz      do_slow_path_pf
	mov     r3, r1
#endif

#if CONFIG_PGTABLE_LEVELS > 2
	lsr     r0, r2, PMD_SHIFT	; Bits for indexing into PMD
	and     r0, r0, (PTRS_PER_PMD - 1)
	ld.as   r1, [r3, r0]		; PMD entry
	tst     r1, r1
	bz      do_slow_path_pf
	mov     r3, r1
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	and.f   0, r3, _PAGE_HW_SZ	; Is this a Huge PMD (thp)
	add2.nz r1, r1, r0
	bnz.d   2f			; YES: PMD holds a THP PTE: stop the table walk
	mov.nz  r0, r3

#endif
	and     r1, r3, PAGE_MASK

	; Get the PTE entry: The idea is
	; (1) x = addr >> PAGE_SHIFT	-> masks page-off bits from @fault-addr
	; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
	; (3) z = pgtbl + y * sizeof(pte)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_SIZE_LOG	3	/* 8 == 2 ^ 3 */
#else
#define PTE_SIZE_LOG	2	/* 4 == 2 ^ 2 */
#endif

	; the multiply in step (3) is avoided by using a smaller shift in step (1)
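	;
	; Worked example: assuming 8K pages (PAGE_SHIFT == 13) and 4-byte
	; PTEs (PTE_SIZE_LOG == 2), steps (1) and (3) fuse into:
	;
	;	r0 = (vaddr >> 11) & ((PTRS_PER_PTE - 1) << 2)
	;
	; leaving r0 as "index * sizeof(pte)", a ready byte offset into pgtbl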
	lsr     r0, r2, ( PAGE_SHIFT - PTE_SIZE_LOG )
	and     r0, r0, ( (PTRS_PER_PTE - 1) << PTE_SIZE_LOG )
	ld.aw   r0, [r1, r0]		; r0: PTE (lower word only for PAE40)
					; r1: PTE ptr

2:

.endm
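
; LOAD_FAULT_PTE, as a loose C sketch (fully expanded 4-level case; the
; THP short-circuit is omitted, and every zero entry bails to
; do_slow_path_pf):
;
;	ent  = pgd[vaddr >> PGDIR_SHIFT];
;	ent  = ((ulong *)ent)[(vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1)];
;	ent  = ((ulong *)ent)[(vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];
;	ptbl = (ulong *)(ent & PAGE_MASK);
;	pte  = ptbl[(vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];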

;-----------------------------------------------------------------
; Convert Linux PTE entry into TLB entry
; A one-word PTE entry is programmed as a two-word TLB Entry [PD0:PD1] in mmu
; (for PAE40: a two-word PTE, and a three-word TLB Entry [PD0:PD1:PD1HI])
; IN: r0 = PTE, r1 = ptr to PTE

.macro CONV_PTE_TO_TLB
	and    r3, r0, PTE_BITS_RWX	;          r  w  x
	asl    r2, r3, 3		; Kr Kw Kx 0  0  0 (GLOBAL, kernel only)
	and.f  0,  r0, _PAGE_GLOBAL
	or.z   r2, r2, r3		; Kr Kw Kx Ur Uw Ux (!GLOBAL, user page)

	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
	or  r3, r3, r2

	sr  r3, [ARC_REG_TLBPD1]	; paddr[31..13] | Kr Kw Kx Ur Uw Ux | C
#ifdef CONFIG_ARC_HAS_PAE40
	ld  r3, [r1, 4]			; paddr[39..32]
	sr  r3, [ARC_REG_TLBPD1HI]
#endif

	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb

	lr  r3, [ARC_REG_TLBPD0]	; MMU prepares PD0 with vaddr and asid

	or  r3, r3, r2			; S | vaddr | {sasid|asid}
	sr  r3, [ARC_REG_TLBPD0]	; rewrite PD0
.endm
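
; Net effect of the macro above, roughly (the PAE40 PD1HI write elided;
; "rwx" stands for the 3 permission bits pulled out of the PTE, and
; "hw_vaddr_asid" for the PD0 value the MMU pre-loads on the miss):
;
;	prot = (pte & _PAGE_GLOBAL) ? (rwx << 3)	/* kernel only   */
;				    : (rwx << 3) | rwx;	/* kernel + user */
;	PD1  = (pte & PTE_BITS_NON_RWX_IN_PD1) | prot;	/* pfn, cache, prot */
;	PD0  = hw_vaddr_asid | (pte & PTE_BITS_IN_PD0);	/* vaddr, asid, V, G */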

;-----------------------------------------------------------------
; Commit the TLB entry into MMU

.macro COMMIT_ENTRY_TO_MMU
#ifdef CONFIG_ARC_MMU_V3

	/* Get free TLB slot: Set = computed from vaddr, way = random */
	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]

	/* Commit the Write */
	sr  TLBWriteNI, [ARC_REG_TLBCOMMAND]

#else
	sr  TLBInsertEntry, [ARC_REG_TLBCOMMAND]
#endif

88:
.endm


ARCFP_CODE	; Fast Path Code, candidate for ICCM

;-----------------------------------------------------------------------------
; I-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissI)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions are appropriate for executing code
	cmp_s   r2, VMALLOC_START
	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
	or.hs   r2, r2, _PAGE_GLOBAL

	and     r3, r0, r2	; Mask out NON Flag bits from PTE
	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
	bnz     do_slow_path_pf
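
	; The check above, in rough C terms:
	;
	;	need = _PAGE_PRESENT | _PAGE_EXECUTE;
	;	if (vaddr >= VMALLOC_START)	/* kernel region, so */
	;		need |= _PAGE_GLOBAL;	/* expect a global PTE */
	;	if ((pte & need) != need)
	;		goto do_slow_path_pf;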

	; Let Linux VM know that the page was accessed
	or      r0, r0, _PAGE_ACCESSED	; set Accessed Bit
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB
	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissI_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

END(EV_TLBMissI)

;-----------------------------------------------------------------------------
; D-TLB Miss Exception Handler
;-----------------------------------------------------------------------------

ENTRY(EV_TLBMissD)

	TLBMISS_FREEUP_REGS

	;----------------------------------------------------------------
	; Get the PTE corresponding to V-addr accessed
	; If the PTE exists, it sets up r0 = PTE, r1 = Ptr to PTE, r2 = EFA
	LOAD_FAULT_PTE

	;----------------------------------------------------------------
	; VERIFY_PTE: Check if PTE permissions are appropriate for data
	; access (R/W/R+W)

	cmp_s   r2, VMALLOC_START
	mov_s   r2, _PAGE_PRESENT	; common bit for K/U PTE
	or.hs   r2, r2, _PAGE_GLOBAL	; kernel PTE only

	; Linux PTE [RWX] bits are semantically overloaded:
	; -If PAGE_GLOBAL is set, they refer to kernel-only flags (vmalloc)
	; -Otherwise they are user-mode permissions, which are exactly the
	;  same for kernel mode as well (e.g. copy_(to|from)_user)

	lr      r3, [ecr]
	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
	or.nz   r2, r2, _PAGE_READ		; chk for Read flag in PTE
	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
	or.nz   r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
	; Above laddering takes care of XCHG access (both R and W)
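
	; In rough C terms (the EX/xchg cause code presumably has both the
	; LD and ST cause bits set, which is what makes the laddering work):
	;
	;	if (ecr & BIT(ECR_C_BIT_DTLB_LD_MISS)) need |= _PAGE_READ;
	;	if (ecr & BIT(ECR_C_BIT_DTLB_ST_MISS)) need |= _PAGE_WRITE;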

	; By now, r2 is setup with all the Flags we need to check in PTE
	and     r3, r0, r2		; Mask out NON Flag bits from PTE
	brne.d  r3, r2, do_slow_path_pf	; is ((pte & flags_test) == flags_test)

	;----------------------------------------------------------------
	; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
	or      r0, r0, _PAGE_ACCESSED	; Accessed bit always
	or.nz   r0, r0, _PAGE_DIRTY	; if Write, set Dirty bit as well
	st_s    r0, [r1]		; Write back PTE

	CONV_PTE_TO_TLB

	COMMIT_ENTRY_TO_MMU
	TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret:	; additional label for VDK OS-kit instrumentation
	rtie

;-------- Common routine to call Linux Page Fault Handler -----------
do_slow_path_pf:

#ifdef CONFIG_ISA_ARCV2
	; Set Z flag if the exception was taken in U mode. Hardware micro-ops
	; do this on any taken interrupt/exception, so it was already the case
	; on entry above, but the ensuing code has clobbered it by now.
	; EXCEPTION_PROLOGUE, called in the slow path, relies on Z being correct.

	lr      r2, [erstatus]
	and     r2, r2, STATUS_U_MASK
	bxor.f  0, r2, STATUS_U_BIT
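
	; Roughly: Z = ((erstatus & STATUS_U_MASK) ^ STATUS_U_MASK) == 0,
	; i.e. Z ends up set iff the faulting context was user mode.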
#endif

	; Restore the 4 scratch regs saved by the fast path miss handler
	TLBMISS_RESTORE_REGS

	; Slow path TLB Miss handled as a regular ARC Exception
	; (stack switching / save the complete reg-file).
	b       call_do_page_fault
END(EV_TLBMissD)