1// SPDX-License-Identifier: GPL-2.0
2/*
 * linux/arch/sparc/mm/leon_mm.c
4 *
5 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
6 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
7 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
8 *
9 * do srmmu probe in software
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <asm/asi.h>
16#include <asm/leon.h>
17#include <asm/tlbflush.h>
18
19#include "mm_32.h"
20
21int leon_flush_during_switch = 1;
22static int srmmu_swprobe_trace;
23
/*
 * Read the SRMMU context table pointer register via the LEON MMU-register
 * ASI and convert it to a physical address (the register holds the table
 * base right-shifted by 4, hence the << 4 below).
 */
static inline unsigned long leon_get_ctable_ptr(void)
{
	unsigned int retval;

	/* lda [SRMMU_CTXTBL_PTR] ASI_LEON_MMUREGS -> retval */
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_LEON_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}
34
35
/*
 * leon_swprobe - probe the SRMMU page tables in software for @vaddr.
 *
 * Walks the context table and up to three levels of page table
 * descriptors for the current MMU context using MMU-bypass physical
 * loads, mirroring what the hardware table walk would do.
 *
 * Returns the page table entry found (0 if there is no valid mapping)
 * and, when @paddr is non-NULL, stores the computed physical address
 * in *@paddr.  Tracing is emitted when srmmu_swprobe_trace is set.
 */
unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
{

	unsigned int ctxtbl;
	unsigned int pgd, pmd, ped;
	unsigned int ptr;
	unsigned int lvl, pte, paddrbase;
	unsigned int ctx;
	unsigned int paddr_calc;

	paddrbase = 0;

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: trace on\n");

	/* Physical base of the context table; 0 means no table installed. */
	ctxtbl = leon_get_ctable_ptr();
	if (!(ctxtbl)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
		return 0;
	}
	if (!_pfn_valid(PFN(ctxtbl))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO
			       "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ctxtbl));
		return 0;
	}

	ctx = srmmu_get_context();
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx);

	/* Root descriptor for the current context (4 bytes per entry). */
	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));

	/* A PTE already at the context level maps everything (level 3). */
	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
		lvl = 3;
		pte = pgd;
		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd);

	/* Descend: a PTD stores the next table's base right-shifted by 4. */
	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	pmd = LEON_BYPASS_LOAD_PA(ptr);
	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
		lvl = 2;
		pte = pmd;
		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);

	/* Next level down, indexed by the PMD bits of vaddr. */
	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
	if (!_pfn_valid(PFN(ptr))) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
			       PFN(ptr));
		return 0;
	}

	ped = LEON_BYPASS_LOAD_PA(ptr);

	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is entry level 1\n");
		lvl = 1;
		pte = ped;
		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
		return 0;
	}

	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped);

	/* Last level: the entry here must be a PTE (level 0). */
	ptr = (ped & SRMMU_PTD_PMASK) << 4;
	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
	if (!_pfn_valid(PFN(ptr)))
		return 0;

	ptr = LEON_BYPASS_LOAD_PA(ptr);
	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
		if (srmmu_swprobe_trace)
			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
		lvl = 0;
		pte = ptr;
		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
		goto ready;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
	return 0;

ready:
	/*
	 * Combine the frame bits of the PTE with the in-page offset from
	 * vaddr; the offset width depends on the level the PTE was found
	 * at (larger levels map larger regions).
	 */
	switch (lvl) {
	case 0:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
		break;
	case 1:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
		break;
	case 2:
		paddr_calc =
		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
		break;
	default:
	case 3:
		/* Context-level PTE: the virtual address is passed through. */
		paddr_calc = vaddr;
		break;
	}
	if (srmmu_swprobe_trace)
		printk(KERN_INFO "swprobe: padde %x\n", paddr_calc);
	if (paddr)
		*paddr = paddr_calc;
	return pte;
}
182
/* Flush the whole instruction cache ("flush" acts as iflush on LEON). */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush "); /*iflush*/
}
187
/* Flush the whole data cache with a store through ASI_LEON_DFLUSH. */
void leon_flush_dcache_all(void)
{
	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
			     "i"(ASI_LEON_DFLUSH) : "memory");
}
193
194void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
195{
196 if (vma->vm_flags & VM_EXEC)
197 leon_flush_icache_all();
198 leon_flush_dcache_all();
199}
200
/*
 * Flush both instruction and data caches.
 *
 * Previously this duplicated the inline asm of leon_flush_icache_all()
 * and leon_flush_dcache_all(); call the helpers instead so each flush
 * sequence exists in exactly one place.
 */
void leon_flush_cache_all(void)
{
	leon_flush_icache_all();	/* " flush " — iflush */
	leon_flush_dcache_all();	/* sta %g0 via ASI_LEON_DFLUSH */
}
207
/*
 * Flush caches first (they are virtually tagged), then invalidate the
 * whole TLB with a store to MMU-flush address 0x400.
 */
void leon_flush_tlb_all(void)
{
	leon_flush_cache_all();
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
			     "i"(ASI_LEON_MMUFLUSH) : "memory");
}
214
/*
 * leon3_getCacheRegs - read the LEON3 cache control registers.
 *
 * Fills @regs with the cache control register (offset 0x0), the
 * instruction cache configuration register (0x8) and the data cache
 * configuration register (0xC), read through ASI_LEON_CACHEREGS.
 * A NULL @regs is silently ignored.
 */
void leon3_getCacheRegs(struct leon3_cacheregs *regs)
{
	unsigned long ccr, iccr, dccr;

	if (!regs)
		return;
	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
			     "mov 0x08, %%g1\n\t"
			     "lda [%%g1] %3, %1\n\t"
			     "mov 0x0c, %%g1\n\t"
			     "lda [%%g1] %3, %2\n\t"
			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
			     /* output */
			     : "i"(ASI_LEON_CACHEREGS) /* input */
			     : "g1" /* clobber list */
	    );
	regs->ccr = ccr;
	regs->iccr = iccr;
	regs->dccr = dccr;
}
237
238/* Due to virtual cache we need to check cache configuration if
239 * it is possible to skip flushing in some cases.
240 *
241 * Leon2 and Leon3 differ in their way of telling cache information
242 *
243 */
244int __init leon_flush_needed(void)
245{
246 int flush_needed = -1;
247 unsigned int ssize, sets;
248 char *setStr[4] =
249 { "direct mapped", "2-way associative", "3-way associative",
250 "4-way associative"
251 };
252 /* leon 3 */
253 struct leon3_cacheregs cregs;
254 leon3_getCacheRegs(regs: &cregs);
255 sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
256 /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
257 ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
258
259 printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
260 sets > 3 ? "unknown" : setStr[sets], ssize);
261 if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
262 /* Set Size <= Page size ==>
263 flush on every context switch not needed. */
264 flush_needed = 0;
265 printk(KERN_INFO "CACHE: not flushing on every context switch\n");
266 }
267 return flush_needed;
268}
269
/*
 * Context-switch hook: flush the TLB always, and the caches too unless
 * leon_flush_needed() determined at boot that this is safe to skip.
 */
void leon_switch_mm(void)
{
	flush_tlb_mm((void *)0);
	if (leon_flush_during_switch)
		leon_flush_cache_all();
}
276
/* cachetlb op: per-mm granularity is not tracked, flush everything. */
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}
281
/* cachetlb op: delegate to the per-page flush (icache only if VM_EXEC). */
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}
286
/* cachetlb op: range granularity is not tracked, flush everything. */
static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}
293
/* cachetlb op: per-mm TLB flush falls back to a full TLB flush. */
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}
298
/* cachetlb op: per-page TLB flush falls back to a full TLB flush. */
static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}
304
/* cachetlb op: range TLB flush falls back to a full TLB flush. */
static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}
311
/* cachetlb op: writing a page back to RAM flushes the whole cache. */
static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}
316
/* cachetlb op: after writing signal trampoline insns, flush everything. */
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}
321
/* cachetlb op: DMA coherency needs only the data cache flushed. */
static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}
326
/* Per-CPU SRMMU poke hook (installed in init_leon); nothing to do on LEON. */
void __init poke_leonsparc(void)
{
}
330
/* Cache/TLB operation table installed as sparc32_cachetlb_ops by init_leon(). */
static const struct sparc32_cachetlb_ops leon_ops = {
	.cache_all = leon_flush_cache_all,
	.cache_mm = leon_flush_cache_mm,
	.cache_page = leon_flush_cache_page,
	.cache_range = leon_flush_cache_range,
	.tlb_all = leon_flush_tlb_all,
	.tlb_mm = leon_flush_tlb_mm,
	.tlb_page = leon_flush_tlb_page,
	.tlb_range = leon_flush_tlb_range,
	.page_to_ram = leon_flush_page_to_ram,
	.sig_insns = leon_flush_sig_insns,
	.page_for_dma = leon_flush_page_for_dma,
};
344
/*
 * Boot-time setup: register the LEON name, cache/TLB ops and per-CPU
 * poke hook, then probe whether a cache flush is needed on every
 * context switch (see leon_flush_needed()).
 */
void __init init_leon(void)
{
	srmmu_name = "LEON";
	sparc32_cachetlb_ops = &leon_ops;
	poke_srmmu = poke_leonsparc;

	leon_flush_during_switch = leon_flush_needed();
}
353

/* source code of linux/arch/sparc/mm/leon_mm.c */