/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

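	/*
	 * Sizing per the comment above: VCE-affected CPUs get an order-3
	 * block, i.e. 2^3 = 8 zeroed pages, one per possible cache colour;
	 * everything else needs only a single page.
	 */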
	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

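	/*
	 * Mask of the page-aligned offset bits within the zero-page block,
	 * so ZERO_PAGE() can pick the page whose colour matches a given
	 * virtual address.
	 */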
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

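/*
 * Map @page at a fixmap virtual address whose cache colour matches @addr,
 * via a wired TLB entry so the mapping cannot be evicted until
 * kunmap_coherent() tears it down. Typical use mirrors copy_to_user_page()
 * below:
 *
 *	void *kaddr = kmap_coherent(page, vaddr);
 *	memcpy(kaddr, src, len);
 *	kunmap_coherent();
 */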
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(folio_test_dcache_dirty(page_folio(page)));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

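	/*
	 * EntryHi covers an even/odd pair of pages, so program the same
	 * EntryLo value into both halves; whichever half vaddr falls in
	 * then maps the target page.
	 */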
	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
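	/*
	 * Claim a new wired slot and write the entry into it, so the random
	 * replacement used for normal TLB refills can never evict it.
	 */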
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

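/*
 * Tear down the wired entry installed by __kmap_pgprot(): shrink the wired
 * count by one and overwrite the slot with a unique, invalid EntryHi so no
 * stale translation remains.
 */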
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

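/*
 * Copy a user highpage. With dcache aliasing, a clean, mapped source page
 * is read through a colour-matching kmap_coherent() mapping so we see the
 * data the user mapping sees; otherwise a plain kmap_atomic() copy is safe.
 */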
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    folio_mapped(src) && !folio_test_dcache_dirty(src)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

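/*
 * Used when the kernel writes into another process's page (e.g. ptrace /
 * access_process_vm()): go through a colour-matching mapping when the page
 * is mapped and clean, otherwise mark the dcache dirty for a later flush.
 */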
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (cpu_has_dc_aliases &&
	    folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			folio_set_dcache_dirty(folio);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

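/*
 * Pre-allocate the page tables covering the fixmap/kmap virtual range so
 * that later fixmap updates only ever need to set a PTE. Only needed when
 * CONFIG_HIGHMEM places mappings in that range.
 */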
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

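/*
 * walk_system_ram_range() callback: record one speculation-enabled MAAR
 * window per RAM range, shrunk inward to MAAR alignment so no non-RAM
 * addresses are covered.
 */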
static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

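/*
 * Default (__weak) implementation: permit speculation for all system RAM.
 * Platforms with more specific requirements can override this.
 */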
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

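/*
 * Probe the number of MAARs, program one pair per memory region (reusing
 * the configuration recorded by the boot CPU when run on secondaries),
 * disable the remaining pairs, and log the result.
 */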
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info(" [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;

		max_mapnr = max_low_pfn;
	} else if (highend_pfn) {
		max_mapnr = highend_pfn;
	} else {
		max_mapnr = max_low_pfn;
	}
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

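/*
 * Poison and free every whole page in [begin, end). PFN_UP / PFN_DOWN
 * round the bounds inward so partial pages at either end are left alone.
 */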
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

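/*
 * Set up the per-CPU areas with the embed-first-chunk allocator, then
 * derive each CPU's __per_cpu_offset from the chunk base address and its
 * unit offset so per-CPU references resolve to the right copy.
 */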
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);