// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *   changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
#include <asm/asm-offsets.h>
#include <asm/shmbuf.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
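/* 3584 MB (3.5 GB): the most physical memory a 32-bit kernel will use, so
 * that enough kernel address space remains for other purposes (see the
 * comment ahead of mem_limit_func() in setup_bootmem() below).
 */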
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

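/* Maximum tolerated gap between ranges: 1 GB worth of pages. Without
 * SPARSEMEM, ranges beyond a larger gap are discarded below.
 */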
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			swap(pmem_ranges[j-1], pmem_ranges[j]);
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
			PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

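/*
 * Set by free_initmem() once the kernel text has been remapped read-only;
 * until then map_pages() maps kernel memory RWX so that early init code
 * may still write to it.
 */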
static bool kernel_set_to_readonly;

static void __ref map_pages(unsigned long start_vaddr,
			    unsigned long start_paddr, unsigned long size,
			    pgprot_t pgprot, int force)
{
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
					     PAGE_SIZE << PMD_TABLE_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif

		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

			pg_table = pte_offset_kernel(pmd, vaddr);
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end = (unsigned long) &data_start;

	map_pages(start, __pa(start), end - start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no-longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
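/* SET_MAP_OFFSET() rounds an address up to the next 32K boundary above it,
 * e.g. 0x1234 -> 0x8000 and 0x8000 -> 0x10000, which creates the hole
 * described above.
 */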

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);
#if CONFIG_PGTABLE_LEVELS == 3
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
#else
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif

#ifdef CONFIG_64BIT
	/* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
	BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
	BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void *)VMALLOC_START, (void *)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");

}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init fixmap_init(void)
{
	unsigned long addr = FIXMAP_START;
	unsigned long end = FIXMAP_START + FIXMAP_SIZE;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd;

	BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);

#if CONFIG_PGTABLE_LEVELS == 3
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
				     PAGE_SIZE << PMD_TABLE_ORDER);
		if (!pmd)
			panic("fixmap: pmd allocation failed.\n");
		pud_populate(NULL, pud, pmd);
	}
#endif

	pmd = pmd_offset(pud, addr);
	do {
		pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("fixmap: pte allocation failed.\n");

		pmd_populate_kernel(&init_mm, pmd, pte);

		addr += PAGE_SIZE;
	} while (addr < end);
}

static void __init parisc_bootmem_free(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	max_zone_pfn[0] = memblock_end_of_DRAM();

	free_area_init(max_zone_pfn);
}

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	fixmap_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	sparse_init();
	parisc_bootmem_free();
}

static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
			unsigned long entry_info)
{
	const int slot_max = btlb_info.fixed_range_info.num_comb;
	int min_num_pages = btlb_info.min_size;
	unsigned long size;

	/* map at minimum 4 pages */
	if (min_num_pages < 4)
		min_num_pages = 4;

	size = HUGEPAGE_SIZE;
	while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
		/* starting address must have same alignment as size! */
		/* if correctly aligned and fits in double size, increase */
		if (((start & (2 * size - 1)) == 0) &&
		    (end - start) >= (2 * size)) {
			size <<= 1;
			continue;
		}
		/* if current size alignment is too big, try smaller size */
		if ((start & (size - 1)) != 0) {
			size >>= 1;
			continue;
		}
		if ((end - start) >= size) {
			if ((size >> PAGE_SHIFT) >= min_num_pages)
				pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
						size >> PAGE_SHIFT, entry_info, *slot);
			(*slot)++;
			start += size;
			continue;
		}
		size /= 2;
		continue;
	}
}

void btlb_init_per_cpu(void)
{
	unsigned long s, t, e;
	int slot;

	/* BTLBs are not available on 64-bit CPUs */
	if (IS_ENABLED(CONFIG_PA20))
		return;
	else if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}

	/* insert BTLBs for code and data segments */
	s = (uintptr_t) dereference_function_descriptor(&_stext);
	e = (uintptr_t) dereference_function_descriptor(&_etext);
	t = (uintptr_t) dereference_function_descriptor(&_sdata);
	BUG_ON(t != e);

	/* code segments */
	slot = 0;
	alloc_btlb(s, e, &slot, 0x13800000);

	/* sanity check */
	t = (uintptr_t) dereference_function_descriptor(&_edata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
	BUG_ON(t != e);

	/* data segments */
	s = (uintptr_t) dereference_function_descriptor(&_sdata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
	alloc_btlb(s, e, &slot, 0x11800000);
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
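/*
 * space_id[] and dirty_space_id[] below are bitmaps with one bit per
 * space id; SID_ARRAY_SIZE is their length in longs.
 */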

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids;

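/* sid_lock protects the space id bitmaps and counters above. */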
static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[BIT_WORD(index)] |= BIT_MASK(index);
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset, mask;

	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
	mask = BIT_MASK(index);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */

	*dirty_space_offset |= mask;
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

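/*
 * vm_flags -> page protection mapping. Private (non-VM_SHARED) writable
 * mappings are entered read-only here so that the first write faults and
 * copy-on-write can take place.
 */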
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_NONE,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT