1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * ARC700 mmap |
4 | * |
5 | * (started from arm version - for VIPT alias handling) |
6 | * |
7 | * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) |
8 | */ |
9 | |
10 | #include <linux/fs.h> |
11 | #include <linux/mm.h> |
12 | #include <linux/mman.h> |
13 | #include <linux/sched/mm.h> |
14 | |
15 | #include <asm/cacheflush.h> |
16 | |
/*
 * Ensure that shared mappings are aligned so as to avoid aliasing
 * issues with VIPT caches: a given page of a file-backed object must
 * always be mapped at an address that is a multiple of SHMLBA, so all
 * virtual aliases fall in the same cache colour.
 */
24 | unsigned long |
25 | arch_get_unmapped_area(struct file *filp, unsigned long addr, |
26 | unsigned long len, unsigned long pgoff, unsigned long flags) |
27 | { |
28 | struct mm_struct *mm = current->mm; |
29 | struct vm_area_struct *vma; |
30 | struct vm_unmapped_area_info info; |
31 | |
32 | /* |
33 | * We enforce the MAP_FIXED case. |
34 | */ |
35 | if (flags & MAP_FIXED) { |
36 | if (flags & MAP_SHARED && |
37 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) |
38 | return -EINVAL; |
39 | return addr; |
40 | } |
41 | |
42 | if (len > TASK_SIZE) |
43 | return -ENOMEM; |
44 | |
45 | if (addr) { |
46 | addr = PAGE_ALIGN(addr); |
47 | |
48 | vma = find_vma(mm, addr); |
49 | if (TASK_SIZE - len >= addr && |
50 | (!vma || addr + len <= vm_start_gap(vma))) |
51 | return addr; |
52 | } |
53 | |
54 | info.flags = 0; |
55 | info.length = len; |
56 | info.low_limit = mm->mmap_base; |
57 | info.high_limit = TASK_SIZE; |
58 | info.align_mask = 0; |
59 | info.align_offset = pgoff << PAGE_SHIFT; |
60 | return vm_unmapped_area(info: &info); |
61 | } |
62 | |
/*
 * Map the low four VM_{READ,WRITE,EXEC,SHARED} bits of vm_flags to the
 * hardware page protection used for user mappings.
 *
 * Note that non-shared writable entries resolve to read-only
 * protections (PAGE_U_R / PAGE_U_X_R) — presumably so private writable
 * pages initially fault on write (copy-on-write); verify against the
 * ARC fault handler.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_U_NONE,
	[VM_READ]					= PAGE_U_R,
	[VM_WRITE]					= PAGE_U_R,
	[VM_WRITE | VM_READ]				= PAGE_U_R,
	[VM_EXEC]					= PAGE_U_X_R,
	[VM_EXEC | VM_READ]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED]					= PAGE_U_NONE,
	[VM_SHARED | VM_READ]				= PAGE_U_R,
	[VM_SHARED | VM_WRITE]				= PAGE_U_W_R,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_U_W_R,
	[VM_SHARED | VM_EXEC]				= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_U_X_W_R,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_U_X_W_R
};
/* Generates vm_get_page_prot(), which indexes protection_map above. */
DECLARE_VM_GET_PAGE_PROT
82 | |