// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under a user process are always in RAM and never
	 * swapped out, but theoretically they need to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

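	/* A transparent huge page maps this entire range: it is all resident. */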
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
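		/*
		 * The page table we were about to map was freed or replaced
		 * under us (e.g. by a racing THP collapse): ask the walker
		 * to retry this range.
		 */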
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
		file_permission(vma->vm_file, MAY_WRITE) == 0;
}
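
/*
 * Illustration (userspace sketch, not kernel code): an unprivileged caller
 * probing the residency of a world-readable file it cannot write, e.g.
 *
 *	int fd = open("/etc/hosts", O_RDONLY);
 *	char *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	mincore(p, len, vec);
 *
 * fails the check above (neither owner nor MAY_WRITE), so do_mincore()
 * below reports every page as resident instead of exposing the file's
 * actual page cache state.
 */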

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
	.walk_lock = PGWALK_RDLOCK,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
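 *
 * Each call handles at most one VMA: "end" is clamped to vma->vm_end, so
 * a range spanning more than one VMA returns early and the caller loops.
 * E.g. if only the first page of a three-page request lies in this VMA,
 * we return 1 and sys_mincore() advances start and vec by one page.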
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);

		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
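
/*
 * Example usage (userspace, illustrative sketch; error handling elided,
 * needs <sys/mman.h>, <unistd.h> and <stdio.h>):
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	unsigned char vec[4];
 *	char *buf = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	buf[0] = 1;	// fault in the first page only
 *	if (mincore(buf, 4 * psz, vec) == 0)
 *		for (int i = 0; i < 4; i++)
 *			printf("page %d: %sresident\n", i,
 *			       (vec[i] & 1) ? "" : "not ");
 *
 * Here vec[0] is expected to have bit 0 set, while the untouched pages
 * typically report as not resident.
 */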
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;
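	/* e.g. with 4 KiB pages, len = 5000 yields pages = 1 + 1 = 2 */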

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}