// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

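/*
 * Per-CPU preferred tag check fault mode. It defaults to async and can be
 * changed via the mte_tcf_preferred sysfs file (see below); it is consulted
 * by mte_update_sctlr_user() when resolving a task's requested modes.
 */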
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/*
 * The asynchronous and asymmetric MTE modes have the same behavior for
 * store operations. This flag is set when either of these modes is enabled.
 */
DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif

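/*
 * Initialise (zero) the tags for the pages covered by @pte if they have not
 * been tagged already; reached via the __set_ptes() path when a tagged user
 * mapping is installed (see the comment in memcmp_pages() below).
 */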
void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
	struct page *page = pte_page(pte);
	struct folio *folio = page_folio(page);
	unsigned long i;

	if (folio_test_hugetlb(folio)) {
		unsigned long nr = folio_nr_pages(folio);

		/* Hugetlb MTE flags are set for head page only */
		if (folio_try_hugetlb_mte_tagging(folio)) {
			for (i = 0; i < nr; i++, page++)
				mte_clear_page_tags(page_address(page));
			folio_set_hugetlb_mte_tagged(folio);
		}

		/* ensure the tags are visible before the PTE is set */
		smp_wmb();

		return;
	}

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (try_page_mte_tagging(page)) {
			mte_clear_page_tags(page_address(page));
			set_page_mte_tagged(page);
		}
	}

	/* ensure the tags are visible before the PTE is set */
	smp_wmb();
}

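/*
 * memcmp_pages() is used by KSM when deciding whether two pages can be
 * merged: identical data is not sufficient once MTE tags are in play, see
 * the comment below.
 */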
int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, __set_ptes() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (page_mte_tagged(page1) || page_mte_tagged(page2))
		return addr1 != addr2;

	return ret;
}

static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
	isb();

	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
	/*
	 * Make sure we enter this function when no PE has set
	 * async mode previously.
	 */
	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
		  "MTE async mode enabled system wide!");

	__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
	__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);

	/*
	 * MTE async mode is set system wide by the first PE that
	 * executes this function.
	 *
	 * Note: If in future KASAN acquires a runtime switching
	 * mode in between sync and async, this strategy needs
	 * to be reviewed.
	 */
	if (!system_uses_mte_async_or_asymm_mode())
		static_branch_enable(&mte_async_or_asymm_mode);
}

void mte_enable_kernel_asymm(void)
{
	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
		__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);

		/*
		 * MTE asymm mode behaves as async mode for store
		 * operations. The mode is set system wide by the
		 * first PE that executes this function.
		 *
		 * Note: If in future KASAN acquires a runtime switching
		 * mode in between sync and async, this strategy needs
		 * to be reviewed.
		 */
		if (!system_uses_mte_async_or_asymm_mode())
			static_branch_enable(&mte_async_or_asymm_mode);
	} else {
		/*
		 * If the CPU does not support MTE asymmetric mode the
		 * kernel falls back on synchronous mode which is the
		 * default for kasan=on.
		 */
		mte_enable_kernel_sync();
	}
}
#endif

#ifdef CONFIG_KASAN_HW_TAGS
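/*
 * Check TFSR_EL1 for an asynchronous tag check fault taken at EL1 and, if
 * one is pending, clear it and report it to KASAN.
 */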
void mte_check_tfsr_el1(void)
{
	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
		/*
		 * Note: isb() is not required after this direct write
		 * because there is no indirect read subsequent to it
		 * (per ARM DDI 0487F.c table D13-1).
		 */
		write_sysreg_s(0, SYS_TFSR_EL1);

		kasan_report_async();
	}
}
#endif

/*
 * This is where we actually resolve the system and process MTE mode
 * configuration into an actual value in SCTLR_EL1 that affects
 * userspace.
 */
static void mte_update_sctlr_user(struct task_struct *task)
{
	/*
	 * This must be called with preemption disabled and can only be called
	 * on the current or next task since the CPU must match where the thread
	 * is going to run. The caller is responsible for calling
	 * update_sctlr_el1() later in the same preemption disabled block.
	 */
	unsigned long sctlr = task->thread.sctlr_user;
	unsigned long mte_ctrl = task->thread.mte_ctrl;
	unsigned long pref, resolved_mte_tcf;

	pref = __this_cpu_read(mte_tcf_preferred);
	/*
	 * If there is no overlap between the system preferred and
	 * program requested values go with what was requested.
	 */
	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
	sctlr &= ~SCTLR_EL1_TCF0_MASK;
	/*
	 * Pick an actual setting. The order in which we check for
	 * set bits and map into register values determines our
	 * default order.
	 */
	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
	else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
	task->thread.sctlr_user = sctlr;
}

static void mte_update_gcr_excl(struct task_struct *task)
{
	/*
	 * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
	 * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
	 */
	if (kasan_hw_tags_enabled())
		return;

	write_sysreg_s(
		((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
		 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
		SYS_GCR_EL1);
}

#ifdef CONFIG_KASAN_HW_TAGS
/* Only called from assembly, silence sparse */
void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
				 __le32 *updptr, int nr_inst);

void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
				 __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (kasan_hw_tags_enabled())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
#endif

void mte_thread_init_user(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking and reset tag generation mask */
	set_mte_ctrl(current, 0);
}

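/*
 * Context switch hook: install the next task's user MTE configuration and
 * report any asynchronous tag check fault recorded at EL1.
 */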
void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	mte_update_sctlr_user(next);
	mte_update_gcr_excl(next);

	/* TCO may not have been disabled on exception entry for the current task. */
	mte_disable_tco_entry(next);

	/*
	 * Check if an async tag exception occurred at EL1.
	 *
	 * Note: On the context switch path we rely on the dsb() present
	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
	 * are synchronized before this point.
	 */
	isb();
	mte_check_tfsr_el1();
}

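/*
 * Per-CPU MTE initialisation: set up the Normal Tagged memory type in
 * MAIR_EL1, the kernel GCR/RGSR state and clear any stale tag check faults.
 * Also run on resume via mte_suspend_exit().
 */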
void mte_cpu_setup(void)
{
	u64 rgsr;

	/*
	 * CnP must be enabled only after the MAIR_EL1 register has been set
	 * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
	 * lead to the wrong memory type being used for a brief window during
	 * CPU power-up.
	 *
	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
	 * make sure that is the case.
	 */
	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);

	/* Normal Tagged memory type at the corresponding MAIR index */
	sysreg_clear_set(mair_el1,
			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
				      MT_NORMAL_TAGGED));

	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
	       SYS_RGSR_EL1_SEED_SHIFT;
	if (rgsr == 0)
		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
	write_sysreg_s(rgsr, SYS_RGSR_EL1);

	/* clear any pending tag check faults in TFSR*_EL1 */
	write_sysreg_s(0, SYS_TFSR_EL1);
	write_sysreg_s(0, SYS_TFSRE0_EL1);

	local_flush_tlb_all();
}

void mte_suspend_enter(void)
{
	if (!system_supports_mte())
		return;

	/*
	 * The barriers are required to guarantee that the indirect writes
	 * to TFSR_EL1 are synchronized before we report the state.
	 */
	dsb(nsh);
	isb();

	/* Report SYS_TFSR_EL1 before suspend entry */
	mte_check_tfsr_el1();
}

void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	mte_cpu_setup();
}

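/*
 * Set the tag check fault mode and the tag generation exclude mask for
 * @task from a PR_MTE_* argument (PR_SET_TAGGED_ADDR_CTRL prctl()).
 */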
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
			SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;

	if (!system_supports_mte())
		return 0;

	if (arg & PR_MTE_TCF_ASYNC)
		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
	if (arg & PR_MTE_TCF_SYNC)
		mte_ctrl |= MTE_CTRL_TCF_SYNC;

	/*
	 * If the system supports it and both sync and async modes are
	 * specified then implicitly enable asymmetric mode.
	 * Userspace could see a mix of both sync and async anyway due
	 * to differing or changing defaults on CPUs.
	 */
	if (cpus_have_cap(ARM64_MTE_ASYMM) &&
	    (arg & PR_MTE_TCF_ASYNC) &&
	    (arg & PR_MTE_TCF_SYNC))
		mte_ctrl |= MTE_CTRL_TCF_ASYMM;

	task->thread.mte_ctrl = mte_ctrl;
	if (task == current) {
		preempt_disable();
		mte_update_sctlr_user(task);
		mte_update_gcr_excl(task);
		update_sctlr_el1(task->thread.sctlr_user);
		preempt_enable();
	}

	return 0;
}

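/*
 * Return the current PR_MTE_* settings for @task, mirroring set_mte_ctrl()
 * above (PR_GET_TAGGED_ADDR_CTRL prctl()).
 */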
long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 mte_ctrl = task->thread.mte_ctrl;
	u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
		   SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;
	if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
		ret |= PR_MTE_TCF_ASYNC;
	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
		ret |= PR_MTE_TCF_SYNC;

	return ret;
}

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int err = 0;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		struct vm_area_struct *vma;
		unsigned long tags, offset;
		void *maddr;
		struct page *page = get_user_page_vma_remote(mm, addr,
							     gup_flags, &vma);
		struct folio *folio;

		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			err = -EOPNOTSUPP;
			put_page(page);
			break;
		}

		folio = page_folio(page);
		if (folio_test_hugetlb(folio))
			WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
		else
			WARN_ON_ONCE(!page_mte_tagged(page));

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (err)
			return -EIO;
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}

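/*
 * ptrace interface for the PTRACE_PEEKMTETAGS/PTRACE_POKEMTETAGS requests:
 * copy the tags for the tracee's range at @addr to/from the tracer's iovec
 * described by @data.
 */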
int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}

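/*
 * sysfs interface for the per-CPU preferred tag check fault mode,
 * presumably exposed as /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred
 * given the device_create_file() call on the CPU device below.
 */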
static ssize_t mte_tcf_preferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	switch (per_cpu(mte_tcf_preferred, dev->id)) {
	case MTE_CTRL_TCF_ASYNC:
		return sysfs_emit(buf, "async\n");
	case MTE_CTRL_TCF_SYNC:
		return sysfs_emit(buf, "sync\n");
	case MTE_CTRL_TCF_ASYMM:
		return sysfs_emit(buf, "asymm\n");
	default:
		return sysfs_emit(buf, "???\n");
	}
}

static ssize_t mte_tcf_preferred_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u64 tcf;

	if (sysfs_streq(buf, "async"))
		tcf = MTE_CTRL_TCF_ASYNC;
	else if (sysfs_streq(buf, "sync"))
		tcf = MTE_CTRL_TCF_SYNC;
	else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
		tcf = MTE_CTRL_TCF_ASYMM;
	else
		return -EINVAL;

	device_lock(dev);
	per_cpu(mte_tcf_preferred, dev->id) = tcf;
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_RW(mte_tcf_preferred);

static int register_mte_tcf_preferred_sysctl(void)
{
	unsigned int cpu;

	if (!system_supports_mte())
		return 0;

	for_each_possible_cpu(cpu) {
		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
		device_create_file(get_cpu_device(cpu),
				   &dev_attr_mte_tcf_preferred);
	}

	return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
size_t mte_probe_user_range(const char __user *uaddr, size_t size)
{
	const char __user *end = uaddr + size;
	char val;

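	/* Probe the leading, potentially unaligned, granule first. */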
	__raw_get_user(val, uaddr, efault);

	uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
	while (uaddr < end) {
		/*
		 * A read is sufficient for mte, the caller should have probed
		 * for the pte write permission if required.
		 */
		__raw_get_user(val, uaddr, efault);
		uaddr += MTE_GRANULE_SIZE;
	}
	(void)val;

	return 0;

efault:
	return end - uaddr;
}