// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/string.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);

#ifdef CONFIG_KASAN_HW_TAGS
/*
 * The asynchronous and asymmetric MTE modes have the same behavior for
 * store operations. This flag is set when either of these modes is enabled.
 */
DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif

void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
	struct page *page = pte_page(pte);
	unsigned int i;

	/* if PG_mte_tagged is set, tags have already been initialised */
	for (i = 0; i < nr_pages; i++, page++) {
		if (try_page_mte_tagging(page)) {
			mte_clear_page_tags(page_address(page));
			set_page_mte_tagged(page);
		}
	}

	/* ensure the tags are visible before the PTE is set */
	smp_wmb();
}

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = page_address(page1);
	addr2 = page_address(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);

	if (!system_supports_mte() || ret)
		return ret;

	/*
	 * If the page content is identical but at least one of the pages is
	 * tagged, return non-zero to avoid KSM merging. If only one of the
	 * pages is tagged, set_pte_at() may zero or change the tags of the
	 * other page via mte_sync_tags().
	 */
	if (page_mte_tagged(page1) || page_mte_tagged(page2))
		return addr1 != addr2;

	return ret;
}

static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
	/* Enable MTE Sync Mode for EL1. */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
	isb();

	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_enable_kernel_sync(void)
{
	/*
	 * Make sure we enter this function when no PE has set
	 * async mode previously.
	 */
	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
			"MTE async mode enabled system wide!");

	__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}

void mte_enable_kernel_async(void)
{
	__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);

	/*
	 * MTE async mode is set system wide by the first PE that
	 * executes this function.
	 *
	 * Note: If in future KASAN acquires a runtime switching
	 * mode in between sync and async, this strategy needs
	 * to be reviewed.
	 */
	if (!system_uses_mte_async_or_asymm_mode())
		static_branch_enable(&mte_async_or_asymm_mode);
}

void mte_enable_kernel_asymm(void)
{
	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
		__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);

		/*
		 * MTE asymm mode behaves as async mode for store
		 * operations. The mode is set system wide by the
		 * first PE that executes this function.
		 *
		 * Note: If in future KASAN acquires a runtime switching
		 * mode in between sync and async, this strategy needs
		 * to be reviewed.
		 */
		if (!system_uses_mte_async_or_asymm_mode())
			static_branch_enable(&mte_async_or_asymm_mode);
	} else {
		/*
		 * If the CPU does not support MTE asymmetric mode the
		 * kernel falls back on synchronous mode which is the
		 * default for kasan=on.
		 */
		mte_enable_kernel_sync();
	}
}
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
		/*
		 * Note: isb() is not required after this direct write
		 * because there is no indirect read subsequent to it
		 * (per ARM DDI 0487F.c table D13-1).
		 */
		write_sysreg_s(0, SYS_TFSR_EL1);

		kasan_report_async();
	}
}
#endif

/*
 * This is where we actually resolve the system and process MTE mode
 * configuration into an actual value in SCTLR_EL1 that affects
 * userspace.
 */
static void mte_update_sctlr_user(struct task_struct *task)
{
	/*
	 * This must be called with preemption disabled and can only be called
	 * on the current or next task since the CPU must match where the thread
	 * is going to run. The caller is responsible for calling
	 * update_sctlr_el1() later in the same preemption disabled block.
	 */
	unsigned long sctlr = task->thread.sctlr_user;
	unsigned long mte_ctrl = task->thread.mte_ctrl;
	unsigned long pref, resolved_mte_tcf;

	pref = __this_cpu_read(mte_tcf_preferred);
	/*
	 * If there is no overlap between the system preferred and
	 * program requested values, go with what was requested.
	 */
	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
	sctlr &= ~SCTLR_EL1_TCF0_MASK;
	/*
	 * Pick an actual setting. The order in which we check for
	 * set bits and map into register values determines our
	 * default order.
	 */
	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
	else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
	task->thread.sctlr_user = sctlr;
}
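
/*
 * Worked example of the resolution above (added for illustration, derived
 * from this function): a task that requested both modes has
 * MTE_CTRL_TCF_SYNC | MTE_CTRL_TCF_ASYNC in mte_ctrl. If this CPU's
 * mte_tcf_preferred is MTE_CTRL_TCF_ASYNC, the two sets overlap, so
 * resolved_mte_tcf becomes the preferred value and TCF0 is programmed for
 * asynchronous checking. If the preferred mode is not among the requested
 * bits, the requested set is kept and the if/else ladder picks asymmetric,
 * then asynchronous, then synchronous.
 */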

static void mte_update_gcr_excl(struct task_struct *task)
{
	/*
	 * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
	 * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
	 */
	if (kasan_hw_tags_enabled())
		return;

	write_sysreg_s(
		((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
		 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
		SYS_GCR_EL1);
}

#ifdef CONFIG_KASAN_HW_TAGS
/* Only called from assembly, silence sparse */
void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
				 __le32 *updptr, int nr_inst);

void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
				 __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (kasan_hw_tags_enabled())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
#endif

void mte_thread_init_user(void)
{
	if (!system_supports_mte())
		return;

	/* clear any pending asynchronous tag fault */
	dsb(ish);
	write_sysreg_s(0, SYS_TFSRE0_EL1);
	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
	/* disable tag checking and reset tag generation mask */
	set_mte_ctrl(current, 0);
}

void mte_thread_switch(struct task_struct *next)
{
	if (!system_supports_mte())
		return;

	mte_update_sctlr_user(next);
	mte_update_gcr_excl(next);

	/* TCO may not have been disabled on exception entry for the current task. */
	mte_disable_tco_entry(next);

	/*
	 * Check if an async tag exception occurred at EL1.
	 *
	 * Note: On the context switch path we rely on the dsb() present
	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
	 * are synchronized before this point.
	 */
	isb();
	mte_check_tfsr_el1();
}

void mte_cpu_setup(void)
{
	u64 rgsr;

	/*
	 * CnP must be enabled only after the MAIR_EL1 register has been set
	 * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
	 * lead to the wrong memory type being used for a brief window during
	 * CPU power-up.
	 *
	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
	 * make sure that is the case.
	 */
	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);

	/* Normal Tagged memory type at the corresponding MAIR index */
	sysreg_clear_set(mair_el1,
			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
				      MT_NORMAL_TAGGED));

	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);

	/*
	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
	 * RGSR_EL1.SEED must be non-zero for IRG to produce
	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
	 * must initialize it.
	 */
	rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
	       SYS_RGSR_EL1_SEED_SHIFT;
	if (rgsr == 0)
		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
	write_sysreg_s(rgsr, SYS_RGSR_EL1);

	/* clear any pending tag check faults in TFSR*_EL1 */
	write_sysreg_s(0, SYS_TFSR_EL1);
	write_sysreg_s(0, SYS_TFSRE0_EL1);

	local_flush_tlb_all();
}

void mte_suspend_enter(void)
{
	if (!system_supports_mte())
		return;

	/*
	 * The barriers are required to guarantee that the indirect writes
	 * to TFSR_EL1 are synchronized before we report the state.
	 */
	dsb(nsh);
	isb();

	/* Report SYS_TFSR_EL1 before suspend entry */
	mte_check_tfsr_el1();
}

void mte_suspend_exit(void)
{
	if (!system_supports_mte())
		return;

	mte_cpu_setup();
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
			SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;

	if (!system_supports_mte())
		return 0;

	if (arg & PR_MTE_TCF_ASYNC)
		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
	if (arg & PR_MTE_TCF_SYNC)
		mte_ctrl |= MTE_CTRL_TCF_SYNC;

	/*
	 * If the system supports it and both sync and async modes are
	 * specified then implicitly enable asymmetric mode.
	 * Userspace could see a mix of both sync and async anyway due
	 * to differing or changing defaults on CPUs.
	 */
	if (cpus_have_cap(ARM64_MTE_ASYMM) &&
	    (arg & PR_MTE_TCF_ASYNC) &&
	    (arg & PR_MTE_TCF_SYNC))
		mte_ctrl |= MTE_CTRL_TCF_ASYMM;

	task->thread.mte_ctrl = mte_ctrl;
	if (task == current) {
		preempt_disable();
		mte_update_sctlr_user(task);
		mte_update_gcr_excl(task);
		update_sctlr_el1(task->thread.sctlr_user);
		preempt_enable();
	}

	return 0;
}
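
/*
 * Userspace example (illustrative only; the constants come from the
 * documented PR_SET_TAGGED_ADDR_CTRL prctl() ABI, not from this file):
 * a task enabling synchronous tag checking while allowing IRG to generate
 * only tags 0-3 would issue
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *	      (0xfUL << PR_MTE_TAG_SHIFT), 0, 0, 0);
 *
 * The include mask 0xf is inverted above into the GCR_EL1 exclude mask held
 * in mte_ctrl, so the task's GCR_EL1.Exclude ends up as 0xfff0 (tags 4-15
 * excluded), whether written by mte_update_gcr_excl() or by
 * mte_set_user_gcr() on kernel exit.
 */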

long get_mte_ctrl(struct task_struct *task)
{
	unsigned long ret;
	u64 mte_ctrl = task->thread.mte_ctrl;
	u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
		   SYS_GCR_EL1_EXCL_MASK;

	if (!system_supports_mte())
		return 0;

	ret = incl << PR_MTE_TAG_SHIFT;
	if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
		ret |= PR_MTE_TCF_ASYNC;
	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
		ret |= PR_MTE_TCF_SYNC;

	return ret;
}

/*
 * Access MTE tags in another process' address space as given in mm. Update
 * the number of tags copied. Return 0 if any tags copied, error otherwise.
 * Inspired by __access_remote_vm().
 */
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
				struct iovec *kiov, unsigned int gup_flags)
{
	void __user *buf = kiov->iov_base;
	size_t len = kiov->iov_len;
	int err = 0;
	int write = gup_flags & FOLL_WRITE;

	if (!access_ok(buf, len))
		return -EFAULT;

	if (mmap_read_lock_killable(mm))
		return -EIO;

	while (len) {
		struct vm_area_struct *vma;
		unsigned long tags, offset;
		void *maddr;
		struct page *page = get_user_page_vma_remote(mm, addr,
							     gup_flags, &vma);

		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		/*
		 * Only copy tags if the page has been mapped as PROT_MTE
		 * (PG_mte_tagged set). Otherwise the tags are not valid and
		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
		 * would cause the existing tags to be cleared if the page
		 * was never mapped with PROT_MTE.
		 */
		if (!(vma->vm_flags & VM_MTE)) {
			err = -EOPNOTSUPP;
			put_page(page);
			break;
		}
		WARN_ON_ONCE(!page_mte_tagged(page));

		/* limit access to the end of the page */
		offset = offset_in_page(addr);
		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);

		maddr = page_address(page);
		if (write) {
			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
			set_page_dirty_lock(page);
		} else {
			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
		}
		put_page(page);

		/* error accessing the tracer's buffer */
		if (!tags)
			break;

		len -= tags;
		buf += tags;
		addr += tags * MTE_GRANULE_SIZE;
	}
	mmap_read_unlock(mm);

	/* return an error if no tags copied */
	kiov->iov_len = buf - kiov->iov_base;
	if (!kiov->iov_len) {
		/* check for error accessing the tracee's address space */
		if (err)
			return -EIO;
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * Copy MTE tags in another process' address space at 'addr' to/from tracer's
 * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
 */
static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
			      struct iovec *kiov, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return -EPERM;

	if (!tsk->ptrace || (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return -EPERM;
	}

	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
	mmput(mm);

	return ret;
}

int mte_ptrace_copy_tags(struct task_struct *child, long request,
			 unsigned long addr, unsigned long data)
{
	int ret;
	struct iovec kiov;
	struct iovec __user *uiov = (void __user *)data;
	unsigned int gup_flags = FOLL_FORCE;

	if (!system_supports_mte())
		return -EIO;

	if (get_user(kiov.iov_base, &uiov->iov_base) ||
	    get_user(kiov.iov_len, &uiov->iov_len))
		return -EFAULT;

	if (request == PTRACE_POKEMTETAGS)
		gup_flags |= FOLL_WRITE;

	/* align addr to the MTE tag granule */
	addr &= MTE_GRANULE_MASK;

	ret = access_remote_tags(child, addr, &kiov, gup_flags);
	if (!ret)
		ret = put_user(kiov.iov_len, &uiov->iov_len);

	return ret;
}
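
/*
 * Tracer-side sketch (illustrative; based on the documented
 * PTRACE_PEEKMTETAGS interface, not code in this file): reading the tags of
 * a tagged range in the tracee, one tag byte per MTE granule:
 *
 *	struct iovec iov = { .iov_base = tag_buf, .iov_len = num_tags };
 *	ptrace(PTRACE_PEEKMTETAGS, pid, addr, &iov);
 *
 * On return iov.iov_len holds the number of tags actually copied, which may
 * be smaller than requested; the tracer is expected to loop until all tags
 * have been transferred.
 */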

static ssize_t mte_tcf_preferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	switch (per_cpu(mte_tcf_preferred, dev->id)) {
	case MTE_CTRL_TCF_ASYNC:
		return sysfs_emit(buf, "async\n");
	case MTE_CTRL_TCF_SYNC:
		return sysfs_emit(buf, "sync\n");
	case MTE_CTRL_TCF_ASYMM:
		return sysfs_emit(buf, "asymm\n");
	default:
		return sysfs_emit(buf, "???\n");
	}
}

static ssize_t mte_tcf_preferred_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	u64 tcf;

	if (sysfs_streq(buf, "async"))
		tcf = MTE_CTRL_TCF_ASYNC;
	else if (sysfs_streq(buf, "sync"))
		tcf = MTE_CTRL_TCF_SYNC;
	else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
		tcf = MTE_CTRL_TCF_ASYMM;
	else
		return -EINVAL;

	device_lock(dev);
	per_cpu(mte_tcf_preferred, dev->id) = tcf;
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_RW(mte_tcf_preferred);
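
/*
 * Example (illustrative): an administrator who prefers asynchronous checking
 * on CPU 0 for tasks that requested more than one mode can write
 *
 *	echo async > /sys/devices/system/cpu/cpu0/mte_tcf_preferred
 *
 * The per-CPU value is consulted by mte_update_sctlr_user() on context
 * switch and prctl(), and only takes effect when it overlaps with the
 * task's requested modes.
 */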

static int register_mte_tcf_preferred_sysctl(void)
{
	unsigned int cpu;

	if (!system_supports_mte())
		return 0;

	for_each_possible_cpu(cpu) {
		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
		device_create_file(get_cpu_device(cpu),
				   &dev_attr_mte_tcf_preferred);
	}

	return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
size_t mte_probe_user_range(const char __user *uaddr, size_t size)
{
	const char __user *end = uaddr + size;
	int err = 0;
	char val;

	__raw_get_user(val, uaddr, err);
	if (err)
		return size;

	uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
	while (uaddr < end) {
		/*
		 * A read is sufficient for MTE; the caller should have probed
		 * for the PTE write permission if required.
		 */
		__raw_get_user(val, uaddr, err);
		if (err)
			return end - uaddr;
		uaddr += MTE_GRANULE_SIZE;
	}
	(void)val;

	return 0;
}
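
/*
 * Worked example of the return value (added for illustration, derived from
 * the loop above): the initial byte is probed first, then each 16-byte
 * aligned granule up to 'end'. If the probe at some aligned 'uaddr' faults,
 * the function reports 'end - uaddr' bytes as not probed, i.e. everything
 * from that granule to the end of the requested range.
 */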