/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..31] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
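
/*
 * Worked example (illustrative; the values assume a 4 KiB PAGE_SIZE, i.e.
 * PAGE_SHIFT == 12, which not every architecture uses): the default
 * IOREMAP_MAX_ORDER of (7 + PAGE_SHIFT) == 19 caps ioremap() alignment at
 * 2^19 bytes == 512 KiB, i.e. the 128 pages the comment above refers to
 * (2^7 pages * 4 KiB per page).
 */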

struct vm_struct {
	struct vm_struct	*next;		/* singly-linked vmlist, used during early boot */
	void			*addr;		/* base virtual address of the area */
	unsigned long		size;		/* size in bytes, including any guard page */
	unsigned long		flags;		/* VM_* flags above */
	struct page		**pages;	/* backing pages (NULL for ioremap areas) */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;	/* allocation order of each entry in @pages */
#endif
	unsigned int		nr_pages;	/* number of entries in @pages */
	phys_addr_t		phys_addr;	/* physical address, for ioremap mappings */
	const void		*caller;	/* allocation call site, for diagnostics */
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can share storage because a
	 * vmap_area object is only ever in one of two trees at a time:
	 * 1) the "free" tree (root is free_vmap_area_root)
	 * 2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;		/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif

/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
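
/*
 * Minimal usage sketch for the vm_map_ram() fast path (illustrative only;
 * "pages", "nr" and the error handling are assumptions of this example):
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	...access the transient mapping at va...
 *	vm_unmap_ram(va, nr);	// count must match the vm_map_ram() call
 */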

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
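
/*
 * Usage sketch for the common vmalloc()/vfree() pairing (illustrative only;
 * the 1 MiB size is an arbitrary example value):
 *
 *	void *buf = vmalloc(SZ_1M);	// virtually contiguous, may sleep
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);			// must not be called from NMI context
 */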

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
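
/*
 * The array variants multiply n * size with overflow checking (hence the
 * linux/overflow.h include above). A hedged sketch, assuming a
 * caller-defined struct foo and element count n:
 *
 *	struct foo *tbl = vcalloc(n, sizeof(*tbl));	// zeroed, product checked
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */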

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
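
/*
 * Sketch of mapping an existing page array into a contiguous kernel
 * virtual range (illustrative; "pages" and "nr" are assumed to be set up
 * by the caller):
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);	// unmaps only; the pages themselves are not freed
 */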

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);
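
/*
 * Sketch of exposing a vmalloc_user() buffer to userspace from a driver's
 * ->mmap() handler (illustrative; "buf" is assumed to have been allocated
 * with vmalloc_user(), which sets VM_USERMAP, and "foo_mmap" is a
 * hypothetical handler name):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */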

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); the
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
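
/*
 * Note the guard page arithmetic above: unless VM_NO_GUARD is set, a
 * one-page allocation records area->size == 2 * PAGE_SIZE (payload plus
 * guard page), so get_vm_area_size() returns the usable PAGE_SIZE.
 */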

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					    unsigned long flags,
					    const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					      unsigned long flags,
					      unsigned long start, unsigned long end,
					      const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: an architecture may indicate
	 * that larger sizes are available yet decide not to use them, and
	 * nothing prevents that. This only reflects the size of the physical
	 * pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);

static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
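
/*
 * A hedged usage sketch for VM_FLUSH_RESET_PERMS: callers that change the
 * permissions of a vmalloc'ed region (e.g. to make it executable) mark it
 * first, so vfree() knows to reset the direct map and flush the TLB.
 * "text" and "npages" below are assumptions of this example:
 *
 *	set_vm_flush_reset_perms(text);
 *	set_memory_ro((unsigned long)text, npages);
 *	set_memory_x((unsigned long)text, npages);
 *	...
 *	vfree(text);	// permissions are reset and TLB flushed on free
 */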

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */