1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2020 Intel |
4 | * |
5 | * Based on drivers/base/devres.c |
6 | */ |
7 | |
8 | #include <drm/drm_managed.h> |
9 | |
10 | #include <linux/list.h> |
11 | #include <linux/mutex.h> |
12 | #include <linux/slab.h> |
13 | #include <linux/spinlock.h> |
14 | |
15 | #include <drm/drm_device.h> |
16 | #include <drm/drm_print.h> |
17 | |
18 | #include "drm_internal.h" |
19 | |
20 | /** |
21 | * DOC: managed resources |
22 | * |
 * Inspired by struct &device managed resources, but tied to the lifetime of
 * struct &drm_device, which can outlive the underlying physical device,
 * usually when userspace still holds open files or other handles that
 * reference its resources.
26 | * |
27 | * Release actions can be added with drmm_add_action(), memory allocations can |
28 | * be done directly with drmm_kmalloc() and the related functions. Everything |
29 | * will be released on the final drm_dev_put() in reverse order of how the |
30 | * release actions have been added and memory has been allocated since driver |
31 | * loading started with devm_drm_dev_alloc(). |
32 | * |
 * Note that release actions and managed memory can also be added and removed
 * during the lifetime of the driver; all of these functions are fully
 * concurrency-safe. It is nevertheless recommended to use managed resources
 * only for resources that change rarely, if ever, during the lifetime of the
 * &drm_device instance.
37 | */ |
38 | |
39 | struct drmres_node { |
40 | struct list_head entry; |
41 | drmres_release_t release; |
42 | const char *name; |
43 | size_t size; |
44 | }; |
45 | |
46 | struct drmres { |
47 | struct drmres_node node; |
48 | /* |
49 | * Some archs want to perform DMA into kmalloc caches |
50 | * and need a guaranteed alignment larger than |
51 | * the alignment of a 64-bit integer. |
52 | * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same |
53 | * alignment for struct drmres when allocated by kmalloc(). |
54 | */ |
55 | u8 __aligned(ARCH_DMA_MINALIGN) data[]; |
56 | }; |
57 | |
58 | static void free_dr(struct drmres *dr) |
59 | { |
	kfree_const(dr->node.name);
	kfree(dr);
62 | } |
63 | |
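/*
 * Tear down all managed resources of @dev; called from drm_dev_release() once
 * the last reference is gone, in the final drm_dev_put().
 */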
64 | void drm_managed_release(struct drm_device *dev) |
65 | { |
66 | struct drmres *dr, *tmp; |
67 | |
68 | drm_dbg_drmres(dev, "drmres release begin\n" ); |
69 | list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) { |
70 | drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n" , |
71 | dr, dr->node.name, dr->node.size); |
72 | |
73 | if (dr->node.release) |
74 | dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL); |
75 | |
76 | list_del(entry: &dr->node.entry); |
77 | free_dr(dr); |
78 | } |
79 | drm_dbg_drmres(dev, "drmres release end\n" ); |
80 | } |
81 | |
82 | /* |
83 | * Always inline so that kmalloc_track_caller tracks the actual interesting |
84 | * caller outside of drm_managed.c. |
85 | */ |
static __always_inline struct drmres *alloc_dr(drmres_release_t release,
					       size_t size, gfp_t gfp, int nid)
88 | { |
89 | size_t tot_size; |
90 | struct drmres *dr; |
91 | |
92 | /* We must catch any near-SIZE_MAX cases that could overflow. */ |
93 | if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size))) |
94 | return NULL; |
95 | |
96 | dr = kmalloc_node_track_caller(tot_size, gfp, nid); |
97 | if (unlikely(!dr)) |
98 | return NULL; |
99 | |
100 | memset(dr, 0, offsetof(struct drmres, data)); |
101 | |
	INIT_LIST_HEAD(&dr->node.entry);
103 | dr->node.release = release; |
104 | dr->node.size = size; |
105 | |
106 | return dr; |
107 | } |
108 | |
109 | static void del_dr(struct drm_device *dev, struct drmres *dr) |
110 | { |
	list_del_init(&dr->node.entry);

	drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
115 | } |
116 | |
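/*
 * New entries are prepended, so drm_managed_release() runs the release
 * actions in reverse order of how they were added.
 */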
117 | static void add_dr(struct drm_device *dev, struct drmres *dr) |
118 | { |
119 | unsigned long flags; |
120 | |
121 | spin_lock_irqsave(&dev->managed.lock, flags); |
	list_add(&dr->node.entry, &dev->managed.resources);
	spin_unlock_irqrestore(&dev->managed.lock, flags);

	drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
		       dr, dr->node.name, (unsigned long) dr->node.size);
127 | } |
128 | |
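/**
 * drmm_add_final_kfree - add release action for the final kfree()
 * @dev: DRM device
 * @container: pointer to the kmalloc allocation containing @dev
 *
 * Since the allocation containing struct &drm_device must exist before the
 * managed infrastructure can be used, that outermost allocation cannot itself
 * come from drmm_kmalloc(). To side-step this chicken-and-egg problem, the
 * pointer for the final kfree() is recorded here; it is released in the final
 * drm_dev_put() for @dev, after all other release actions have run.
 */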
129 | void drmm_add_final_kfree(struct drm_device *dev, void *container) |
130 | { |
131 | WARN_ON(dev->managed.final_kfree); |
132 | WARN_ON(dev < (struct drm_device *) container); |
133 | WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container))); |
134 | dev->managed.final_kfree = container; |
135 | } |
136 | |
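/*
 * Backing implementation of the drmm_add_action() macro. A non-NULL @data
 * pointer is stashed in the node's payload so the release action receives it
 * back as its second argument.
 */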
137 | int __drmm_add_action(struct drm_device *dev, |
138 | drmres_release_t action, |
139 | void *data, const char *name) |
140 | { |
141 | struct drmres *dr; |
142 | void **void_ptr; |
143 | |
	dr = alloc_dr(action, data ? sizeof(void*) : 0,
		      GFP_KERNEL | __GFP_ZERO,
		      dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to add action %s for %p\n",
			       name, data);
150 | return -ENOMEM; |
151 | } |
152 | |
	dr->node.name = kstrdup_const(name, GFP_KERNEL);
154 | if (data) { |
155 | void_ptr = (void **)&dr->data; |
156 | *void_ptr = data; |
157 | } |
158 | |
159 | add_dr(dev, dr); |
160 | |
161 | return 0; |
162 | } |
163 | EXPORT_SYMBOL(__drmm_add_action); |
164 | |
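/*
 * Like __drmm_add_action(), but if registering the release action fails, the
 * action is invoked immediately on @data, so callers need no separate cleanup
 * path.
 */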
165 | int __drmm_add_action_or_reset(struct drm_device *dev, |
166 | drmres_release_t action, |
167 | void *data, const char *name) |
168 | { |
169 | int ret; |
170 | |
171 | ret = __drmm_add_action(dev, action, data, name); |
172 | if (ret) |
173 | action(dev, data); |
174 | |
175 | return ret; |
176 | } |
177 | EXPORT_SYMBOL(__drmm_add_action_or_reset); |
178 | |
179 | /** |
180 | * drmm_kmalloc - &drm_device managed kmalloc() |
181 | * @dev: DRM device |
182 | * @size: size of the memory allocation |
183 | * @gfp: GFP allocation flags |
184 | * |
185 | * This is a &drm_device managed version of kmalloc(). The allocated memory is |
186 | * automatically freed on the final drm_dev_put(). Memory can also be freed |
187 | * before the final drm_dev_put() by calling drmm_kfree(). |
188 | */ |
189 | void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) |
190 | { |
191 | struct drmres *dr; |
192 | |
	dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
	if (!dr) {
		drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
			       size, gfp);
197 | return NULL; |
198 | } |
	dr->node.name = kstrdup_const("kmalloc", gfp);
200 | |
201 | add_dr(dev, dr); |
202 | |
203 | return dr->data; |
204 | } |
205 | EXPORT_SYMBOL(drmm_kmalloc); |
206 | |
207 | /** |
208 | * drmm_kstrdup - &drm_device managed kstrdup() |
209 | * @dev: DRM device |
210 | * @s: 0-terminated string to be duplicated |
211 | * @gfp: GFP allocation flags |
212 | * |
213 | * This is a &drm_device managed version of kstrdup(). The allocated memory is |
214 | * automatically freed on the final drm_dev_put() and works exactly like a |
215 | * memory allocation obtained by drmm_kmalloc(). |
216 | */ |
217 | char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp) |
218 | { |
219 | size_t size; |
220 | char *buf; |
221 | |
222 | if (!s) |
223 | return NULL; |
224 | |
225 | size = strlen(s) + 1; |
226 | buf = drmm_kmalloc(dev, size, gfp); |
227 | if (buf) |
228 | memcpy(buf, s, size); |
229 | return buf; |
230 | } |
231 | EXPORT_SYMBOL_GPL(drmm_kstrdup); |
232 | |
233 | /** |
234 | * drmm_kfree - &drm_device managed kfree() |
235 | * @dev: DRM device |
236 | * @data: memory allocation to be freed |
237 | * |
238 | * This is a &drm_device managed version of kfree() which can be used to |
239 | * release memory allocated through drmm_kmalloc() or any of its related |
240 | * functions before the final drm_dev_put() of @dev. |
241 | */ |
242 | void drmm_kfree(struct drm_device *dev, void *data) |
243 | { |
244 | struct drmres *dr_match = NULL, *dr; |
245 | unsigned long flags; |
246 | |
247 | if (!data) |
248 | return; |
249 | |
250 | spin_lock_irqsave(&dev->managed.lock, flags); |
251 | list_for_each_entry(dr, &dev->managed.resources, node.entry) { |
252 | if (dr->data == data) { |
253 | dr_match = dr; |
			del_dr(dev, dr_match);
255 | break; |
256 | } |
257 | } |
	spin_unlock_irqrestore(&dev->managed.lock, flags);
259 | |
260 | if (WARN_ON(!dr_match)) |
261 | return; |
262 | |
	free_dr(dr_match);
264 | } |
265 | EXPORT_SYMBOL(drmm_kfree); |
266 | |
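/*
 * Release action paired with drmm_mutex_init() (see drm_managed.h): destroy
 * the mutex only when the managed resources of @dev are torn down, since it
 * may be used up until the last drm_dev_put().
 */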
267 | void __drmm_mutex_release(struct drm_device *dev, void *res) |
268 | { |
269 | struct mutex *lock = res; |
270 | |
271 | mutex_destroy(lock); |
272 | } |
273 | EXPORT_SYMBOL(__drmm_mutex_release); |
274 | |