1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2008 Advanced Micro Devices, Inc. |
4 | * |
5 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) "DMA-API: " fmt |
9 | |
10 | #include <linux/sched/task_stack.h> |
11 | #include <linux/scatterlist.h> |
12 | #include <linux/dma-map-ops.h> |
13 | #include <linux/sched/task.h> |
14 | #include <linux/stacktrace.h> |
15 | #include <linux/spinlock.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/debugfs.h> |
18 | #include <linux/uaccess.h> |
19 | #include <linux/export.h> |
20 | #include <linux/device.h> |
21 | #include <linux/types.h> |
22 | #include <linux/sched.h> |
23 | #include <linux/ctype.h> |
24 | #include <linux/list.h> |
25 | #include <linux/slab.h> |
26 | #include <asm/sections.h> |
27 | #include "debug.h" |
28 | |
29 | #define HASH_SIZE 16384ULL |
30 | #define HASH_FN_SHIFT 13 |
31 | #define HASH_FN_MASK (HASH_SIZE - 1) |
32 | |
33 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
34 | /* If the pool runs out, add this many new entries at once */ |
35 | #define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry)) |
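/*
 * Sizing note (illustrative): PREALLOC_DMA_DEBUG_ENTRIES is 1 << 16, so the
 * boot-time pool holds 65536 entries unless dma_debug_entries= overrides it.
 * DMA_DEBUG_DYNAMIC_ENTRIES depends on PAGE_SIZE and the (cacheline-aligned)
 * size of struct dma_debug_entry; with 4 KiB pages and a 128-byte entry,
 * for example, each dynamic refill adds 32 entries.
 */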
36 | |
37 | enum { |
38 | dma_debug_single, |
39 | dma_debug_sg, |
40 | dma_debug_coherent, |
41 | dma_debug_resource, |
42 | }; |
43 | |
44 | enum map_err_types { |
45 | MAP_ERR_CHECK_NOT_APPLICABLE, |
46 | MAP_ERR_NOT_CHECKED, |
47 | MAP_ERR_CHECKED, |
48 | }; |
49 | |
50 | #define DMA_DEBUG_STACKTRACE_ENTRIES 5 |
51 | |
52 | /** |
53 | * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping |
54 | * @list: node on pre-allocated free_entries list |
55 | * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent |
56 | * @dev_addr: dma address |
57 | * @size: length of the mapping |
58 | * @type: single, page, sg, coherent |
59 | * @direction: enum dma_data_direction |
60 | * @sg_call_ents: 'nents' from dma_map_sg |
61 | * @sg_mapped_ents: 'mapped_ents' from dma_map_sg |
62 | * @pfn: page frame of the start address |
63 | * @offset: offset of mapping relative to pfn |
64 | * @map_err_type: track whether dma_mapping_error() was checked |
65 | * @stack_len: number of backtrace entries in @stack_entries |
66 | * @stack_entries: stack of backtrace history |
67 | */ |
68 | struct dma_debug_entry { |
69 | struct list_head list; |
70 | struct device *dev; |
71 | u64 dev_addr; |
72 | u64 size; |
73 | int type; |
74 | int direction; |
75 | int sg_call_ents; |
76 | int sg_mapped_ents; |
77 | unsigned long pfn; |
78 | size_t offset; |
79 | enum map_err_types map_err_type; |
80 | #ifdef CONFIG_STACKTRACE |
81 | unsigned int stack_len; |
82 | unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; |
83 | #endif |
84 | } ____cacheline_aligned_in_smp; |
85 | |
86 | typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *); |
87 | |
88 | struct hash_bucket { |
89 | struct list_head list; |
90 | spinlock_t lock; |
91 | }; |
92 | |
93 | /* Hash list to save the allocated dma addresses */ |
94 | static struct hash_bucket dma_entry_hash[HASH_SIZE]; |
95 | /* List of pre-allocated dma_debug_entry's */ |
96 | static LIST_HEAD(free_entries); |
97 | /* Lock for the list above */ |
98 | static DEFINE_SPINLOCK(free_entries_lock); |
99 | |
100 | /* Global disable flag - will be set in case of an error */ |
101 | static bool global_disable __read_mostly; |
102 | |
103 | /* Early initialization disable flag, set at the end of dma_debug_init */ |
104 | static bool dma_debug_initialized __read_mostly; |
105 | |
106 | static inline bool dma_debug_disabled(void) |
107 | { |
108 | return global_disable || !dma_debug_initialized; |
109 | } |
110 | |
111 | /* Global error count */ |
112 | static u32 error_count; |
113 | |
114 | /* Global error show enable */ |
115 | static u32 show_all_errors __read_mostly; |
116 | /* Number of errors to show */ |
117 | static u32 show_num_errors = 1; |
118 | |
119 | static u32 num_free_entries; |
120 | static u32 min_free_entries; |
121 | static u32 nr_total_entries; |
122 | |
123 | /* number of preallocated entries requested by kernel cmdline */ |
124 | static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
125 | |
126 | /* per-driver filter related state */ |
127 | |
128 | #define NAME_MAX_LEN 64 |
129 | |
130 | static char current_driver_name[NAME_MAX_LEN] __read_mostly; |
131 | static struct device_driver *current_driver __read_mostly; |
132 | |
133 | static DEFINE_RWLOCK(driver_name_lock); |
134 | |
135 | static const char *const maperr2str[] = { |
136 | [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable", |
137 | [MAP_ERR_NOT_CHECKED] = "dma map error not checked", |
138 | [MAP_ERR_CHECKED] = "dma map error checked", |
139 | }; |
140 | |
141 | static const char *type2name[] = { |
142 | [dma_debug_single] = "single", |
143 | [dma_debug_sg] = "scatter-gather", |
144 | [dma_debug_coherent] = "coherent", |
145 | [dma_debug_resource] = "resource", |
146 | }; |
147 | |
148 | static const char *dir2name[] = { |
149 | [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL", |
150 | [DMA_TO_DEVICE] = "DMA_TO_DEVICE", |
151 | [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE", |
152 | [DMA_NONE] = "DMA_NONE", |
153 | }; |
154 | |
155 | /* |
156 | * The access to some variables in this macro is racy. We can't use atomic_t |
157 | here because all these variables are exported to debugfs. Some of them are |
158 | even writable. This is also the reason why a lock won't help much. But anyway, |
159 | * the races are no big deal. Here is why: |
160 | * |
161 | * error_count: the addition is racy, but the worst thing that can happen is |
162 | * that we don't count some errors |
163 | * show_num_errors: the subtraction is racy. Also no big deal because in |
164 | * worst case this will result in one warning more in the |
165 | * system log than the user configured. This variable is |
166 | * writeable via debugfs. |
167 | */ |
168 | static inline void dump_entry_trace(struct dma_debug_entry *entry) |
169 | { |
170 | #ifdef CONFIG_STACKTRACE |
171 | if (entry) { |
172 | pr_warn("Mapped at:\n"); |
173 | stack_trace_print(entry->stack_entries, entry->stack_len, 0); |
174 | } |
175 | #endif |
176 | } |
177 | |
178 | static bool driver_filter(struct device *dev) |
179 | { |
180 | struct device_driver *drv; |
181 | unsigned long flags; |
182 | bool ret; |
183 | |
184 | /* driver filter off */ |
185 | if (likely(!current_driver_name[0])) |
186 | return true; |
187 | |
188 | /* driver filter on and initialized */ |
189 | if (current_driver && dev && dev->driver == current_driver) |
190 | return true; |
191 | |
192 | /* driver filter on, but we can't filter on a NULL device... */ |
193 | if (!dev) |
194 | return false; |
195 | |
196 | if (current_driver || !current_driver_name[0]) |
197 | return false; |
198 | |
199 | /* driver filter on but not yet initialized */ |
200 | drv = dev->driver; |
201 | if (!drv) |
202 | return false; |
203 | |
204 | /* lock to protect against change of current_driver_name */ |
205 | read_lock_irqsave(&driver_name_lock, flags); |
206 | |
207 | ret = false; |
208 | if (drv->name && |
209 | strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { |
210 | current_driver = drv; |
211 | ret = true; |
212 | } |
213 | |
214 | read_unlock_irqrestore(&driver_name_lock, flags); |
215 | |
216 | return ret; |
217 | } |
218 | |
219 | #define err_printk(dev, entry, format, arg...) do { \ |
220 | error_count += 1; \ |
221 | if (driver_filter(dev) && \ |
222 | (show_all_errors || show_num_errors > 0)) { \ |
223 | WARN(1, pr_fmt("%s %s: ") format, \ |
224 | dev ? dev_driver_string(dev) : "NULL", \ |
225 | dev ? dev_name(dev) : "NULL", ## arg); \ |
226 | dump_entry_trace(entry); \ |
227 | } \ |
228 | if (!show_all_errors && show_num_errors > 0) \ |
229 | show_num_errors -= 1; \ |
230 | } while (0); |
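/*
 * Example use (illustrative, mirroring the call sites later in this file):
 *
 *   err_printk(dev, entry,
 *              "device driver frees DMA memory with different size "
 *              "[map size=%llu bytes] [unmap size=%llu bytes]\n",
 *              entry->size, ref->size);
 *
 * @entry may be NULL when no dma_debug_entry is associated with the error;
 * dump_entry_trace() handles that case.
 */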
231 | |
232 | /* |
233 | * Hash related functions |
234 | * |
235 | * Every DMA-API request is saved into a struct dma_debug_entry. To |
236 | * have quick access to these structs they are stored into a hash. |
237 | */ |
238 | static int hash_fn(struct dma_debug_entry *entry) |
239 | { |
240 | /* |
241 | * Hash function is based on the dma address. |
242 | * We use bits 13-26 here as the index into the hash |
243 | */ |
244 | return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; |
245 | } |
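/*
 * Worked example (illustrative): for dev_addr = 0x12345678,
 * (0x12345678 >> HASH_FN_SHIFT) & HASH_FN_MASK = 0x91a2 & 0x3fff = 0x11a2,
 * so the entry is stored in dma_entry_hash[0x11a2].
 */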
246 | |
247 | /* |
248 | * Request exclusive access to a hash bucket for a given dma_debug_entry. |
249 | */ |
250 | static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, |
251 | unsigned long *flags) |
252 | __acquires(&dma_entry_hash[idx].lock) |
253 | { |
254 | int idx = hash_fn(entry); |
255 | unsigned long __flags; |
256 | |
257 | spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); |
258 | *flags = __flags; |
259 | return &dma_entry_hash[idx]; |
260 | } |
261 | |
262 | /* |
263 | * Give up exclusive access to the hash bucket |
264 | */ |
265 | static void put_hash_bucket(struct hash_bucket *bucket, |
266 | unsigned long flags) |
267 | __releases(&bucket->lock) |
268 | { |
269 | spin_unlock_irqrestore(&bucket->lock, flags); |
270 | } |
271 | |
272 | static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) |
273 | { |
274 | return ((a->dev_addr == b->dev_addr) && |
275 | (a->dev == b->dev)) ? true : false; |
276 | } |
277 | |
278 | static bool containing_match(struct dma_debug_entry *a, |
279 | struct dma_debug_entry *b) |
280 | { |
281 | if (a->dev != b->dev) |
282 | return false; |
283 | |
284 | if ((b->dev_addr <= a->dev_addr) && |
285 | ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) |
286 | return true; |
287 | |
288 | return false; |
289 | } |
290 | |
291 | /* |
292 | * Search a given entry in the hash bucket list |
293 | */ |
294 | static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, |
295 | struct dma_debug_entry *ref, |
296 | match_fn match) |
297 | { |
298 | struct dma_debug_entry *entry, *ret = NULL; |
299 | int matches = 0, match_lvl, last_lvl = -1; |
300 | |
301 | list_for_each_entry(entry, &bucket->list, list) { |
302 | if (!match(ref, entry)) |
303 | continue; |
304 | |
305 | /* |
306 | * Some drivers map the same physical address multiple |
307 | * times. Without a hardware IOMMU this results in the |
308 | * same device addresses being put into the dma-debug |
309 | * hash multiple times too. This can result in false |
310 | * positives being reported. Therefore we implement a |
311 | * best-fit algorithm here which returns the entry from |
312 | * the hash which fits best to the reference value |
313 | * instead of the first-fit. |
314 | */ |
315 | matches += 1; |
316 | match_lvl = 0; |
317 | entry->size == ref->size ? ++match_lvl : 0; |
318 | entry->type == ref->type ? ++match_lvl : 0; |
319 | entry->direction == ref->direction ? ++match_lvl : 0; |
320 | entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; |
321 | |
322 | if (match_lvl == 4) { |
323 | /* perfect-fit - return the result */ |
324 | return entry; |
325 | } else if (match_lvl > last_lvl) { |
326 | /* |
327 | * We found an entry that fits better than the |
328 | * previous one or it is the 1st match. |
329 | */ |
330 | last_lvl = match_lvl; |
331 | ret = entry; |
332 | } |
333 | } |
334 | |
335 | /* |
336 | * If we have multiple matches but no perfect-fit, just return |
337 | * NULL. |
338 | */ |
339 | ret = (matches == 1) ? ret : NULL; |
340 | |
341 | return ret; |
342 | } |
343 | |
344 | static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, |
345 | struct dma_debug_entry *ref) |
346 | { |
347 | return __hash_bucket_find(bucket, ref, exact_match); |
348 | } |
349 | |
350 | static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, |
351 | struct dma_debug_entry *ref, |
352 | unsigned long *flags) |
353 | { |
354 | |
355 | struct dma_debug_entry *entry, index = *ref; |
356 | int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1); |
357 | |
358 | for (int i = 0; i < limit; i++) { |
359 | entry = __hash_bucket_find(*bucket, ref, containing_match); |
360 | |
361 | if (entry) |
362 | return entry; |
363 | |
364 | /* |
365 | * Nothing found, go back a hash bucket |
366 | */ |
367 | put_hash_bucket(*bucket, *flags); |
368 | index.dev_addr -= (1 << HASH_FN_SHIFT); |
369 | *bucket = get_hash_bucket(&index, flags); |
370 | } |
371 | |
372 | return NULL; |
373 | } |
374 | |
375 | /* |
376 | * Add an entry to a hash bucket |
377 | */ |
378 | static void hash_bucket_add(struct hash_bucket *bucket, |
379 | struct dma_debug_entry *entry) |
380 | { |
381 | list_add_tail(&entry->list, &bucket->list); |
382 | } |
383 | |
384 | /* |
385 | * Remove entry from a hash bucket list |
386 | */ |
387 | static void hash_bucket_del(struct dma_debug_entry *entry) |
388 | { |
389 | list_del(&entry->list); |
390 | } |
391 | |
392 | static unsigned long long phys_addr(struct dma_debug_entry *entry) |
393 | { |
394 | if (entry->type == dma_debug_resource) |
395 | return __pfn_to_phys(entry->pfn) + entry->offset; |
396 | |
397 | return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; |
398 | } |
399 | |
400 | /* |
401 | * For each mapping (initial cacheline in the case of |
402 | * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a |
403 | * scatterlist, or the cacheline specified in dma_map_single) insert |
404 | * into this tree using the cacheline as the key. At |
405 | * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If |
406 | * the entry already exists at insertion time add a tag as a reference |
407 | * count for the overlapping mappings. For now, the overlap tracking |
408 | * just ensures that 'unmaps' balance 'maps' before marking the |
409 | * cacheline idle, but we should also be flagging overlaps as an API |
410 | * violation. |
411 | * |
412 | * Memory usage is mostly constrained by the maximum number of available |
413 | * dma-debug entries in that we need a free dma_debug_entry before |
414 | * inserting into the tree. In the case of dma_map_page and |
415 | * dma_alloc_coherent there is only one dma_debug_entry and one |
416 | * dma_active_cacheline entry to track per event. dma_map_sg(), on the |
417 | * other hand, consumes a single dma_debug_entry, but inserts 'nents' |
418 | * entries into the tree. |
419 | */ |
420 | static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC); |
421 | static DEFINE_SPINLOCK(radix_lock); |
422 | #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) |
423 | #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) |
424 | #define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) |
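/*
 * Note: the overlap count for an active cacheline is encoded in the radix
 * tree tags of its slot.  With RADIX_TREE_MAX_TAGS tag bits (3 in current
 * kernels), ACTIVE_CACHELINE_MAX_OVERLAP is 7, i.e. up to seven overlapping
 * mappings of the same cacheline can be tracked before the counter
 * saturates and active_cacheline_inc_overlap() warns.
 */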
425 | |
426 | static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) |
427 | { |
428 | return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + |
429 | (entry->offset >> L1_CACHE_SHIFT); |
430 | } |
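/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-byte cache
 * lines, i.e. CACHELINE_PER_PAGE_SHIFT == 6): pfn = 0x1000, offset = 0x8c0
 * gives (0x1000 << 6) + (0x8c0 >> 6) = 0x40000 + 0x23 = 0x40023.
 */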
431 | |
432 | static int active_cacheline_read_overlap(phys_addr_t cln) |
433 | { |
434 | int overlap = 0, i; |
435 | |
436 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) |
437 | if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) |
438 | overlap |= 1 << i; |
439 | return overlap; |
440 | } |
441 | |
442 | static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) |
443 | { |
444 | int i; |
445 | |
446 | if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) |
447 | return overlap; |
448 | |
449 | for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) |
450 | if (overlap & 1 << i) |
451 | radix_tree_tag_set(&dma_active_cacheline, cln, i); |
452 | else |
453 | radix_tree_tag_clear(&dma_active_cacheline, cln, i); |
454 | |
455 | return overlap; |
456 | } |
457 | |
458 | static void active_cacheline_inc_overlap(phys_addr_t cln) |
459 | { |
460 | int overlap = active_cacheline_read_overlap(cln); |
461 | |
462 | overlap = active_cacheline_set_overlap(cln, ++overlap); |
463 | |
464 | /* If we overflowed the overlap counter then we're potentially |
465 | * leaking dma-mappings. |
466 | */ |
467 | WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, |
468 | pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"), |
469 | ACTIVE_CACHELINE_MAX_OVERLAP, &cln); |
470 | } |
471 | |
472 | static int active_cacheline_dec_overlap(phys_addr_t cln) |
473 | { |
474 | int overlap = active_cacheline_read_overlap(cln); |
475 | |
476 | return active_cacheline_set_overlap(cln, --overlap); |
477 | } |
478 | |
479 | static int active_cacheline_insert(struct dma_debug_entry *entry) |
480 | { |
481 | phys_addr_t cln = to_cacheline_number(entry); |
482 | unsigned long flags; |
483 | int rc; |
484 | |
485 | /* If the device is not writing memory then we don't have any |
486 | * concerns about the cpu consuming stale data. This mitigates |
487 | * legitimate usages of overlapping mappings. |
488 | */ |
489 | if (entry->direction == DMA_TO_DEVICE) |
490 | return 0; |
491 | |
492 | spin_lock_irqsave(&radix_lock, flags); |
493 | rc = radix_tree_insert(&dma_active_cacheline, cln, entry); |
494 | if (rc == -EEXIST) |
495 | active_cacheline_inc_overlap(cln); |
496 | spin_unlock_irqrestore(&radix_lock, flags); |
497 | |
498 | return rc; |
499 | } |
500 | |
501 | static void active_cacheline_remove(struct dma_debug_entry *entry) |
502 | { |
503 | phys_addr_t cln = to_cacheline_number(entry); |
504 | unsigned long flags; |
505 | |
506 | /* ...mirror the insert case */ |
507 | if (entry->direction == DMA_TO_DEVICE) |
508 | return; |
509 | |
510 | spin_lock_irqsave(&radix_lock, flags); |
511 | /* since we are counting overlaps the final put of the |
512 | * cacheline will occur when the overlap count is 0. |
513 | * active_cacheline_dec_overlap() returns -1 in that case |
514 | */ |
515 | if (active_cacheline_dec_overlap(cln) < 0) |
516 | radix_tree_delete(&dma_active_cacheline, cln); |
517 | spin_unlock_irqrestore(&radix_lock, flags); |
518 | } |
519 | |
520 | /* |
521 | * Dump mapping entries to the kernel log for debugging purposes |
522 | */ |
523 | void debug_dma_dump_mappings(struct device *dev) |
524 | { |
525 | int idx; |
526 | phys_addr_t cln; |
527 | |
528 | for (idx = 0; idx < HASH_SIZE; idx++) { |
529 | struct hash_bucket *bucket = &dma_entry_hash[idx]; |
530 | struct dma_debug_entry *entry; |
531 | unsigned long flags; |
532 | |
533 | spin_lock_irqsave(&bucket->lock, flags); |
534 | list_for_each_entry(entry, &bucket->list, list) { |
535 | if (!dev || dev == entry->dev) { |
536 | cln = to_cacheline_number(entry); |
537 | dev_info(entry->dev, |
538 | "%s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n", |
539 | type2name[entry->type], idx, |
540 | phys_addr(entry), entry->pfn, |
541 | entry->dev_addr, entry->size, |
542 | &cln, dir2name[entry->direction], |
543 | maperr2str[entry->map_err_type]); |
544 | } |
545 | } |
546 | spin_unlock_irqrestore(&bucket->lock, flags); |
547 | |
548 | cond_resched(); |
549 | } |
550 | } |
551 | |
552 | /* |
553 | * Dump mapping entries to user space via debugfs |
554 | */ |
555 | static int dump_show(struct seq_file *seq, void *v) |
556 | { |
557 | int idx; |
558 | phys_addr_t cln; |
559 | |
560 | for (idx = 0; idx < HASH_SIZE; idx++) { |
561 | struct hash_bucket *bucket = &dma_entry_hash[idx]; |
562 | struct dma_debug_entry *entry; |
563 | unsigned long flags; |
564 | |
565 | spin_lock_irqsave(&bucket->lock, flags); |
566 | list_for_each_entry(entry, &bucket->list, list) { |
567 | cln = to_cacheline_number(entry); |
568 | seq_printf(seq, |
569 | "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx cln=%pa %s %s\n", |
570 | dev_driver_string(entry->dev), |
571 | dev_name(entry->dev), |
572 | type2name[entry->type], idx, |
573 | phys_addr(entry), entry->pfn, |
574 | entry->dev_addr, entry->size, |
575 | &cln, dir2name[entry->direction], |
576 | maperr2str[entry->map_err_type]); |
577 | } |
578 | spin_unlock_irqrestore(&bucket->lock, flags); |
579 | } |
580 | return 0; |
581 | } |
582 | DEFINE_SHOW_ATTRIBUTE(dump); |
583 | |
584 | /* |
585 | * Wrapper function for adding an entry to the hash. |
586 | * This function takes care of locking itself. |
587 | */ |
588 | static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) |
589 | { |
590 | struct hash_bucket *bucket; |
591 | unsigned long flags; |
592 | int rc; |
593 | |
594 | bucket = get_hash_bucket(entry, &flags); |
595 | hash_bucket_add(bucket, entry); |
596 | put_hash_bucket(bucket, flags); |
597 | |
598 | rc = active_cacheline_insert(entry); |
599 | if (rc == -ENOMEM) { |
600 | pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); |
601 | global_disable = true; |
602 | } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { |
603 | err_printk(entry->dev, entry, |
604 | "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); |
605 | } |
606 | } |
607 | |
608 | static int dma_debug_create_entries(gfp_t gfp) |
609 | { |
610 | struct dma_debug_entry *entry; |
611 | int i; |
612 | |
613 | entry = (void *)get_zeroed_page(gfp); |
614 | if (!entry) |
615 | return -ENOMEM; |
616 | |
617 | for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++) |
618 | list_add_tail(&entry[i].list, &free_entries); |
619 | |
620 | num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES; |
621 | nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES; |
622 | |
623 | return 0; |
624 | } |
625 | |
626 | static struct dma_debug_entry *__dma_entry_alloc(void) |
627 | { |
628 | struct dma_debug_entry *entry; |
629 | |
630 | entry = list_entry(free_entries.next, struct dma_debug_entry, list); |
631 | list_del(&entry->list); |
632 | memset(entry, 0, sizeof(*entry)); |
633 | |
634 | num_free_entries -= 1; |
635 | if (num_free_entries < min_free_entries) |
636 | min_free_entries = num_free_entries; |
637 | |
638 | return entry; |
639 | } |
640 | |
641 | /* |
642 | * This should be called outside of free_entries_lock scope to avoid potential |
643 | * deadlocks with serial consoles that use DMA. |
644 | */ |
645 | static void __dma_entry_alloc_check_leak(u32 nr_entries) |
646 | { |
647 | u32 tmp = nr_entries % nr_prealloc_entries; |
648 | |
649 | /* Shout each time we tick over some multiple of the initial pool */ |
650 | if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) { |
651 | pr_info("dma_debug_entry pool grown to %u (%u00%%)\n", |
652 | nr_entries, |
653 | (nr_entries / nr_prealloc_entries)); |
654 | } |
655 | } |
656 | |
657 | /* struct dma_entry allocator |
658 | * |
659 | * The next two functions implement the allocator for |
660 | * struct dma_debug_entries. |
661 | */ |
662 | static struct dma_debug_entry *dma_entry_alloc(void) |
663 | { |
664 | bool alloc_check_leak = false; |
665 | struct dma_debug_entry *entry; |
666 | unsigned long flags; |
667 | u32 nr_entries; |
668 | |
669 | spin_lock_irqsave(&free_entries_lock, flags); |
670 | if (num_free_entries == 0) { |
671 | if (dma_debug_create_entries(GFP_ATOMIC)) { |
672 | global_disable = true; |
673 | spin_unlock_irqrestore(&free_entries_lock, flags); |
674 | pr_err("debugging out of memory - disabling\n"); |
675 | return NULL; |
676 | } |
677 | alloc_check_leak = true; |
678 | nr_entries = nr_total_entries; |
679 | } |
680 | |
681 | entry = __dma_entry_alloc(); |
682 | |
683 | spin_unlock_irqrestore(&free_entries_lock, flags); |
684 | |
685 | if (alloc_check_leak) |
686 | __dma_entry_alloc_check_leak(nr_entries); |
687 | |
688 | #ifdef CONFIG_STACKTRACE |
689 | entry->stack_len = stack_trace_save(entry->stack_entries, |
690 | ARRAY_SIZE(entry->stack_entries), |
691 | 1); |
692 | #endif |
693 | return entry; |
694 | } |
695 | |
696 | static void dma_entry_free(struct dma_debug_entry *entry) |
697 | { |
698 | unsigned long flags; |
699 | |
700 | active_cacheline_remove(entry); |
701 | |
702 | /* |
703 | * add to beginning of the list - this way the entries are |
704 | * more likely cache hot when they are reallocated. |
705 | */ |
706 | spin_lock_irqsave(&free_entries_lock, flags); |
707 | list_add(&entry->list, &free_entries); |
708 | num_free_entries += 1; |
709 | spin_unlock_irqrestore(&free_entries_lock, flags); |
710 | } |
711 | |
712 | /* |
713 | * DMA-API debugging init code |
714 | * |
715 | * The init code does two things: |
716 | * 1. Initialize core data structures |
717 | * 2. Preallocate a given number of dma_debug_entry structs |
718 | */ |
719 | |
720 | static ssize_t filter_read(struct file *file, char __user *user_buf, |
721 | size_t count, loff_t *ppos) |
722 | { |
723 | char buf[NAME_MAX_LEN + 1]; |
724 | unsigned long flags; |
725 | int len; |
726 | |
727 | if (!current_driver_name[0]) |
728 | return 0; |
729 | |
730 | /* |
731 | * We can't copy to userspace directly because current_driver_name can |
732 | * only be read under the driver_name_lock with irqs disabled. So |
733 | * create a temporary copy first. |
734 | */ |
735 | read_lock_irqsave(&driver_name_lock, flags); |
736 | len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); |
737 | read_unlock_irqrestore(&driver_name_lock, flags); |
738 | |
739 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); |
740 | } |
741 | |
742 | static ssize_t filter_write(struct file *file, const char __user *userbuf, |
743 | size_t count, loff_t *ppos) |
744 | { |
745 | char buf[NAME_MAX_LEN]; |
746 | unsigned long flags; |
747 | size_t len; |
748 | int i; |
749 | |
750 | /* |
751 | * We can't copy from userspace directly. Access to |
752 | * current_driver_name is protected with a write_lock with irqs |
753 | * disabled. Since copy_from_user can fault and may sleep we |
754 | * need to copy to temporary buffer first |
755 | */ |
756 | len = min(count, (size_t)(NAME_MAX_LEN - 1)); |
757 | if (copy_from_user(buf, userbuf, len)) |
758 | return -EFAULT; |
759 | |
760 | buf[len] = 0; |
761 | |
762 | write_lock_irqsave(&driver_name_lock, flags); |
763 | |
764 | /* |
765 | * Now handle the string we got from userspace very carefully. |
766 | * The rules are: |
767 | * - only use the first token we got |
768 | * - token delimiter is everything looking like a space |
769 | * character (' ', '\n', '\t' ...) |
770 | * |
771 | */ |
772 | if (!isalnum(buf[0])) { |
773 | /* |
774 | * If the first character userspace gave us is not |
775 | * alphanumerical then assume the filter should be |
776 | * switched off. |
777 | */ |
778 | if (current_driver_name[0]) |
779 | pr_info("switching off dma-debug driver filter\n"); |
780 | current_driver_name[0] = 0; |
781 | current_driver = NULL; |
782 | goto out_unlock; |
783 | } |
784 | |
785 | /* |
786 | * Now parse out the first token and use it as the name for the |
787 | * driver to filter for. |
788 | */ |
789 | for (i = 0; i < NAME_MAX_LEN - 1; ++i) { |
790 | current_driver_name[i] = buf[i]; |
791 | if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) |
792 | break; |
793 | } |
794 | current_driver_name[i] = 0; |
795 | current_driver = NULL; |
796 | |
797 | pr_info("enable driver filter for driver [%s]\n", |
798 | current_driver_name); |
799 | |
800 | out_unlock: |
801 | write_unlock_irqrestore(&driver_name_lock, flags); |
802 | |
803 | return count; |
804 | } |
805 | |
806 | static const struct file_operations filter_fops = { |
807 | .read = filter_read, |
808 | .write = filter_write, |
809 | .llseek = default_llseek, |
810 | }; |
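/*
 * The files created below live under <debugfs>/dma-api/, usually mounted at
 * /sys/kernel/debug/dma-api/.  For example (illustrative driver name):
 *
 *   cat /sys/kernel/debug/dma-api/error_count
 *   echo "e1000e" > /sys/kernel/debug/dma-api/driver_filter
 *   cat /sys/kernel/debug/dma-api/dump
 */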
811 | |
812 | static int __init dma_debug_fs_init(void) |
813 | { |
814 | struct dentry *dentry = debugfs_create_dir("dma-api", NULL); |
815 | |
816 | debugfs_create_bool("disabled", 0444, dentry, &global_disable); |
817 | debugfs_create_u32("error_count", 0444, dentry, &error_count); |
818 | debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors); |
819 | debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors); |
820 | debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries); |
821 | debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries); |
822 | debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); |
823 | debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); |
824 | debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); |
825 | |
826 | return 0; |
827 | } |
828 | core_initcall_sync(dma_debug_fs_init); |
829 | |
830 | static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) |
831 | { |
832 | struct dma_debug_entry *entry; |
833 | unsigned long flags; |
834 | int count = 0, i; |
835 | |
836 | for (i = 0; i < HASH_SIZE; ++i) { |
837 | spin_lock_irqsave(&dma_entry_hash[i].lock, flags); |
838 | list_for_each_entry(entry, &dma_entry_hash[i].list, list) { |
839 | if (entry->dev == dev) { |
840 | count += 1; |
841 | *out_entry = entry; |
842 | } |
843 | } |
844 | spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); |
845 | } |
846 | |
847 | return count; |
848 | } |
849 | |
850 | static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) |
851 | { |
852 | struct device *dev = data; |
853 | struct dma_debug_entry *entry; |
854 | int count; |
855 | |
856 | if (dma_debug_disabled()) |
857 | return 0; |
858 | |
859 | switch (action) { |
860 | case BUS_NOTIFY_UNBOUND_DRIVER: |
861 | count = device_dma_allocations(dev, &entry); |
862 | if (count == 0) |
863 | break; |
864 | err_printk(dev, entry, "device driver has pending " |
865 | "DMA allocations while released from device " |
866 | "[count=%d]\n" |
867 | "One of leaked entries details: " |
868 | "[device address=0x%016llx] [size=%llu bytes] " |
869 | "[mapped with %s] [mapped as %s]\n", |
870 | count, entry->dev_addr, entry->size, |
871 | dir2name[entry->direction], type2name[entry->type]); |
872 | break; |
873 | default: |
874 | break; |
875 | } |
876 | |
877 | return 0; |
878 | } |
879 | |
880 | void dma_debug_add_bus(const struct bus_type *bus) |
881 | { |
882 | struct notifier_block *nb; |
883 | |
884 | if (dma_debug_disabled()) |
885 | return; |
886 | |
887 | nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); |
888 | if (nb == NULL) { |
889 | pr_err("dma_debug_add_bus: out of memory\n"); |
890 | return; |
891 | } |
892 | |
893 | nb->notifier_call = dma_debug_device_change; |
894 | |
895 | bus_register_notifier(bus, nb); |
896 | } |
897 | |
898 | static int dma_debug_init(void) |
899 | { |
900 | int i, nr_pages; |
901 | |
902 | /* Do not use dma_debug_initialized here, since we really want to be |
903 | * called to set dma_debug_initialized |
904 | */ |
905 | if (global_disable) |
906 | return 0; |
907 | |
908 | for (i = 0; i < HASH_SIZE; ++i) { |
909 | INIT_LIST_HEAD(&dma_entry_hash[i].list); |
910 | spin_lock_init(&dma_entry_hash[i].lock); |
911 | } |
912 | |
913 | nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); |
914 | for (i = 0; i < nr_pages; ++i) |
915 | dma_debug_create_entries(GFP_KERNEL); |
916 | if (num_free_entries >= nr_prealloc_entries) { |
917 | pr_info("preallocated %d debug entries\n", nr_total_entries); |
918 | } else if (num_free_entries > 0) { |
919 | pr_warn("%d debug entries requested but only %d allocated\n", |
920 | nr_prealloc_entries, nr_total_entries); |
921 | } else { |
922 | pr_err("debugging out of memory error - disabled\n"); |
923 | global_disable = true; |
924 | |
925 | return 0; |
926 | } |
927 | min_free_entries = num_free_entries; |
928 | |
929 | dma_debug_initialized = true; |
930 | |
931 | pr_info("debugging enabled by kernel config\n"); |
932 | return 0; |
933 | } |
934 | core_initcall(dma_debug_init); |
935 | |
936 | static __init int dma_debug_cmdline(char *str) |
937 | { |
938 | if (!str) |
939 | return -EINVAL; |
940 | |
941 | if (strncmp(str, "off", 3) == 0) { |
942 | pr_info("debugging disabled on kernel command line\n"); |
943 | global_disable = true; |
944 | } |
945 | |
946 | return 1; |
947 | } |
948 | |
949 | static __init int dma_debug_entries_cmdline(char *str) |
950 | { |
951 | if (!str) |
952 | return -EINVAL; |
953 | if (!get_option(&str, &nr_prealloc_entries)) |
954 | nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; |
955 | return 1; |
956 | } |
957 | |
958 | __setup("dma_debug=", dma_debug_cmdline); |
959 | __setup("dma_debug_entries=", dma_debug_entries_cmdline); |
960 | |
961 | static void check_unmap(struct dma_debug_entry *ref) |
962 | { |
963 | struct dma_debug_entry *entry; |
964 | struct hash_bucket *bucket; |
965 | unsigned long flags; |
966 | |
967 | bucket = get_hash_bucket(ref, &flags); |
968 | entry = bucket_find_exact(bucket, ref); |
969 | |
970 | if (!entry) { |
971 | /* must drop lock before calling dma_mapping_error */ |
972 | put_hash_bucket(bucket, flags); |
973 | |
974 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { |
975 | err_printk(ref->dev, NULL, |
976 | "device driver tries to free an " |
977 | "invalid DMA memory address\n"); |
978 | } else { |
979 | err_printk(ref->dev, NULL, |
980 | "device driver tries to free DMA " |
981 | "memory it has not allocated [device " |
982 | "address=0x%016llx] [size=%llu bytes]\n", |
983 | ref->dev_addr, ref->size); |
984 | } |
985 | return; |
986 | } |
987 | |
988 | if (ref->size != entry->size) { |
989 | err_printk(ref->dev, entry, "device driver frees " |
990 | "DMA memory with different size " |
991 | "[device address=0x%016llx] [map size=%llu bytes] " |
992 | "[unmap size=%llu bytes]\n", |
993 | ref->dev_addr, entry->size, ref->size); |
994 | } |
995 | |
996 | if (ref->type != entry->type) { |
997 | err_printk(ref->dev, entry, "device driver frees " |
998 | "DMA memory with wrong function " |
999 | "[device address=0x%016llx] [size=%llu bytes] " |
1000 | "[mapped as %s] [unmapped as %s]\n", |
1001 | ref->dev_addr, ref->size, |
1002 | type2name[entry->type], type2name[ref->type]); |
1003 | } else if ((entry->type == dma_debug_coherent) && |
1004 | (phys_addr(ref) != phys_addr(entry))) { |
1005 | err_printk(ref->dev, entry, "device driver frees " |
1006 | "DMA memory with different CPU address " |
1007 | "[device address=0x%016llx] [size=%llu bytes] " |
1008 | "[cpu alloc address=0x%016llx] " |
1009 | "[cpu free address=0x%016llx]", |
1010 | ref->dev_addr, ref->size, |
1011 | phys_addr(entry), |
1012 | phys_addr(ref)); |
1013 | } |
1014 | |
1015 | if (ref->sg_call_ents && ref->type == dma_debug_sg && |
1016 | ref->sg_call_ents != entry->sg_call_ents) { |
1017 | err_printk(ref->dev, entry, "device driver frees " |
1018 | "DMA sg list with different entry count " |
1019 | "[map count=%d] [unmap count=%d]\n", |
1020 | entry->sg_call_ents, ref->sg_call_ents); |
1021 | } |
1022 | |
1023 | /* |
1024 | * This may be no bug in reality - but most implementations of the |
1025 | * DMA API don't handle this properly, so check for it here |
1026 | */ |
1027 | if (ref->direction != entry->direction) { |
1028 | err_printk(ref->dev, entry, "device driver frees " |
1029 | "DMA memory with different direction " |
1030 | "[device address=0x%016llx] [size=%llu bytes] " |
1031 | "[mapped with %s] [unmapped with %s]\n", |
1032 | ref->dev_addr, ref->size, |
1033 | dir2name[entry->direction], |
1034 | dir2name[ref->direction]); |
1035 | } |
1036 | |
1037 | /* |
1038 | * Drivers should use dma_mapping_error() to check the returned |
1039 | * addresses of dma_map_single() and dma_map_page(). |
1040 | * If not, print this warning message. See Documentation/core-api/dma-api.rst. |
1041 | */ |
1042 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { |
1043 | err_printk(ref->dev, entry, |
1044 | "device driver failed to check map error" |
1045 | "[device address=0x%016llx] [size=%llu bytes] " |
1046 | "[mapped as %s]", |
1047 | ref->dev_addr, ref->size, |
1048 | type2name[entry->type]); |
1049 | } |
1050 | |
1051 | hash_bucket_del(entry); |
1052 | dma_entry_free(entry); |
1053 | |
1054 | put_hash_bucket(bucket, flags); |
1055 | } |
1056 | |
1057 | static void check_for_stack(struct device *dev, |
1058 | struct page *page, size_t offset) |
1059 | { |
1060 | void *addr; |
1061 | struct vm_struct *stack_vm_area = task_stack_vm_area(current); |
1062 | |
1063 | if (!stack_vm_area) { |
1064 | /* Stack is direct-mapped. */ |
1065 | if (PageHighMem(page)) |
1066 | return; |
1067 | addr = page_address(page) + offset; |
1068 | if (object_is_on_stack(addr)) |
1069 | err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr); |
1070 | } else { |
1071 | /* Stack is vmalloced. */ |
1072 | int i; |
1073 | |
1074 | for (i = 0; i < stack_vm_area->nr_pages; i++) { |
1075 | if (page != stack_vm_area->pages[i]) |
1076 | continue; |
1077 | |
1078 | addr = (u8 *)current->stack + i * PAGE_SIZE + offset; |
1079 | err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr); |
1080 | break; |
1081 | } |
1082 | } |
1083 | } |
1084 | |
1085 | static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) |
1086 | { |
1087 | if (memory_intersects(_stext, _etext, addr, len) || |
1088 | memory_intersects(__start_rodata, __end_rodata, addr, len)) |
1089 | err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); |
1090 | } |
1091 | |
1092 | static void check_sync(struct device *dev, |
1093 | struct dma_debug_entry *ref, |
1094 | bool to_cpu) |
1095 | { |
1096 | struct dma_debug_entry *entry; |
1097 | struct hash_bucket *bucket; |
1098 | unsigned long flags; |
1099 | |
1100 | bucket = get_hash_bucket(ref, &flags); |
1101 | |
1102 | entry = bucket_find_contain(&bucket, ref, &flags); |
1103 | |
1104 | if (!entry) { |
1105 | err_printk(dev, NULL, "device driver tries " |
1106 | "to sync DMA memory it has not allocated " |
1107 | "[device address=0x%016llx] [size=%llu bytes]\n", |
1108 | (unsigned long long)ref->dev_addr, ref->size); |
1109 | goto out; |
1110 | } |
1111 | |
1112 | if (ref->size > entry->size) { |
1113 | err_printk(dev, entry, "device driver syncs" |
1114 | " DMA memory outside allocated range " |
1115 | "[device address=0x%016llx] " |
1116 | "[allocation size=%llu bytes] " |
1117 | "[sync offset+size=%llu]\n", |
1118 | entry->dev_addr, entry->size, |
1119 | ref->size); |
1120 | } |
1121 | |
1122 | if (entry->direction == DMA_BIDIRECTIONAL) |
1123 | goto out; |
1124 | |
1125 | if (ref->direction != entry->direction) { |
1126 | err_printk(dev, entry, "device driver syncs " |
1127 | "DMA memory with different direction " |
1128 | "[device address=0x%016llx] [size=%llu bytes] " |
1129 | "[mapped with %s] [synced with %s]\n", |
1130 | (unsigned long long)ref->dev_addr, entry->size, |
1131 | dir2name[entry->direction], |
1132 | dir2name[ref->direction]); |
1133 | } |
1134 | |
1135 | if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && |
1136 | !(ref->direction == DMA_TO_DEVICE)) |
1137 | err_printk(dev, entry, "device driver syncs " |
1138 | "device read-only DMA memory for cpu " |
1139 | "[device address=0x%016llx] [size=%llu bytes] " |
1140 | "[mapped with %s] [synced with %s]\n", |
1141 | (unsigned long long)ref->dev_addr, entry->size, |
1142 | dir2name[entry->direction], |
1143 | dir2name[ref->direction]); |
1144 | |
1145 | if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && |
1146 | !(ref->direction == DMA_FROM_DEVICE)) |
1147 | err_printk(dev, entry, "device driver syncs " |
1148 | "device write-only DMA memory to device " |
1149 | "[device address=0x%016llx] [size=%llu bytes] " |
1150 | "[mapped with %s] [synced with %s]\n", |
1151 | (unsigned long long)ref->dev_addr, entry->size, |
1152 | dir2name[entry->direction], |
1153 | dir2name[ref->direction]); |
1154 | |
1155 | if (ref->sg_call_ents && ref->type == dma_debug_sg && |
1156 | ref->sg_call_ents != entry->sg_call_ents) { |
1157 | err_printk(ref->dev, entry, "device driver syncs " |
1158 | "DMA sg list with different entry count " |
1159 | "[map count=%d] [sync count=%d]\n", |
1160 | entry->sg_call_ents, ref->sg_call_ents); |
1161 | } |
1162 | |
1163 | out: |
1164 | put_hash_bucket(bucket, flags); |
1165 | } |
1166 | |
1167 | static void check_sg_segment(struct device *dev, struct scatterlist *sg) |
1168 | { |
1169 | #ifdef CONFIG_DMA_API_DEBUG_SG |
1170 | unsigned int max_seg = dma_get_max_seg_size(dev); |
1171 | u64 start, end, boundary = dma_get_seg_boundary(dev); |
1172 | |
1173 | /* |
1174 | * Either the driver forgot to set dma_parms appropriately, or |
1175 | * whoever generated the list forgot to check them. |
1176 | */ |
1177 | if (sg->length > max_seg) |
1178 | err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n", |
1179 | sg->length, max_seg); |
1180 | /* |
1181 | * In some cases this could potentially be the DMA API |
1182 | * implementation's fault, but it would usually imply that |
1183 | * the scatterlist was built inappropriately to begin with. |
1184 | */ |
1185 | start = sg_dma_address(sg); |
1186 | end = start + sg_dma_len(sg) - 1; |
1187 | if ((start ^ end) & ~boundary) |
1188 | err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n", |
1189 | start, end, boundary); |
1190 | #endif |
1191 | } |
1192 | |
1193 | void debug_dma_map_single(struct device *dev, const void *addr, |
1194 | unsigned long len) |
1195 | { |
1196 | if (unlikely(dma_debug_disabled())) |
1197 | return; |
1198 | |
1199 | if (!virt_addr_valid(addr)) |
1200 | err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n", |
1201 | addr, len); |
1202 | |
1203 | if (is_vmalloc_addr(addr)) |
1204 | err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n", |
1205 | addr, len); |
1206 | } |
1207 | EXPORT_SYMBOL(debug_dma_map_single); |
1208 | |
1209 | void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, |
1210 | size_t size, int direction, dma_addr_t dma_addr, |
1211 | unsigned long attrs) |
1212 | { |
1213 | struct dma_debug_entry *entry; |
1214 | |
1215 | if (unlikely(dma_debug_disabled())) |
1216 | return; |
1217 | |
1218 | if (dma_mapping_error(dev, dma_addr)) |
1219 | return; |
1220 | |
1221 | entry = dma_entry_alloc(); |
1222 | if (!entry) |
1223 | return; |
1224 | |
1225 | entry->dev = dev; |
1226 | entry->type = dma_debug_single; |
1227 | entry->pfn = page_to_pfn(page); |
1228 | entry->offset = offset; |
1229 | entry->dev_addr = dma_addr; |
1230 | entry->size = size; |
1231 | entry->direction = direction; |
1232 | entry->map_err_type = MAP_ERR_NOT_CHECKED; |
1233 | |
1234 | check_for_stack(dev, page, offset); |
1235 | |
1236 | if (!PageHighMem(page)) { |
1237 | void *addr = page_address(page) + offset; |
1238 | |
1239 | check_for_illegal_area(dev, addr, size); |
1240 | } |
1241 | |
1242 | add_dma_entry(entry, attrs); |
1243 | } |
1244 | |
1245 | void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) |
1246 | { |
1247 | struct dma_debug_entry ref; |
1248 | struct dma_debug_entry *entry; |
1249 | struct hash_bucket *bucket; |
1250 | unsigned long flags; |
1251 | |
1252 | if (unlikely(dma_debug_disabled())) |
1253 | return; |
1254 | |
1255 | ref.dev = dev; |
1256 | ref.dev_addr = dma_addr; |
1257 | bucket = get_hash_bucket(&ref, &flags); |
1258 | |
1259 | list_for_each_entry(entry, &bucket->list, list) { |
1260 | if (!exact_match(&ref, entry)) |
1261 | continue; |
1262 | |
1263 | /* |
1264 | * The same physical address can be mapped multiple |
1265 | * times. Without a hardware IOMMU this results in the |
1266 | * same device addresses being put into the dma-debug |
1267 | * hash multiple times too. This can result in false |
1268 | * positives being reported. Therefore we implement a |
1269 | * best-fit algorithm here which updates the first entry |
1270 | * from the hash which fits the reference value and is |
1271 | * not currently listed as being checked. |
1272 | */ |
1273 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { |
1274 | entry->map_err_type = MAP_ERR_CHECKED; |
1275 | break; |
1276 | } |
1277 | } |
1278 | |
1279 | put_hash_bucket(bucket, flags); |
1280 | } |
1281 | EXPORT_SYMBOL(debug_dma_mapping_error); |
1282 | |
1283 | void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, |
1284 | size_t size, int direction) |
1285 | { |
1286 | struct dma_debug_entry ref = { |
1287 | .type = dma_debug_single, |
1288 | .dev = dev, |
1289 | .dev_addr = dma_addr, |
1290 | .size = size, |
1291 | .direction = direction, |
1292 | }; |
1293 | |
1294 | if (unlikely(dma_debug_disabled())) |
1295 | return; |
1296 | check_unmap(&ref); |
1297 | } |
1298 | |
1299 | void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, |
1300 | int nents, int mapped_ents, int direction, |
1301 | unsigned long attrs) |
1302 | { |
1303 | struct dma_debug_entry *entry; |
1304 | struct scatterlist *s; |
1305 | int i; |
1306 | |
1307 | if (unlikely(dma_debug_disabled())) |
1308 | return; |
1309 | |
1310 | for_each_sg(sg, s, nents, i) { |
1311 | check_for_stack(dev, sg_page(s), s->offset); |
1312 | if (!PageHighMem(sg_page(s))) |
1313 | check_for_illegal_area(dev, sg_virt(s), s->length); |
1314 | } |
1315 | |
1316 | for_each_sg(sg, s, mapped_ents, i) { |
1317 | entry = dma_entry_alloc(); |
1318 | if (!entry) |
1319 | return; |
1320 | |
1321 | entry->type = dma_debug_sg; |
1322 | entry->dev = dev; |
1323 | entry->pfn = page_to_pfn(sg_page(s)); |
1324 | entry->offset = s->offset; |
1325 | entry->size = sg_dma_len(s); |
1326 | entry->dev_addr = sg_dma_address(s); |
1327 | entry->direction = direction; |
1328 | entry->sg_call_ents = nents; |
1329 | entry->sg_mapped_ents = mapped_ents; |
1330 | |
1331 | check_sg_segment(dev, s); |
1332 | |
1333 | add_dma_entry(entry, attrs); |
1334 | } |
1335 | } |
1336 | |
1337 | static int get_nr_mapped_entries(struct device *dev, |
1338 | struct dma_debug_entry *ref) |
1339 | { |
1340 | struct dma_debug_entry *entry; |
1341 | struct hash_bucket *bucket; |
1342 | unsigned long flags; |
1343 | int mapped_ents; |
1344 | |
1345 | bucket = get_hash_bucket(ref, &flags); |
1346 | entry = bucket_find_exact(bucket, ref); |
1347 | mapped_ents = 0; |
1348 | |
1349 | if (entry) |
1350 | mapped_ents = entry->sg_mapped_ents; |
1351 | put_hash_bucket(bucket, flags); |
1352 | |
1353 | return mapped_ents; |
1354 | } |
1355 | |
1356 | void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, |
1357 | int nelems, int dir) |
1358 | { |
1359 | struct scatterlist *s; |
1360 | int mapped_ents = 0, i; |
1361 | |
1362 | if (unlikely(dma_debug_disabled())) |
1363 | return; |
1364 | |
1365 | for_each_sg(sglist, s, nelems, i) { |
1366 | |
1367 | struct dma_debug_entry ref = { |
1368 | .type = dma_debug_sg, |
1369 | .dev = dev, |
1370 | .pfn = page_to_pfn(sg_page(s)), |
1371 | .offset = s->offset, |
1372 | .dev_addr = sg_dma_address(s), |
1373 | .size = sg_dma_len(s), |
1374 | .direction = dir, |
1375 | .sg_call_ents = nelems, |
1376 | }; |
1377 | |
1378 | if (mapped_ents && i >= mapped_ents) |
1379 | break; |
1380 | |
1381 | if (!i) |
1382 | mapped_ents = get_nr_mapped_entries(dev, &ref); |
1383 | |
1384 | check_unmap(&ref); |
1385 | } |
1386 | } |
1387 | |
1388 | void debug_dma_alloc_coherent(struct device *dev, size_t size, |
1389 | dma_addr_t dma_addr, void *virt, |
1390 | unsigned long attrs) |
1391 | { |
1392 | struct dma_debug_entry *entry; |
1393 | |
1394 | if (unlikely(dma_debug_disabled())) |
1395 | return; |
1396 | |
1397 | if (unlikely(virt == NULL)) |
1398 | return; |
1399 | |
1400 | /* handle vmalloc and linear addresses */ |
1401 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1402 | return; |
1403 | |
1404 | entry = dma_entry_alloc(); |
1405 | if (!entry) |
1406 | return; |
1407 | |
1408 | entry->type = dma_debug_coherent; |
1409 | entry->dev = dev; |
1410 | entry->offset = offset_in_page(virt); |
1411 | entry->size = size; |
1412 | entry->dev_addr = dma_addr; |
1413 | entry->direction = DMA_BIDIRECTIONAL; |
1414 | |
1415 | if (is_vmalloc_addr(virt)) |
1416 | entry->pfn = vmalloc_to_pfn(virt); |
1417 | else |
1418 | entry->pfn = page_to_pfn(virt_to_page(virt)); |
1419 | |
1420 | add_dma_entry(entry, attrs); |
1421 | } |
1422 | |
1423 | void debug_dma_free_coherent(struct device *dev, size_t size, |
1424 | void *virt, dma_addr_t dma_addr) |
1425 | { |
1426 | struct dma_debug_entry ref = { |
1427 | .type = dma_debug_coherent, |
1428 | .dev = dev, |
1429 | .offset = offset_in_page(virt), |
1430 | .dev_addr = dma_addr, |
1431 | .size = size, |
1432 | .direction = DMA_BIDIRECTIONAL, |
1433 | }; |
1434 | |
1435 | /* handle vmalloc and linear addresses */ |
1436 | if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt)) |
1437 | return; |
1438 | |
1439 | if (is_vmalloc_addr(virt)) |
1440 | ref.pfn = vmalloc_to_pfn(virt); |
1441 | else |
1442 | ref.pfn = page_to_pfn(virt_to_page(virt)); |
1443 | |
1444 | if (unlikely(dma_debug_disabled())) |
1445 | return; |
1446 | |
1447 | check_unmap(&ref); |
1448 | } |
1449 | |
1450 | void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, |
1451 | int direction, dma_addr_t dma_addr, |
1452 | unsigned long attrs) |
1453 | { |
1454 | struct dma_debug_entry *entry; |
1455 | |
1456 | if (unlikely(dma_debug_disabled())) |
1457 | return; |
1458 | |
1459 | entry = dma_entry_alloc(); |
1460 | if (!entry) |
1461 | return; |
1462 | |
1463 | entry->type = dma_debug_resource; |
1464 | entry->dev = dev; |
1465 | entry->pfn = PHYS_PFN(addr); |
1466 | entry->offset = offset_in_page(addr); |
1467 | entry->size = size; |
1468 | entry->dev_addr = dma_addr; |
1469 | entry->direction = direction; |
1470 | entry->map_err_type = MAP_ERR_NOT_CHECKED; |
1471 | |
1472 | add_dma_entry(entry, attrs); |
1473 | } |
1474 | |
1475 | void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, |
1476 | size_t size, int direction) |
1477 | { |
1478 | struct dma_debug_entry ref = { |
1479 | .type = dma_debug_resource, |
1480 | .dev = dev, |
1481 | .dev_addr = dma_addr, |
1482 | .size = size, |
1483 | .direction = direction, |
1484 | }; |
1485 | |
1486 | if (unlikely(dma_debug_disabled())) |
1487 | return; |
1488 | |
1489 | check_unmap(&ref); |
1490 | } |
1491 | |
1492 | void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
1493 | size_t size, int direction) |
1494 | { |
1495 | struct dma_debug_entry ref; |
1496 | |
1497 | if (unlikely(dma_debug_disabled())) |
1498 | return; |
1499 | |
1500 | ref.type = dma_debug_single; |
1501 | ref.dev = dev; |
1502 | ref.dev_addr = dma_handle; |
1503 | ref.size = size; |
1504 | ref.direction = direction; |
1505 | ref.sg_call_ents = 0; |
1506 | |
1507 | check_sync(dev, &ref, true); |
1508 | } |
1509 | |
1510 | void debug_dma_sync_single_for_device(struct device *dev, |
1511 | dma_addr_t dma_handle, size_t size, |
1512 | int direction) |
1513 | { |
1514 | struct dma_debug_entry ref; |
1515 | |
1516 | if (unlikely(dma_debug_disabled())) |
1517 | return; |
1518 | |
1519 | ref.type = dma_debug_single; |
1520 | ref.dev = dev; |
1521 | ref.dev_addr = dma_handle; |
1522 | ref.size = size; |
1523 | ref.direction = direction; |
1524 | ref.sg_call_ents = 0; |
1525 | |
1526 | check_sync(dev, &ref, false); |
1527 | } |
1528 | |
1529 | void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
1530 | int nelems, int direction) |
1531 | { |
1532 | struct scatterlist *s; |
1533 | int mapped_ents = 0, i; |
1534 | |
1535 | if (unlikely(dma_debug_disabled())) |
1536 | return; |
1537 | |
1538 | for_each_sg(sg, s, nelems, i) { |
1539 | |
1540 | struct dma_debug_entry ref = { |
1541 | .type = dma_debug_sg, |
1542 | .dev = dev, |
1543 | .pfn = page_to_pfn(sg_page(s)), |
1544 | .offset = s->offset, |
1545 | .dev_addr = sg_dma_address(s), |
1546 | .size = sg_dma_len(s), |
1547 | .direction = direction, |
1548 | .sg_call_ents = nelems, |
1549 | }; |
1550 | |
1551 | if (!i) |
1552 | mapped_ents = get_nr_mapped_entries(dev, &ref); |
1553 | |
1554 | if (i >= mapped_ents) |
1555 | break; |
1556 | |
1557 | check_sync(dev, &ref, true); |
1558 | } |
1559 | } |
1560 | |
1561 | void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
1562 | int nelems, int direction) |
1563 | { |
1564 | struct scatterlist *s; |
1565 | int mapped_ents = 0, i; |
1566 | |
1567 | if (unlikely(dma_debug_disabled())) |
1568 | return; |
1569 | |
1570 | for_each_sg(sg, s, nelems, i) { |
1571 | |
1572 | struct dma_debug_entry ref = { |
1573 | .type = dma_debug_sg, |
1574 | .dev = dev, |
1575 | .pfn = page_to_pfn(sg_page(s)), |
1576 | .offset = s->offset, |
1577 | .dev_addr = sg_dma_address(s), |
1578 | .size = sg_dma_len(s), |
1579 | .direction = direction, |
1580 | .sg_call_ents = nelems, |
1581 | }; |
1582 | if (!i) |
1583 | mapped_ents = get_nr_mapped_entries(dev, &ref); |
1584 | |
1585 | if (i >= mapped_ents) |
1586 | break; |
1587 | |
1588 | check_sync(dev, &ref, false); |
1589 | } |
1590 | } |
1591 | |
1592 | static int __init dma_debug_driver_setup(char *str) |
1593 | { |
1594 | int i; |
1595 | |
1596 | for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { |
1597 | current_driver_name[i] = *str; |
1598 | if (*str == 0) |
1599 | break; |
1600 | } |
1601 | |
1602 | if (current_driver_name[0]) |
1603 | pr_info("enable driver filter for driver [%s]\n", |
1604 | current_driver_name); |
1605 | |
1606 | |
1607 | return 1; |
1608 | } |
1609 | __setup("dma_debug_driver=", dma_debug_driver_setup); |
1610 |