1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * DMA Engine test module |
4 | * |
5 | * Copyright (C) 2007 Atmel Corporation |
6 | * Copyright (C) 2013 Intel Corporation |
7 | */ |
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
9 | |
10 | #include <linux/err.h> |
11 | #include <linux/delay.h> |
12 | #include <linux/dma-mapping.h> |
13 | #include <linux/dmaengine.h> |
14 | #include <linux/freezer.h> |
15 | #include <linux/init.h> |
16 | #include <linux/kthread.h> |
17 | #include <linux/sched/task.h> |
18 | #include <linux/module.h> |
19 | #include <linux/moduleparam.h> |
20 | #include <linux/random.h> |
21 | #include <linux/slab.h> |
22 | #include <linux/wait.h> |
23 | |
/*
 * Module parameters. All are mode 0644, i.e. readable and writable at
 * runtime via /sys/module/dmatest/parameters/ between test runs.
 */
static bool nobounce;
module_param(nobounce, bool, 0644);
MODULE_PARM_DESC(nobounce, "Prevent using swiotlb buffer (default: use swiotlb buffer)" );

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer" );

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)" );

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)" );

static unsigned int max_channels;
module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)" );

static unsigned int iterations;
module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)" );

/* Selects the operation type exercised by the test threads. */
static unsigned int dmatest;
module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)" );

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)" );

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)" );

static int timeout = 3000;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout" );

static bool noverify;
module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)" );

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)" );

static bool verbose;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)" );

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))" );

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))" );

static bool polled;
module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts" );
94 | |
95 | /** |
96 | * struct dmatest_params - test parameters. |
97 | * @nobounce: prevent using swiotlb buffer |
98 | * @buf_size: size of the memcpy test buffer |
99 | * @channel: bus ID of the channel to test |
100 | * @device: bus ID of the DMA Engine to test |
101 | * @threads_per_chan: number of threads to start per channel |
102 | * @max_channels: maximum number of channels to use |
103 | * @iterations: iterations before stopping test |
104 | * @xor_sources: number of xor source buffers |
105 | * @pq_sources: number of p+q source buffers |
106 | * @timeout: transfer timeout in msec, -1 for infinite timeout |
107 | * @noverify: disable data verification |
108 | * @norandom: disable random offset setup |
109 | * @alignment: custom data address alignment taken as 2^alignment |
110 | * @transfer_size: custom transfer size in bytes |
111 | * @polled: use polling for completion instead of interrupts |
112 | */ |
113 | struct dmatest_params { |
114 | bool nobounce; |
115 | unsigned int buf_size; |
116 | char channel[20]; |
117 | char device[32]; |
118 | unsigned int threads_per_chan; |
119 | unsigned int max_channels; |
120 | unsigned int iterations; |
121 | unsigned int xor_sources; |
122 | unsigned int pq_sources; |
123 | int timeout; |
124 | bool noverify; |
125 | bool norandom; |
126 | int alignment; |
127 | unsigned int transfer_size; |
128 | bool polled; |
129 | }; |
130 | |
131 | /** |
132 | * struct dmatest_info - test information. |
133 | * @params: test parameters |
134 | * @channels: channels under test |
135 | * @nr_channels: number of channels under test |
136 | * @lock: access protection to the fields of this structure |
137 | * @did_init: module has been initialized completely |
138 | * @last_error: test has faced configuration issues |
139 | */ |
140 | static struct dmatest_info { |
141 | /* Test parameters */ |
142 | struct dmatest_params params; |
143 | |
144 | /* Internal state */ |
145 | struct list_head channels; |
146 | unsigned int nr_channels; |
147 | int last_error; |
148 | struct mutex lock; |
149 | bool did_init; |
150 | } test_info = { |
151 | .channels = LIST_HEAD_INIT(test_info.channels), |
152 | .lock = __MUTEX_INITIALIZER(test_info.lock), |
153 | }; |
154 | |
/*
 * The "run" parameter uses custom get/set ops (defined later in this
 * file) so that writing it can start or stop the test threads.
 */
static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)" );
164 | |
/*
 * The "channel" parameter uses custom ops (dmatest_chan_set/get, defined
 * later in this file). NOTE(review): the ops name "multi_chan_ops"
 * suggests repeated writes accumulate channels — confirm in
 * dmatest_chan_set.
 */
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = 20,
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)" );
179 | |
/* "test_list" is read-only (0444): reading it prints the current test list. */
static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = dmatest_test_list_get,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list" );
186 | |
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT 32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC 0x80
#define PATTERN_DST 0x00
#define PATTERN_COPY 0x40
#define PATTERN_OVERWRITE 0x20
#define PATTERN_COUNT_MASK 0x1f
/* Fixed counter value used by memset tests instead of the byte index. */
#define PATTERN_MEMSET_IDX 0x01

/* Fixed point arithmetic ops */
/* 8 fractional bits; FIXPT_GET_FRAC() scales the fraction to 0..99. */
#define FIXPT_SHIFT 8
#define FIXPNT_MASK 0xFF
#define FIXPT_TO_INT(a) ((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a) ((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a) ((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
214 | |
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool done;			/* set by the completion callback */
	wait_queue_head_t *wait;	/* woken when done becomes true */
};

/* One side (source or destination) of a test transfer. */
struct dmatest_data {
	u8 **raw;		/* buffers as allocated; NULL-terminated array */
	u8 **aligned;		/* raw pointers rounded up to the alignment */
	gfp_t gfp_flags;	/* allocation flags (GFP_DMA when nobounce) */
	unsigned int cnt;	/* number of buffers */
	unsigned int off;	/* offset of the current transfer in the buffer */
};

/* One test kthread bound to a channel. */
struct dmatest_thread {
	struct list_head node;		/* entry in dmatest_chan->threads */
	struct dmatest_info *info;
	struct task_struct *task;
	struct dma_chan *chan;
	struct dmatest_data src;
	struct dmatest_data dst;
	enum dma_transaction_type type;	/* DMA_MEMCPY/MEMSET/XOR/PQ */
	wait_queue_head_t done_wait;
	struct dmatest_done test_done;
	bool done;		/* thread finished its run (set in dmatest_func) */
	bool pending;		/* created but not yet running */
};

/* A channel under test plus the threads exercising it. */
struct dmatest_chan {
	struct list_head node;		/* entry in dmatest_info->channels */
	struct dma_chan *chan;
	struct list_head threads;	/* list of dmatest_thread */
};

/* Woken whenever a test thread finishes; see dmatest_wait_get(). */
static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;
251 | |
252 | static bool is_threaded_test_run(struct dmatest_info *info) |
253 | { |
254 | struct dmatest_chan *dtc; |
255 | |
256 | list_for_each_entry(dtc, &info->channels, node) { |
257 | struct dmatest_thread *thread; |
258 | |
259 | list_for_each_entry(thread, &dtc->threads, node) { |
260 | if (!thread->done && !thread->pending) |
261 | return true; |
262 | } |
263 | } |
264 | |
265 | return false; |
266 | } |
267 | |
268 | static bool is_threaded_test_pending(struct dmatest_info *info) |
269 | { |
270 | struct dmatest_chan *dtc; |
271 | |
272 | list_for_each_entry(dtc, &info->channels, node) { |
273 | struct dmatest_thread *thread; |
274 | |
275 | list_for_each_entry(thread, &dtc->threads, node) { |
276 | if (thread->pending) |
277 | return true; |
278 | } |
279 | } |
280 | |
281 | return false; |
282 | } |
283 | |
284 | static int dmatest_wait_get(char *val, const struct kernel_param *kp) |
285 | { |
286 | struct dmatest_info *info = &test_info; |
287 | struct dmatest_params *params = &info->params; |
288 | |
289 | if (params->iterations) |
290 | wait_event(thread_wait, !is_threaded_test_run(info)); |
291 | wait = true; |
292 | return param_get_bool(buffer: val, kp); |
293 | } |
294 | |
/* Reading "wait" blocks until tests complete — see dmatest_wait_get(). */
static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)" );
301 | |
302 | static bool dmatest_match_channel(struct dmatest_params *params, |
303 | struct dma_chan *chan) |
304 | { |
305 | if (params->channel[0] == '\0') |
306 | return true; |
307 | return strcmp(dma_chan_name(chan), params->channel) == 0; |
308 | } |
309 | |
310 | static bool dmatest_match_device(struct dmatest_params *params, |
311 | struct dma_device *device) |
312 | { |
313 | if (params->device[0] == '\0') |
314 | return true; |
315 | return strcmp(dev_name(dev: device->dev), params->device) == 0; |
316 | } |
317 | |
/*
 * Return a random unsigned long for transfer length/offset selection.
 * Fixed: removed invalid "buf:"/"len:" named-argument artifacts.
 */
static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}
325 | |
326 | static inline u8 gen_inv_idx(u8 index, bool is_memset) |
327 | { |
328 | u8 val = is_memset ? PATTERN_MEMSET_IDX : index; |
329 | |
330 | return ~val & PATTERN_COUNT_MASK; |
331 | } |
332 | |
/* Source-buffer byte: bit 7 set (PATTERN_SRC) plus inverted counter bits. */
static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}
337 | |
/* Destination-buffer byte: bit 7 clear (PATTERN_DST) plus inverted counter. */
static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}
342 | |
343 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, |
344 | unsigned int buf_size, bool is_memset) |
345 | { |
346 | unsigned int i; |
347 | u8 *buf; |
348 | |
349 | for (; (buf = *bufs); bufs++) { |
350 | for (i = 0; i < start; i++) |
351 | buf[i] = gen_src_value(index: i, is_memset); |
352 | for ( ; i < start + len; i++) |
353 | buf[i] = gen_src_value(index: i, is_memset) | PATTERN_COPY; |
354 | for ( ; i < buf_size; i++) |
355 | buf[i] = gen_src_value(index: i, is_memset); |
356 | buf++; |
357 | } |
358 | } |
359 | |
360 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, |
361 | unsigned int buf_size, bool is_memset) |
362 | { |
363 | unsigned int i; |
364 | u8 *buf; |
365 | |
366 | for (; (buf = *bufs); bufs++) { |
367 | for (i = 0; i < start; i++) |
368 | buf[i] = gen_dst_value(index: i, is_memset); |
369 | for ( ; i < start + len; i++) |
370 | buf[i] = gen_dst_value(index: i, is_memset) | |
371 | PATTERN_OVERWRITE; |
372 | for ( ; i < buf_size; i++) |
373 | buf[i] = gen_dst_value(index: i, is_memset); |
374 | } |
375 | } |
376 | |
377 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, |
378 | unsigned int counter, bool is_srcbuf, bool is_memset) |
379 | { |
380 | u8 diff = actual ^ pattern; |
381 | u8 expected = pattern | gen_inv_idx(index: counter, is_memset); |
382 | const char *thread_name = current->comm; |
383 | |
384 | if (is_srcbuf) |
385 | pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n" , |
386 | thread_name, index, expected, actual); |
387 | else if ((pattern & PATTERN_COPY) |
388 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) |
389 | pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n" , |
390 | thread_name, index, expected, actual); |
391 | else if (diff & PATTERN_SRC) |
392 | pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n" , |
393 | thread_name, index, expected, actual); |
394 | else |
395 | pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n" , |
396 | thread_name, index, expected, actual); |
397 | } |
398 | |
399 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, |
400 | unsigned int end, unsigned int counter, u8 pattern, |
401 | bool is_srcbuf, bool is_memset) |
402 | { |
403 | unsigned int i; |
404 | unsigned int error_count = 0; |
405 | u8 actual; |
406 | u8 expected; |
407 | u8 *buf; |
408 | unsigned int counter_orig = counter; |
409 | |
410 | for (; (buf = *bufs); bufs++) { |
411 | counter = counter_orig; |
412 | for (i = start; i < end; i++) { |
413 | actual = buf[i]; |
414 | expected = pattern | gen_inv_idx(index: counter, is_memset); |
415 | if (actual != expected) { |
416 | if (error_count < MAX_ERROR_COUNT) |
417 | dmatest_mismatch(actual, pattern, index: i, |
418 | counter, is_srcbuf, |
419 | is_memset); |
420 | error_count++; |
421 | } |
422 | counter++; |
423 | } |
424 | } |
425 | |
426 | if (error_count > MAX_ERROR_COUNT) |
427 | pr_warn("%s: %u errors suppressed\n" , |
428 | current->comm, error_count - MAX_ERROR_COUNT); |
429 | |
430 | return error_count; |
431 | } |
432 | |
433 | |
434 | static void dmatest_callback(void *arg) |
435 | { |
436 | struct dmatest_done *done = arg; |
437 | struct dmatest_thread *thread = |
438 | container_of(done, struct dmatest_thread, test_done); |
439 | if (!thread->done) { |
440 | done->done = true; |
441 | wake_up_all(done->wait); |
442 | } else { |
443 | /* |
444 | * If thread->done, it means that this callback occurred |
445 | * after the parent thread has cleaned up. This can |
446 | * happen in the case that driver doesn't implement |
447 | * the terminate_all() functionality and a dma operation |
448 | * did not occur within the timeout period |
449 | */ |
450 | WARN(1, "dmatest: Kernel memory may be corrupted!!\n" ); |
451 | } |
452 | } |
453 | |
/* Smaller of x and y, rounded down to the nearest odd number. */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	if (val % 2 == 0)
		val--;
	return val;
}
460 | |
461 | static void result(const char *err, unsigned int n, unsigned int src_off, |
462 | unsigned int dst_off, unsigned int len, unsigned long data) |
463 | { |
464 | if (IS_ERR_VALUE(data)) { |
465 | pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%ld)\n" , |
466 | current->comm, n, err, src_off, dst_off, len, data); |
467 | } else { |
468 | pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n" , |
469 | current->comm, n, err, src_off, dst_off, len, data); |
470 | } |
471 | } |
472 | |
/* Like result(), but at debug verbosity — used for expected outcomes. */
static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n" ,
		 current->comm, n, err, src_off, dst_off, len, data);
}
480 | |
/*
 * Report a passed test: at info level when "verbose" is set, otherwise
 * only via dynamic debug (dbg_result).
 */
#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
	if (verbose) \
		result(err, n, src_off, dst_off, len, data); \
	else \
		dbg_result(err, n, src_off, dst_off, len, data);\
})
487 | |
488 | static unsigned long long dmatest_persec(s64 runtime, unsigned int val) |
489 | { |
490 | unsigned long long per_sec = 1000000; |
491 | |
492 | if (runtime <= 0) |
493 | return 0; |
494 | |
495 | /* drop precision until runtime is 32-bits */ |
496 | while (runtime > UINT_MAX) { |
497 | runtime >>= 1; |
498 | per_sec <<= 1; |
499 | } |
500 | |
501 | per_sec *= val; |
502 | per_sec = INT_TO_FIXPT(per_sec); |
503 | do_div(per_sec, runtime); |
504 | |
505 | return per_sec; |
506 | } |
507 | |
/* Throughput in KB/s (integer part); len is the total byte count moved. */
static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}
512 | |
513 | static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt) |
514 | { |
515 | unsigned int i; |
516 | |
517 | for (i = 0; i < cnt; i++) |
518 | kfree(objp: d->raw[i]); |
519 | |
520 | kfree(objp: d->aligned); |
521 | kfree(objp: d->raw); |
522 | } |
523 | |
524 | static void dmatest_free_test_data(struct dmatest_data *d) |
525 | { |
526 | __dmatest_free_test_data(d, cnt: d->cnt); |
527 | } |
528 | |
529 | static int dmatest_alloc_test_data(struct dmatest_data *d, |
530 | unsigned int buf_size, u8 align) |
531 | { |
532 | unsigned int i = 0; |
533 | |
534 | d->raw = kcalloc(n: d->cnt + 1, size: sizeof(u8 *), GFP_KERNEL); |
535 | if (!d->raw) |
536 | return -ENOMEM; |
537 | |
538 | d->aligned = kcalloc(n: d->cnt + 1, size: sizeof(u8 *), GFP_KERNEL); |
539 | if (!d->aligned) |
540 | goto err; |
541 | |
542 | for (i = 0; i < d->cnt; i++) { |
543 | d->raw[i] = kmalloc(size: buf_size + align, flags: d->gfp_flags); |
544 | if (!d->raw[i]) |
545 | goto err; |
546 | |
547 | /* align to alignment restriction */ |
548 | if (align) |
549 | d->aligned[i] = PTR_ALIGN(d->raw[i], align); |
550 | else |
551 | d->aligned[i] = d->raw[i]; |
552 | } |
553 | |
554 | return 0; |
555 | err: |
556 | __dmatest_free_test_data(d, cnt: i); |
557 | return -ENOMEM; |
558 | } |
559 | |
560 | /* |
561 | * This function repeatedly tests DMA transfers of various lengths and |
562 | * offsets for a given operation type until it is told to exit by |
563 | * kthread_stop(). There may be multiple threads running this function |
564 | * in parallel for a single channel, and there may be multiple channels |
565 | * being tested in parallel. |
566 | * |
567 | * Before each test, the source and destination buffer is initialized |
568 | * with a known pattern. This pattern is different depending on |
569 | * whether it's in an area which is supposed to be copied or |
570 | * overwritten, and different in the source and destination buffers. |
571 | * So if the DMA engine doesn't copy exactly what we tell it to copy, |
572 | * we'll notice. |
573 | */ |
574 | static int dmatest_func(void *data) |
575 | { |
576 | struct dmatest_thread *thread = data; |
577 | struct dmatest_done *done = &thread->test_done; |
578 | struct dmatest_info *info; |
579 | struct dmatest_params *params; |
580 | struct dma_chan *chan; |
581 | struct dma_device *dev; |
582 | struct device *dma_dev; |
583 | unsigned int error_count; |
584 | unsigned int failed_tests = 0; |
585 | unsigned int total_tests = 0; |
586 | dma_cookie_t cookie; |
587 | enum dma_status status; |
588 | enum dma_ctrl_flags flags; |
589 | u8 *pq_coefs = NULL; |
590 | int ret; |
591 | unsigned int buf_size; |
592 | struct dmatest_data *src; |
593 | struct dmatest_data *dst; |
594 | int i; |
595 | ktime_t ktime, start, diff; |
596 | ktime_t filltime = 0; |
597 | ktime_t comparetime = 0; |
598 | s64 runtime = 0; |
599 | unsigned long long total_len = 0; |
600 | unsigned long long iops = 0; |
601 | u8 align = 0; |
602 | bool is_memset = false; |
603 | dma_addr_t *srcs; |
604 | dma_addr_t *dma_pq; |
605 | |
606 | set_freezable(); |
607 | |
608 | ret = -ENOMEM; |
609 | |
610 | smp_rmb(); |
611 | thread->pending = false; |
612 | info = thread->info; |
613 | params = &info->params; |
614 | chan = thread->chan; |
615 | dev = chan->device; |
616 | dma_dev = dmaengine_get_dma_device(chan); |
617 | |
618 | src = &thread->src; |
619 | dst = &thread->dst; |
620 | if (thread->type == DMA_MEMCPY) { |
621 | align = params->alignment < 0 ? dev->copy_align : |
622 | params->alignment; |
623 | src->cnt = dst->cnt = 1; |
624 | } else if (thread->type == DMA_MEMSET) { |
625 | align = params->alignment < 0 ? dev->fill_align : |
626 | params->alignment; |
627 | src->cnt = dst->cnt = 1; |
628 | is_memset = true; |
629 | } else if (thread->type == DMA_XOR) { |
630 | /* force odd to ensure dst = src */ |
631 | src->cnt = min_odd(x: params->xor_sources | 1, y: dev->max_xor); |
632 | dst->cnt = 1; |
633 | align = params->alignment < 0 ? dev->xor_align : |
634 | params->alignment; |
635 | } else if (thread->type == DMA_PQ) { |
636 | /* force odd to ensure dst = src */ |
637 | src->cnt = min_odd(x: params->pq_sources | 1, y: dma_maxpq(dma: dev, flags: 0)); |
638 | dst->cnt = 2; |
639 | align = params->alignment < 0 ? dev->pq_align : |
640 | params->alignment; |
641 | |
642 | pq_coefs = kmalloc(size: params->pq_sources + 1, GFP_KERNEL); |
643 | if (!pq_coefs) |
644 | goto err_thread_type; |
645 | |
646 | for (i = 0; i < src->cnt; i++) |
647 | pq_coefs[i] = 1; |
648 | } else |
649 | goto err_thread_type; |
650 | |
651 | /* Check if buffer count fits into map count variable (u8) */ |
652 | if ((src->cnt + dst->cnt) >= 255) { |
653 | pr_err("too many buffers (%d of 255 supported)\n" , |
654 | src->cnt + dst->cnt); |
655 | goto err_free_coefs; |
656 | } |
657 | |
658 | buf_size = params->buf_size; |
659 | if (1 << align > buf_size) { |
660 | pr_err("%u-byte buffer too small for %d-byte alignment\n" , |
661 | buf_size, 1 << align); |
662 | goto err_free_coefs; |
663 | } |
664 | |
665 | src->gfp_flags = GFP_KERNEL; |
666 | dst->gfp_flags = GFP_KERNEL; |
667 | if (params->nobounce) { |
668 | src->gfp_flags = GFP_DMA; |
669 | dst->gfp_flags = GFP_DMA; |
670 | } |
671 | |
672 | if (dmatest_alloc_test_data(d: src, buf_size, align) < 0) |
673 | goto err_free_coefs; |
674 | |
675 | if (dmatest_alloc_test_data(d: dst, buf_size, align) < 0) |
676 | goto err_src; |
677 | |
678 | set_user_nice(current, nice: 10); |
679 | |
680 | srcs = kcalloc(n: src->cnt, size: sizeof(dma_addr_t), GFP_KERNEL); |
681 | if (!srcs) |
682 | goto err_dst; |
683 | |
684 | dma_pq = kcalloc(n: dst->cnt, size: sizeof(dma_addr_t), GFP_KERNEL); |
685 | if (!dma_pq) |
686 | goto err_srcs_array; |
687 | |
688 | /* |
689 | * src and dst buffers are freed by ourselves below |
690 | */ |
691 | if (params->polled) |
692 | flags = DMA_CTRL_ACK; |
693 | else |
694 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; |
695 | |
696 | ktime = ktime_get(); |
697 | while (!(kthread_should_stop() || |
698 | (params->iterations && total_tests >= params->iterations))) { |
699 | struct dma_async_tx_descriptor *tx = NULL; |
700 | struct dmaengine_unmap_data *um; |
701 | dma_addr_t *dsts; |
702 | unsigned int len; |
703 | |
704 | total_tests++; |
705 | |
706 | if (params->transfer_size) { |
707 | if (params->transfer_size >= buf_size) { |
708 | pr_err("%u-byte transfer size must be lower than %u-buffer size\n" , |
709 | params->transfer_size, buf_size); |
710 | break; |
711 | } |
712 | len = params->transfer_size; |
713 | } else if (params->norandom) { |
714 | len = buf_size; |
715 | } else { |
716 | len = dmatest_random() % buf_size + 1; |
717 | } |
718 | |
719 | /* Do not alter transfer size explicitly defined by user */ |
720 | if (!params->transfer_size) { |
721 | len = (len >> align) << align; |
722 | if (!len) |
723 | len = 1 << align; |
724 | } |
725 | total_len += len; |
726 | |
727 | if (params->norandom) { |
728 | src->off = 0; |
729 | dst->off = 0; |
730 | } else { |
731 | src->off = dmatest_random() % (buf_size - len + 1); |
732 | dst->off = dmatest_random() % (buf_size - len + 1); |
733 | |
734 | src->off = (src->off >> align) << align; |
735 | dst->off = (dst->off >> align) << align; |
736 | } |
737 | |
738 | if (!params->noverify) { |
739 | start = ktime_get(); |
740 | dmatest_init_srcs(bufs: src->aligned, start: src->off, len, |
741 | buf_size, is_memset); |
742 | dmatest_init_dsts(bufs: dst->aligned, start: dst->off, len, |
743 | buf_size, is_memset); |
744 | |
745 | diff = ktime_sub(ktime_get(), start); |
746 | filltime = ktime_add(filltime, diff); |
747 | } |
748 | |
749 | um = dmaengine_get_unmap_data(dev: dma_dev, nr: src->cnt + dst->cnt, |
750 | GFP_KERNEL); |
751 | if (!um) { |
752 | failed_tests++; |
753 | result(err: "unmap data NULL" , n: total_tests, |
754 | src_off: src->off, dst_off: dst->off, len, data: ret); |
755 | continue; |
756 | } |
757 | |
758 | um->len = buf_size; |
759 | for (i = 0; i < src->cnt; i++) { |
760 | void *buf = src->aligned[i]; |
761 | struct page *pg = virt_to_page(buf); |
762 | unsigned long pg_off = offset_in_page(buf); |
763 | |
764 | um->addr[i] = dma_map_page(dma_dev, pg, pg_off, |
765 | um->len, DMA_TO_DEVICE); |
766 | srcs[i] = um->addr[i] + src->off; |
767 | ret = dma_mapping_error(dev: dma_dev, dma_addr: um->addr[i]); |
768 | if (ret) { |
769 | result(err: "src mapping error" , n: total_tests, |
770 | src_off: src->off, dst_off: dst->off, len, data: ret); |
771 | goto error_unmap_continue; |
772 | } |
773 | um->to_cnt++; |
774 | } |
775 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
776 | dsts = &um->addr[src->cnt]; |
777 | for (i = 0; i < dst->cnt; i++) { |
778 | void *buf = dst->aligned[i]; |
779 | struct page *pg = virt_to_page(buf); |
780 | unsigned long pg_off = offset_in_page(buf); |
781 | |
782 | dsts[i] = dma_map_page(dma_dev, pg, pg_off, um->len, |
783 | DMA_BIDIRECTIONAL); |
784 | ret = dma_mapping_error(dev: dma_dev, dma_addr: dsts[i]); |
785 | if (ret) { |
786 | result(err: "dst mapping error" , n: total_tests, |
787 | src_off: src->off, dst_off: dst->off, len, data: ret); |
788 | goto error_unmap_continue; |
789 | } |
790 | um->bidi_cnt++; |
791 | } |
792 | |
793 | if (thread->type == DMA_MEMCPY) |
794 | tx = dev->device_prep_dma_memcpy(chan, |
795 | dsts[0] + dst->off, |
796 | srcs[0], len, flags); |
797 | else if (thread->type == DMA_MEMSET) |
798 | tx = dev->device_prep_dma_memset(chan, |
799 | dsts[0] + dst->off, |
800 | *(src->aligned[0] + src->off), |
801 | len, flags); |
802 | else if (thread->type == DMA_XOR) |
803 | tx = dev->device_prep_dma_xor(chan, |
804 | dsts[0] + dst->off, |
805 | srcs, src->cnt, |
806 | len, flags); |
807 | else if (thread->type == DMA_PQ) { |
808 | for (i = 0; i < dst->cnt; i++) |
809 | dma_pq[i] = dsts[i] + dst->off; |
810 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, |
811 | src->cnt, pq_coefs, |
812 | len, flags); |
813 | } |
814 | |
815 | if (!tx) { |
816 | result(err: "prep error" , n: total_tests, src_off: src->off, |
817 | dst_off: dst->off, len, data: ret); |
818 | msleep(msecs: 100); |
819 | goto error_unmap_continue; |
820 | } |
821 | |
822 | done->done = false; |
823 | if (!params->polled) { |
824 | tx->callback = dmatest_callback; |
825 | tx->callback_param = done; |
826 | } |
827 | cookie = tx->tx_submit(tx); |
828 | |
829 | if (dma_submit_error(cookie)) { |
830 | result(err: "submit error" , n: total_tests, src_off: src->off, |
831 | dst_off: dst->off, len, data: ret); |
832 | msleep(msecs: 100); |
833 | goto error_unmap_continue; |
834 | } |
835 | |
836 | if (params->polled) { |
837 | status = dma_sync_wait(chan, cookie); |
838 | dmaengine_terminate_sync(chan); |
839 | if (status == DMA_COMPLETE) |
840 | done->done = true; |
841 | } else { |
842 | dma_async_issue_pending(chan); |
843 | |
844 | wait_event_freezable_timeout(thread->done_wait, |
845 | done->done, |
846 | msecs_to_jiffies(params->timeout)); |
847 | |
848 | status = dma_async_is_tx_complete(chan, cookie, NULL, |
849 | NULL); |
850 | } |
851 | |
852 | if (!done->done) { |
853 | result(err: "test timed out" , n: total_tests, src_off: src->off, dst_off: dst->off, |
854 | len, data: 0); |
855 | goto error_unmap_continue; |
856 | } else if (status != DMA_COMPLETE && |
857 | !(dma_has_cap(DMA_COMPLETION_NO_ORDER, |
858 | dev->cap_mask) && |
859 | status == DMA_OUT_OF_ORDER)) { |
860 | result(err: status == DMA_ERROR ? |
861 | "completion error status" : |
862 | "completion busy status" , n: total_tests, src_off: src->off, |
863 | dst_off: dst->off, len, data: ret); |
864 | goto error_unmap_continue; |
865 | } |
866 | |
867 | dmaengine_unmap_put(unmap: um); |
868 | |
869 | if (params->noverify) { |
870 | verbose_result("test passed" , total_tests, src->off, |
871 | dst->off, len, 0); |
872 | continue; |
873 | } |
874 | |
875 | start = ktime_get(); |
876 | pr_debug("%s: verifying source buffer...\n" , current->comm); |
877 | error_count = dmatest_verify(bufs: src->aligned, start: 0, end: src->off, |
878 | counter: 0, PATTERN_SRC, is_srcbuf: true, is_memset); |
879 | error_count += dmatest_verify(bufs: src->aligned, start: src->off, |
880 | end: src->off + len, counter: src->off, |
881 | PATTERN_SRC | PATTERN_COPY, is_srcbuf: true, is_memset); |
882 | error_count += dmatest_verify(bufs: src->aligned, start: src->off + len, |
883 | end: buf_size, counter: src->off + len, |
884 | PATTERN_SRC, is_srcbuf: true, is_memset); |
885 | |
886 | pr_debug("%s: verifying dest buffer...\n" , current->comm); |
887 | error_count += dmatest_verify(bufs: dst->aligned, start: 0, end: dst->off, |
888 | counter: 0, PATTERN_DST, is_srcbuf: false, is_memset); |
889 | |
890 | error_count += dmatest_verify(bufs: dst->aligned, start: dst->off, |
891 | end: dst->off + len, counter: src->off, |
892 | PATTERN_SRC | PATTERN_COPY, is_srcbuf: false, is_memset); |
893 | |
894 | error_count += dmatest_verify(bufs: dst->aligned, start: dst->off + len, |
895 | end: buf_size, counter: dst->off + len, |
896 | PATTERN_DST, is_srcbuf: false, is_memset); |
897 | |
898 | diff = ktime_sub(ktime_get(), start); |
899 | comparetime = ktime_add(comparetime, diff); |
900 | |
901 | if (error_count) { |
902 | result(err: "data error" , n: total_tests, src_off: src->off, dst_off: dst->off, |
903 | len, data: error_count); |
904 | failed_tests++; |
905 | } else { |
906 | verbose_result("test passed" , total_tests, src->off, |
907 | dst->off, len, 0); |
908 | } |
909 | |
910 | continue; |
911 | |
912 | error_unmap_continue: |
913 | dmaengine_unmap_put(unmap: um); |
914 | failed_tests++; |
915 | } |
916 | ktime = ktime_sub(ktime_get(), ktime); |
917 | ktime = ktime_sub(ktime, comparetime); |
918 | ktime = ktime_sub(ktime, filltime); |
919 | runtime = ktime_to_us(kt: ktime); |
920 | |
921 | ret = 0; |
922 | kfree(objp: dma_pq); |
923 | err_srcs_array: |
924 | kfree(objp: srcs); |
925 | err_dst: |
926 | dmatest_free_test_data(d: dst); |
927 | err_src: |
928 | dmatest_free_test_data(d: src); |
929 | err_free_coefs: |
930 | kfree(objp: pq_coefs); |
931 | err_thread_type: |
932 | iops = dmatest_persec(runtime, val: total_tests); |
933 | pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n" , |
934 | current->comm, total_tests, failed_tests, |
935 | FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops), |
936 | dmatest_KBs(runtime, total_len), ret); |
937 | |
938 | /* terminate all transfers on specified channels */ |
939 | if (ret || failed_tests) |
940 | dmaengine_terminate_sync(chan); |
941 | |
942 | thread->done = true; |
943 | wake_up(&thread_wait); |
944 | |
945 | return ret; |
946 | } |
947 | |
948 | static void dmatest_cleanup_channel(struct dmatest_chan *dtc) |
949 | { |
950 | struct dmatest_thread *thread; |
951 | struct dmatest_thread *_thread; |
952 | int ret; |
953 | |
954 | list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { |
955 | ret = kthread_stop(k: thread->task); |
956 | pr_debug("thread %s exited with status %d\n" , |
957 | thread->task->comm, ret); |
958 | list_del(entry: &thread->node); |
959 | put_task_struct(t: thread->task); |
960 | kfree(objp: thread); |
961 | } |
962 | |
963 | /* terminate all transfers on specified channels */ |
964 | dmaengine_terminate_sync(chan: dtc->chan); |
965 | |
966 | kfree(objp: dtc); |
967 | } |
968 | |
969 | static int dmatest_add_threads(struct dmatest_info *info, |
970 | struct dmatest_chan *dtc, enum dma_transaction_type type) |
971 | { |
972 | struct dmatest_params *params = &info->params; |
973 | struct dmatest_thread *thread; |
974 | struct dma_chan *chan = dtc->chan; |
975 | char *op; |
976 | unsigned int i; |
977 | |
978 | if (type == DMA_MEMCPY) |
979 | op = "copy" ; |
980 | else if (type == DMA_MEMSET) |
981 | op = "set" ; |
982 | else if (type == DMA_XOR) |
983 | op = "xor" ; |
984 | else if (type == DMA_PQ) |
985 | op = "pq" ; |
986 | else |
987 | return -EINVAL; |
988 | |
989 | for (i = 0; i < params->threads_per_chan; i++) { |
990 | thread = kzalloc(size: sizeof(struct dmatest_thread), GFP_KERNEL); |
991 | if (!thread) { |
992 | pr_warn("No memory for %s-%s%u\n" , |
993 | dma_chan_name(chan), op, i); |
994 | break; |
995 | } |
996 | thread->info = info; |
997 | thread->chan = dtc->chan; |
998 | thread->type = type; |
999 | thread->test_done.wait = &thread->done_wait; |
1000 | init_waitqueue_head(&thread->done_wait); |
1001 | smp_wmb(); |
1002 | thread->task = kthread_create(dmatest_func, thread, "%s-%s%u" , |
1003 | dma_chan_name(chan), op, i); |
1004 | if (IS_ERR(ptr: thread->task)) { |
1005 | pr_warn("Failed to create thread %s-%s%u\n" , |
1006 | dma_chan_name(chan), op, i); |
1007 | kfree(objp: thread); |
1008 | break; |
1009 | } |
1010 | |
1011 | /* srcbuf and dstbuf are allocated by the thread itself */ |
1012 | get_task_struct(t: thread->task); |
1013 | list_add_tail(new: &thread->node, head: &dtc->threads); |
1014 | thread->pending = true; |
1015 | } |
1016 | |
1017 | return i; |
1018 | } |
1019 | |
1020 | static int dmatest_add_channel(struct dmatest_info *info, |
1021 | struct dma_chan *chan) |
1022 | { |
1023 | struct dmatest_chan *dtc; |
1024 | struct dma_device *dma_dev = chan->device; |
1025 | unsigned int thread_count = 0; |
1026 | int cnt; |
1027 | |
1028 | dtc = kmalloc(size: sizeof(struct dmatest_chan), GFP_KERNEL); |
1029 | if (!dtc) { |
1030 | pr_warn("No memory for %s\n" , dma_chan_name(chan)); |
1031 | return -ENOMEM; |
1032 | } |
1033 | |
1034 | dtc->chan = chan; |
1035 | INIT_LIST_HEAD(list: &dtc->threads); |
1036 | |
1037 | if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) && |
1038 | info->params.polled) { |
1039 | info->params.polled = false; |
1040 | pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n" ); |
1041 | } |
1042 | |
1043 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
1044 | if (dmatest == 0) { |
1045 | cnt = dmatest_add_threads(info, dtc, type: DMA_MEMCPY); |
1046 | thread_count += cnt > 0 ? cnt : 0; |
1047 | } |
1048 | } |
1049 | |
1050 | if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { |
1051 | if (dmatest == 1) { |
1052 | cnt = dmatest_add_threads(info, dtc, type: DMA_MEMSET); |
1053 | thread_count += cnt > 0 ? cnt : 0; |
1054 | } |
1055 | } |
1056 | |
1057 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
1058 | cnt = dmatest_add_threads(info, dtc, type: DMA_XOR); |
1059 | thread_count += cnt > 0 ? cnt : 0; |
1060 | } |
1061 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { |
1062 | cnt = dmatest_add_threads(info, dtc, type: DMA_PQ); |
1063 | thread_count += cnt > 0 ? cnt : 0; |
1064 | } |
1065 | |
1066 | pr_info("Added %u threads using %s\n" , |
1067 | thread_count, dma_chan_name(chan)); |
1068 | |
1069 | list_add_tail(new: &dtc->node, head: &info->channels); |
1070 | info->nr_channels++; |
1071 | |
1072 | return 0; |
1073 | } |
1074 | |
1075 | static bool filter(struct dma_chan *chan, void *param) |
1076 | { |
1077 | return dmatest_match_channel(params: param, chan) && dmatest_match_device(params: param, device: chan->device); |
1078 | } |
1079 | |
1080 | static void request_channels(struct dmatest_info *info, |
1081 | enum dma_transaction_type type) |
1082 | { |
1083 | dma_cap_mask_t mask; |
1084 | |
1085 | dma_cap_zero(mask); |
1086 | dma_cap_set(type, mask); |
1087 | for (;;) { |
1088 | struct dmatest_params *params = &info->params; |
1089 | struct dma_chan *chan; |
1090 | |
1091 | chan = dma_request_channel(mask, filter, params); |
1092 | if (chan) { |
1093 | if (dmatest_add_channel(info, chan)) { |
1094 | dma_release_channel(chan); |
1095 | break; /* add_channel failed, punt */ |
1096 | } |
1097 | } else |
1098 | break; /* no more channels available */ |
1099 | if (params->max_channels && |
1100 | info->nr_channels >= params->max_channels) |
1101 | break; /* we have all we need */ |
1102 | } |
1103 | } |
1104 | |
1105 | static void add_threaded_test(struct dmatest_info *info) |
1106 | { |
1107 | struct dmatest_params *params = &info->params; |
1108 | |
1109 | /* Copy test parameters */ |
1110 | params->nobounce = nobounce; |
1111 | params->buf_size = test_buf_size; |
1112 | strscpy(params->channel, strim(test_channel), sizeof(params->channel)); |
1113 | strscpy(params->device, strim(test_device), sizeof(params->device)); |
1114 | params->threads_per_chan = threads_per_chan; |
1115 | params->max_channels = max_channels; |
1116 | params->iterations = iterations; |
1117 | params->xor_sources = xor_sources; |
1118 | params->pq_sources = pq_sources; |
1119 | params->timeout = timeout; |
1120 | params->noverify = noverify; |
1121 | params->norandom = norandom; |
1122 | params->alignment = alignment; |
1123 | params->transfer_size = transfer_size; |
1124 | params->polled = polled; |
1125 | |
1126 | request_channels(info, type: DMA_MEMCPY); |
1127 | request_channels(info, type: DMA_MEMSET); |
1128 | request_channels(info, type: DMA_XOR); |
1129 | request_channels(info, type: DMA_PQ); |
1130 | } |
1131 | |
1132 | static void run_pending_tests(struct dmatest_info *info) |
1133 | { |
1134 | struct dmatest_chan *dtc; |
1135 | unsigned int thread_count = 0; |
1136 | |
1137 | list_for_each_entry(dtc, &info->channels, node) { |
1138 | struct dmatest_thread *thread; |
1139 | |
1140 | thread_count = 0; |
1141 | list_for_each_entry(thread, &dtc->threads, node) { |
1142 | wake_up_process(tsk: thread->task); |
1143 | thread_count++; |
1144 | } |
1145 | pr_info("Started %u threads using %s\n" , |
1146 | thread_count, dma_chan_name(dtc->chan)); |
1147 | } |
1148 | } |
1149 | |
1150 | static void stop_threaded_test(struct dmatest_info *info) |
1151 | { |
1152 | struct dmatest_chan *dtc, *_dtc; |
1153 | struct dma_chan *chan; |
1154 | |
1155 | list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { |
1156 | list_del(entry: &dtc->node); |
1157 | chan = dtc->chan; |
1158 | dmatest_cleanup_channel(dtc); |
1159 | pr_debug("dropped channel %s\n" , dma_chan_name(chan)); |
1160 | dma_release_channel(chan); |
1161 | } |
1162 | |
1163 | info->nr_channels = 0; |
1164 | } |
1165 | |
1166 | static void start_threaded_tests(struct dmatest_info *info) |
1167 | { |
1168 | /* we might be called early to set run=, defer running until all |
1169 | * parameters have been evaluated |
1170 | */ |
1171 | if (!info->did_init) |
1172 | return; |
1173 | |
1174 | run_pending_tests(info); |
1175 | } |
1176 | |
1177 | static int dmatest_run_get(char *val, const struct kernel_param *kp) |
1178 | { |
1179 | struct dmatest_info *info = &test_info; |
1180 | |
1181 | mutex_lock(&info->lock); |
1182 | if (is_threaded_test_run(info)) { |
1183 | dmatest_run = true; |
1184 | } else { |
1185 | if (!is_threaded_test_pending(info)) |
1186 | stop_threaded_test(info); |
1187 | dmatest_run = false; |
1188 | } |
1189 | mutex_unlock(lock: &info->lock); |
1190 | |
1191 | return param_get_bool(buffer: val, kp); |
1192 | } |
1193 | |
1194 | static int dmatest_run_set(const char *val, const struct kernel_param *kp) |
1195 | { |
1196 | struct dmatest_info *info = &test_info; |
1197 | int ret; |
1198 | |
1199 | mutex_lock(&info->lock); |
1200 | ret = param_set_bool(val, kp); |
1201 | if (ret) { |
1202 | mutex_unlock(lock: &info->lock); |
1203 | return ret; |
1204 | } else if (dmatest_run) { |
1205 | if (!is_threaded_test_pending(info)) { |
1206 | /* |
1207 | * We have nothing to run. This can be due to: |
1208 | */ |
1209 | ret = info->last_error; |
1210 | if (ret) { |
1211 | /* 1) Misconfiguration */ |
1212 | pr_err("Channel misconfigured, can't continue\n" ); |
1213 | mutex_unlock(lock: &info->lock); |
1214 | return ret; |
1215 | } else { |
1216 | /* 2) We rely on defaults */ |
1217 | pr_info("No channels configured, continue with any\n" ); |
1218 | if (!is_threaded_test_run(info)) |
1219 | stop_threaded_test(info); |
1220 | add_threaded_test(info); |
1221 | } |
1222 | } |
1223 | start_threaded_tests(info); |
1224 | } else { |
1225 | stop_threaded_test(info); |
1226 | } |
1227 | |
1228 | mutex_unlock(lock: &info->lock); |
1229 | |
1230 | return ret; |
1231 | } |
1232 | |
1233 | static int dmatest_chan_set(const char *val, const struct kernel_param *kp) |
1234 | { |
1235 | struct dmatest_info *info = &test_info; |
1236 | struct dmatest_chan *dtc; |
1237 | char chan_reset_val[20]; |
1238 | int ret; |
1239 | |
1240 | mutex_lock(&info->lock); |
1241 | ret = param_set_copystring(val, kp); |
1242 | if (ret) { |
1243 | mutex_unlock(lock: &info->lock); |
1244 | return ret; |
1245 | } |
1246 | /*Clear any previously run threads */ |
1247 | if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) |
1248 | stop_threaded_test(info); |
1249 | /* Reject channels that are already registered */ |
1250 | if (is_threaded_test_pending(info)) { |
1251 | list_for_each_entry(dtc, &info->channels, node) { |
1252 | if (strcmp(dma_chan_name(chan: dtc->chan), |
1253 | strim(test_channel)) == 0) { |
1254 | dtc = list_last_entry(&info->channels, |
1255 | struct dmatest_chan, |
1256 | node); |
1257 | strscpy(chan_reset_val, |
1258 | dma_chan_name(dtc->chan), |
1259 | sizeof(chan_reset_val)); |
1260 | ret = -EBUSY; |
1261 | goto add_chan_err; |
1262 | } |
1263 | } |
1264 | } |
1265 | |
1266 | add_threaded_test(info); |
1267 | |
1268 | /* Check if channel was added successfully */ |
1269 | if (!list_empty(head: &info->channels)) { |
1270 | /* |
1271 | * if new channel was not successfully added, revert the |
1272 | * "test_channel" string to the name of the last successfully |
1273 | * added channel. exception for when users issues empty string |
1274 | * to channel parameter. |
1275 | */ |
1276 | dtc = list_last_entry(&info->channels, struct dmatest_chan, node); |
1277 | if ((strcmp(dma_chan_name(chan: dtc->chan), strim(test_channel)) != 0) |
1278 | && (strcmp("" , strim(test_channel)) != 0)) { |
1279 | ret = -EINVAL; |
1280 | strscpy(chan_reset_val, dma_chan_name(dtc->chan), |
1281 | sizeof(chan_reset_val)); |
1282 | goto add_chan_err; |
1283 | } |
1284 | |
1285 | } else { |
1286 | /* Clear test_channel if no channels were added successfully */ |
1287 | strscpy(chan_reset_val, "" , sizeof(chan_reset_val)); |
1288 | ret = -EBUSY; |
1289 | goto add_chan_err; |
1290 | } |
1291 | |
1292 | info->last_error = ret; |
1293 | mutex_unlock(lock: &info->lock); |
1294 | |
1295 | return ret; |
1296 | |
1297 | add_chan_err: |
1298 | param_set_copystring(val: chan_reset_val, kp); |
1299 | info->last_error = ret; |
1300 | mutex_unlock(lock: &info->lock); |
1301 | |
1302 | return ret; |
1303 | } |
1304 | |
1305 | static int dmatest_chan_get(char *val, const struct kernel_param *kp) |
1306 | { |
1307 | struct dmatest_info *info = &test_info; |
1308 | |
1309 | mutex_lock(&info->lock); |
1310 | if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) { |
1311 | stop_threaded_test(info); |
1312 | strscpy(test_channel, "" , sizeof(test_channel)); |
1313 | } |
1314 | mutex_unlock(lock: &info->lock); |
1315 | |
1316 | return param_get_string(buffer: val, kp); |
1317 | } |
1318 | |
1319 | static int dmatest_test_list_get(char *val, const struct kernel_param *kp) |
1320 | { |
1321 | struct dmatest_info *info = &test_info; |
1322 | struct dmatest_chan *dtc; |
1323 | unsigned int thread_count = 0; |
1324 | |
1325 | list_for_each_entry(dtc, &info->channels, node) { |
1326 | struct dmatest_thread *thread; |
1327 | |
1328 | thread_count = 0; |
1329 | list_for_each_entry(thread, &dtc->threads, node) { |
1330 | thread_count++; |
1331 | } |
1332 | pr_info("%u threads using %s\n" , |
1333 | thread_count, dma_chan_name(dtc->chan)); |
1334 | } |
1335 | |
1336 | return 0; |
1337 | } |
1338 | |
1339 | static int __init dmatest_init(void) |
1340 | { |
1341 | struct dmatest_info *info = &test_info; |
1342 | struct dmatest_params *params = &info->params; |
1343 | |
1344 | if (dmatest_run) { |
1345 | mutex_lock(&info->lock); |
1346 | add_threaded_test(info); |
1347 | run_pending_tests(info); |
1348 | mutex_unlock(lock: &info->lock); |
1349 | } |
1350 | |
1351 | if (params->iterations && wait) |
1352 | wait_event(thread_wait, !is_threaded_test_run(info)); |
1353 | |
1354 | /* module parameters are stable, inittime tests are started, |
1355 | * let userspace take over 'run' control |
1356 | */ |
1357 | info->did_init = true; |
1358 | |
1359 | return 0; |
1360 | } |
1361 | /* when compiled-in wait for drivers to load first */ |
1362 | late_initcall(dmatest_init); |
1363 | |
1364 | static void __exit dmatest_exit(void) |
1365 | { |
1366 | struct dmatest_info *info = &test_info; |
1367 | |
1368 | mutex_lock(&info->lock); |
1369 | stop_threaded_test(info); |
1370 | mutex_unlock(lock: &info->lock); |
1371 | } |
1372 | module_exit(dmatest_exit); |
1373 | |
1374 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)" ); |
1375 | MODULE_LICENSE("GPL v2" ); |
1376 | |