// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone accumulator queue manager
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

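/*
 * Translate a queue's offset within a range to its queue instance.
 * Instances are laid out contiguously from queue_base_inst, spaced
 * (1 << inst_shift) bytes apart.
 */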
#define knav_range_offset_to_inst(kdev, range, q)	\
	(range->queue_base_inst + (q << kdev->inst_shift))

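/*
 * Fire notification callbacks for the queues an accumulator channel
 * serves. A multi-queue range shares one channel across all of its
 * queues, so every instance with notify_needed set is notified;
 * otherwise the channel maps back to exactly one queue.
 */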
static void __knav_acc_notify(struct knav_range_info *range,
			      struct knav_acc_channel *acc)
{
	struct knav_device *kdev = range->kdev;
	struct knav_queue_inst *inst;
	int range_base, queue;

	range_base = kdev->base_id + range->queue_base;

	if (range->flags & RANGE_MULTI_QUEUE) {
		for (queue = 0; queue < range->num_queues; queue++) {
			inst = knav_range_offset_to_inst(kdev, range, queue);
			if (inst->notify_needed) {
				inst->notify_needed = 0;
				dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
					range_base + queue);
				knav_queue_notify(inst);
			}
		}
	} else {
		queue = acc->channel - range->acc_info.start_channel;
		inst = knav_range_offset_to_inst(kdev, range, queue);
		dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
			range_base + queue);
		knav_queue_notify(inst);
	}
}

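/*
 * Range set_notify hook. There is no interrupt to mask or unmask here;
 * instead, when notifications are re-enabled while descriptors are
 * still pending, poke the INTD status register so the accumulator
 * interrupt fires again and the pending work gets picked up.
 */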
static int knav_acc_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *kq,
			       bool enabled)
{
	struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
	struct knav_device *kdev = range->kdev;
	u32 mask, offset;

	/*
	 * when enabling, we need to re-trigger an interrupt if we
	 * have descriptors pending
	 */
	if (!enabled || atomic_read(&kq->desc_count) <= 0)
		return 0;

	kq->notify_needed = 1;
	atomic_inc(&kq->acc->retrigger_count);
	mask = BIT(kq->acc->channel % 32);
	offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
	dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
		kq->acc->name);
	writel_relaxed(mask, pdsp->intd + offset);
	return 0;
}

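/*
 * Accumulator interrupt handler. Each channel owns a ping-pong pair of
 * descriptor lists that the PDSP firmware fills alternately. Drain the
 * current list into the per-queue descriptor rings, notify the affected
 * queues, hand the cleared list back to the device and flip to the
 * other one. A pending re-trigger (see knav_acc_set_notify()) only
 * needs the notification pass, not a list drain.
 */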
static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
	struct knav_acc_channel *acc;
	struct knav_queue_inst *kq = NULL;
	struct knav_range_info *range;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	struct knav_device *kdev;

	u32 *list, *list_cpu, val, idx, notifies;
	int range_base, channel, queue = 0;
	dma_addr_t list_dma;

	range = _instdata;
	info = &range->acc_info;
	kdev = range->kdev;
	pdsp = range->acc_info.pdsp;
	acc = range->acc;

	range_base = kdev->base_id + range->queue_base;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
		for (queue = 0; queue < range->num_irqs; queue++)
			if (range->irqs[queue].irq == irq)
				break;
		kq = knav_range_offset_to_inst(kdev, range, queue);
		acc += queue;
	}

	channel = acc->channel;
	list_dma = acc->list_dma[acc->list_index];
	list_cpu = acc->list_cpu[acc->list_index];
	dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
		channel, acc->list_index, list_cpu, &list_dma);
	if (atomic_read(&acc->retrigger_count)) {
		atomic_dec(&acc->retrigger_count);
		__knav_acc_notify(range, acc);
		writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
		/* ack the interrupt */
		writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
			       pdsp->intd + ACC_INTD_OFFSET_EOI);

		return IRQ_HANDLED;
	}

	notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
	WARN_ON(!notifies);
	dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
				DMA_FROM_DEVICE);

	for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
	     list += ACC_LIST_ENTRY_WORDS) {
		if (ACC_LIST_ENTRY_WORDS == 1) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x\n",
				acc->list_index, list, list[0]);
		} else if (ACC_LIST_ENTRY_WORDS == 2) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x\n",
				acc->list_index, list, list[0], list[1]);
		} else if (ACC_LIST_ENTRY_WORDS == 4) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
				acc->list_index, list, list[0], list[1],
				list[2], list[3]);
		}

		val = list[ACC_LIST_ENTRY_DESC_IDX];
		if (!val)
			break;

		if (range->flags & RANGE_MULTI_QUEUE) {
			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
			if (queue < range_base ||
			    queue >= range_base + range->num_queues) {
				dev_err(kdev->dev,
					"bad queue %d, expecting %d-%d\n",
					queue, range_base,
					range_base + range->num_queues);
				break;
			}
			queue -= range_base;
			kq = knav_range_offset_to_inst(kdev, range, queue);
		}

		if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
			atomic_dec(&kq->desc_count);
			dev_err(kdev->dev,
				"acc-irq: queue %d full, entry dropped\n",
				queue + range_base);
			continue;
		}

		idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
		kq->descs[idx] = val;
		kq->notify_needed = 1;
		dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
			val, idx, queue + range_base);
	}

	__knav_acc_notify(range, acc);
	memset(list_cpu, 0, info->list_size);
	dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
				   DMA_TO_DEVICE);

	/* flip to the other list */
	acc->list_index ^= 1;

	/* reset the interrupt counter */
	writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

	/* ack the interrupt */
	writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
		       pdsp->intd + ACC_INTD_OFFSET_EOI);

	return IRQ_HANDLED;
}

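/*
 * Request or free the accumulator channel interrupt for a queue.
 * open_mask has one bit per open queue in the range; the IRQ is
 * requested when the first bit is set and freed when the last one
 * clears, so a shared multi-queue channel is effectively refcounted.
 */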
static int knav_range_setup_acc_irq(struct knav_range_info *range,
				    int queue, bool enabled)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	struct cpumask *cpu_mask;
	int ret = 0, irq;
	u32 old, new;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		irq = range->irqs[0].irq;
		cpu_mask = range->irqs[0].cpu_mask;
	} else {
		acc = range->acc + queue;
		irq = range->irqs[queue].irq;
		cpu_mask = range->irqs[queue].cpu_mask;
	}

	old = acc->open_mask;
	if (enabled)
		new = old | BIT(queue);
	else
		new = old & ~BIT(queue);
	acc->open_mask = new;

	dev_dbg(kdev->dev,
		"setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
		old, new, acc->name);

	if (likely(new == old))
		return 0;

	if (new && !old) {
		dev_dbg(kdev->dev,
			"setup-acc-irq: requesting %s for channel %s\n",
			acc->name, acc->name);
		ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
				  range);
		if (!ret && cpu_mask) {
			ret = irq_set_affinity_hint(irq, cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}

	if (old && !new) {
		dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
			acc->name, acc->name);
		ret = irq_set_affinity_hint(irq, NULL);
		if (ret)
			dev_warn(range->kdev->dev,
				 "Failed to set IRQ affinity\n");
		free_irq(irq, range);
	}

	return ret;
}

static const char *knav_acc_result_str(enum knav_acc_result result)
{
	static const char * const result_str[] = {
		[ACC_RET_IDLE]			= "idle",
		[ACC_RET_SUCCESS]		= "success",
		[ACC_RET_INVALID_COMMAND]	= "invalid command",
		[ACC_RET_INVALID_CHANNEL]	= "invalid channel",
		[ACC_RET_INACTIVE_CHANNEL]	= "inactive channel",
		[ACC_RET_ACTIVE_CHANNEL]	= "active channel",
		[ACC_RET_INVALID_QUEUE]		= "invalid queue",
		[ACC_RET_INVALID_RET]		= "invalid return code",
	};

	if (result >= ARRAY_SIZE(result_str))
		return result_str[ACC_RET_INVALID_RET];
	else
		return result_str[result];
}

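/*
 * Post a command block to the accumulator firmware and wait for it to
 * complete. The command word is written last, since writing it kicks
 * off processing; the firmware clears the command code in bits 8-15
 * when it is done and reports its result in bits 24-31.
 */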
static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
	       struct knav_reg_acc_command *cmd)
{
	u32 result;

	dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
		cmd->command, cmd->queue_mask, cmd->list_dma,
		cmd->queue_num, cmd->timer_config);

	writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
	writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
	writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
	writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
	writel_relaxed(cmd->command, &pdsp->acc_command->command);

	/* wait for the command to clear */
	do {
		result = readl_relaxed(&pdsp->acc_command->command);
	} while ((result >> 8) & 0xff);

	return (result >> 24) & 0xff;
}

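/*
 * Build the firmware command block for a channel. A multi-queue
 * channel monitors a contiguous, 32-aligned block of queues selected
 * by queue_mask; a single-queue channel monitors queue_base + queue
 * alone. queue_num packs the list entry count (upper half) with the
 * base queue (lower half), and timer_config packs the list entry type,
 * pacing mode and timer tick count.
 */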
static void knav_acc_setup_cmd(struct knav_device *kdev,
			       struct knav_range_info *range,
			       struct knav_reg_acc_command *cmd,
			       int queue)
{
	struct knav_acc_info *info = &range->acc_info;
	struct knav_acc_channel *acc;
	int queue_base;
	u32 queue_mask;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		queue_base = range->queue_base;
		queue_mask = BIT(range->num_queues) - 1;
	} else {
		acc = range->acc + queue;
		queue_base = range->queue_base + queue;
		queue_mask = 0;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->command = acc->channel;
	cmd->queue_mask = queue_mask;
	cmd->list_dma = (u32)acc->list_dma[0];
	cmd->queue_num = info->list_entries << 16;
	cmd->queue_num |= queue_base;

	cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
	if (range->flags & RANGE_MULTI_QUEUE)
		cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
	cmd->timer_config |= info->pacing_mode << 16;
	cmd->timer_config |= info->timer_count;
}

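/* Tell the firmware to disable an accumulator channel. */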
static void knav_acc_stop(struct knav_device *kdev,
			  struct knav_range_info *range,
			  int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));
}

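/* Tell the firmware to enable an accumulator channel. */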
static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
					   struct knav_range_info *range,
					   int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));

	return result;
}

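/*
 * Range init hook: (re)start the accumulator channels backing the
 * range, stopping each first in case the firmware still has it active.
 * A multi-queue range is backed by a single channel, so the loop ends
 * after the first queue in that case.
 */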
static int knav_acc_init_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;
	int queue;

	for (queue = 0; queue < range->num_queues; queue++) {
		acc = range->acc + queue;

		knav_acc_stop(kdev, range, queue);
		acc->list_index = 0;
		result = knav_acc_start(kdev, range, queue);

		if (result != ACC_RET_SUCCESS)
			return -EIO;

		if (range->flags & RANGE_MULTI_QUEUE)
			return 0;
	}
	return 0;
}

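/*
 * Queue init hook: allocate the software descriptor ring that the
 * interrupt handler drains accumulator lists into, and bind the queue
 * to its channel.
 */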
static int knav_acc_init_queue(struct knav_range_info *range,
			       struct knav_queue_inst *kq)
{
	unsigned id = kq->id - range->queue_base;

	kq->descs = devm_kcalloc(range->kdev->dev,
				 ACC_DESCS_MAX, sizeof(u32), GFP_KERNEL);
	if (!kq->descs)
		return -ENOMEM;

	kq->acc = range->acc;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0)
		kq->acc += id;
	return 0;
}

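/* The open/close hooks just bring the channel interrupt up or down. */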
static int knav_acc_open_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst, unsigned flags)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, true);
}

static int knav_acc_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, false);
}

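/*
 * Range free hook: unmap and release each channel's list memory. Both
 * halves of a channel's ping-pong pair live in one allocation, so
 * freeing list_cpu[0] releases both.
 */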
static int knav_acc_free_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	struct knav_acc_info *info;
	int channel, channels;

	info = &range->acc_info;

	if (range->flags & RANGE_MULTI_QUEUE)
		channels = 1;
	else
		channels = range->num_queues;

	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		if (!acc->list_cpu[0])
			continue;
		dma_unmap_single(kdev->dev, acc->list_dma[0],
				 info->mem_size, DMA_BIDIRECTIONAL);
		free_pages_exact(acc->list_cpu[0], info->mem_size);
	}
	devm_kfree(range->kdev->dev, range->acc);
	return 0;
}

static struct knav_range_ops knav_acc_range_ops = {
	.set_notify	= knav_acc_set_notify,
	.init_queue	= knav_acc_init_queue,
	.open_queue	= knav_acc_open_queue,
	.close_queue	= knav_acc_close_queue,
	.init_range	= knav_acc_init_range,
	.free_range	= knav_acc_free_range,
};

/**
 * knav_init_acc_range() - Initialise an accumulator queue range
 * @kdev: qmss device
 * @node: device node
 * @range: qmss range information
 *
 * Return: 0 on success, or a negative error code on failure
 */
int knav_init_acc_range(struct knav_device *kdev,
			struct device_node *node,
			struct knav_range_info *range)
{
	struct knav_acc_channel *acc;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	int ret, channel, channels;
	int list_size, mem_size;
	dma_addr_t list_dma;
	void *list_mem;
	u32 config[5];

	range->flags |= RANGE_HAS_ACCUMULATOR;
	info = &range->acc_info;

	ret = of_property_read_u32_array(node, "accumulator", config, 5);
	if (ret)
		return ret;

	info->pdsp_id = config[0];
	info->start_channel = config[1];
	info->list_entries = config[2];
	info->pacing_mode = config[3];
	info->timer_count = config[4] / ACC_DEFAULT_PERIOD;

	if (info->start_channel > ACC_MAX_CHANNEL) {
		dev_err(kdev->dev, "channel %d invalid for range %s\n",
			info->start_channel, range->name);
		return -EINVAL;
	}

	if (info->pacing_mode > 3) {
		dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
			info->pacing_mode, range->name);
		return -EINVAL;
	}

	pdsp = knav_find_pdsp(kdev, info->pdsp_id);
	if (!pdsp) {
		dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
			info->pdsp_id, range->name);
		return -EINVAL;
	}

	if (!pdsp->started) {
		dev_err(kdev->dev, "pdsp id %d not started for range %s\n",
			info->pdsp_id, range->name);
		return -ENODEV;
	}

	info->pdsp = pdsp;
	channels = range->num_queues;
	if (of_property_read_bool(node, "multi-queue")) {
		range->flags |= RANGE_MULTI_QUEUE;
		channels = 1;
		if (range->queue_base & (32 - 1)) {
			dev_err(kdev->dev,
				"misaligned multi-queue accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
		if (range->num_queues > 32) {
			dev_err(kdev->dev,
				"too many queues in accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
	}

	/* figure out list size */
	list_size = info->list_entries;
	list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
	info->list_size = list_size;
	mem_size = PAGE_ALIGN(list_size * 2);
	info->mem_size = mem_size;
	range->acc = devm_kcalloc(kdev->dev, channels, sizeof(*range->acc),
				  GFP_KERNEL);
	if (!range->acc)
		return -ENOMEM;

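	/*
	 * Set up each channel: both halves of its ping-pong list pair
	 * come from a single allocation, mapped once and split in two.
	 */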
	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		acc->channel = info->start_channel + channel;

		/* allocate memory for the two lists */
		list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
		if (!list_mem)
			return -ENOMEM;

		list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(kdev->dev, list_dma)) {
			free_pages_exact(list_mem, mem_size);
			return -ENOMEM;
		}

		memset(list_mem, 0, mem_size);
		dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
					   DMA_TO_DEVICE);
		scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
			  acc->channel);
		acc->list_cpu[0] = list_mem;
		acc->list_cpu[1] = list_mem + list_size;
		acc->list_dma[0] = list_dma;
		acc->list_dma[1] = list_dma + list_size;
		dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
			acc->name, acc->channel, &list_dma, list_mem);
	}

	range->ops = &knav_acc_range_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);