// SPDX-License-Identifier: GPL-2.0-only
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);
#define knav_dev_lock_held() \
	lockdep_is_held(&knav_dev_lock)

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
 * There are no status and vbusm push registers on this version
 * of QMSS. Push registers are the same as pop, so all indices
 * above 1 are redefined.
 */
#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
#define KNAV_L_QUEUE_REGION_REG_INDEX	2
#define KNAV_L_QUEUE_PUSH_REG_INDEX	3

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

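/* Queue instances live in one flat array; each slot is padded to a
 * power-of-two size (kdev->inst_shift) so index-to-instance lookup is
 * a shift instead of a multiply.
 */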
#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

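/* Walk a queue's handle list under RCU. Writers mutate the list while
 * holding knav_dev_lock, which is what knav_dev_lock_held() tells lockdep.
 */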
#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list,	\
				knav_dev_lock_held())

#define for_each_instance(idx, inst, kdev)		\
	for (idx = 0, inst = kdev->instances;		\
	     idx < (kdev)->num_queues_in_use;		\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/* All firmware file names end up here. List the firmware file names below.
 * Newest first, followed by older ones. The search starts at the beginning
 * of the array and stops at the first firmware file that is found.
 */
static const char * const knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

static bool device_ready;
bool knav_qmss_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_qmss_device_ready);

/**
 * knav_queue_notify() - qmss queue notifier call
 *
 * @inst:		- qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
					    const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->stats = alloc_percpu(struct knav_queue_stats);
	if (!qh->stats) {
		ret = -ENOMEM;
		goto err;
	}

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret)
			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;

err:
	if (qh->stats)
		free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
	return ERR_PTR(ret);
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						  unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				   struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			      struct knav_queue_inst *inst,
			      bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

static struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

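	/* hardware entry count, plus any descriptors still buffered in the
	 * accumulator ring for this queue
	 */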
	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					   struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
	int cpu = 0;
	int pushes = 0;
	int pops = 0;
	int push_errors = 0;
	int pop_errors = 0;
	int notifies = 0;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		for_each_possible_cpu(cpu) {
			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
			pops += per_cpu_ptr(qh->stats, cpu)->pops;
			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
		}

		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
			   qh,
			   pushes,
			   pops,
			   knav_queue_get_count(qh),
			   notifies,
			   push_errors,
			   pop_errors);
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);

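/* Poll an MMIO register until the bits in @flags clear (or, when @flags is
 * zero, until the whole register reads zero), giving up after @timeout ms.
 */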
static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

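	/* reset the software-side accumulator count, then write a zero
	 * descriptor pointer to the queue's push register, which clears
	 * the queue contents in hardware
	 */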
	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name:		- name to give the queue handle
 * @id:			- desired queue number if any or specifies the type
 *			  of queue
 * @flags:		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
		      unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

/**
 * knav_queue_close()	- close a hardware queue handle
 * @qhandle:		- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qhandle:			- queue handle
 * @cmd:			- control commands
 * @arg:			- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);



/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qhandle:		- hardware queue handle
 * @dma:		- DMA data to push
 * @size:		- size of data to push
 * @flags:		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
		    unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

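	/* descriptors are at least 16-byte aligned, so the low bits of the
	 * push word carry the element size encoded as (size / 16) - 1
	 */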
	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	this_cpu_inc(qh->stats->pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qhandle:		- hardware queue handle
 * @size:		- (optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
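		/* desc_head is a free-running index into the accumulator
		 * ring; masking wraps it to the ring size
		 */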
		idx = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	this_cpu_inc(qh->stats->pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;
	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;
	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name:		- name to give the pool handle
 * @num_desc:		- number of descriptors in the pool
 * @region_id:		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi = NULL, *iter;
	struct list_head *node;
	unsigned last_offset;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools;
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	node = &region->pools;
	list_for_each_entry(iter, &region->pools, region_inst) {
		if ((iter->region_offset - last_offset) >= num_desc) {
			pi = iter;
			break;
		}
		last_offset = iter->region_offset + iter->num_desc;
	}

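	/* a large-enough gap was found before 'pi': link the new pool in
	 * ahead of it so the list stays sorted by region offset
	 */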
	if (pi) {
		node = &pi->region_inst;
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);

/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @ph:		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);


/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @ph:		- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @ph:		- pool handle
 * @desc:	- virtual address
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);

/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @ph:				- pool handle
 * @desc:			- address of descriptor to map
 * @size:			- size of descriptor to map
 * @dma:			- DMA address return pointer
 * @dma_sz:			- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
		       dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;
	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory before the device sees it */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @ph:				- pool handle
 * @dma:			- DMA address of descriptor to unmap
 * @dma_sz:			- size of descriptor to unmap
 *
 * Returns descriptor address on success; use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @ph:			- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;
	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
				    struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

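	/* the region registers encode the descriptor size in 16-byte units
	 * minus one, and the descriptor count as log2 relative to the
	 * 32-descriptor minimum (2^5)
	 */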
	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
				    struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
			     const char *name,
			     struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40bits in hardware, we treat them as
	 * 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
							  8 * block->size, &block->dma,
							  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
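		/* the K2G QM takes the raw entry count; older QMSS encodes
		 * the link ram size as count - 1
		 */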
		if (kdev->version == QMSS_66AK2G)
			writel_relaxed(block->size,
				       &qmgr->reg_config->link_ram_size0);
		else
			writel_relaxed(block->size - 1,
				       &qmgr->reg_config->link_ram_size0);
		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
				  struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(), GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

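			/* bits 8-15 of the third interrupt specifier cell
			 * carry the CPU affinity mask for this queue irq
			 */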
			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_property_read_bool(node, "qalloc-by-id"))
		range->flags |= RANGE_RESERVED;

	if (of_property_present(node, "accumulator")) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
				  struct device_node *queue_pools)
{
	struct device_node *type, *range;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			/* return value ignored, we init the rest... */
			knav_setup_queue_range(kdev, range);
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
				 struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_status =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_STATUS_REG_INDEX);
		}

		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_REGION_REG_INDEX :
					   KNAV_QUEUE_REGION_REG_INDEX);

		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   (kdev->version == QMSS_66AK2G) ?
					   KNAV_L_QUEUE_PUSH_REG_INDEX :
					   KNAV_QUEUE_PUSH_REG_INDEX);

		if (kdev->version == QMSS) {
			qmgr->reg_pop =
				knav_queue_map_reg(kdev, child,
						   KNAV_QUEUE_POP_REG_INDEX);
		}

		if (IS_ERR(qmgr->reg_peek) ||
		    ((kdev->version == QMSS) &&
		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (kdev->version == QMSS) {
				if (!IS_ERR(qmgr->reg_status))
					devm_iounmap(dev, qmgr->reg_status);
				if (!IS_ERR(qmgr->reg_pop))
					devm_iounmap(dev, qmgr->reg_pop);
			}
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			devm_kfree(dev, qmgr);
			continue;
		}

		/* Use same push register for pop as well */
		if (kdev->version == QMSS_66AK2G)
			qmgr->reg_pop = qmgr->reg_push;

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
				 struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			of_node_put(child);
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = request_firmware_direct(&fw,
						      knav_acc_firmwares[i],
						      kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

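	/* the accumulator firmware expects its PDSP number (written as
	 * id + 1) at command area offset 0x18 before the image runs
	 */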
	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp is not
	 * loaded, as the acc channels are optional and depend on firmware
	 * availability in the system. We set the loaded and started flags,
	 * and when initializing the acc range we check them and init the
	 * range only if the pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
						    knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{
		.compatible = "ti,keystone-navigator-qmss",
	},
	{
		.compatible = "ti,66ak2g-navss-qm",
		.data	= (void *)QMSS_66AK2G,
	},
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	if (device_get_match_data(dev))
		kdev->version = QMSS_66AK2G;

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_fops);
	device_ready = true;
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver keystone_qmss_driver = {
	.probe	= knav_queue_probe,
	.remove_new = knav_queue_remove,
	.driver	= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");