// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}

/**
 * host1x_syncpt_alloc() - allocate a syncpoint
 * @host: host1x device data
 * @flags: bitfield of HOST1X_SYNCPT_* flags
 * @name: name for the syncpoint for use in debug prints
 *
 * Allocates a hardware syncpoint for the caller's use. The caller then has
 * the sole authority to mutate the syncpoint's value until it is freed again.
 *
 * If no free syncpoints are available, or a NULL name was specified, returns
 * NULL.
 */
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name)
{
	struct host1x_syncpt *sp = host->syncpt;
	char *full_name;
	unsigned int i;

	if (!name)
		return NULL;

	mutex_lock(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
	if (!full_name)
		goto free_base;

	sp->name = full_name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	kref_init(&sp->ref);

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_alloc);
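
/*
 * Example (a minimal sketch; the "my-engine" name and the error handling
 * are illustrative assumptions, not code from this file):
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_alloc(host, HOST1X_SYNCPT_CLIENT_MANAGED,
 *				 "my-engine");
 *	if (!sp)
 *		return -EBUSY;
 *	...
 *	host1x_syncpt_put(sp);
 */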

/**
 * host1x_syncpt_id() - retrieve syncpoint ID
 * @sp: host1x syncpoint
 *
 * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
 * often used as a value to program into registers that control how hardware
 * blocks interact with syncpoints.
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/**
 * host1x_syncpt_incr_max() - update the value sent to hardware
 * @sp: host1x syncpoint
 * @incrs: number of increments
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
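
/*
 * Example (a hedged sketch of typical submit-side bookkeeping; the
 * two-increment job below is an illustrative assumption, not code from
 * this file):
 *
 *	u32 fence;
 *
 *	fence = host1x_syncpt_incr_max(sp, 2);
 *	...push work that increments the syncpoint twice...
 *
 * Waiting for the syncpoint to reach @fence then means waiting for that
 * job to complete.
 */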

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		/*
		 * Unassign syncpt from channels for purposes of Tegra186
		 * syncpoint protection. This prevents any channel from
		 * accessing it until it is reassigned.
		 */
		host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
		host1x_hw_syncpt_restore(host, sp_base + i);
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	host1x_hw_syncpt_enable_protection(host);

	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/**
 * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
 * @sp: host1x syncpoint
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);

/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value
 * @value: return location for the syncpoint value
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	struct dma_fence *fence;
	long wait_err;

	host1x_hw_syncpt_load(sp->host, sp);

	if (value)
		*value = host1x_syncpt_load(sp);

	if (host1x_syncpt_is_expired(sp, thresh))
		return 0;

	if (timeout < 0)
		timeout = LONG_MAX;
	else if (timeout == 0)
		return -EAGAIN;

	fence = host1x_fence_create(sp, thresh, false);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	wait_err = dma_fence_wait_timeout(fence, true, timeout);
	if (wait_err == 0)
		host1x_fence_cancel(fence);
	dma_fence_put(fence);

	if (value)
		*value = host1x_syncpt_load(sp);

	/*
	 * Don't rely on dma_fence_wait_timeout return value,
	 * since it returns zero both on timeout and if the
	 * wait completed with 0 jiffies left.
	 */
	host1x_hw_syncpt_load(sp->host, sp);
	if (wait_err == 0 && !host1x_syncpt_is_expired(sp, thresh))
		return -EAGAIN;
	else if (wait_err < 0)
		return wait_err;
	else
		return 0;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
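
/*
 * Example (a minimal sketch; the one-second timeout and the error
 * handling are illustrative assumptions):
 *
 *	u32 value;
 *	int err;
 *
 *	err = host1x_syncpt_wait(sp, thresh, msecs_to_jiffies(1000), &value);
 *	if (err < 0)
 *		return err;
 */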

/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);

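	/*
	 * The subtraction keeps this check correct across 32-bit
	 * wraparound: e.g. current_val == 2 and thresh == 0xfffffffe
	 * gives current_val - thresh == 4, so the sign bit is clear and
	 * the threshold counts as reached.
	 */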
	return ((current_val - thresh) & 0x80000000U) == 0U;
}

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
	if (!host->nop_sp)
		return -ENOMEM;

	if (host->info->reserve_vblank_syncpts) {
		kref_init(&host->syncpt[26].ref);
		kref_init(&host->syncpt[27].ref);
	}

	return 0;
}

/**
 * host1x_syncpt_request() - request a syncpoint
 * @client: client requesting the syncpoint
 * @flags: flags
 *
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
 * client driver needs to release it using host1x_syncpt_put().
 */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
}
EXPORT_SYMBOL(host1x_syncpt_request);
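
/*
 * Example (a hedged sketch of a client driver's init path; "client" is
 * the driver's struct host1x_client, and the error handling is an
 * illustrative assumption):
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 */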

static void syncpt_release(struct kref *ref)
{
	struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);

	atomic_set(&sp->max_val, host1x_syncpt_read(sp));

	sp->locked = false;

	mutex_lock(&sp->host->syncpt_mutex);

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}

/**
 * host1x_syncpt_put() - free a requested syncpoint
 * @sp: host1x syncpoint
 *
 * Release a syncpoint previously allocated using host1x_syncpt_request(). A
 * host1x client driver should call this when the syncpoint is no longer in
 * use.
 */
void host1x_syncpt_put(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	kref_put(&sp->ref, syncpt_release);
}
EXPORT_SYMBOL(host1x_syncpt_put);

void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/**
 * host1x_syncpt_read_max() - read maximum syncpoint value
 * @sp: host1x syncpoint
 *
 * The maximum syncpoint value indicates how many operations there are in
 * queue, either in channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
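
/*
 * For example, if host1x_syncpt_read_max() returns 10 while
 * host1x_syncpt_read_min() returns 7, three queued increments have not
 * yet executed.
 */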

/**
 * host1x_syncpt_read_min() - read minimum syncpoint value
 * @sp: host1x syncpoint
 *
 * The minimum syncpoint value is a shadow of the current sync point value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

/**
 * host1x_syncpt_read() - read the current syncpoint value
 * @sp: host1x syncpoint
 */
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

/**
 * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
					      unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	if (kref_get_unless_zero(&host->syncpt[id].ref))
		return &host->syncpt[id];
	else
		return NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id);
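
/*
 * Example (a minimal sketch; the ID variable and the error handling are
 * illustrative assumptions, not code from this file):
 *
 *	struct host1x_syncpt *sp;
 *
 *	sp = host1x_syncpt_get_by_id(host, id);
 *	if (!sp)
 *		return -EINVAL;
 *	...
 *	host1x_syncpt_put(sp);
 */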

/**
 * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
 *	increase the refcount.
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
						    unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return &host->syncpt[id];
}
EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);

/**
 * host1x_syncpt_get() - increment syncpoint refcount
 * @sp: syncpoint
 */
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
{
	kref_get(&sp->ref);

	return sp;
}
EXPORT_SYMBOL(host1x_syncpt_get);

/**
 * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
 * @sp: host1x syncpoint
 */
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

/**
 * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
 * @base: host1x syncpoint wait base
 */
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);

static void do_nothing(struct kref *ref)
{
}

/**
 * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
 *	available for allocation
 *
 * @client: host1x bus client
 * @syncpt_id: syncpoint ID to make available
 *
 * Makes VBLANK<i> syncpoint available for allocation if it was
 * reserved at initialization time. This should be called by the display
 * driver after it has ensured that any VBLANK increment programming configured
 * by the boot chain has been disabled.
 */
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	if (!host->info->reserve_vblank_syncpts)
		return;

	kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
}
EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
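
/*
 * Example (a hedged sketch of a display driver's enable path; syncpoint
 * 26, one of the two reserved in host1x_syncpt_init(), is used here
 * purely for illustration):
 *
 *	host1x_syncpt_release_vblank_reservation(client, 26);
 */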