// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP500/DP550/DP650 driver (crtc operations)
 */

#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <video/videomode.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "malidp_drv.h"
#include "malidp_hw.h"

static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
						   const struct drm_display_mode *mode)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	/*
	 * check that the hardware can drive the required clock rate,
	 * but skip the check if the clock is meant to be disabled (req_rate = 0)
	 */
	long rate, req_rate = mode->crtc_clock * 1000;

	if (req_rate) {
		rate = clk_round_rate(hwdev->pxlclk, req_rate);
		if (rate != req_rate) {
			DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
					 req_rate);
			return MODE_NOCLOCK;
		}
	}

	return MODE_OK;
}

static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct videomode vm;
	int err = pm_runtime_get_sync(crtc->dev->dev);

	if (err < 0) {
		DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
		return;
	}

	drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
	clk_prepare_enable(hwdev->pxlclk);

	/* We rely on firmware to set mclk to a sensible level. */
	clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);

	hwdev->hw->modeset(hwdev, &vm);
	hwdev->hw->leave_config_mode(hwdev);
	drm_crtc_vblank_on(crtc);
}

static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int err;

	/* always disable planes on the CRTC that is being turned off */
	drm_atomic_helper_disable_planes_on_crtc(old_state, false);

	drm_crtc_vblank_off(crtc);
	hwdev->hw->enter_config_mode(hwdev);

	clk_disable_unprepare(hwdev->pxlclk);

	err = pm_runtime_put(crtc->dev->dev);
	if (err < 0) {
		DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
	}
}

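/*
 * Input ranges for the piecewise-linear hardware gamma table. Each entry
 * below covers a span of LUT indices that is described by a single
 * slope/offset coefficient; the spans are narrowest near zero, where gamma
 * curves change fastest, and grow wider towards full scale.
 */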
static const struct gamma_curve_segment {
	u16 start;
	u16 end;
} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
	/* sector 0 */
	{    0,    0 }, {    1,    1 }, {    2,    2 }, {    3,    3 },
	{    4,    4 }, {    5,    5 }, {    6,    6 }, {    7,    7 },
	{    8,    8 }, {    9,    9 }, {   10,   10 }, {   11,   11 },
	{   12,   12 }, {   13,   13 }, {   14,   14 }, {   15,   15 },
	/* sector 1 */
	{   16,   19 }, {   20,   23 }, {   24,   27 }, {   28,   31 },
	/* sector 2 */
	{   32,   39 }, {   40,   47 }, {   48,   55 }, {   56,   63 },
	/* sector 3 */
	{   64,   79 }, {   80,   95 }, {   96,  111 }, {  112,  127 },
	/* sector 4 */
	{  128,  159 }, {  160,  191 }, {  192,  223 }, {  224,  255 },
	/* sector 5 */
	{  256,  319 }, {  320,  383 }, {  384,  447 }, {  448,  511 },
	/* sector 6 */
	{  512,  639 }, {  640,  767 }, {  768,  895 }, {  896, 1023 },
	{ 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
	{ 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
	{ 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
	{ 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
	{ 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
	{ 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
};

#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))

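/*
 * Build the hardware coefficient table from the userspace LUT: for each
 * segment, 'a' is the slope across the segment scaled by 256 (8 fractional
 * bits) and 'b' is the output value at the segment start. Only the green
 * channel is sampled here, because malidp_crtc_atomic_check_gamma() rejects
 * any LUT whose red, green and blue curves differ.
 */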
static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
					u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
{
	struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
	int i;

	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
		u32 a, b, delta_in, out_start, out_end;

		delta_in = segments[i].end - segments[i].start;
		/* DP has 12-bit internal precision for its LUTs. */
		out_start = drm_color_lut_extract(lut[segments[i].start].green,
						  12);
		out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
		a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
		b = out_start;
		coeffs[i] = DE_COEFTAB_DATA(a, b);
	}
}

/*
 * Check if there is a new gamma LUT and if it is of an acceptable size. Also,
 * reject any LUTs that use distinct red, green, and blue curves.
 */
static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
					  struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
	struct drm_color_lut *lut;
	size_t lut_size;
	int i;

	if (!state->color_mgmt_changed || !state->gamma_lut)
		return 0;

	if (crtc->state->gamma_lut &&
	    (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
		return 0;

	if (state->gamma_lut->length % sizeof(struct drm_color_lut))
		return -EINVAL;

	lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
	if (lut_size != MALIDP_GAMMA_LUT_SIZE)
		return -EINVAL;

	lut = (struct drm_color_lut *)state->gamma_lut->data;
	for (i = 0; i < lut_size; ++i)
		if (!((lut[i].red == lut[i].green) &&
		      (lut[i].red == lut[i].blue)))
			return -EINVAL;

	if (!state->mode_changed) {
		int ret;

		state->mode_changed = true;
		/*
		 * Kerneldoc for drm_atomic_helper_check_modeset mandates that
		 * it be invoked when the driver sets ->mode_changed. Since
		 * changing the gamma LUT doesn't depend on any external
		 * resources, it is safe to call it only once.
		 */
		ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
		if (ret)
			return ret;
	}

	malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
	return 0;
}

/*
 * Check if there is a new CTM and if it contains valid input. Valid here means
 * that the number is inside the representable range for a Q3.12 number,
 * excluding truncating the fractional part of the input data.
 *
 * The COLORADJ registers can be changed atomically.
 */
static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
					struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
	struct drm_color_ctm *ctm;
	int i;

	if (!state->color_mgmt_changed)
		return 0;

	if (!state->ctm)
		return 0;

	if (crtc->state->ctm && (crtc->state->ctm->base.id ==
				 state->ctm->base.id))
		return 0;

	/*
	 * The size of the ctm is checked in
	 * drm_property_replace_blob_from_id.
	 */
	ctm = (struct drm_color_ctm *)state->ctm->data;
	for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
		/* Convert from S31.32 to Q3.12. */
		s64 val = ctm->matrix[i];
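		/*
		 * drm_color_ctm entries are sign-magnitude S31.32: shifting
		 * the magnitude right by 20 keeps 12 fractional bits, and the
		 * 15-bit mask leaves room for 3 integer bits. The sign is
		 * folded back in below.
		 */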
		u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
			  GENMASK_ULL(14, 0);

		/*
		 * Convert to 2s complement and check the destination's top bit
		 * for overflow. NB: Can't check before converting or it'd
		 * incorrectly reject the case:
		 *   sign == 1
		 *   mag == 0x2000
		 */
		if (val & BIT_ULL(63))
			mag = ~mag + 1;
		if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
			return -EINVAL;
		mc->coloradj_coeffs[i] = mag;
	}

	return 0;
}

static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
					    struct drm_crtc_state *state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
	struct malidp_se_config *s = &cs->scaler_config;
	struct drm_plane *plane;
	struct videomode vm;
	const struct drm_plane_state *pstate;
	u32 h_upscale_factor = 0; /* U16.16 */
	u32 v_upscale_factor = 0; /* U16.16 */
	u8 scaling = cs->scaled_planes_mask;
	int ret;

	if (!scaling) {
		s->scale_enable = false;
		goto mclk_calc;
	}

	/* The scaling engine can only handle one plane at a time. */
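	/* (x & (x - 1)) is non-zero whenever more than one bit of x is set. */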
	if (scaling & (scaling - 1))
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct malidp_plane *mp = to_malidp_plane(plane);
		u32 phase;

		if (!(mp->layer->id & scaling))
			continue;

		/*
		 * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
		 * to get the U16.16 result.
		 */
		h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
					   pstate->src_w);
		v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
					   pstate->src_h);

		s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
				      (v_upscale_factor >> 16) >= 2);

		if (pstate->rotation & MALIDP_ROTATED_MASK) {
			s->input_w = pstate->src_h >> 16;
			s->input_h = pstate->src_w >> 16;
		} else {
			s->input_w = pstate->src_w >> 16;
			s->input_h = pstate->src_h >> 16;
		}

		s->output_w = pstate->crtc_w;
		s->output_h = pstate->crtc_h;

#define SE_N_PHASE 4
#define SE_SHIFT_N_PHASE 12
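		/*
		 * The scaler steps through the input in fixed point:
		 * delta_phase is input/output with SE_N_PHASE + SE_SHIFT_N_PHASE
		 * fractional bits, while init_phase starts roughly half a step
		 * into the input, using SE_N_PHASE fractional bits.
		 */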
		/* Calculate initial_phase and delta_phase for horizontal. */
		phase = s->input_w;
		s->h_init_phase =
				((phase << SE_N_PHASE) / s->output_w + 1) / 2;

		phase = s->input_w;
		phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
		s->h_delta_phase = phase / s->output_w;

		/* Same for vertical. */
		phase = s->input_h;
		s->v_init_phase =
				((phase << SE_N_PHASE) / s->output_h + 1) / 2;

		phase = s->input_h;
		phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
		s->v_delta_phase = phase / s->output_h;
#undef SE_N_PHASE
#undef SE_SHIFT_N_PHASE
		s->plane_src_id = mp->layer->id;
	}

	s->scale_enable = true;
	s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
	s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);

mclk_calc:
	drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
	ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
	if (ret < 0)
		return -EINVAL;
	return 0;
}

static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	u32 rot_mem_free, rot_mem_usable;
	int rotated_planes = 0;
	int ret;

	/*
	 * Check if there is enough rotation memory available for planes
	 * that need 90° and 270° rotation or planes that are compressed.
	 * Each plane has set its required memory size in the ->plane_check()
	 * callback; here we only make sure that the sums are less than the
	 * total usable memory.
	 *
	 * The rotation memory allocation algorithm (for each plane):
	 *   a. If no more rotated or compressed planes exist, all remaining
	 *      rotation memory in the bank is available for use by the plane.
	 *   b. If other rotated or compressed planes exist, and the plane's
	 *      layer ID is DE_VIDEO1, it can use all the memory from the first
	 *      bank if a secondary rotation memory bank is available, otherwise
	 *      it can use up to half the bank's memory.
	 *   c. If other rotated or compressed planes exist, and the plane's
	 *      layer ID is not DE_VIDEO1, it can use half of the available
	 *      memory.
	 *
	 * Note: this algorithm assumes that the order in which the planes are
	 * checked always has the DE_VIDEO1 plane first in the list if it is
	 * rotated. Because that is how we create the planes in the first
	 * place, things work under the current DRM version, but if the order
	 * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
	 * ever changes, we need to pre-sort the planes before validation.
	 */

	/* first count the number of rotated planes */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_framebuffer *fb = pstate->fb;

		if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
			rotated_planes++;
	}

	rot_mem_free = hwdev->rotation_memory[0];
	/*
	 * if we have more than 1 plane using rotation memory, use the second
	 * block of rotation memory as well
	 */
	if (rotated_planes > 1)
		rot_mem_free += hwdev->rotation_memory[1];

	/* now validate the rotation memory requirements */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct malidp_plane *mp = to_malidp_plane(plane);
		struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
		struct drm_framebuffer *fb = pstate->fb;

		if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier) {
			/* process current plane */
			rotated_planes--;

			if (!rotated_planes) {
				/* no more rotated planes, we can use what's left */
				rot_mem_usable = rot_mem_free;
			} else {
				if ((mp->layer->id != DE_VIDEO1) ||
				    (hwdev->rotation_memory[1] == 0))
					rot_mem_usable = rot_mem_free / 2;
				else
					rot_mem_usable = hwdev->rotation_memory[0];
			}

			rot_mem_free -= rot_mem_usable;

			if (ms->rotmem_size > rot_mem_usable)
				return -EINVAL;
		}
	}

	/* If only the writeback routing has changed, we don't need a modeset */
	if (crtc_state->connectors_changed) {
		u32 old_mask = crtc->state->connector_mask;
		u32 new_mask = crtc_state->connector_mask;

		if ((old_mask ^ new_mask) ==
		    (1 << drm_connector_index(&malidp->mw_connector.base)))
			crtc_state->connectors_changed = false;
	}

	ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state);
	ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state);
	ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state);

	return ret;
}

static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
	.mode_valid = malidp_crtc_mode_valid,
	.atomic_check = malidp_crtc_atomic_check,
	.atomic_enable = malidp_crtc_atomic_enable,
	.atomic_disable = malidp_crtc_atomic_disable,
};

static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct malidp_crtc_state *state, *old_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	old_state = to_malidp_crtc_state(crtc->state);
	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
	memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
	       sizeof(state->gamma_coeffs));
	memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
	       sizeof(state->coloradj_coeffs));
	memcpy(&state->scaler_config, &old_state->scaler_config,
	       sizeof(state->scaler_config));
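	/*
	 * The scaled-planes mask is recomputed by the plane checks for the
	 * new state, so it is not carried over from the old state.
	 */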
	state->scaled_planes_mask = 0;

	return &state->base;
}

static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
				      struct drm_crtc_state *state)
{
	struct malidp_crtc_state *mali_state = NULL;

	if (state) {
		mali_state = to_malidp_crtc_state(state);
		__drm_atomic_helper_crtc_destroy_state(state);
	}

	kfree(mali_state);
}

static void malidp_crtc_reset(struct drm_crtc *crtc)
{
	struct malidp_crtc_state *state =
		kzalloc(sizeof(*state), GFP_KERNEL);

	if (crtc->state)
		malidp_crtc_destroy_state(crtc, crtc->state);

	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
			     hwdev->hw->map.de_irq_map.vsync_irq);
	return 0;
}

static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
			      hwdev->hw->map.de_irq_map.vsync_irq);
}

static const struct drm_crtc_funcs malidp_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = malidp_crtc_reset,
	.atomic_duplicate_state = malidp_crtc_duplicate_state,
	.atomic_destroy_state = malidp_crtc_destroy_state,
	.enable_vblank = malidp_crtc_enable_vblank,
	.disable_vblank = malidp_crtc_disable_vblank,
};

int malidp_crtc_init(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm_to_malidp(drm);
	struct drm_plane *primary = NULL, *plane;
	int ret;

	ret = malidp_de_planes_init(drm);
	if (ret < 0) {
		DRM_ERROR("Failed to initialise planes\n");
		return ret;
	}

	drm_for_each_plane(plane, drm) {
		if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
			primary = plane;
			break;
		}
	}

	if (!primary) {
		DRM_ERROR("no primary plane found\n");
		return -EINVAL;
	}

	ret = drmm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
					 &malidp_crtc_funcs, NULL);
	if (ret)
		return ret;

	drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
	/* No inverse-gamma: it is per-plane. */
	drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);

	malidp_se_set_enh_coeffs(malidp->dev);

	return 0;
}