/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "reg_helper.h"
#include "core_types.h"
#include "dcn35_pg_cntl.h"
#include "dccg.h"

#define TO_DCN_PG_CNTL(pg_cntl)\
	container_of(pg_cntl, struct dcn_pg_cntl, base)

#define REG(reg) \
	(pg_cntl_dcn->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	pg_cntl_dcn->pg_cntl_shift->field_name, pg_cntl_dcn->pg_cntl_mask->field_name

#define CTX \
	pg_cntl_dcn->base.ctx
#define DC_LOGGER \
	pg_cntl->ctx->logger

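/*
 * Query the PGFSM power status of the DSC power domain (DOMAIN16..DOMAIN19)
 * for the given DSC instance. A status of zero means the block is powered on.
 */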
static bool pg_cntl35_dsc_pg_status(struct pg_cntl *pg_cntl, unsigned int dsc_inst)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	if (pg_cntl->ctx->dc->debug.ignore_pg)
		return true;

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_GET(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1: /* DSC1 */
		REG_GET(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2: /* DSC2 */
		REG_GET(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3: /* DSC3 */
		REG_GET(DOMAIN19_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	return pwr_status == 0;
}

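/*
 * Power up or power gate the DSC domain of one DSC instance. DSCCLK is
 * enabled before ungating and disabled after gating; the gating itself is
 * skipped when DSC power gating is disabled via debug options or while idle
 * optimizations are allowed.
 */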
void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;
	bool block_enabled;

	/* need to enable dscclk regardless of DSC_PG */
	if (pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on)
		pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc(
			pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	if (dsc_inst < MAX_PIPES)
		pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on;

	if (pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) {
		/* disable dscclk */
		pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc(
			pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
	}
}

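/*
 * Query the PGFSM power status of the combined HUBP/DPP power domain
 * (DOMAIN0..DOMAIN3) for the given pipe. A status of zero means powered on.
 */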
static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	switch (hubp_dpp_inst) {
	case 0:
		/* DPP0 & HUBP0 */
		REG_GET(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 1:
		/* DPP1 & HUBP1 */
		REG_GET(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 2:
		/* DPP2 & HUBP2 */
		REG_GET(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	case 3:
		/* DPP3 & HUBP3 */
		REG_GET(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	return pwr_status == 0;
}

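/*
 * Power up or power gate the shared HUBP/DPP domain of one pipe and record
 * the new state in the per-pipe bookkeeping. No-op when HUBP or DPP power
 * gating is disabled via debug options or while idle optimizations are
 * allowed.
 */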
void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	bool block_enabled;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
		pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (hubp_dpp_inst) {
	case 0:
		/* DPP0 & HUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 1:
		/* DPP1 & HUBP1 */
		REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 2:
		/* DPP2 & HUBP2 */
		REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	case 3:
		/* DPP3 & HUBP3 */
		REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
		REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	DC_LOG_DEBUG("HUBP DPP instance %d, power %s", hubp_dpp_inst,
		power_on ? "ON" : "OFF");

	if (hubp_dpp_inst < MAX_PIPES) {
		pg_cntl->pg_pipe_res_enable[PG_HUBP][hubp_dpp_inst] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_DPP][hubp_dpp_inst] = power_on;
	}
}

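/* Query the PGFSM power status of the HPO power domain (DOMAIN25). */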
static bool pg_cntl35_hpo_pg_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN25_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}

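/*
 * Power up or power gate the HPO domain (DOMAIN25). The request is skipped
 * when the domain is forced on via DOMAIN_POWER_FORCEON.
 */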
void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	uint32_t power_forceon;
	bool block_enabled;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_hpo_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
	if (power_forceon)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	REG_UPDATE(DOMAIN25_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN25_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	pg_cntl->pg_res_enable[PG_HPO] = power_on;
}

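/* Query the PGFSM power status of the DCCG/DIO/DCIO domain (DOMAIN22). */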
static bool pg_cntl35_io_clk_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN22_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}

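/*
 * Power up or power gate the IO/clock domain (DOMAIN22), which covers DCCG,
 * DIO and DCIO. The request is skipped when the domain is forced on.
 */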
void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	uint32_t power_forceon;
	bool block_enabled;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	block_enabled = pg_cntl35_io_clk_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
	if (power_forceon)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* DCCG, DIO, DCIO */
	REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN22_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	pg_cntl->pg_res_enable[PG_DCCG] = power_on;
	pg_cntl->pg_res_enable[PG_DIO] = power_on;
	pg_cntl->pg_res_enable[PG_DCIO] = power_on;
}

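/* Query the PGFSM power status of the MPC/OPP/OPTC/DWB domain (DOMAIN24). */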
static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN24_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}

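/*
 * MPCC, OPP and OPTC share a power domain with DWB; the actual gating is
 * performed by pg_cntl35_plane_otg_pg_control(). The helpers below only
 * track the per-pipe enable state.
 */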
void pg_cntl35_mpcc_pg_control(struct pg_cntl *pg_cntl,
	unsigned int mpcc_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (mpcc_inst >= 0 && mpcc_inst < MAX_PIPES)
		pg_cntl->pg_pipe_res_enable[PG_MPCC][mpcc_inst] = power_on;
}

void pg_cntl35_opp_pg_control(struct pg_cntl *pg_cntl,
	unsigned int opp_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (opp_inst >= 0 && opp_inst < MAX_PIPES)
		pg_cntl->pg_pipe_res_enable[PG_OPP][opp_inst] = power_on;
}

void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
	unsigned int optc_inst, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	if (optc_inst >= 0 && optc_inst < MAX_PIPES)
		pg_cntl->pg_pipe_res_enable[PG_OPTC][optc_inst] = power_on;
}

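/*
 * Power up or power gate the MPC/OPP/OPTC/DWB domain (DOMAIN24). Gating is
 * only allowed once every MPCC, OPP and OPTC is disabled, no stream is
 * active and DWB is powered down; afterwards the bookkeeping for all of
 * these resources is updated.
 */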
void pg_cntl35_plane_otg_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl;
	int i;
	bool block_enabled;
	bool all_mpcc_disabled = true, all_opp_disabled = true;
	bool all_optc_disabled = true, all_stream_disabled = true;

	if (pg_cntl->ctx->dc->debug.ignore_pg ||
		pg_cntl->ctx->dc->debug.disable_optc_power_gate ||
		pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
	if (power_on) {
		if (block_enabled)
			return;
	} else {
		if (!block_enabled)
			return;
	}

	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &pg_cntl->ctx->dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe_ctx) {
			if (pipe_ctx->stream)
				all_stream_disabled = false;
		}

		if (pg_cntl->pg_pipe_res_enable[PG_MPCC][i])
			all_mpcc_disabled = false;

		if (pg_cntl->pg_pipe_res_enable[PG_OPP][i])
			all_opp_disabled = false;

		if (pg_cntl->pg_pipe_res_enable[PG_OPTC][i])
			all_optc_disabled = false;
	}

	if (!power_on) {
		if (!all_mpcc_disabled || !all_opp_disabled || !all_optc_disabled
			|| !all_stream_disabled || pg_cntl->pg_res_enable[PG_DWB])
			return;
	}

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	/* MPC, OPP, OPTC, DWB */
	REG_UPDATE(DOMAIN24_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
	REG_WAIT(DOMAIN24_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);

	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		pg_cntl->pg_pipe_res_enable[PG_MPCC][i] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_OPP][i] = power_on;
		pg_cntl->pg_pipe_res_enable[PG_OPTC][i] = power_on;
	}
	pg_cntl->pg_res_enable[PG_DWB] = power_on;
}

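/*
 * DWB shares DOMAIN24 with MPC/OPP/OPTC; only the bookkeeping is updated
 * here, the gating happens in pg_cntl35_plane_otg_pg_control().
 */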
void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on)
{
	if (pg_cntl->ctx->dc->idle_optimizations_allowed)
		return;

	pg_cntl->pg_res_enable[PG_DWB] = power_on;
}

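/*
 * Query the PGFSM power status of the memory domain (DOMAIN23), used to
 * track the DCHUBBUB and DCHVM power state.
 */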
static bool pg_cntl35_mem_status(struct pg_cntl *pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
	uint32_t pwr_status = 0;

	REG_GET(DOMAIN23_PG_STATUS,
		DOMAIN_PGFSM_PWR_STATUS, &pwr_status);

	return pwr_status == 0;
}

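/*
 * Seed the power-gating bookkeeping from the current hardware state by
 * reading back the PGFSM status of every domain at init time.
 */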
void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl)
{
	int i = 0;
	bool block_enabled;

	pg_cntl->pg_res_enable[PG_HPO] = pg_cntl35_hpo_pg_status(pg_cntl);

	block_enabled = pg_cntl35_io_clk_status(pg_cntl);
	pg_cntl->pg_res_enable[PG_DCCG] = block_enabled;
	pg_cntl->pg_res_enable[PG_DIO] = block_enabled;
	pg_cntl->pg_res_enable[PG_DCIO] = block_enabled;

	block_enabled = pg_cntl35_mem_status(pg_cntl);
	pg_cntl->pg_res_enable[PG_DCHUBBUB] = block_enabled;
	pg_cntl->pg_res_enable[PG_DCHVM] = block_enabled;

	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
		pg_cntl->pg_pipe_res_enable[PG_HUBP][i] = block_enabled;
		pg_cntl->pg_pipe_res_enable[PG_DPP][i] = block_enabled;

		block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
		pg_cntl->pg_pipe_res_enable[PG_DSC][i] = block_enabled;
	}

	block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
	for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
		pg_cntl->pg_pipe_res_enable[PG_MPCC][i] = block_enabled;
		pg_cntl->pg_pipe_res_enable[PG_OPP][i] = block_enabled;
		pg_cntl->pg_pipe_res_enable[PG_OPTC][i] = block_enabled;
	}
	pg_cntl->pg_res_enable[PG_DWB] = block_enabled;
}

static const struct pg_cntl_funcs pg_cntl35_funcs = {
	.init_pg_status = pg_cntl35_init_pg_status,
	.dsc_pg_control = pg_cntl35_dsc_pg_control,
	.hubp_dpp_pg_control = pg_cntl35_hubp_dpp_pg_control,
	.hpo_pg_control = pg_cntl35_hpo_pg_control,
	.io_clk_pg_control = pg_cntl35_io_clk_pg_control,
	.plane_otg_pg_control = pg_cntl35_plane_otg_pg_control,
	.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
	.opp_pg_control = pg_cntl35_opp_pg_control,
	.optc_pg_control = pg_cntl35_optc_pg_control,
	.dwb_pg_control = pg_cntl35_dwb_pg_control
};

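/*
 * Allocate and initialize a DCN3.5 pg_cntl instance with the given register
 * offsets, shifts and masks. Returns NULL on allocation failure.
 */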
struct pg_cntl *pg_cntl35_create(
	struct dc_context *ctx,
	const struct pg_cntl_registers *regs,
	const struct pg_cntl_shift *pg_cntl_shift,
	const struct pg_cntl_mask *pg_cntl_mask)
{
	struct dcn_pg_cntl *pg_cntl_dcn = kzalloc(sizeof(*pg_cntl_dcn), GFP_KERNEL);
	struct pg_cntl *base;

	if (pg_cntl_dcn == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	base = &pg_cntl_dcn->base;
	base->ctx = ctx;
	base->funcs = &pg_cntl35_funcs;

	pg_cntl_dcn->regs = regs;
	pg_cntl_dcn->pg_cntl_shift = pg_cntl_shift;
	pg_cntl_dcn->pg_cntl_mask = pg_cntl_mask;

	memset(base->pg_pipe_res_enable, 0, PG_HW_PIPE_RESOURCES_NUM_ELEMENT * MAX_PIPES * sizeof(bool));
	memset(base->pg_res_enable, 0, PG_HW_RESOURCES_NUM_ELEMENT * sizeof(bool));

	return &pg_cntl_dcn->base;
}

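/* Free a pg_cntl instance created by pg_cntl35_create() and clear the caller's pointer. */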
void dcn_pg_cntl_destroy(struct pg_cntl **pg_cntl)
{
	struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(*pg_cntl);

	kfree(pg_cntl_dcn);
	*pg_cntl = NULL;
}