/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "msm_mdss.h"
#include "msm_kms.h"

#include <generated/mdss.xml.h>

#define MIN_IB_BW	400000000UL /* Min ib vote 400MB */

#define DEFAULT_REG_BW	153600 /* Used in mdss fbdev driver */

struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	const struct msm_mdss_data *mdss_data;
	struct icc_path *mdp_path[2];
	u32 num_mdp_paths;
	struct icc_path *reg_bus_path;
};

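/*
 * Look up the interconnect paths used by MDSS: one or two "mdpN-mem" data
 * paths for MDP memory traffic and an optional "cpu-cfg" path for register
 * bus votes. Missing paths are tolerated so that DTs without interconnect
 * properties keep working; real errors (e.g. probe deferral) on the primary
 * path are still propagated.
 */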
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct icc_path *reg_bus_path;

	path0 = devm_of_icc_get(dev, "mdp0-mem");
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->mdp_path[0] = path0;
	msm_mdss->num_mdp_paths = 1;

	path1 = devm_of_icc_get(dev, "mdp1-mem");
	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->mdp_path[1] = path1;
		msm_mdss->num_mdp_paths++;
	}

	reg_bus_path = of_icc_get(dev, "cpu-cfg");
	if (!IS_ERR_OR_NULL(reg_bus_path))
		msm_mdss->reg_bus_path = reg_bus_path;

	return 0;
}

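/*
 * Chained handler for the top level MDSS interrupt: read HW_INTR_STATUS and
 * dispatch every pending bit to the matching interrupt in our irq domain
 * (MDP5/DPU, DSI, HDMI, ...).
 */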
static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);

	while (interrupts) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}

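/*
 * The mask/unmask callbacks only track the enabled state in
 * irq_controller.enabled_mask; no MDSS register is written here. The
 * explicit barriers order the atomic bit update against surrounding
 * accesses.
 */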
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}

static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};

static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;

static int msm_mdss_irqdomain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}

static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};

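/* Register a linear irq domain covering the 32 possible MDSS hwirqs. */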
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
					  &msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}

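/*
 * The helpers below program the static UBWC decoder configuration
 * (MDSS_UBWC_STATIC and, for decoder v4.x, the CTRL_2/PREDICTION_MODE
 * registers) from the per-SoC msm_mdss_data.
 */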
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	if (data->ubwc_enc_version == UBWC_3_0)
		value |= MDSS_UBWC_STATIC_UBWC_AMSBC;

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}

static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
		    MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);

	if (data->ubwc_bank_spread)
		value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;

	if (data->macrotile_mode)
		value |= MDSS_UBWC_STATIC_MACROTILE_MODE;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	if (data->ubwc_enc_version == UBWC_3_0) {
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	} else {
		if (data->ubwc_dec_version == UBWC_4_3)
			writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		else
			writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	}
}

#define MDSS_HW_MAJ_MIN		\
	(MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)

#define MDSS_HW_MSM8996		0x1007
#define MDSS_HW_MSM8937		0x100e
#define MDSS_HW_MSM8953		0x1010
#define MDSS_HW_MSM8998		0x3000
#define MDSS_HW_SDM660		0x3002
#define MDSS_HW_SDM630		0x3003

/*
 * MDP5 platforms use the generic qcom,mdss compat string, so we have to
 * generate this data at runtime from the hardware revision register.
 */
static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
{
	struct msm_mdss_data *data;
	u32 hw_rev;

	data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
	hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);

	if (hw_rev == MDSS_HW_MSM8996 ||
	    hw_rev == MDSS_HW_MSM8937 ||
	    hw_rev == MDSS_HW_MSM8953 ||
	    hw_rev == MDSS_HW_MSM8998 ||
	    hw_rev == MDSS_HW_SDM660 ||
	    hw_rev == MDSS_HW_SDM630) {
		data->ubwc_dec_version = UBWC_1_0;
		data->ubwc_enc_version = UBWC_1_0;
	}

	if (hw_rev == MDSS_HW_MSM8996 ||
	    hw_rev == MDSS_HW_MSM8998)
		data->highest_bank_bit = 2;
	else
		data->highest_bank_bit = 1;

	return data;
}

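/*
 * Return the per-SoC configuration for this MDSS instance. For MDP5
 * platforms the data is generated lazily here, once the HW revision
 * register is readable.
 */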
const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
{
	struct msm_mdss *mdss;

	if (!dev)
		return ERR_PTR(-EINVAL);

	mdss = dev_get_drvdata(dev);

	/*
	 * We could not do this at probe time, since the hw revision register
	 * was not readable yet. Fill the data structure now for the MDP5
	 * platforms.
	 */
	if (!mdss->mdss_data && mdss->is_mdp5)
		mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);

	return mdss->mdss_data;
}

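/* Power up MDSS: vote for bus bandwidth, enable clocks, program UBWC. */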
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret, i;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are at least at a minimum amount.
	 */
	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));

	if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
		icc_set_bw(msm_mdss->reg_bus_path, 0,
			   msm_mdss->mdss_data->reg_bus_bw);
	else
		icc_set_bw(msm_mdss->reg_bus_path, 0,
			   DEFAULT_REG_BW);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * ubwc config is part of the "mdss" region which is not accessible
	 * from the rest of the driver. Hardcode known configurations here.
	 *
	 * The decoder version can be read from the UBWC_DEC_HW_VERSION reg,
	 * UBWC_n and the rest of the params come from hw data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case 0: /* no UBWC */
	case UBWC_1_0:
		/* do nothing */
		break;
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	default:
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
		break;
	}

	return ret;
}

static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	int i;

	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);

	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, 0);

	if (msm_mdss->reg_bus_path)
		icc_set_bw(msm_mdss->reg_bus_path, 0, 0);

	return 0;
}

static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}

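/* Pulse the optional MDSS reset line before the hardware is first used. */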
static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that reset has to be held for some period of time,
	 * make it one frame in a typical system
	 */
	msleep(20);
	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}

/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}

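/*
 * Common init for MDP5 and DPU based MDSS: pulse the optional reset, map the
 * register space, look up interconnect paths and clocks, and install the
 * chained interrupt controller before enabling runtime PM.
 */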
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}

static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_disable(mdss);
}

static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	DBG("");

	return msm_mdss_enable(mdss);
}

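/*
 * System sleep reuses the runtime PM callbacks; if the device is already
 * runtime suspended there is nothing left to do.
 */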
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}

static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}

static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};

static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}

static void mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);
}

static const struct msm_mdss_data msm8998_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_1_0,
	.highest_bank_bit = 2,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data qcm2290_data = {
	/* no UBWC */
	.highest_bank_bit = 0x2,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sa8775p_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 4,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 0,
	.macrotile_mode = true,
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data sar2130p_data = {
	.ubwc_enc_version = UBWC_3_0, /* 4.0.2 in hw */
	.ubwc_dec_version = UBWC_4_3,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 0,
	.macrotile_mode = 1,
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data sc7180_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 0x1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sc7280_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 1,
	.macrotile_mode = true,
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data sc8180x_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sc8280xp_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sdm670_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sdm845_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 2,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm6350_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm7150_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm8150_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 2,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm6115_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_swizzle = 7,
	.ubwc_bank_spread = true,
	.highest_bank_bit = 0x1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm6125_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_3_0,
	.ubwc_swizzle = 1,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm6150_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm8250_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	.reg_bus_bw = 76800,
};

static const struct msm_mdss_data sm8350_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	.reg_bus_bw = 74000,
};

static const struct msm_mdss_data sm8550_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_3,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	.reg_bus_bw = 57000,
};

static const struct msm_mdss_data x1e80100_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_3,
	.ubwc_swizzle = 6,
	.ubwc_bank_spread = true,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	.macrotile_mode = true,
	/* TODO: Add reg_bus_bw with real value */
};

static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
	{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
	{ .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
	{ .compatible = "qcom,sar2130p-mdss", .data = &sar2130p_data },
	{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
	{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
	{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
	{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
	{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
	{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
	{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
	{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
	{ .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
	{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
	{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
	{ .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
	{ .compatible = "qcom,sm8650-mdss", .data = &sm8550_data },
	{ .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);

static struct platform_driver mdss_platform_driver = {
	.probe = mdss_probe,
	.remove = mdss_remove,
	.driver = {
		.name = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm = &mdss_pm_ops,
	},
};

void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}