1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * Tegra30 External Memory Controller driver |
4 | * |
5 | * Based on downstream driver from NVIDIA and tegra124-emc.c |
6 | * Copyright (C) 2011-2014 NVIDIA Corporation |
7 | * |
8 | * Author: Dmitry Osipenko <digetx@gmail.com> |
9 | * Copyright (C) 2019 GRATE-DRIVER project |
10 | */ |
11 | |
12 | #include <linux/bitfield.h> |
13 | #include <linux/clk.h> |
14 | #include <linux/clk/tegra.h> |
15 | #include <linux/debugfs.h> |
16 | #include <linux/delay.h> |
17 | #include <linux/err.h> |
18 | #include <linux/interconnect-provider.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> |
21 | #include <linux/iopoll.h> |
22 | #include <linux/kernel.h> |
23 | #include <linux/module.h> |
24 | #include <linux/mutex.h> |
25 | #include <linux/of.h> |
26 | #include <linux/platform_device.h> |
27 | #include <linux/pm_opp.h> |
28 | #include <linux/slab.h> |
29 | #include <linux/sort.h> |
30 | #include <linux/types.h> |
31 | |
32 | #include <soc/tegra/common.h> |
33 | #include <soc/tegra/fuse.h> |
34 | |
35 | #include "../jedec_ddr.h" |
36 | #include "../of_memory.h" |
37 | |
38 | #include "mc.h" |
39 | |
40 | #define EMC_INTSTATUS 0x000 |
41 | #define EMC_INTMASK 0x004 |
42 | #define EMC_DBG 0x008 |
43 | #define EMC_ADR_CFG 0x010 |
44 | #define EMC_CFG 0x00c |
45 | #define EMC_REFCTRL 0x020 |
46 | #define EMC_TIMING_CONTROL 0x028 |
47 | #define EMC_RC 0x02c |
48 | #define EMC_RFC 0x030 |
49 | #define EMC_RAS 0x034 |
50 | #define EMC_RP 0x038 |
51 | #define EMC_R2W 0x03c |
52 | #define EMC_W2R 0x040 |
53 | #define EMC_R2P 0x044 |
54 | #define EMC_W2P 0x048 |
55 | #define EMC_RD_RCD 0x04c |
56 | #define EMC_WR_RCD 0x050 |
57 | #define EMC_RRD 0x054 |
58 | #define EMC_REXT 0x058 |
59 | #define EMC_WDV 0x05c |
60 | #define EMC_QUSE 0x060 |
61 | #define EMC_QRST 0x064 |
62 | #define EMC_QSAFE 0x068 |
63 | #define EMC_RDV 0x06c |
64 | #define EMC_REFRESH 0x070 |
65 | #define EMC_BURST_REFRESH_NUM 0x074 |
66 | #define EMC_PDEX2WR 0x078 |
67 | #define EMC_PDEX2RD 0x07c |
68 | #define EMC_PCHG2PDEN 0x080 |
69 | #define EMC_ACT2PDEN 0x084 |
70 | #define EMC_AR2PDEN 0x088 |
71 | #define EMC_RW2PDEN 0x08c |
72 | #define EMC_TXSR 0x090 |
73 | #define EMC_TCKE 0x094 |
74 | #define EMC_TFAW 0x098 |
75 | #define EMC_TRPAB 0x09c |
76 | #define EMC_TCLKSTABLE 0x0a0 |
77 | #define EMC_TCLKSTOP 0x0a4 |
78 | #define EMC_TREFBW 0x0a8 |
#define EMC_QUSE_EXTRA 0x0ac
80 | #define EMC_ODT_WRITE 0x0b0 |
81 | #define EMC_ODT_READ 0x0b4 |
82 | #define EMC_WEXT 0x0b8 |
83 | #define EMC_CTT 0x0bc |
84 | #define EMC_MRS_WAIT_CNT 0x0c8 |
85 | #define EMC_MRS 0x0cc |
86 | #define EMC_EMRS 0x0d0 |
87 | #define EMC_SELF_REF 0x0e0 |
88 | #define EMC_MRW 0x0e8 |
89 | #define EMC_MRR 0x0ec |
90 | #define EMC_XM2DQSPADCTRL3 0x0f8 |
91 | #define EMC_FBIO_SPARE 0x100 |
92 | #define EMC_FBIO_CFG5 0x104 |
93 | #define EMC_FBIO_CFG6 0x114 |
94 | #define EMC_CFG_RSV 0x120 |
95 | #define EMC_AUTO_CAL_CONFIG 0x2a4 |
96 | #define EMC_AUTO_CAL_INTERVAL 0x2a8 |
97 | #define EMC_AUTO_CAL_STATUS 0x2ac |
98 | #define EMC_STATUS 0x2b4 |
99 | #define EMC_CFG_2 0x2b8 |
100 | #define EMC_CFG_DIG_DLL 0x2bc |
101 | #define EMC_CFG_DIG_DLL_PERIOD 0x2c0 |
102 | #define EMC_CTT_DURATION 0x2d8 |
103 | #define EMC_CTT_TERM_CTRL 0x2dc |
104 | #define EMC_ZCAL_INTERVAL 0x2e0 |
105 | #define EMC_ZCAL_WAIT_CNT 0x2e4 |
106 | #define EMC_ZQ_CAL 0x2ec |
107 | #define EMC_XM2CMDPADCTRL 0x2f0 |
108 | #define EMC_XM2DQSPADCTRL2 0x2fc |
109 | #define EMC_XM2DQPADCTRL2 0x304 |
110 | #define EMC_XM2CLKPADCTRL 0x308 |
111 | #define EMC_XM2COMPPADCTRL 0x30c |
112 | #define EMC_XM2VTTGENPADCTRL 0x310 |
113 | #define EMC_XM2VTTGENPADCTRL2 0x314 |
114 | #define EMC_XM2QUSEPADCTRL 0x318 |
115 | #define EMC_DLL_XFORM_DQS0 0x328 |
116 | #define EMC_DLL_XFORM_DQS1 0x32c |
117 | #define EMC_DLL_XFORM_DQS2 0x330 |
118 | #define EMC_DLL_XFORM_DQS3 0x334 |
119 | #define EMC_DLL_XFORM_DQS4 0x338 |
120 | #define EMC_DLL_XFORM_DQS5 0x33c |
121 | #define EMC_DLL_XFORM_DQS6 0x340 |
122 | #define EMC_DLL_XFORM_DQS7 0x344 |
123 | #define EMC_DLL_XFORM_QUSE0 0x348 |
124 | #define EMC_DLL_XFORM_QUSE1 0x34c |
125 | #define EMC_DLL_XFORM_QUSE2 0x350 |
126 | #define EMC_DLL_XFORM_QUSE3 0x354 |
127 | #define EMC_DLL_XFORM_QUSE4 0x358 |
128 | #define EMC_DLL_XFORM_QUSE5 0x35c |
129 | #define EMC_DLL_XFORM_QUSE6 0x360 |
130 | #define EMC_DLL_XFORM_QUSE7 0x364 |
131 | #define EMC_DLL_XFORM_DQ0 0x368 |
132 | #define EMC_DLL_XFORM_DQ1 0x36c |
133 | #define EMC_DLL_XFORM_DQ2 0x370 |
134 | #define EMC_DLL_XFORM_DQ3 0x374 |
135 | #define EMC_DLI_TRIM_TXDQS0 0x3a8 |
136 | #define EMC_DLI_TRIM_TXDQS1 0x3ac |
137 | #define EMC_DLI_TRIM_TXDQS2 0x3b0 |
138 | #define EMC_DLI_TRIM_TXDQS3 0x3b4 |
139 | #define EMC_DLI_TRIM_TXDQS4 0x3b8 |
140 | #define EMC_DLI_TRIM_TXDQS5 0x3bc |
141 | #define EMC_DLI_TRIM_TXDQS6 0x3c0 |
142 | #define EMC_DLI_TRIM_TXDQS7 0x3c4 |
143 | #define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8 |
144 | #define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc |
145 | #define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0 |
146 | #define EMC_SEL_DPD_CTRL 0x3d8 |
147 | #define EMC_PRE_REFRESH_REQ_CNT 0x3dc |
148 | #define EMC_DYN_SELF_REF_CONTROL 0x3e0 |
149 | #define EMC_TXSRDLL 0x3e4 |
150 | |
151 | #define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23) |
152 | |
153 | #define EMC_MODE_SET_DLL_RESET BIT(8) |
154 | #define EMC_MODE_SET_LONG_CNT BIT(26) |
155 | |
156 | #define EMC_SELF_REF_CMD_ENABLED BIT(0) |
157 | |
158 | #define DRAM_DEV_SEL_ALL (0 << 30) |
159 | #define DRAM_DEV_SEL_0 BIT(31) |
160 | #define DRAM_DEV_SEL_1 BIT(30) |
161 | #define DRAM_BROADCAST(num) \ |
162 | ((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0) |
163 | |
164 | #define EMC_ZQ_CAL_CMD BIT(0) |
165 | #define EMC_ZQ_CAL_LONG BIT(4) |
166 | #define EMC_ZQ_CAL_LONG_CMD_DEV0 \ |
167 | (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD) |
168 | #define EMC_ZQ_CAL_LONG_CMD_DEV1 \ |
169 | (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD) |
170 | |
171 | #define EMC_DBG_READ_MUX_ASSEMBLY BIT(0) |
172 | #define EMC_DBG_WRITE_MUX_ACTIVE BIT(1) |
173 | #define EMC_DBG_FORCE_UPDATE BIT(2) |
174 | #define EMC_DBG_CFG_PRIORITY BIT(24) |
175 | |
176 | #define EMC_CFG5_QUSE_MODE_SHIFT 13 |
177 | #define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT) |
178 | |
179 | #define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2 |
180 | #define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3 |
181 | |
182 | #define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9) |
183 | |
184 | #define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10) |
185 | |
186 | #define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4) |
187 | |
188 | #define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5) |
189 | #define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5) |
190 | |
191 | #define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31) |
192 | |
193 | #define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3 |
194 | |
195 | #define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff |
196 | #define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16 |
197 | #define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \ |
198 | (0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) |
199 | |
200 | #define EMC_REFCTRL_DEV_SEL_MASK 0x3 |
201 | #define EMC_REFCTRL_ENABLE BIT(31) |
202 | #define EMC_REFCTRL_ENABLE_ALL(num) \ |
203 | (((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE) |
204 | #define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2) |
205 | |
206 | #define EMC_CFG_PERIODIC_QRST BIT(21) |
207 | #define EMC_CFG_DYN_SREF_ENABLE BIT(28) |
208 | |
209 | #define EMC_CLKCHANGE_REQ_ENABLE BIT(0) |
210 | #define EMC_CLKCHANGE_PD_ENABLE BIT(1) |
211 | #define EMC_CLKCHANGE_SR_ENABLE BIT(2) |
212 | |
213 | #define EMC_TIMING_UPDATE BIT(0) |
214 | |
215 | #define EMC_REFRESH_OVERFLOW_INT BIT(3) |
216 | #define EMC_CLKCHANGE_COMPLETE_INT BIT(4) |
217 | #define EMC_MRR_DIVLD_INT BIT(5) |
218 | |
219 | #define EMC_MRR_DEV_SELECTN GENMASK(31, 30) |
220 | #define EMC_MRR_MRR_MA GENMASK(23, 16) |
221 | #define EMC_MRR_MRR_DATA GENMASK(15, 0) |
222 | |
223 | #define EMC_ADR_CFG_EMEM_NUMDEV BIT(0) |
224 | |
225 | enum emc_dram_type { |
226 | DRAM_TYPE_DDR3, |
227 | DRAM_TYPE_DDR1, |
228 | DRAM_TYPE_LPDDR2, |
229 | DRAM_TYPE_DDR2, |
230 | }; |
231 | |
232 | enum emc_dll_change { |
233 | DLL_CHANGE_NONE, |
234 | DLL_CHANGE_ON, |
235 | DLL_CHANGE_OFF |
236 | }; |
237 | |
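/*
 * Each entry index in this table matches the position of the corresponding
 * value within the "nvidia,emc-configuration" device-tree property, which
 * load_one_timing_from_dt() copies verbatim into struct emc_timing::data.
 */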
238 | static const u16 emc_timing_registers[] = { |
239 | [0] = EMC_RC, |
240 | [1] = EMC_RFC, |
241 | [2] = EMC_RAS, |
242 | [3] = EMC_RP, |
243 | [4] = EMC_R2W, |
244 | [5] = EMC_W2R, |
245 | [6] = EMC_R2P, |
246 | [7] = EMC_W2P, |
247 | [8] = EMC_RD_RCD, |
248 | [9] = EMC_WR_RCD, |
249 | [10] = EMC_RRD, |
250 | [11] = EMC_REXT, |
251 | [12] = EMC_WEXT, |
252 | [13] = EMC_WDV, |
253 | [14] = EMC_QUSE, |
254 | [15] = EMC_QRST, |
255 | [16] = EMC_QSAFE, |
256 | [17] = EMC_RDV, |
257 | [18] = EMC_REFRESH, |
258 | [19] = EMC_BURST_REFRESH_NUM, |
259 | [20] = EMC_PRE_REFRESH_REQ_CNT, |
260 | [21] = EMC_PDEX2WR, |
261 | [22] = EMC_PDEX2RD, |
262 | [23] = EMC_PCHG2PDEN, |
263 | [24] = EMC_ACT2PDEN, |
264 | [25] = EMC_AR2PDEN, |
265 | [26] = EMC_RW2PDEN, |
266 | [27] = EMC_TXSR, |
267 | [28] = EMC_TXSRDLL, |
268 | [29] = EMC_TCKE, |
269 | [30] = EMC_TFAW, |
270 | [31] = EMC_TRPAB, |
271 | [32] = EMC_TCLKSTABLE, |
272 | [33] = EMC_TCLKSTOP, |
273 | [34] = EMC_TREFBW, |
274 | [35] = EMC_QUSE_EXTRA, |
275 | [36] = EMC_FBIO_CFG6, |
276 | [37] = EMC_ODT_WRITE, |
277 | [38] = EMC_ODT_READ, |
278 | [39] = EMC_FBIO_CFG5, |
279 | [40] = EMC_CFG_DIG_DLL, |
280 | [41] = EMC_CFG_DIG_DLL_PERIOD, |
281 | [42] = EMC_DLL_XFORM_DQS0, |
282 | [43] = EMC_DLL_XFORM_DQS1, |
283 | [44] = EMC_DLL_XFORM_DQS2, |
284 | [45] = EMC_DLL_XFORM_DQS3, |
285 | [46] = EMC_DLL_XFORM_DQS4, |
286 | [47] = EMC_DLL_XFORM_DQS5, |
287 | [48] = EMC_DLL_XFORM_DQS6, |
288 | [49] = EMC_DLL_XFORM_DQS7, |
289 | [50] = EMC_DLL_XFORM_QUSE0, |
290 | [51] = EMC_DLL_XFORM_QUSE1, |
291 | [52] = EMC_DLL_XFORM_QUSE2, |
292 | [53] = EMC_DLL_XFORM_QUSE3, |
293 | [54] = EMC_DLL_XFORM_QUSE4, |
294 | [55] = EMC_DLL_XFORM_QUSE5, |
295 | [56] = EMC_DLL_XFORM_QUSE6, |
296 | [57] = EMC_DLL_XFORM_QUSE7, |
297 | [58] = EMC_DLI_TRIM_TXDQS0, |
298 | [59] = EMC_DLI_TRIM_TXDQS1, |
299 | [60] = EMC_DLI_TRIM_TXDQS2, |
300 | [61] = EMC_DLI_TRIM_TXDQS3, |
301 | [62] = EMC_DLI_TRIM_TXDQS4, |
302 | [63] = EMC_DLI_TRIM_TXDQS5, |
303 | [64] = EMC_DLI_TRIM_TXDQS6, |
304 | [65] = EMC_DLI_TRIM_TXDQS7, |
305 | [66] = EMC_DLL_XFORM_DQ0, |
306 | [67] = EMC_DLL_XFORM_DQ1, |
307 | [68] = EMC_DLL_XFORM_DQ2, |
308 | [69] = EMC_DLL_XFORM_DQ3, |
309 | [70] = EMC_XM2CMDPADCTRL, |
310 | [71] = EMC_XM2DQSPADCTRL2, |
311 | [72] = EMC_XM2DQPADCTRL2, |
312 | [73] = EMC_XM2CLKPADCTRL, |
313 | [74] = EMC_XM2COMPPADCTRL, |
314 | [75] = EMC_XM2VTTGENPADCTRL, |
315 | [76] = EMC_XM2VTTGENPADCTRL2, |
316 | [77] = EMC_XM2QUSEPADCTRL, |
317 | [78] = EMC_XM2DQSPADCTRL3, |
318 | [79] = EMC_CTT_TERM_CTRL, |
319 | [80] = EMC_ZCAL_INTERVAL, |
320 | [81] = EMC_ZCAL_WAIT_CNT, |
321 | [82] = EMC_MRS_WAIT_CNT, |
322 | [83] = EMC_AUTO_CAL_CONFIG, |
323 | [84] = EMC_CTT, |
324 | [85] = EMC_CTT_DURATION, |
325 | [86] = EMC_DYN_SELF_REF_CONTROL, |
326 | [87] = EMC_FBIO_SPARE, |
327 | [88] = EMC_CFG_RSV, |
328 | }; |
329 | |
330 | struct emc_timing { |
331 | unsigned long rate; |
332 | |
333 | u32 data[ARRAY_SIZE(emc_timing_registers)]; |
334 | |
335 | u32 emc_auto_cal_interval; |
336 | u32 emc_mode_1; |
337 | u32 emc_mode_2; |
338 | u32 emc_mode_reset; |
339 | u32 emc_zcal_cnt_long; |
340 | bool emc_cfg_periodic_qrst; |
341 | bool emc_cfg_dyn_self_ref; |
342 | }; |
343 | |
344 | enum emc_rate_request_type { |
345 | EMC_RATE_DEBUG, |
346 | EMC_RATE_ICC, |
347 | EMC_RATE_TYPE_MAX, |
348 | }; |
349 | |
350 | struct emc_rate_request { |
351 | unsigned long min_rate; |
352 | unsigned long max_rate; |
353 | }; |
354 | |
355 | struct tegra_emc { |
356 | struct device *dev; |
357 | struct tegra_mc *mc; |
358 | struct icc_provider provider; |
359 | struct notifier_block clk_nb; |
360 | struct clk *clk; |
361 | void __iomem *regs; |
362 | unsigned int irq; |
363 | bool bad_state; |
364 | |
365 | struct emc_timing *new_timing; |
366 | struct emc_timing *timings; |
367 | unsigned int num_timings; |
368 | |
369 | u32 mc_override; |
370 | u32 emc_cfg; |
371 | |
372 | u32 emc_mode_1; |
373 | u32 emc_mode_2; |
374 | u32 emc_mode_reset; |
375 | |
376 | bool vref_cal_toggle : 1; |
377 | bool zcal_long : 1; |
378 | bool dll_on : 1; |
379 | |
380 | struct { |
381 | struct dentry *root; |
382 | unsigned long min_rate; |
383 | unsigned long max_rate; |
384 | } debugfs; |
385 | |
386 | /* |
* There are multiple sources in the EMC driver which could request
* a min/max clock rate; these rates are contained in this array.
389 | */ |
390 | struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX]; |
391 | |
392 | /* protect shared rate-change code path */ |
393 | struct mutex rate_lock; |
394 | |
395 | bool mrr_error; |
396 | }; |
397 | |
398 | static int emc_seq_update_timing(struct tegra_emc *emc) |
399 | { |
400 | u32 val; |
401 | int err; |
402 | |
403 | writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL); |
404 | |
405 | err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val, |
406 | !(val & EMC_STATUS_TIMING_UPDATE_STALLED), |
407 | 1, 200); |
408 | if (err) { |
dev_err(emc->dev, "failed to update timing: %d\n", err);
410 | return err; |
411 | } |
412 | |
413 | return 0; |
414 | } |
415 | |
416 | static irqreturn_t tegra_emc_isr(int irq, void *data) |
417 | { |
418 | struct tegra_emc *emc = data; |
419 | u32 intmask = EMC_REFRESH_OVERFLOW_INT; |
420 | u32 status; |
421 | |
422 | status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask; |
423 | if (!status) |
424 | return IRQ_NONE; |
425 | |
426 | /* notify about HW problem */ |
427 | if (status & EMC_REFRESH_OVERFLOW_INT) |
dev_err_ratelimited(emc->dev,
"refresh request overflow timeout\n");
430 | |
431 | /* clear interrupts */ |
432 | writel_relaxed(status, emc->regs + EMC_INTSTATUS); |
433 | |
434 | return IRQ_HANDLED; |
435 | } |
436 | |
437 | static struct emc_timing *emc_find_timing(struct tegra_emc *emc, |
438 | unsigned long rate) |
439 | { |
440 | struct emc_timing *timing = NULL; |
441 | unsigned int i; |
442 | |
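/*
 * Timings are sorted by rate in ascending order (see cmp_timings()),
 * so this picks the lowest available rate that satisfies the request.
 */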
443 | for (i = 0; i < emc->num_timings; i++) { |
444 | if (emc->timings[i].rate >= rate) { |
445 | timing = &emc->timings[i]; |
446 | break; |
447 | } |
448 | } |
449 | |
450 | if (!timing) { |
dev_err(emc->dev, "no timing for rate %lu\n", rate);
452 | return NULL; |
453 | } |
454 | |
455 | return timing; |
456 | } |
457 | |
458 | static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing, |
459 | bool *schmitt_to_vref) |
460 | { |
461 | bool preset = false; |
462 | u32 val; |
463 | |
464 | if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) { |
465 | val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2); |
466 | |
467 | if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) { |
468 | val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE; |
469 | writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2); |
470 | |
471 | preset = true; |
472 | } |
473 | } |
474 | |
475 | if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) { |
476 | val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3); |
477 | |
478 | if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) { |
479 | val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE; |
480 | writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3); |
481 | |
482 | preset = true; |
483 | } |
484 | } |
485 | |
486 | if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) { |
487 | val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL); |
488 | |
489 | if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) { |
490 | val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE; |
491 | writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL); |
492 | |
493 | *schmitt_to_vref = true; |
494 | preset = true; |
495 | } |
496 | } |
497 | |
498 | return preset; |
499 | } |
500 | |
501 | static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate) |
502 | { |
503 | struct tegra_mc *mc = emc->mc; |
504 | unsigned int misc0_index = 16; |
505 | unsigned int i; |
506 | bool same; |
507 | |
508 | for (i = 0; i < mc->num_timings; i++) { |
509 | if (mc->timings[i].rate != rate) |
510 | continue; |
511 | |
512 | if (mc->timings[i].emem_data[misc0_index] & BIT(27)) |
513 | same = true; |
514 | else |
515 | same = false; |
516 | |
return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
518 | } |
519 | |
520 | return -EINVAL; |
521 | } |
522 | |
523 | static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate) |
524 | { |
525 | struct emc_timing *timing = emc_find_timing(emc, rate); |
526 | enum emc_dll_change dll_change; |
527 | enum emc_dram_type dram_type; |
528 | bool schmitt_to_vref = false; |
529 | unsigned int pre_wait = 0; |
530 | bool qrst_used = false; |
531 | unsigned int dram_num; |
532 | unsigned int i; |
533 | u32 fbio_cfg5; |
534 | u32 emc_dbg; |
535 | u32 val; |
536 | int err; |
537 | |
538 | if (!timing || emc->bad_state) |
539 | return -EINVAL; |
540 | |
dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
542 | __func__, timing->rate, rate); |
543 | |
544 | emc->bad_state = true; |
545 | |
546 | err = emc_prepare_mc_clk_cfg(emc, rate); |
547 | if (err) { |
dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
549 | return err; |
550 | } |
551 | |
552 | emc->vref_cal_toggle = false; |
emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
554 | emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG); |
555 | emc_dbg = readl_relaxed(emc->regs + EMC_DBG); |
556 | |
557 | if (emc->dll_on == !!(timing->emc_mode_1 & 0x1)) |
558 | dll_change = DLL_CHANGE_NONE; |
559 | else if (timing->emc_mode_1 & 0x1) |
560 | dll_change = DLL_CHANGE_ON; |
561 | else |
562 | dll_change = DLL_CHANGE_OFF; |
563 | |
564 | emc->dll_on = !!(timing->emc_mode_1 & 0x1); |
565 | |
566 | if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL)) |
567 | emc->zcal_long = true; |
568 | else |
569 | emc->zcal_long = false; |
570 | |
571 | fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5); |
572 | dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK; |
573 | |
dram_num = tegra_mc_get_emem_device_count(emc->mc);
575 | |
576 | /* disable dynamic self-refresh */ |
577 | if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) { |
578 | emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE; |
579 | writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG); |
580 | |
581 | pre_wait = 5; |
582 | } |
583 | |
584 | /* update MC arbiter settings */ |
val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
586 | if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) || |
587 | ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) { |
588 | |
589 | val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE | |
590 | MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50; |
mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
593 | } |
594 | |
595 | if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK) |
mc_writel(emc->mc,
emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
598 | MC_EMEM_ARB_OVERRIDE); |
599 | |
600 | /* check DQ/DQS VREF delay */ |
if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
602 | if (pre_wait < 3) |
603 | pre_wait = 3; |
604 | } |
605 | |
606 | if (pre_wait) { |
607 | err = emc_seq_update_timing(emc); |
608 | if (err) |
609 | return err; |
610 | |
611 | udelay(pre_wait); |
612 | } |
613 | |
614 | /* disable auto-calibration if VREF mode is switching */ |
615 | if (timing->emc_auto_cal_interval) { |
616 | val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL); |
617 | val ^= timing->data[74]; |
618 | |
619 | if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) { |
620 | writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL); |
621 | |
622 | err = readl_relaxed_poll_timeout_atomic( |
623 | emc->regs + EMC_AUTO_CAL_STATUS, val, |
624 | !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300); |
625 | if (err) { |
dev_err(emc->dev,
"auto-cal finish timeout: %d\n", err);
628 | return err; |
629 | } |
630 | |
631 | emc->vref_cal_toggle = true; |
632 | } |
633 | } |
634 | |
635 | /* program shadow registers */ |
636 | for (i = 0; i < ARRAY_SIZE(timing->data); i++) { |
637 | /* EMC_XM2CLKPADCTRL should be programmed separately */ |
638 | if (i != 73) |
639 | writel_relaxed(timing->data[i], |
640 | emc->regs + emc_timing_registers[i]); |
641 | } |
642 | |
err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
644 | if (err) |
645 | return err; |
646 | |
647 | /* DDR3: predict MRS long wait count */ |
648 | if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) { |
649 | u32 cnt = 512; |
650 | |
651 | if (emc->zcal_long) |
652 | cnt -= dram_num * 256; |
653 | |
654 | val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK; |
655 | if (cnt < val) |
656 | cnt = val; |
657 | |
658 | val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK; |
659 | val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) & |
660 | EMC_MRS_WAIT_CNT_LONG_WAIT_MASK; |
661 | |
662 | writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT); |
663 | } |
664 | |
665 | /* this read also completes the writes */ |
666 | val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL); |
667 | |
668 | if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) { |
669 | u32 cur_mode, new_mode; |
670 | |
671 | cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK; |
672 | cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT; |
673 | |
674 | new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK; |
675 | new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT; |
676 | |
677 | if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN && |
678 | cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) || |
679 | (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN && |
680 | new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) |
681 | qrst_used = true; |
682 | } |
683 | |
684 | /* flow control marker 1 */ |
685 | writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE); |
686 | |
687 | /* enable periodic reset */ |
688 | if (qrst_used) { |
689 | writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, |
690 | emc->regs + EMC_DBG); |
691 | writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST, |
692 | emc->regs + EMC_CFG); |
693 | writel_relaxed(emc_dbg, emc->regs + EMC_DBG); |
694 | } |
695 | |
696 | /* disable auto-refresh to save time after clock change */ |
697 | writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num), |
698 | emc->regs + EMC_REFCTRL); |
699 | |
700 | /* turn off DLL and enter self-refresh on DDR3 */ |
701 | if (dram_type == DRAM_TYPE_DDR3) { |
702 | if (dll_change == DLL_CHANGE_OFF) |
703 | writel_relaxed(timing->emc_mode_1, |
704 | emc->regs + EMC_EMRS); |
705 | |
706 | writel_relaxed(DRAM_BROADCAST(dram_num) | |
707 | EMC_SELF_REF_CMD_ENABLED, |
708 | emc->regs + EMC_SELF_REF); |
709 | } |
710 | |
711 | /* flow control marker 2 */ |
712 | writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE); |
713 | |
714 | /* enable write-active MUX, update unshadowed pad control */ |
715 | writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG); |
716 | writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL); |
717 | |
718 | /* restore periodic QRST and disable write-active MUX */ |
719 | val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST); |
720 | if (qrst_used || timing->emc_cfg_periodic_qrst != val) { |
721 | if (timing->emc_cfg_periodic_qrst) |
722 | emc->emc_cfg |= EMC_CFG_PERIODIC_QRST; |
723 | else |
724 | emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST; |
725 | |
726 | writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG); |
727 | } |
728 | writel_relaxed(emc_dbg, emc->regs + EMC_DBG); |
729 | |
730 | /* exit self-refresh on DDR3 */ |
731 | if (dram_type == DRAM_TYPE_DDR3) |
732 | writel_relaxed(DRAM_BROADCAST(dram_num), |
733 | emc->regs + EMC_SELF_REF); |
734 | |
735 | /* set DRAM-mode registers */ |
736 | if (dram_type == DRAM_TYPE_DDR3) { |
737 | if (timing->emc_mode_1 != emc->emc_mode_1) |
738 | writel_relaxed(timing->emc_mode_1, |
739 | emc->regs + EMC_EMRS); |
740 | |
741 | if (timing->emc_mode_2 != emc->emc_mode_2) |
742 | writel_relaxed(timing->emc_mode_2, |
743 | emc->regs + EMC_EMRS); |
744 | |
745 | if (timing->emc_mode_reset != emc->emc_mode_reset || |
746 | dll_change == DLL_CHANGE_ON) { |
747 | val = timing->emc_mode_reset; |
748 | if (dll_change == DLL_CHANGE_ON) { |
749 | val |= EMC_MODE_SET_DLL_RESET; |
750 | val |= EMC_MODE_SET_LONG_CNT; |
751 | } else { |
752 | val &= ~EMC_MODE_SET_DLL_RESET; |
753 | } |
754 | writel_relaxed(val, emc->regs + EMC_MRS); |
755 | } |
756 | } else { |
757 | if (timing->emc_mode_2 != emc->emc_mode_2) |
758 | writel_relaxed(timing->emc_mode_2, |
759 | emc->regs + EMC_MRW); |
760 | |
761 | if (timing->emc_mode_1 != emc->emc_mode_1) |
762 | writel_relaxed(timing->emc_mode_1, |
763 | emc->regs + EMC_MRW); |
764 | } |
765 | |
766 | emc->emc_mode_1 = timing->emc_mode_1; |
767 | emc->emc_mode_2 = timing->emc_mode_2; |
768 | emc->emc_mode_reset = timing->emc_mode_reset; |
769 | |
770 | /* issue ZCAL command if turning ZCAL on */ |
771 | if (emc->zcal_long) { |
772 | writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0, |
773 | emc->regs + EMC_ZQ_CAL); |
774 | |
775 | if (dram_num > 1) |
776 | writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1, |
777 | emc->regs + EMC_ZQ_CAL); |
778 | } |
779 | |
780 | /* flow control marker 3 */ |
781 | writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE); |
782 | |
783 | /* |
784 | * Read and discard an arbitrary MC register (Note: EMC registers |
785 | * can't be used) to ensure the register writes are completed. |
786 | */ |
mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
788 | |
789 | return 0; |
790 | } |
791 | |
792 | static int emc_complete_timing_change(struct tegra_emc *emc, |
793 | unsigned long rate) |
794 | { |
795 | struct emc_timing *timing = emc_find_timing(emc, rate); |
796 | unsigned int dram_num; |
797 | int err; |
798 | u32 v; |
799 | |
800 | err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v, |
801 | v & EMC_CLKCHANGE_COMPLETE_INT, |
802 | 1, 100); |
803 | if (err) { |
dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
805 | return err; |
806 | } |
807 | |
808 | /* re-enable auto-refresh */ |
dram_num = tegra_mc_get_emem_device_count(emc->mc);
810 | writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num), |
811 | emc->regs + EMC_REFCTRL); |
812 | |
813 | /* restore auto-calibration */ |
814 | if (emc->vref_cal_toggle) |
815 | writel_relaxed(timing->emc_auto_cal_interval, |
816 | emc->regs + EMC_AUTO_CAL_INTERVAL); |
817 | |
818 | /* restore dynamic self-refresh */ |
819 | if (timing->emc_cfg_dyn_self_ref) { |
820 | emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE; |
821 | writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG); |
822 | } |
823 | |
824 | /* set number of clocks to wait after each ZQ command */ |
825 | if (emc->zcal_long) |
826 | writel_relaxed(timing->emc_zcal_cnt_long, |
827 | emc->regs + EMC_ZCAL_WAIT_CNT); |
828 | |
829 | /* wait for writes to settle */ |
830 | udelay(2); |
831 | |
832 | /* update restored timing */ |
833 | err = emc_seq_update_timing(emc); |
834 | if (!err) |
835 | emc->bad_state = false; |
836 | |
837 | /* restore early ACK */ |
mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
839 | |
840 | return err; |
841 | } |
842 | |
843 | static int emc_unprepare_timing_change(struct tegra_emc *emc, |
844 | unsigned long rate) |
845 | { |
846 | if (!emc->bad_state) { |
847 | /* shouldn't ever happen in practice */ |
dev_err(emc->dev, "timing configuration can't be reverted\n");
849 | emc->bad_state = true; |
850 | } |
851 | |
852 | return 0; |
853 | } |
854 | |
855 | static int emc_clk_change_notify(struct notifier_block *nb, |
856 | unsigned long msg, void *data) |
857 | { |
858 | struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb); |
859 | struct clk_notifier_data *cnd = data; |
860 | int err; |
861 | |
862 | switch (msg) { |
863 | case PRE_RATE_CHANGE: |
864 | /* |
865 | * Disable interrupt since read accesses are prohibited after |
866 | * stalling. |
867 | */ |
disable_irq(emc->irq);
err = emc_prepare_timing_change(emc, cnd->new_rate);
enable_irq(emc->irq);
871 | break; |
872 | |
873 | case ABORT_RATE_CHANGE: |
err = emc_unprepare_timing_change(emc, cnd->old_rate);
875 | break; |
876 | |
877 | case POST_RATE_CHANGE: |
err = emc_complete_timing_change(emc, cnd->new_rate);
879 | break; |
880 | |
881 | default: |
882 | return NOTIFY_DONE; |
883 | } |
884 | |
885 | return notifier_from_errno(err); |
886 | } |
887 | |
888 | static int load_one_timing_from_dt(struct tegra_emc *emc, |
889 | struct emc_timing *timing, |
890 | struct device_node *node) |
891 | { |
892 | u32 value; |
893 | int err; |
894 | |
err = of_property_read_u32(node, "clock-frequency", &value);
896 | if (err) { |
dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
898 | node, err); |
899 | return err; |
900 | } |
901 | |
902 | timing->rate = value; |
903 | |
err = of_property_read_u32_array(node, "nvidia,emc-configuration",
timing->data,
906 | ARRAY_SIZE(emc_timing_registers)); |
907 | if (err) { |
dev_err(emc->dev,
"timing %pOF: failed to read emc timing data: %d\n",
910 | node, err); |
911 | return err; |
912 | } |
913 | |
914 | #define EMC_READ_BOOL(prop, dtprop) \ |
915 | timing->prop = of_property_read_bool(node, dtprop); |
916 | |
917 | #define EMC_READ_U32(prop, dtprop) \ |
918 | err = of_property_read_u32(node, dtprop, &timing->prop); \ |
919 | if (err) { \ |
920 | dev_err(emc->dev, \ |
921 | "timing %pOFn: failed to read " #prop ": %d\n", \ |
922 | node, err); \ |
923 | return err; \ |
924 | } |
925 | |
EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")
933 | |
934 | #undef EMC_READ_U32 |
935 | #undef EMC_READ_BOOL |
936 | |
dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
938 | |
939 | return 0; |
940 | } |
941 | |
942 | static int cmp_timings(const void *_a, const void *_b) |
943 | { |
944 | const struct emc_timing *a = _a; |
945 | const struct emc_timing *b = _b; |
946 | |
947 | if (a->rate < b->rate) |
948 | return -1; |
949 | |
950 | if (a->rate > b->rate) |
951 | return 1; |
952 | |
953 | return 0; |
954 | } |
955 | |
956 | static int emc_check_mc_timings(struct tegra_emc *emc) |
957 | { |
958 | struct tegra_mc *mc = emc->mc; |
959 | unsigned int i; |
960 | |
961 | if (emc->num_timings != mc->num_timings) { |
dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
963 | emc->num_timings, mc->num_timings); |
964 | return -EINVAL; |
965 | } |
966 | |
967 | for (i = 0; i < mc->num_timings; i++) { |
968 | if (emc->timings[i].rate != mc->timings[i].rate) { |
dev_err(emc->dev,
"emc/mc timing rate mismatch: %lu %lu\n",
971 | emc->timings[i].rate, mc->timings[i].rate); |
972 | return -EINVAL; |
973 | } |
974 | } |
975 | |
976 | return 0; |
977 | } |
978 | |
979 | static int emc_load_timings_from_dt(struct tegra_emc *emc, |
980 | struct device_node *node) |
981 | { |
982 | struct device_node *child; |
983 | struct emc_timing *timing; |
984 | int child_count; |
985 | int err; |
986 | |
child_count = of_get_child_count(node);
988 | if (!child_count) { |
dev_err(emc->dev, "no memory timings in: %pOF\n", node);
990 | return -EINVAL; |
991 | } |
992 | |
emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
994 | GFP_KERNEL); |
995 | if (!emc->timings) |
996 | return -ENOMEM; |
997 | |
998 | emc->num_timings = child_count; |
999 | timing = emc->timings; |
1000 | |
1001 | for_each_child_of_node(node, child) { |
err = load_one_timing_from_dt(emc, timing++, child);
1003 | if (err) { |
of_node_put(child);
1005 | return err; |
1006 | } |
1007 | } |
1008 | |
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
1010 | NULL); |
1011 | |
1012 | err = emc_check_mc_timings(emc); |
1013 | if (err) |
1014 | return err; |
1015 | |
dev_info_once(emc->dev,
"got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
1018 | emc->num_timings, |
1019 | tegra_read_ram_code(), |
1020 | emc->timings[0].rate / 1000000, |
1021 | emc->timings[emc->num_timings - 1].rate / 1000000); |
1022 | |
1023 | return 0; |
1024 | } |
1025 | |
1026 | static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc) |
1027 | { |
1028 | struct device *dev = emc->dev; |
1029 | struct device_node *np; |
1030 | u32 value, ram_code; |
1031 | int err; |
1032 | |
1033 | if (emc->mrr_error) { |
dev_warn(dev, "memory timings skipped due to MRR error\n");
1035 | return NULL; |
1036 | } |
1037 | |
if (of_get_child_count(dev->of_node) == 0) {
dev_info_once(dev, "device-tree doesn't have memory timings\n");
1040 | return NULL; |
1041 | } |
1042 | |
1043 | ram_code = tegra_read_ram_code(); |
1044 | |
1045 | for_each_child_of_node(dev->of_node, np) { |
err = of_property_read_u32(np, "nvidia,ram-code", &value);
1047 | if (err || value != ram_code) |
1048 | continue; |
1049 | |
1050 | return np; |
1051 | } |
1052 | |
dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
1054 | ram_code); |
1055 | |
1056 | return NULL; |
1057 | } |
1058 | |
1059 | static int emc_read_lpddr_mode_register(struct tegra_emc *emc, |
1060 | unsigned int emem_dev, |
1061 | unsigned int register_addr, |
1062 | unsigned int *register_data) |
1063 | { |
1064 | u32 memory_dev = emem_dev ? 1 : 2; |
1065 | u32 val, mr_mask = 0xff; |
1066 | int err; |
1067 | |
1068 | /* clear data-valid interrupt status */ |
1069 | writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS); |
1070 | |
1071 | /* issue mode register read request */ |
1072 | val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev); |
1073 | val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr); |
1074 | |
1075 | writel_relaxed(val, emc->regs + EMC_MRR); |
1076 | |
1077 | /* wait for the LPDDR2 data-valid interrupt */ |
1078 | err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val, |
1079 | val & EMC_MRR_DIVLD_INT, |
1080 | 1, 100); |
1081 | if (err) { |
dev_err(emc->dev, "mode register %u read failed: %d\n",
1083 | register_addr, err); |
1084 | emc->mrr_error = true; |
1085 | return err; |
1086 | } |
1087 | |
1088 | /* read out mode register data */ |
1089 | val = readl_relaxed(emc->regs + EMC_MRR); |
1090 | *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask; |
1091 | |
1092 | return 0; |
1093 | } |
1094 | |
1095 | static void emc_read_lpddr_sdram_info(struct tegra_emc *emc, |
1096 | unsigned int emem_dev) |
1097 | { |
1098 | union lpddr2_basic_config4 basic_conf4; |
1099 | unsigned int manufacturer_id; |
1100 | unsigned int revision_id1; |
1101 | unsigned int revision_id2; |
1102 | |
1103 | /* these registers are standard for all LPDDR JEDEC memory chips */ |
emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id);
emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1);
emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2);
emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value);
1108 | |
dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
1110 | emem_dev, manufacturer_id, |
1111 | lpddr2_jedec_manufacturer(manufacturer_id), |
1112 | revision_id1, revision_id2, |
1113 | 4 >> basic_conf4.arch_type, |
1114 | 64 << basic_conf4.density, |
1115 | 32 >> basic_conf4.io_width); |
1116 | } |
1117 | |
1118 | static int emc_setup_hw(struct tegra_emc *emc) |
1119 | { |
1120 | u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg; |
1121 | u32 intmask = EMC_REFRESH_OVERFLOW_INT; |
1122 | static bool print_sdram_info_once; |
1123 | enum emc_dram_type dram_type; |
1124 | const char *dram_type_str; |
1125 | unsigned int emem_numdev; |
1126 | |
1127 | fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5); |
1128 | dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK; |
1129 | |
1130 | emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2); |
1131 | |
1132 | /* enable EMC and CAR to handshake on PLL divider/source changes */ |
1133 | emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE; |
1134 | |
1135 | /* configure clock change mode accordingly to DRAM type */ |
1136 | switch (dram_type) { |
1137 | case DRAM_TYPE_LPDDR2: |
1138 | emc_cfg |= EMC_CLKCHANGE_PD_ENABLE; |
1139 | emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE; |
1140 | break; |
1141 | |
1142 | default: |
1143 | emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE; |
1144 | emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE; |
1145 | break; |
1146 | } |
1147 | |
1148 | writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2); |
1149 | |
1150 | /* initialize interrupt */ |
1151 | writel_relaxed(intmask, emc->regs + EMC_INTMASK); |
1152 | writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS); |
1153 | |
1154 | /* ensure that unwanted debug features are disabled */ |
1155 | emc_dbg = readl_relaxed(emc->regs + EMC_DBG); |
1156 | emc_dbg |= EMC_DBG_CFG_PRIORITY; |
1157 | emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY; |
1158 | emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE; |
1159 | emc_dbg &= ~EMC_DBG_FORCE_UPDATE; |
1160 | writel_relaxed(emc_dbg, emc->regs + EMC_DBG); |
1161 | |
1162 | switch (dram_type) { |
1163 | case DRAM_TYPE_DDR1: |
dram_type_str = "DDR1";
1165 | break; |
1166 | case DRAM_TYPE_LPDDR2: |
dram_type_str = "LPDDR2";
1168 | break; |
1169 | case DRAM_TYPE_DDR2: |
dram_type_str = "DDR2";
1171 | break; |
1172 | case DRAM_TYPE_DDR3: |
dram_type_str = "DDR3";
1174 | break; |
1175 | } |
1176 | |
1177 | emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG); |
1178 | emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1; |
1179 | |
dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev,
dram_type_str, emem_numdev == 2 ? "devices" : "device");
1182 | |
1183 | if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) { |
1184 | while (emem_numdev--) |
emc_read_lpddr_sdram_info(emc, emem_numdev);
1186 | |
1187 | print_sdram_info_once = true; |
1188 | } |
1189 | |
1190 | return 0; |
1191 | } |
1192 | |
1193 | static long emc_round_rate(unsigned long rate, |
1194 | unsigned long min_rate, |
1195 | unsigned long max_rate, |
1196 | void *arg) |
1197 | { |
1198 | struct emc_timing *timing = NULL; |
1199 | struct tegra_emc *emc = arg; |
1200 | unsigned int i; |
1201 | |
1202 | if (!emc->num_timings) |
return clk_get_rate(emc->clk);
1204 | |
1205 | min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate); |
1206 | |
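/*
 * Pick the first timing that satisfies the requested rate; if it would
 * exceed max_rate, step back to the closest lower timing, provided that
 * timing still honours min_rate.
 */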
1207 | for (i = 0; i < emc->num_timings; i++) { |
1208 | if (emc->timings[i].rate < rate && i != emc->num_timings - 1) |
1209 | continue; |
1210 | |
1211 | if (emc->timings[i].rate > max_rate) { |
1212 | i = max(i, 1u) - 1; |
1213 | |
1214 | if (emc->timings[i].rate < min_rate) |
1215 | break; |
1216 | } |
1217 | |
1218 | if (emc->timings[i].rate < min_rate) |
1219 | continue; |
1220 | |
1221 | timing = &emc->timings[i]; |
1222 | break; |
1223 | } |
1224 | |
1225 | if (!timing) { |
dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
1227 | rate, min_rate, max_rate); |
1228 | return -EINVAL; |
1229 | } |
1230 | |
1231 | return timing->rate; |
1232 | } |
1233 | |
1234 | static void tegra_emc_rate_requests_init(struct tegra_emc *emc) |
1235 | { |
1236 | unsigned int i; |
1237 | |
1238 | for (i = 0; i < EMC_RATE_TYPE_MAX; i++) { |
1239 | emc->requested_rate[i].min_rate = 0; |
1240 | emc->requested_rate[i].max_rate = ULONG_MAX; |
1241 | } |
1242 | } |
1243 | |
1244 | static int emc_request_rate(struct tegra_emc *emc, |
1245 | unsigned long new_min_rate, |
1246 | unsigned long new_max_rate, |
1247 | enum emc_rate_request_type type) |
1248 | { |
1249 | struct emc_rate_request *req = emc->requested_rate; |
1250 | unsigned long min_rate = 0, max_rate = ULONG_MAX; |
1251 | unsigned int i; |
1252 | int err; |
1253 | |
1254 | /* select minimum and maximum rates among the requested rates */ |
1255 | for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) { |
1256 | if (i == type) { |
1257 | min_rate = max(new_min_rate, min_rate); |
1258 | max_rate = min(new_max_rate, max_rate); |
1259 | } else { |
1260 | min_rate = max(req->min_rate, min_rate); |
1261 | max_rate = min(req->max_rate, max_rate); |
1262 | } |
1263 | } |
1264 | |
1265 | if (min_rate > max_rate) { |
dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
1267 | __func__, type, min_rate, max_rate); |
1268 | return -ERANGE; |
1269 | } |
1270 | |
1271 | /* |
1272 | * EMC rate-changes should go via OPP API because it manages voltage |
1273 | * changes. |
1274 | */ |
err = dev_pm_opp_set_rate(emc->dev, min_rate);
1276 | if (err) |
1277 | return err; |
1278 | |
1279 | emc->requested_rate[type].min_rate = new_min_rate; |
1280 | emc->requested_rate[type].max_rate = new_max_rate; |
1281 | |
1282 | return 0; |
1283 | } |
1284 | |
1285 | static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate, |
1286 | enum emc_rate_request_type type) |
1287 | { |
1288 | struct emc_rate_request *req = &emc->requested_rate[type]; |
1289 | int ret; |
1290 | |
1291 | mutex_lock(&emc->rate_lock); |
ret = emc_request_rate(emc, rate, req->max_rate, type);
mutex_unlock(&emc->rate_lock);
1294 | |
1295 | return ret; |
1296 | } |
1297 | |
1298 | static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate, |
1299 | enum emc_rate_request_type type) |
1300 | { |
1301 | struct emc_rate_request *req = &emc->requested_rate[type]; |
1302 | int ret; |
1303 | |
1304 | mutex_lock(&emc->rate_lock); |
ret = emc_request_rate(emc, req->min_rate, rate, type);
mutex_unlock(&emc->rate_lock);
1307 | |
1308 | return ret; |
1309 | } |
1310 | |
1311 | /* |
1312 | * debugfs interface |
1313 | * |
1314 | * The memory controller driver exposes some files in debugfs that can be used |
1315 | * to control the EMC frequency. The top-level directory can be found here: |
1316 | * |
1317 | * /sys/kernel/debug/emc |
1318 | * |
1319 | * It contains the following files: |
1320 | * |
1321 | * - available_rates: This file contains a list of valid, space-separated |
1322 | * EMC frequencies. |
1323 | * |
1324 | * - min_rate: Writing a value to this file sets the given frequency as the |
1325 | * floor of the permitted range. If this is higher than the currently |
1326 | * configured EMC frequency, this will cause the frequency to be |
1327 | * increased so that it stays within the valid range. |
1328 | * |
* - max_rate: Similarly to the min_rate file, writing a value to this file
1330 | * sets the given frequency as the ceiling of the permitted range. If |
1331 | * the value is lower than the currently configured EMC frequency, this |
1332 | * will cause the frequency to be decreased so that it stays within the |
1333 | * valid range. |
1334 | */ |
1335 | |
1336 | static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate) |
1337 | { |
1338 | unsigned int i; |
1339 | |
1340 | for (i = 0; i < emc->num_timings; i++) |
1341 | if (rate == emc->timings[i].rate) |
1342 | return true; |
1343 | |
1344 | return false; |
1345 | } |
1346 | |
1347 | static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data) |
1348 | { |
1349 | struct tegra_emc *emc = s->private; |
const char *prefix = "";
1351 | unsigned int i; |
1352 | |
1353 | for (i = 0; i < emc->num_timings; i++) { |
seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
prefix = " ";
1356 | } |
1357 | |
seq_puts(s, "\n");
1359 | |
1360 | return 0; |
1361 | } |
1362 | DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates); |
1363 | |
1364 | static int tegra_emc_debug_min_rate_get(void *data, u64 *rate) |
1365 | { |
1366 | struct tegra_emc *emc = data; |
1367 | |
1368 | *rate = emc->debugfs.min_rate; |
1369 | |
1370 | return 0; |
1371 | } |
1372 | |
1373 | static int tegra_emc_debug_min_rate_set(void *data, u64 rate) |
1374 | { |
1375 | struct tegra_emc *emc = data; |
1376 | int err; |
1377 | |
1378 | if (!tegra_emc_validate_rate(emc, rate)) |
1379 | return -EINVAL; |
1380 | |
err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
1382 | if (err < 0) |
1383 | return err; |
1384 | |
1385 | emc->debugfs.min_rate = rate; |
1386 | |
1387 | return 0; |
1388 | } |
1389 | |
1390 | DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops, |
1391 | tegra_emc_debug_min_rate_get, |
tegra_emc_debug_min_rate_set, "%llu\n");
1393 | |
1394 | static int tegra_emc_debug_max_rate_get(void *data, u64 *rate) |
1395 | { |
1396 | struct tegra_emc *emc = data; |
1397 | |
1398 | *rate = emc->debugfs.max_rate; |
1399 | |
1400 | return 0; |
1401 | } |
1402 | |
1403 | static int tegra_emc_debug_max_rate_set(void *data, u64 rate) |
1404 | { |
1405 | struct tegra_emc *emc = data; |
1406 | int err; |
1407 | |
1408 | if (!tegra_emc_validate_rate(emc, rate)) |
1409 | return -EINVAL; |
1410 | |
err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
1412 | if (err < 0) |
1413 | return err; |
1414 | |
1415 | emc->debugfs.max_rate = rate; |
1416 | |
1417 | return 0; |
1418 | } |
1419 | |
1420 | DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops, |
1421 | tegra_emc_debug_max_rate_get, |
tegra_emc_debug_max_rate_set, "%llu\n");
1423 | |
1424 | static void tegra_emc_debugfs_init(struct tegra_emc *emc) |
1425 | { |
1426 | struct device *dev = emc->dev; |
1427 | unsigned int i; |
1428 | int err; |
1429 | |
1430 | emc->debugfs.min_rate = ULONG_MAX; |
1431 | emc->debugfs.max_rate = 0; |
1432 | |
1433 | for (i = 0; i < emc->num_timings; i++) { |
1434 | if (emc->timings[i].rate < emc->debugfs.min_rate) |
1435 | emc->debugfs.min_rate = emc->timings[i].rate; |
1436 | |
1437 | if (emc->timings[i].rate > emc->debugfs.max_rate) |
1438 | emc->debugfs.max_rate = emc->timings[i].rate; |
1439 | } |
1440 | |
1441 | if (!emc->num_timings) { |
emc->debugfs.min_rate = clk_get_rate(emc->clk);
1443 | emc->debugfs.max_rate = emc->debugfs.min_rate; |
1444 | } |
1445 | |
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
emc->debugfs.max_rate);
1448 | if (err < 0) { |
dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
1450 | emc->debugfs.min_rate, emc->debugfs.max_rate, |
1451 | emc->clk); |
1452 | } |
1453 | |
emc->debugfs.root = debugfs_create_dir("emc", NULL);
1455 | |
debugfs_create_file("available_rates", 0444, emc->debugfs.root,
emc, &tegra_emc_debug_available_rates_fops);
debugfs_create_file("min_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_min_rate_fops);
debugfs_create_file("max_rate", 0644, emc->debugfs.root,
emc, &tegra_emc_debug_max_rate_fops);
1462 | } |
1463 | |
1464 | static inline struct tegra_emc * |
1465 | to_tegra_emc_provider(struct icc_provider *provider) |
1466 | { |
1467 | return container_of(provider, struct tegra_emc, provider); |
1468 | } |
1469 | |
1470 | static struct icc_node_data * |
1471 | emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data) |
1472 | { |
1473 | struct icc_provider *provider = data; |
1474 | struct icc_node_data *ndata; |
1475 | struct icc_node *node; |
1476 | |
1477 | /* External Memory is the only possible ICC route */ |
1478 | list_for_each_entry(node, &provider->nodes, node_list) { |
1479 | if (node->id != TEGRA_ICC_EMEM) |
1480 | continue; |
1481 | |
ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
1483 | if (!ndata) |
return ERR_PTR(-ENOMEM);
1485 | |
1486 | /* |
1487 | * SRC and DST nodes should have matching TAG in order to have |
1488 | * it set by default for a requested path. |
1489 | */ |
1490 | ndata->tag = TEGRA_MC_ICC_TAG_ISO; |
1491 | ndata->node = node; |
1492 | |
1493 | return ndata; |
1494 | } |
1495 | |
return ERR_PTR(-EPROBE_DEFER);
1497 | } |
1498 | |
1499 | static int emc_icc_set(struct icc_node *src, struct icc_node *dst) |
1500 | { |
struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
1502 | unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw); |
1503 | unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw); |
1504 | unsigned long long rate = max(avg_bw, peak_bw); |
1505 | const unsigned int dram_data_bus_width_bytes = 4; |
1506 | const unsigned int ddr = 2; |
1507 | int err; |
1508 | |
1509 | /* |
* Tegra30 EMC runs at the SDRAM bus clock rate, which is half of the
* peak data rate because data is sampled on both EMC clock edges.
1513 | */ |
1514 | do_div(rate, ddr * dram_data_bus_width_bytes); |
1515 | rate = min_t(u64, rate, U32_MAX); |
1516 | |
err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
1518 | if (err) |
1519 | return err; |
1520 | |
1521 | return 0; |
1522 | } |
1523 | |
1524 | static int tegra_emc_interconnect_init(struct tegra_emc *emc) |
1525 | { |
1526 | const struct tegra_mc_soc *soc = emc->mc->soc; |
1527 | struct icc_node *node; |
1528 | int err; |
1529 | |
1530 | emc->provider.dev = emc->dev; |
1531 | emc->provider.set = emc_icc_set; |
1532 | emc->provider.data = &emc->provider; |
1533 | emc->provider.aggregate = soc->icc_ops->aggregate; |
1534 | emc->provider.xlate_extended = emc_of_icc_xlate_extended; |
1535 | |
icc_provider_init(&emc->provider);
1537 | |
1538 | /* create External Memory Controller node */ |
1539 | node = icc_node_create(TEGRA_ICC_EMC); |
if (IS_ERR(node)) {
err = PTR_ERR(node);
1542 | goto err_msg; |
1543 | } |
1544 | |
node->name = "External Memory Controller";
icc_node_add(node, &emc->provider);
1547 | |
1548 | /* link External Memory Controller to External Memory (DRAM) */ |
1549 | err = icc_link_create(node, TEGRA_ICC_EMEM); |
1550 | if (err) |
1551 | goto remove_nodes; |
1552 | |
1553 | /* create External Memory node */ |
1554 | node = icc_node_create(TEGRA_ICC_EMEM); |
if (IS_ERR(node)) {
err = PTR_ERR(node);
1557 | goto remove_nodes; |
1558 | } |
1559 | |
node->name = "External Memory (DRAM)";
icc_node_add(node, &emc->provider);
1562 | |
err = icc_provider_register(&emc->provider);
1564 | if (err) |
1565 | goto remove_nodes; |
1566 | |
1567 | return 0; |
1568 | |
1569 | remove_nodes: |
icc_nodes_remove(&emc->provider);
1571 | err_msg: |
dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
1573 | |
1574 | return err; |
1575 | } |
1576 | |
1577 | static void devm_tegra_emc_unset_callback(void *data) |
1578 | { |
1579 | tegra20_clk_set_emc_round_callback(NULL, NULL); |
1580 | } |
1581 | |
1582 | static void devm_tegra_emc_unreg_clk_notifier(void *data) |
1583 | { |
1584 | struct tegra_emc *emc = data; |
1585 | |
clk_notifier_unregister(emc->clk, &emc->clk_nb);
1587 | } |
1588 | |
1589 | static int tegra_emc_init_clk(struct tegra_emc *emc) |
1590 | { |
1591 | int err; |
1592 | |
tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
1594 | |
1595 | err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback, |
1596 | NULL); |
1597 | if (err) |
1598 | return err; |
1599 | |
emc->clk = devm_clk_get(emc->dev, NULL);
if (IS_ERR(emc->clk)) {
dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
return PTR_ERR(emc->clk);
1604 | } |
1605 | |
err = clk_notifier_register(emc->clk, &emc->clk_nb);
1607 | if (err) { |
dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
1609 | return err; |
1610 | } |
1611 | |
1612 | err = devm_add_action_or_reset(emc->dev, |
1613 | devm_tegra_emc_unreg_clk_notifier, emc); |
1614 | if (err) |
1615 | return err; |
1616 | |
1617 | return 0; |
1618 | } |
1619 | |
1620 | static int tegra_emc_probe(struct platform_device *pdev) |
1621 | { |
1622 | struct tegra_core_opp_params opp_params = {}; |
1623 | struct device_node *np; |
1624 | struct tegra_emc *emc; |
1625 | int err; |
1626 | |
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
1628 | if (!emc) |
1629 | return -ENOMEM; |
1630 | |
emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
if (IS_ERR(emc->mc))
return PTR_ERR(emc->mc);
1634 | |
1635 | mutex_init(&emc->rate_lock); |
1636 | emc->clk_nb.notifier_call = emc_clk_change_notify; |
1637 | emc->dev = &pdev->dev; |
1638 | |
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
1642 | |
1643 | err = emc_setup_hw(emc); |
1644 | if (err) |
1645 | return err; |
1646 | |
1647 | np = emc_find_node_by_ram_code(emc); |
1648 | if (np) { |
err = emc_load_timings_from_dt(emc, np);
of_node_put(np);
1651 | if (err) |
1652 | return err; |
1653 | } |
1654 | |
1655 | err = platform_get_irq(pdev, 0); |
1656 | if (err < 0) |
1657 | return err; |
1658 | |
1659 | emc->irq = err; |
1660 | |
err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
1663 | if (err) { |
dev_err(&pdev->dev, "failed to request irq: %d\n", err);
1665 | return err; |
1666 | } |
1667 | |
1668 | err = tegra_emc_init_clk(emc); |
1669 | if (err) |
1670 | return err; |
1671 | |
1672 | opp_params.init_state = true; |
1673 | |
err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
1675 | if (err) |
1676 | return err; |
1677 | |
platform_set_drvdata(pdev, emc);
1679 | tegra_emc_rate_requests_init(emc); |
1680 | tegra_emc_debugfs_init(emc); |
1681 | tegra_emc_interconnect_init(emc); |
1682 | |
1683 | /* |
* Don't allow the kernel module to be unloaded. Unloading adds some
* extra complexity which isn't really worth the effort in the case of
* this driver.
1687 | */ |
1688 | try_module_get(THIS_MODULE); |
1689 | |
1690 | return 0; |
1691 | } |
1692 | |
1693 | static int tegra_emc_suspend(struct device *dev) |
1694 | { |
1695 | struct tegra_emc *emc = dev_get_drvdata(dev); |
1696 | int err; |
1697 | |
1698 | /* take exclusive control over the clock's rate */ |
err = clk_rate_exclusive_get(emc->clk);
1700 | if (err) { |
dev_err(emc->dev, "failed to acquire clk: %d\n", err);
1702 | return err; |
1703 | } |
1704 | |
/* suspending in a bad state will hang the machine */
if (WARN(emc->bad_state, "hardware in a bad state\n"))
1707 | return -EINVAL; |
1708 | |
1709 | emc->bad_state = true; |
1710 | |
1711 | return 0; |
1712 | } |
1713 | |
1714 | static int tegra_emc_resume(struct device *dev) |
1715 | { |
1716 | struct tegra_emc *emc = dev_get_drvdata(dev); |
1717 | |
1718 | emc_setup_hw(emc); |
1719 | emc->bad_state = false; |
1720 | |
clk_rate_exclusive_put(emc->clk);
1722 | |
1723 | return 0; |
1724 | } |
1725 | |
1726 | static const struct dev_pm_ops tegra_emc_pm_ops = { |
1727 | .suspend = tegra_emc_suspend, |
1728 | .resume = tegra_emc_resume, |
1729 | }; |
1730 | |
1731 | static const struct of_device_id tegra_emc_of_match[] = { |
{ .compatible = "nvidia,tegra30-emc", },
1733 | {}, |
1734 | }; |
1735 | MODULE_DEVICE_TABLE(of, tegra_emc_of_match); |
1736 | |
1737 | static struct platform_driver tegra_emc_driver = { |
1738 | .probe = tegra_emc_probe, |
1739 | .driver = { |
.name = "tegra30-emc",
1741 | .of_match_table = tegra_emc_of_match, |
1742 | .pm = &tegra_emc_pm_ops, |
1743 | .suppress_bind_attrs = true, |
1744 | .sync_state = icc_sync_state, |
1745 | }, |
1746 | }; |
1747 | module_platform_driver(tegra_emc_driver); |
1748 | |
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
MODULE_LICENSE("GPL v2");
1752 | |