1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2013-2016, Linux Foundation. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/acpi.h> |
7 | #include <linux/clk.h> |
8 | #include <linux/delay.h> |
9 | #include <linux/devfreq.h> |
10 | #include <linux/gpio/consumer.h> |
11 | #include <linux/interconnect.h> |
12 | #include <linux/module.h> |
13 | #include <linux/of.h> |
14 | #include <linux/phy/phy.h> |
15 | #include <linux/platform_device.h> |
16 | #include <linux/reset-controller.h> |
17 | #include <linux/time.h> |
18 | |
19 | #include <soc/qcom/ice.h> |
20 | |
21 | #include <ufs/ufshcd.h> |
22 | #include <ufs/ufshci.h> |
23 | #include <ufs/ufs_quirks.h> |
24 | #include <ufs/unipro.h> |
25 | #include "ufshcd-pltfrm.h" |
26 | #include "ufs-qcom.h" |
27 | |
28 | #define MCQ_QCFGPTR_MASK GENMASK(7, 0) |
29 | #define MCQ_QCFGPTR_UNIT 0x200 |
30 | #define MCQ_SQATTR_OFFSET(c) \ |
31 | ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) |
32 | #define MCQ_QCFG_SIZE 0x40 |
33 | |
/* Select values for the UFS debug test bus multiplexer. */
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};
49 | |
50 | #define QCOM_UFS_MAX_GEAR 5 |
51 | #define QCOM_UFS_MAX_LANE 2 |
52 | |
/* First index into ufs_qcom_bw_table: overall link power mode. */
enum {
	MODE_MIN,
	MODE_PWM,
	MODE_HS_RA,
	MODE_HS_RB,
	MODE_MAX,
};
60 | |
61 | static const struct __ufs_qcom_bw_table { |
62 | u32 mem_bw; |
63 | u32 cfg_bw; |
64 | } ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = { |
65 | [MODE_MIN][0][0] = { .mem_bw: 0, .cfg_bw: 0 }, /* Bandwidth values in KB/s */ |
66 | [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 }, |
67 | [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { .mem_bw: 1844, .cfg_bw: 1000 }, |
68 | [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { .mem_bw: 3688, .cfg_bw: 1000 }, |
69 | [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { .mem_bw: 7376, .cfg_bw: 1000 }, |
70 | [MODE_PWM][UFS_PWM_G5][UFS_LANE_1] = { .mem_bw: 14752, .cfg_bw: 1000 }, |
71 | [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { .mem_bw: 1844, .cfg_bw: 1000 }, |
72 | [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { .mem_bw: 3688, .cfg_bw: 1000 }, |
73 | [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { .mem_bw: 7376, .cfg_bw: 1000 }, |
74 | [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { .mem_bw: 14752, .cfg_bw: 1000 }, |
75 | [MODE_PWM][UFS_PWM_G5][UFS_LANE_2] = { .mem_bw: 29504, .cfg_bw: 1000 }, |
76 | [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { .mem_bw: 127796, .cfg_bw: 1000 }, |
77 | [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { .mem_bw: 255591, .cfg_bw: 1000 }, |
78 | [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { .mem_bw: 1492582, .cfg_bw: 102400 }, |
79 | [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { .mem_bw: 2915200, .cfg_bw: 204800 }, |
80 | [MODE_HS_RA][UFS_HS_G5][UFS_LANE_1] = { .mem_bw: 5836800, .cfg_bw: 409600 }, |
81 | [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { .mem_bw: 255591, .cfg_bw: 1000 }, |
82 | [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { .mem_bw: 511181, .cfg_bw: 1000 }, |
83 | [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { .mem_bw: 1492582, .cfg_bw: 204800 }, |
84 | [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { .mem_bw: 2915200, .cfg_bw: 409600 }, |
85 | [MODE_HS_RA][UFS_HS_G5][UFS_LANE_2] = { .mem_bw: 5836800, .cfg_bw: 819200 }, |
86 | [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { .mem_bw: 149422, .cfg_bw: 1000 }, |
87 | [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { .mem_bw: 298189, .cfg_bw: 1000 }, |
88 | [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { .mem_bw: 1492582, .cfg_bw: 102400 }, |
89 | [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { .mem_bw: 2915200, .cfg_bw: 204800 }, |
90 | [MODE_HS_RB][UFS_HS_G5][UFS_LANE_1] = { .mem_bw: 5836800, .cfg_bw: 409600 }, |
91 | [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { .mem_bw: 298189, .cfg_bw: 1000 }, |
92 | [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { .mem_bw: 596378, .cfg_bw: 1000 }, |
93 | [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { .mem_bw: 1492582, .cfg_bw: 204800 }, |
94 | [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { .mem_bw: 2915200, .cfg_bw: 409600 }, |
95 | [MODE_HS_RB][UFS_HS_G5][UFS_LANE_2] = { .mem_bw: 5836800, .cfg_bw: 819200 }, |
96 | [MODE_MAX][0][0] = { .mem_bw: 7643136, .cfg_bw: 307200 }, |
97 | }; |
98 | |
99 | static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); |
100 | static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up); |
101 | |
/* Map a reset_controller_dev back to its containing ufs_qcom_host. */
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
	return container_of(rcd, struct ufs_qcom_host, rcdev);
}
106 | |
107 | #ifdef CONFIG_SCSI_UFS_CRYPTO |
108 | |
109 | static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) |
110 | { |
111 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) |
112 | qcom_ice_enable(ice: host->ice); |
113 | } |
114 | |
115 | static int ufs_qcom_ice_init(struct ufs_qcom_host *host) |
116 | { |
117 | struct ufs_hba *hba = host->hba; |
118 | struct device *dev = hba->dev; |
119 | struct qcom_ice *ice; |
120 | |
121 | ice = of_qcom_ice_get(dev); |
122 | if (ice == ERR_PTR(error: -EOPNOTSUPP)) { |
123 | dev_warn(dev, "Disabling inline encryption support\n" ); |
124 | ice = NULL; |
125 | } |
126 | |
127 | if (IS_ERR_OR_NULL(ptr: ice)) |
128 | return PTR_ERR_OR_ZERO(ptr: ice); |
129 | |
130 | host->ice = ice; |
131 | hba->caps |= UFSHCD_CAP_CRYPTO; |
132 | |
133 | return 0; |
134 | } |
135 | |
136 | static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) |
137 | { |
138 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) |
139 | return qcom_ice_resume(ice: host->ice); |
140 | |
141 | return 0; |
142 | } |
143 | |
144 | static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) |
145 | { |
146 | if (host->hba->caps & UFSHCD_CAP_CRYPTO) |
147 | return qcom_ice_suspend(ice: host->ice); |
148 | |
149 | return 0; |
150 | } |
151 | |
152 | static int ufs_qcom_ice_program_key(struct ufs_hba *hba, |
153 | const union ufs_crypto_cfg_entry *cfg, |
154 | int slot) |
155 | { |
156 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
157 | union ufs_crypto_cap_entry cap; |
158 | bool config_enable = |
159 | cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE; |
160 | |
161 | /* Only AES-256-XTS has been tested so far. */ |
162 | cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; |
163 | if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || |
164 | cap.key_size != UFS_CRYPTO_KEY_SIZE_256) |
165 | return -EOPNOTSUPP; |
166 | |
167 | if (config_enable) |
168 | return qcom_ice_program_key(ice: host->ice, |
169 | algorithm_id: QCOM_ICE_CRYPTO_ALG_AES_XTS, |
170 | key_size: QCOM_ICE_CRYPTO_KEY_SIZE_256, |
171 | crypto_key: cfg->crypto_key, |
172 | data_unit_size: cfg->data_unit_size, slot); |
173 | else |
174 | return qcom_ice_evict_key(ice: host->ice, slot); |
175 | } |
176 | |
177 | #else |
178 | |
#define ufs_qcom_ice_program_key NULL

/* No-op stubs used when CONFIG_SCSI_UFS_CRYPTO is disabled. */
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
}

static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
	return 0;
}

static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
{
	return 0;
}

static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
	return 0;
}
199 | #endif |
200 | |
201 | static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) |
202 | { |
203 | if (!host->is_lane_clks_enabled) |
204 | return; |
205 | |
206 | clk_bulk_disable_unprepare(num_clks: host->num_clks, clks: host->clks); |
207 | |
208 | host->is_lane_clks_enabled = false; |
209 | } |
210 | |
211 | static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) |
212 | { |
213 | int err; |
214 | |
215 | err = clk_bulk_prepare_enable(num_clks: host->num_clks, clks: host->clks); |
216 | if (err) |
217 | return err; |
218 | |
219 | host->is_lane_clks_enabled = true; |
220 | |
221 | return 0; |
222 | } |
223 | |
224 | static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) |
225 | { |
226 | int err; |
227 | struct device *dev = host->hba->dev; |
228 | |
229 | if (has_acpi_companion(dev)) |
230 | return 0; |
231 | |
232 | err = devm_clk_bulk_get_all(dev, clks: &host->clks); |
233 | if (err <= 0) |
234 | return err; |
235 | |
236 | host->num_clks = err; |
237 | |
238 | return 0; |
239 | } |
240 | |
241 | static int ufs_qcom_check_hibern8(struct ufs_hba *hba) |
242 | { |
243 | int err; |
244 | u32 tx_fsm_val; |
245 | unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); |
246 | |
247 | do { |
248 | err = ufshcd_dme_get(hba, |
249 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, |
250 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), |
251 | mib_val: &tx_fsm_val); |
252 | if (err || tx_fsm_val == TX_FSM_HIBERN8) |
253 | break; |
254 | |
255 | /* sleep for max. 200us */ |
256 | usleep_range(min: 100, max: 200); |
257 | } while (time_before(jiffies, timeout)); |
258 | |
259 | /* |
260 | * we might have scheduled out for long during polling so |
261 | * check the state again. |
262 | */ |
263 | if (time_after(jiffies, timeout)) |
264 | err = ufshcd_dme_get(hba, |
265 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, |
266 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), |
267 | mib_val: &tx_fsm_val); |
268 | |
269 | if (err) { |
270 | dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n" , |
271 | __func__, err); |
272 | } else if (tx_fsm_val != TX_FSM_HIBERN8) { |
273 | err = tx_fsm_val; |
274 | dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n" , |
275 | __func__, err); |
276 | } |
277 | |
278 | return err; |
279 | } |
280 | |
281 | static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) |
282 | { |
283 | ufshcd_rmwl(hba: host->hba, QUNIPRO_SEL, QUNIPRO_SEL, reg: REG_UFS_CFG1); |
284 | |
285 | if (host->hw_ver.major >= 0x05) |
286 | ufshcd_rmwl(hba: host->hba, QUNIPRO_G4_SEL, val: 0, reg: REG_UFS_CFG0); |
287 | |
288 | /* make sure above configuration is applied before we return */ |
289 | mb(); |
290 | } |
291 | |
292 | /* |
293 | * ufs_qcom_host_reset - reset host controller and PHY |
294 | */ |
295 | static int ufs_qcom_host_reset(struct ufs_hba *hba) |
296 | { |
297 | int ret; |
298 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
299 | bool reenable_intr; |
300 | |
301 | if (!host->core_reset) |
302 | return 0; |
303 | |
304 | reenable_intr = hba->is_irq_enabled; |
305 | ufshcd_disable_irq(hba); |
306 | |
307 | ret = reset_control_assert(rstc: host->core_reset); |
308 | if (ret) { |
309 | dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n" , |
310 | __func__, ret); |
311 | return ret; |
312 | } |
313 | |
314 | /* |
315 | * The hardware requirement for delay between assert/deassert |
316 | * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to |
317 | * ~125us (4/32768). To be on the safe side add 200us delay. |
318 | */ |
319 | usleep_range(min: 200, max: 210); |
320 | |
321 | ret = reset_control_deassert(rstc: host->core_reset); |
322 | if (ret) { |
323 | dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n" , |
324 | __func__, ret); |
325 | return ret; |
326 | } |
327 | |
328 | usleep_range(min: 1000, max: 1100); |
329 | |
330 | if (reenable_intr) |
331 | ufshcd_enable_irq(hba); |
332 | |
333 | return 0; |
334 | } |
335 | |
336 | static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba) |
337 | { |
338 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
339 | |
340 | if (host->hw_ver.major >= 0x4) |
341 | return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0)); |
342 | |
343 | /* Default is HS-G3 */ |
344 | return UFS_HS_G3; |
345 | } |
346 | |
347 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) |
348 | { |
349 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
350 | struct ufs_host_params *host_params = &host->host_params; |
351 | struct phy *phy = host->generic_phy; |
352 | enum phy_mode mode; |
353 | int ret; |
354 | |
355 | /* |
356 | * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations. |
357 | * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A, |
358 | * so that the subsequent power mode change shall stick to Rate-A. |
359 | */ |
360 | if (host->hw_ver.major == 0x5) { |
361 | if (host->phy_gear == UFS_HS_G5) |
362 | host_params->hs_rate = PA_HS_MODE_A; |
363 | else |
364 | host_params->hs_rate = PA_HS_MODE_B; |
365 | } |
366 | |
367 | mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A; |
368 | |
369 | /* Reset UFS Host Controller and PHY */ |
370 | ret = ufs_qcom_host_reset(hba); |
371 | if (ret) |
372 | return ret; |
373 | |
374 | /* phy initialization - calibrate the phy */ |
375 | ret = phy_init(phy); |
376 | if (ret) { |
377 | dev_err(hba->dev, "%s: phy init failed, ret = %d\n" , |
378 | __func__, ret); |
379 | return ret; |
380 | } |
381 | |
382 | ret = phy_set_mode_ext(phy, mode, submode: host->phy_gear); |
383 | if (ret) |
384 | goto out_disable_phy; |
385 | |
386 | /* power on phy - start serdes and phy's power and clocks */ |
387 | ret = phy_power_on(phy); |
388 | if (ret) { |
389 | dev_err(hba->dev, "%s: phy power on failed, ret = %d\n" , |
390 | __func__, ret); |
391 | goto out_disable_phy; |
392 | } |
393 | |
394 | ufs_qcom_select_unipro_mode(host); |
395 | |
396 | return 0; |
397 | |
398 | out_disable_phy: |
399 | phy_exit(phy); |
400 | |
401 | return ret; |
402 | } |
403 | |
404 | /* |
405 | * The UTP controller has a number of internal clock gating cells (CGCs). |
406 | * Internal hardware sub-modules within the UTP controller control the CGCs. |
407 | * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved |
408 | * in a specific operation, UTP controller CGCs are by default disabled and |
409 | * this function enables them (after every UFS link startup) to save some power |
410 | * leakage. |
411 | */ |
412 | static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) |
413 | { |
414 | ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, |
415 | reg: REG_UFS_CFG2); |
416 | |
417 | /* Ensure that HW clock gating is enabled before next operations */ |
418 | mb(); |
419 | } |
420 | |
421 | static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, |
422 | enum ufs_notify_change_status status) |
423 | { |
424 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
425 | int err; |
426 | |
427 | switch (status) { |
428 | case PRE_CHANGE: |
429 | err = ufs_qcom_power_up_sequence(hba); |
430 | if (err) |
431 | return err; |
432 | |
433 | /* |
434 | * The PHY PLL output is the source of tx/rx lane symbol |
435 | * clocks, hence, enable the lane clocks only after PHY |
436 | * is initialized. |
437 | */ |
438 | err = ufs_qcom_enable_lane_clks(host); |
439 | break; |
440 | case POST_CHANGE: |
441 | /* check if UFS PHY moved from DISABLED to HIBERN8 */ |
442 | err = ufs_qcom_check_hibern8(hba); |
443 | ufs_qcom_enable_hw_clk_gating(hba); |
444 | ufs_qcom_ice_enable(host); |
445 | break; |
446 | default: |
447 | dev_err(hba->dev, "%s: invalid status %d\n" , __func__, status); |
448 | err = -EINVAL; |
449 | break; |
450 | } |
451 | return err; |
452 | } |
453 | |
454 | /** |
455 | * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers |
456 | * |
457 | * @hba: host controller instance |
458 | * @gear: Current operating gear |
459 | * @hs: current power mode |
460 | * @rate: current operating rate (A or B) |
461 | * @update_link_startup_timer: indicate if link_start ongoing |
462 | * @is_pre_scale_up: flag to check if pre scale up condition. |
463 | * Return: zero for success and non-zero in case of a failure. |
464 | */ |
465 | static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, |
466 | u32 hs, u32 rate, bool update_link_startup_timer, |
467 | bool is_pre_scale_up) |
468 | { |
469 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
470 | struct ufs_clk_info *clki; |
471 | unsigned long core_clk_rate = 0; |
472 | u32 core_clk_cycles_per_us; |
473 | |
474 | /* |
475 | * UTP controller uses SYS1CLK_1US_REG register for Interrupt |
476 | * Aggregation logic. |
477 | * It is mandatory to write SYS1CLK_1US_REG register on UFS host |
478 | * controller V4.0.0 onwards. |
479 | */ |
480 | if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) |
481 | return 0; |
482 | |
483 | if (gear == 0) { |
484 | dev_err(hba->dev, "%s: invalid gear = %d\n" , __func__, gear); |
485 | return -EINVAL; |
486 | } |
487 | |
488 | list_for_each_entry(clki, &hba->clk_list_head, list) { |
489 | if (!strcmp(clki->name, "core_clk" )) { |
490 | if (is_pre_scale_up) |
491 | core_clk_rate = clki->max_freq; |
492 | else |
493 | core_clk_rate = clk_get_rate(clk: clki->clk); |
494 | break; |
495 | } |
496 | |
497 | } |
498 | |
499 | /* If frequency is smaller than 1MHz, set to 1MHz */ |
500 | if (core_clk_rate < DEFAULT_CLK_RATE_HZ) |
501 | core_clk_rate = DEFAULT_CLK_RATE_HZ; |
502 | |
503 | core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; |
504 | if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { |
505 | ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); |
506 | /* |
507 | * make sure above write gets applied before we return from |
508 | * this function. |
509 | */ |
510 | mb(); |
511 | } |
512 | |
513 | return 0; |
514 | } |
515 | |
516 | static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, |
517 | enum ufs_notify_change_status status) |
518 | { |
519 | int err = 0; |
520 | |
521 | switch (status) { |
522 | case PRE_CHANGE: |
523 | if (ufs_qcom_cfg_timers(hba, gear: UFS_PWM_G1, hs: SLOWAUTO_MODE, |
524 | rate: 0, update_link_startup_timer: true, is_pre_scale_up: false)) { |
525 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n" , |
526 | __func__); |
527 | return -EINVAL; |
528 | } |
529 | |
530 | err = ufs_qcom_set_core_clk_ctrl(hba, is_scale_up: true); |
531 | if (err) |
532 | dev_err(hba->dev, "cfg core clk ctrl failed\n" ); |
533 | /* |
534 | * Some UFS devices (and may be host) have issues if LCC is |
535 | * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 |
536 | * before link startup which will make sure that both host |
537 | * and device TX LCC are disabled once link startup is |
538 | * completed. |
539 | */ |
540 | if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) |
541 | err = ufshcd_disable_host_tx_lcc(hba); |
542 | |
543 | break; |
544 | default: |
545 | break; |
546 | } |
547 | |
548 | return err; |
549 | } |
550 | |
551 | static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) |
552 | { |
553 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
554 | |
555 | /* reset gpio is optional */ |
556 | if (!host->device_reset) |
557 | return; |
558 | |
559 | gpiod_set_value_cansleep(desc: host->device_reset, value: asserted); |
560 | } |
561 | |
562 | static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, |
563 | enum ufs_notify_change_status status) |
564 | { |
565 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
566 | struct phy *phy = host->generic_phy; |
567 | |
568 | if (status == PRE_CHANGE) |
569 | return 0; |
570 | |
571 | if (ufs_qcom_is_link_off(hba)) { |
572 | /* |
573 | * Disable the tx/rx lane symbol clocks before PHY is |
574 | * powered down as the PLL source should be disabled |
575 | * after downstream clocks are disabled. |
576 | */ |
577 | ufs_qcom_disable_lane_clks(host); |
578 | phy_power_off(phy); |
579 | |
580 | /* reset the connected UFS device during power down */ |
581 | ufs_qcom_device_reset_ctrl(hba, asserted: true); |
582 | |
583 | } else if (!ufs_qcom_is_link_active(hba)) { |
584 | ufs_qcom_disable_lane_clks(host); |
585 | } |
586 | |
587 | return ufs_qcom_ice_suspend(host); |
588 | } |
589 | |
590 | static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
591 | { |
592 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
593 | struct phy *phy = host->generic_phy; |
594 | int err; |
595 | |
596 | if (ufs_qcom_is_link_off(hba)) { |
597 | err = phy_power_on(phy); |
598 | if (err) { |
599 | dev_err(hba->dev, "%s: failed PHY power on: %d\n" , |
600 | __func__, err); |
601 | return err; |
602 | } |
603 | |
604 | err = ufs_qcom_enable_lane_clks(host); |
605 | if (err) |
606 | return err; |
607 | |
608 | } else if (!ufs_qcom_is_link_active(hba)) { |
609 | err = ufs_qcom_enable_lane_clks(host); |
610 | if (err) |
611 | return err; |
612 | } |
613 | |
614 | return ufs_qcom_ice_resume(host); |
615 | } |
616 | |
617 | static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) |
618 | { |
619 | if (host->dev_ref_clk_ctrl_mmio && |
620 | (enable ^ host->is_dev_ref_clk_enabled)) { |
621 | u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); |
622 | |
623 | if (enable) |
624 | temp |= host->dev_ref_clk_en_mask; |
625 | else |
626 | temp &= ~host->dev_ref_clk_en_mask; |
627 | |
628 | /* |
629 | * If we are here to disable this clock it might be immediately |
630 | * after entering into hibern8 in which case we need to make |
631 | * sure that device ref_clk is active for specific time after |
632 | * hibern8 enter. |
633 | */ |
634 | if (!enable) { |
635 | unsigned long gating_wait; |
636 | |
637 | gating_wait = host->hba->dev_info.clk_gating_wait_us; |
638 | if (!gating_wait) { |
639 | udelay(1); |
640 | } else { |
641 | /* |
642 | * bRefClkGatingWaitTime defines the minimum |
643 | * time for which the reference clock is |
644 | * required by device during transition from |
645 | * HS-MODE to LS-MODE or HIBERN8 state. Give it |
646 | * more delay to be on the safe side. |
647 | */ |
648 | gating_wait += 10; |
649 | usleep_range(min: gating_wait, max: gating_wait + 10); |
650 | } |
651 | } |
652 | |
653 | writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); |
654 | |
655 | /* |
656 | * Make sure the write to ref_clk reaches the destination and |
657 | * not stored in a Write Buffer (WB). |
658 | */ |
659 | readl(addr: host->dev_ref_clk_ctrl_mmio); |
660 | |
661 | /* |
662 | * If we call hibern8 exit after this, we need to make sure that |
663 | * device ref_clk is stable for at least 1us before the hibern8 |
664 | * exit command. |
665 | */ |
666 | if (enable) |
667 | udelay(1); |
668 | |
669 | host->is_dev_ref_clk_enabled = enable; |
670 | } |
671 | } |
672 | |
673 | static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw) |
674 | { |
675 | struct device *dev = host->hba->dev; |
676 | int ret; |
677 | |
678 | ret = icc_set_bw(path: host->icc_ddr, avg_bw: 0, peak_bw: mem_bw); |
679 | if (ret < 0) { |
680 | dev_err(dev, "failed to set bandwidth request: %d\n" , ret); |
681 | return ret; |
682 | } |
683 | |
684 | ret = icc_set_bw(path: host->icc_cpu, avg_bw: 0, peak_bw: cfg_bw); |
685 | if (ret < 0) { |
686 | dev_err(dev, "failed to set bandwidth request: %d\n" , ret); |
687 | return ret; |
688 | } |
689 | |
690 | return 0; |
691 | } |
692 | |
693 | static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host) |
694 | { |
695 | struct ufs_pa_layer_attr *p = &host->dev_req_params; |
696 | int gear = max_t(u32, p->gear_rx, p->gear_tx); |
697 | int lane = max_t(u32, p->lane_rx, p->lane_tx); |
698 | |
699 | if (ufshcd_is_hs_mode(pwr_info: p)) { |
700 | if (p->hs_rate == PA_HS_MODE_B) |
701 | return ufs_qcom_bw_table[MODE_HS_RB][gear][lane]; |
702 | else |
703 | return ufs_qcom_bw_table[MODE_HS_RA][gear][lane]; |
704 | } else { |
705 | return ufs_qcom_bw_table[MODE_PWM][gear][lane]; |
706 | } |
707 | } |
708 | |
709 | static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host) |
710 | { |
711 | struct __ufs_qcom_bw_table bw_table; |
712 | |
713 | bw_table = ufs_qcom_get_bw_table(host); |
714 | |
715 | return ufs_qcom_icc_set_bw(host, mem_bw: bw_table.mem_bw, cfg_bw: bw_table.cfg_bw); |
716 | } |
717 | |
718 | static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, |
719 | enum ufs_notify_change_status status, |
720 | struct ufs_pa_layer_attr *dev_max_params, |
721 | struct ufs_pa_layer_attr *dev_req_params) |
722 | { |
723 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
724 | struct ufs_host_params *host_params = &host->host_params; |
725 | int ret = 0; |
726 | |
727 | if (!dev_req_params) { |
728 | pr_err("%s: incoming dev_req_params is NULL\n" , __func__); |
729 | return -EINVAL; |
730 | } |
731 | |
732 | switch (status) { |
733 | case PRE_CHANGE: |
734 | ret = ufshcd_negotiate_pwr_params(host_params, dev_max: dev_max_params, agreed_pwr: dev_req_params); |
735 | if (ret) { |
736 | dev_err(hba->dev, "%s: failed to determine capabilities\n" , |
737 | __func__); |
738 | return ret; |
739 | } |
740 | |
741 | /* |
742 | * During UFS driver probe, always update the PHY gear to match the negotiated |
743 | * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, |
744 | * the second init can program the optimal PHY settings. This allows one to start |
745 | * the first init with either the minimum or the maximum support gear. |
746 | */ |
747 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { |
748 | /* |
749 | * Skip REINIT if the negotiated gear matches with the |
750 | * initial phy_gear. Otherwise, update the phy_gear to |
751 | * program the optimal gear setting during REINIT. |
752 | */ |
753 | if (host->phy_gear == dev_req_params->gear_tx) |
754 | hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; |
755 | else |
756 | host->phy_gear = dev_req_params->gear_tx; |
757 | } |
758 | |
759 | /* enable the device ref clock before changing to HS mode */ |
760 | if (!ufshcd_is_hs_mode(pwr_info: &hba->pwr_info) && |
761 | ufshcd_is_hs_mode(pwr_info: dev_req_params)) |
762 | ufs_qcom_dev_ref_clk_ctrl(host, enable: true); |
763 | |
764 | if (host->hw_ver.major >= 0x4) { |
765 | ufshcd_dme_configure_adapt(hba, |
766 | agreed_gear: dev_req_params->gear_tx, |
767 | PA_INITIAL_ADAPT); |
768 | } |
769 | break; |
770 | case POST_CHANGE: |
771 | if (ufs_qcom_cfg_timers(hba, gear: dev_req_params->gear_rx, |
772 | hs: dev_req_params->pwr_rx, |
773 | rate: dev_req_params->hs_rate, update_link_startup_timer: false, is_pre_scale_up: false)) { |
774 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n" , |
775 | __func__); |
776 | /* |
777 | * we return error code at the end of the routine, |
778 | * but continue to configure UFS_PHY_TX_LANE_ENABLE |
779 | * and bus voting as usual |
780 | */ |
781 | ret = -EINVAL; |
782 | } |
783 | |
784 | /* cache the power mode parameters to use internally */ |
785 | memcpy(&host->dev_req_params, |
786 | dev_req_params, sizeof(*dev_req_params)); |
787 | |
788 | ufs_qcom_icc_update_bw(host); |
789 | |
790 | /* disable the device ref clock if entered PWM mode */ |
791 | if (ufshcd_is_hs_mode(pwr_info: &hba->pwr_info) && |
792 | !ufshcd_is_hs_mode(pwr_info: dev_req_params)) |
793 | ufs_qcom_dev_ref_clk_ctrl(host, enable: false); |
794 | break; |
795 | default: |
796 | ret = -EINVAL; |
797 | break; |
798 | } |
799 | |
800 | return ret; |
801 | } |
802 | |
803 | static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) |
804 | { |
805 | int err; |
806 | u32 pa_vs_config_reg1; |
807 | |
808 | err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), |
809 | mib_val: &pa_vs_config_reg1); |
810 | if (err) |
811 | return err; |
812 | |
813 | /* Allow extension of MSB bits of PA_SaveConfigTime attribute */ |
814 | return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), |
815 | mib_val: (pa_vs_config_reg1 | (1 << 12))); |
816 | } |
817 | |
818 | static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) |
819 | { |
820 | int err = 0; |
821 | |
822 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) |
823 | err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); |
824 | |
825 | if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) |
826 | hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; |
827 | |
828 | return err; |
829 | } |
830 | |
831 | static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) |
832 | { |
833 | return ufshci_version(major: 2, minor: 0); |
834 | } |
835 | |
836 | /** |
837 | * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks |
838 | * @hba: host controller instance |
839 | * |
840 | * QCOM UFS host controller might have some non standard behaviours (quirks) |
841 | * than what is specified by UFSHCI specification. Advertise all such |
842 | * quirks to standard UFS host controller driver so standard takes them into |
843 | * account. |
844 | */ |
845 | static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) |
846 | { |
847 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
848 | |
849 | if (host->hw_ver.major == 0x2) |
850 | hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; |
851 | |
852 | if (host->hw_ver.major > 0x3) |
853 | hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; |
854 | } |
855 | |
856 | static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) |
857 | { |
858 | struct ufs_host_params *host_params = &host->host_params; |
859 | u32 val, dev_major; |
860 | |
861 | /* |
862 | * Default to powering up the PHY to the max gear possible, which is |
863 | * backwards compatible with lower gears but not optimal from |
864 | * a power usage point of view. After device negotiation, if the |
865 | * gear is lower a reinit will be performed to program the PHY |
866 | * to the ideal gear for this combo of controller and device. |
867 | */ |
868 | host->phy_gear = host_params->hs_tx_gear; |
869 | |
870 | if (host->hw_ver.major < 0x4) { |
871 | /* |
872 | * These controllers only have one PHY init sequence, |
873 | * let's power up the PHY using that (the minimum supported |
874 | * gear, UFS_HS_G2). |
875 | */ |
876 | host->phy_gear = UFS_HS_G2; |
877 | } else if (host->hw_ver.major >= 0x5) { |
878 | val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG); |
879 | dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val); |
880 | |
881 | /* |
882 | * Since the UFS device version is populated, let's remove the |
883 | * REINIT quirk as the negotiated gear won't change during boot. |
884 | * So there is no need to do reinit. |
885 | */ |
886 | if (dev_major != 0x0) |
887 | host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; |
888 | |
889 | /* |
890 | * For UFS 3.1 device and older, power up the PHY using HS-G4 |
891 | * PHY gear to save power. |
892 | */ |
893 | if (dev_major > 0x0 && dev_major < 0x4) |
894 | host->phy_gear = UFS_HS_G4; |
895 | } |
896 | } |
897 | |
898 | static void ufs_qcom_set_host_params(struct ufs_hba *hba) |
899 | { |
900 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
901 | struct ufs_host_params *host_params = &host->host_params; |
902 | |
903 | ufshcd_init_host_params(host_params); |
904 | |
905 | /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */ |
906 | host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba); |
907 | } |
908 | |
909 | static void ufs_qcom_set_caps(struct ufs_hba *hba) |
910 | { |
911 | hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; |
912 | hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; |
913 | hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; |
914 | hba->caps |= UFSHCD_CAP_WB_EN; |
915 | hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; |
916 | hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; |
917 | } |
918 | |
919 | /** |
920 | * ufs_qcom_setup_clocks - enables/disable clocks |
921 | * @hba: host controller instance |
922 | * @on: If true, enable clocks else disable them. |
923 | * @status: PRE_CHANGE or POST_CHANGE notify |
924 | * |
925 | * Return: 0 on success, non-zero on failure. |
926 | */ |
927 | static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, |
928 | enum ufs_notify_change_status status) |
929 | { |
930 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
931 | |
932 | /* |
933 | * In case ufs_qcom_init() is not yet done, simply ignore. |
934 | * This ufs_qcom_setup_clocks() shall be called from |
935 | * ufs_qcom_init() after init is done. |
936 | */ |
937 | if (!host) |
938 | return 0; |
939 | |
940 | switch (status) { |
941 | case PRE_CHANGE: |
942 | if (on) { |
943 | ufs_qcom_icc_update_bw(host); |
944 | } else { |
945 | if (!ufs_qcom_is_link_active(hba)) { |
946 | /* disable device ref_clk */ |
947 | ufs_qcom_dev_ref_clk_ctrl(host, enable: false); |
948 | } |
949 | } |
950 | break; |
951 | case POST_CHANGE: |
952 | if (on) { |
953 | /* enable the device ref clock for HS mode*/ |
954 | if (ufshcd_is_hs_mode(pwr_info: &hba->pwr_info)) |
955 | ufs_qcom_dev_ref_clk_ctrl(host, enable: true); |
956 | } else { |
957 | ufs_qcom_icc_set_bw(host, mem_bw: ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw, |
958 | cfg_bw: ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw); |
959 | } |
960 | break; |
961 | } |
962 | |
963 | return 0; |
964 | } |
965 | |
966 | static int |
967 | ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) |
968 | { |
969 | struct ufs_qcom_host *host = rcdev_to_ufs_host(rcd: rcdev); |
970 | |
971 | ufs_qcom_assert_reset(hba: host->hba); |
972 | /* provide 1ms delay to let the reset pulse propagate. */ |
973 | usleep_range(min: 1000, max: 1100); |
974 | return 0; |
975 | } |
976 | |
977 | static int |
978 | ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) |
979 | { |
980 | struct ufs_qcom_host *host = rcdev_to_ufs_host(rcd: rcdev); |
981 | |
982 | ufs_qcom_deassert_reset(hba: host->hba); |
983 | |
984 | /* |
985 | * after reset deassertion, phy will need all ref clocks, |
986 | * voltage, current to settle down before starting serdes. |
987 | */ |
988 | usleep_range(min: 1000, max: 1100); |
989 | return 0; |
990 | } |
991 | |
/* Reset-controller ops backing host->rcdev (consumed by the UFS PHY). */
static const struct reset_control_ops ufs_qcom_reset_ops = {
	.assert = ufs_qcom_reset_assert,
	.deassert = ufs_qcom_reset_deassert,
};
996 | |
997 | static int ufs_qcom_icc_init(struct ufs_qcom_host *host) |
998 | { |
999 | struct device *dev = host->hba->dev; |
1000 | int ret; |
1001 | |
1002 | host->icc_ddr = devm_of_icc_get(dev, name: "ufs-ddr" ); |
1003 | if (IS_ERR(ptr: host->icc_ddr)) |
1004 | return dev_err_probe(dev, err: PTR_ERR(ptr: host->icc_ddr), |
1005 | fmt: "failed to acquire interconnect path\n" ); |
1006 | |
1007 | host->icc_cpu = devm_of_icc_get(dev, name: "cpu-ufs" ); |
1008 | if (IS_ERR(ptr: host->icc_cpu)) |
1009 | return dev_err_probe(dev, err: PTR_ERR(ptr: host->icc_cpu), |
1010 | fmt: "failed to acquire interconnect path\n" ); |
1011 | |
1012 | /* |
1013 | * Set Maximum bandwidth vote before initializing the UFS controller and |
1014 | * device. Ideally, a minimal interconnect vote would suffice for the |
1015 | * initialization, but a max vote would allow faster initialization. |
1016 | */ |
1017 | ret = ufs_qcom_icc_set_bw(host, mem_bw: ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw, |
1018 | cfg_bw: ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw); |
1019 | if (ret < 0) |
1020 | return dev_err_probe(dev, err: ret, fmt: "failed to set bandwidth request\n" ); |
1021 | |
1022 | return 0; |
1023 | } |
1024 | |
1025 | /** |
1026 | * ufs_qcom_init - bind phy with controller |
1027 | * @hba: host controller instance |
1028 | * |
1029 | * Binds PHY with controller and powers up PHY enabling clocks |
1030 | * and regulators. |
1031 | * |
1032 | * Return: -EPROBE_DEFER if binding fails, returns negative error |
1033 | * on phy power up failure and returns zero on success. |
1034 | */ |
1035 | static int ufs_qcom_init(struct ufs_hba *hba) |
1036 | { |
1037 | int err; |
1038 | struct device *dev = hba->dev; |
1039 | struct ufs_qcom_host *host; |
1040 | struct ufs_clk_info *clki; |
1041 | |
1042 | host = devm_kzalloc(dev, size: sizeof(*host), GFP_KERNEL); |
1043 | if (!host) |
1044 | return -ENOMEM; |
1045 | |
1046 | /* Make a two way bind between the qcom host and the hba */ |
1047 | host->hba = hba; |
1048 | ufshcd_set_variant(hba, variant: host); |
1049 | |
1050 | /* Setup the optional reset control of HCI */ |
1051 | host->core_reset = devm_reset_control_get_optional(dev: hba->dev, id: "rst" ); |
1052 | if (IS_ERR(ptr: host->core_reset)) { |
1053 | err = dev_err_probe(dev, err: PTR_ERR(ptr: host->core_reset), |
1054 | fmt: "Failed to get reset control\n" ); |
1055 | goto out_variant_clear; |
1056 | } |
1057 | |
1058 | /* Fire up the reset controller. Failure here is non-fatal. */ |
1059 | host->rcdev.of_node = dev->of_node; |
1060 | host->rcdev.ops = &ufs_qcom_reset_ops; |
1061 | host->rcdev.owner = dev->driver->owner; |
1062 | host->rcdev.nr_resets = 1; |
1063 | err = devm_reset_controller_register(dev, rcdev: &host->rcdev); |
1064 | if (err) |
1065 | dev_warn(dev, "Failed to register reset controller\n" ); |
1066 | |
1067 | if (!has_acpi_companion(dev)) { |
1068 | host->generic_phy = devm_phy_get(dev, string: "ufsphy" ); |
1069 | if (IS_ERR(ptr: host->generic_phy)) { |
1070 | err = dev_err_probe(dev, err: PTR_ERR(ptr: host->generic_phy), fmt: "Failed to get PHY\n" ); |
1071 | goto out_variant_clear; |
1072 | } |
1073 | } |
1074 | |
1075 | err = ufs_qcom_icc_init(host); |
1076 | if (err) |
1077 | goto out_variant_clear; |
1078 | |
1079 | host->device_reset = devm_gpiod_get_optional(dev, con_id: "reset" , |
1080 | flags: GPIOD_OUT_HIGH); |
1081 | if (IS_ERR(ptr: host->device_reset)) { |
1082 | err = dev_err_probe(dev, err: PTR_ERR(ptr: host->device_reset), |
1083 | fmt: "Failed to acquire device reset gpio\n" ); |
1084 | goto out_variant_clear; |
1085 | } |
1086 | |
1087 | ufs_qcom_get_controller_revision(hba, major: &host->hw_ver.major, |
1088 | minor: &host->hw_ver.minor, step: &host->hw_ver.step); |
1089 | |
1090 | host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; |
1091 | host->dev_ref_clk_en_mask = BIT(26); |
1092 | |
1093 | list_for_each_entry(clki, &hba->clk_list_head, list) { |
1094 | if (!strcmp(clki->name, "core_clk_unipro" )) |
1095 | clki->keep_link_active = true; |
1096 | } |
1097 | |
1098 | err = ufs_qcom_init_lane_clks(host); |
1099 | if (err) |
1100 | goto out_variant_clear; |
1101 | |
1102 | ufs_qcom_set_caps(hba); |
1103 | ufs_qcom_advertise_quirks(hba); |
1104 | ufs_qcom_set_host_params(hba); |
1105 | ufs_qcom_set_phy_gear(host); |
1106 | |
1107 | err = ufs_qcom_ice_init(host); |
1108 | if (err) |
1109 | goto out_variant_clear; |
1110 | |
1111 | ufs_qcom_setup_clocks(hba, on: true, status: POST_CHANGE); |
1112 | |
1113 | ufs_qcom_get_default_testbus_cfg(host); |
1114 | err = ufs_qcom_testbus_config(host); |
1115 | if (err) |
1116 | /* Failure is non-fatal */ |
1117 | dev_warn(dev, "%s: failed to configure the testbus %d\n" , |
1118 | __func__, err); |
1119 | |
1120 | return 0; |
1121 | |
1122 | out_variant_clear: |
1123 | ufshcd_set_variant(hba, NULL); |
1124 | |
1125 | return err; |
1126 | } |
1127 | |
1128 | static void ufs_qcom_exit(struct ufs_hba *hba) |
1129 | { |
1130 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1131 | |
1132 | ufs_qcom_disable_lane_clks(host); |
1133 | phy_power_off(phy: host->generic_phy); |
1134 | phy_exit(phy: host->generic_phy); |
1135 | } |
1136 | |
1137 | /** |
1138 | * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles |
1139 | * |
1140 | * @hba: host controller instance |
1141 | * @cycles_in_1us: No of cycles in 1us to be configured |
1142 | * |
1143 | * Returns error if dme get/set configuration for 40ns fails |
1144 | * and returns zero on success. |
1145 | */ |
1146 | static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba, |
1147 | u32 cycles_in_1us) |
1148 | { |
1149 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1150 | u32 cycles_in_40ns; |
1151 | u32 reg; |
1152 | int err; |
1153 | |
1154 | /* |
1155 | * UFS host controller V4.0.0 onwards needs to program |
1156 | * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed |
1157 | * frequency of unipro core clk of UFS host controller. |
1158 | */ |
1159 | if (host->hw_ver.major < 4) |
1160 | return 0; |
1161 | |
1162 | /* |
1163 | * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not |
1164 | * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will |
1165 | * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware |
1166 | * specification expect to be 16. Hence use exact hardware spec |
1167 | * mandated value for cycles_in_40ns instead of calculating using |
1168 | * generic formulae. |
1169 | */ |
1170 | switch (cycles_in_1us) { |
1171 | case UNIPRO_CORE_CLK_FREQ_403_MHZ: |
1172 | cycles_in_40ns = 16; |
1173 | break; |
1174 | case UNIPRO_CORE_CLK_FREQ_300_MHZ: |
1175 | cycles_in_40ns = 12; |
1176 | break; |
1177 | case UNIPRO_CORE_CLK_FREQ_201_5_MHZ: |
1178 | cycles_in_40ns = 8; |
1179 | break; |
1180 | case UNIPRO_CORE_CLK_FREQ_150_MHZ: |
1181 | cycles_in_40ns = 6; |
1182 | break; |
1183 | case UNIPRO_CORE_CLK_FREQ_100_MHZ: |
1184 | cycles_in_40ns = 4; |
1185 | break; |
1186 | case UNIPRO_CORE_CLK_FREQ_75_MHZ: |
1187 | cycles_in_40ns = 3; |
1188 | break; |
1189 | case UNIPRO_CORE_CLK_FREQ_37_5_MHZ: |
1190 | cycles_in_40ns = 2; |
1191 | break; |
1192 | default: |
1193 | dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n" , |
1194 | cycles_in_1us); |
1195 | return -EINVAL; |
1196 | } |
1197 | |
1198 | err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), mib_val: ®); |
1199 | if (err) |
1200 | return err; |
1201 | |
1202 | reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK; |
1203 | reg |= cycles_in_40ns; |
1204 | |
1205 | return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), mib_val: reg); |
1206 | } |
1207 | |
1208 | static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up) |
1209 | { |
1210 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1211 | struct list_head *head = &hba->clk_list_head; |
1212 | struct ufs_clk_info *clki; |
1213 | u32 cycles_in_1us = 0; |
1214 | u32 core_clk_ctrl_reg; |
1215 | int err; |
1216 | |
1217 | list_for_each_entry(clki, head, list) { |
1218 | if (!IS_ERR_OR_NULL(ptr: clki->clk) && |
1219 | !strcmp(clki->name, "core_clk_unipro" )) { |
1220 | if (!clki->max_freq) |
1221 | cycles_in_1us = 150; /* default for backwards compatibility */ |
1222 | else if (is_scale_up) |
1223 | cycles_in_1us = ceil(clki->max_freq, (1000 * 1000)); |
1224 | else |
1225 | cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000)); |
1226 | break; |
1227 | } |
1228 | } |
1229 | |
1230 | err = ufshcd_dme_get(hba, |
1231 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), |
1232 | mib_val: &core_clk_ctrl_reg); |
1233 | if (err) |
1234 | return err; |
1235 | |
1236 | /* Bit mask is different for UFS host controller V4.0.0 onwards */ |
1237 | if (host->hw_ver.major >= 4) { |
1238 | if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us)) |
1239 | return -ERANGE; |
1240 | core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4; |
1241 | core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us); |
1242 | } else { |
1243 | if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us)) |
1244 | return -ERANGE; |
1245 | core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK; |
1246 | core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us); |
1247 | } |
1248 | |
1249 | /* Clear CORE_CLK_DIV_EN */ |
1250 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; |
1251 | |
1252 | err = ufshcd_dme_set(hba, |
1253 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), |
1254 | mib_val: core_clk_ctrl_reg); |
1255 | if (err) |
1256 | return err; |
1257 | |
1258 | /* Configure unipro core clk 40ns attribute */ |
1259 | return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us); |
1260 | } |
1261 | |
1262 | static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) |
1263 | { |
1264 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1265 | struct ufs_pa_layer_attr *attr = &host->dev_req_params; |
1266 | int ret; |
1267 | |
1268 | ret = ufs_qcom_cfg_timers(hba, gear: attr->gear_rx, hs: attr->pwr_rx, |
1269 | rate: attr->hs_rate, update_link_startup_timer: false, is_pre_scale_up: true); |
1270 | if (ret) { |
1271 | dev_err(hba->dev, "%s ufs cfg timer failed\n" , __func__); |
1272 | return ret; |
1273 | } |
1274 | /* set unipro core clock attributes and clear clock divider */ |
1275 | return ufs_qcom_set_core_clk_ctrl(hba, is_scale_up: true); |
1276 | } |
1277 | |
/* Nothing to do after scaling up; kept for notify-callback symmetry. */
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	return 0;
}
1282 | |
1283 | static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) |
1284 | { |
1285 | int err; |
1286 | u32 core_clk_ctrl_reg; |
1287 | |
1288 | err = ufshcd_dme_get(hba, |
1289 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), |
1290 | mib_val: &core_clk_ctrl_reg); |
1291 | |
1292 | /* make sure CORE_CLK_DIV_EN is cleared */ |
1293 | if (!err && |
1294 | (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { |
1295 | core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; |
1296 | err = ufshcd_dme_set(hba, |
1297 | UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), |
1298 | mib_val: core_clk_ctrl_reg); |
1299 | } |
1300 | |
1301 | return err; |
1302 | } |
1303 | |
1304 | static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) |
1305 | { |
1306 | /* set unipro core clock attributes and clear clock divider */ |
1307 | return ufs_qcom_set_core_clk_ctrl(hba, is_scale_up: false); |
1308 | } |
1309 | |
1310 | static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, |
1311 | bool scale_up, enum ufs_notify_change_status status) |
1312 | { |
1313 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1314 | int err; |
1315 | |
1316 | /* check the host controller state before sending hibern8 cmd */ |
1317 | if (!ufshcd_is_hba_active(hba)) |
1318 | return 0; |
1319 | |
1320 | if (status == PRE_CHANGE) { |
1321 | err = ufshcd_uic_hibern8_enter(hba); |
1322 | if (err) |
1323 | return err; |
1324 | if (scale_up) |
1325 | err = ufs_qcom_clk_scale_up_pre_change(hba); |
1326 | else |
1327 | err = ufs_qcom_clk_scale_down_pre_change(hba); |
1328 | |
1329 | if (err) { |
1330 | ufshcd_uic_hibern8_exit(hba); |
1331 | return err; |
1332 | } |
1333 | } else { |
1334 | if (scale_up) |
1335 | err = ufs_qcom_clk_scale_up_post_change(hba); |
1336 | else |
1337 | err = ufs_qcom_clk_scale_down_post_change(hba); |
1338 | |
1339 | |
1340 | if (err) { |
1341 | ufshcd_uic_hibern8_exit(hba); |
1342 | return err; |
1343 | } |
1344 | |
1345 | ufs_qcom_icc_update_bw(host); |
1346 | ufshcd_uic_hibern8_exit(hba); |
1347 | } |
1348 | |
1349 | return 0; |
1350 | } |
1351 | |
1352 | static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host) |
1353 | { |
1354 | ufshcd_rmwl(hba: host->hba, UFS_REG_TEST_BUS_EN, |
1355 | UFS_REG_TEST_BUS_EN, reg: REG_UFS_CFG1); |
1356 | ufshcd_rmwl(hba: host->hba, TEST_BUS_EN, TEST_BUS_EN, reg: REG_UFS_CFG1); |
1357 | } |
1358 | |
/* Seed host->testbus with a known-good selection (UniPro, minor 37). */
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}
1365 | |
1366 | static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) |
1367 | { |
1368 | if (host->testbus.select_major >= TSTBUS_MAX) { |
1369 | dev_err(host->hba->dev, |
1370 | "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n" , |
1371 | __func__, host->testbus.select_major); |
1372 | return false; |
1373 | } |
1374 | |
1375 | return true; |
1376 | } |
1377 | |
1378 | int ufs_qcom_testbus_config(struct ufs_qcom_host *host) |
1379 | { |
1380 | int reg; |
1381 | int offset; |
1382 | u32 mask = TEST_BUS_SUB_SEL_MASK; |
1383 | |
1384 | if (!host) |
1385 | return -EINVAL; |
1386 | |
1387 | if (!ufs_qcom_testbus_cfg_is_ok(host)) |
1388 | return -EPERM; |
1389 | |
1390 | switch (host->testbus.select_major) { |
1391 | case TSTBUS_UAWM: |
1392 | reg = UFS_TEST_BUS_CTRL_0; |
1393 | offset = 24; |
1394 | break; |
1395 | case TSTBUS_UARM: |
1396 | reg = UFS_TEST_BUS_CTRL_0; |
1397 | offset = 16; |
1398 | break; |
1399 | case TSTBUS_TXUC: |
1400 | reg = UFS_TEST_BUS_CTRL_0; |
1401 | offset = 8; |
1402 | break; |
1403 | case TSTBUS_RXUC: |
1404 | reg = UFS_TEST_BUS_CTRL_0; |
1405 | offset = 0; |
1406 | break; |
1407 | case TSTBUS_DFC: |
1408 | reg = UFS_TEST_BUS_CTRL_1; |
1409 | offset = 24; |
1410 | break; |
1411 | case TSTBUS_TRLUT: |
1412 | reg = UFS_TEST_BUS_CTRL_1; |
1413 | offset = 16; |
1414 | break; |
1415 | case TSTBUS_TMRLUT: |
1416 | reg = UFS_TEST_BUS_CTRL_1; |
1417 | offset = 8; |
1418 | break; |
1419 | case TSTBUS_OCSC: |
1420 | reg = UFS_TEST_BUS_CTRL_1; |
1421 | offset = 0; |
1422 | break; |
1423 | case TSTBUS_WRAPPER: |
1424 | reg = UFS_TEST_BUS_CTRL_2; |
1425 | offset = 16; |
1426 | break; |
1427 | case TSTBUS_COMBINED: |
1428 | reg = UFS_TEST_BUS_CTRL_2; |
1429 | offset = 8; |
1430 | break; |
1431 | case TSTBUS_UTP_HCI: |
1432 | reg = UFS_TEST_BUS_CTRL_2; |
1433 | offset = 0; |
1434 | break; |
1435 | case TSTBUS_UNIPRO: |
1436 | reg = UFS_UNIPRO_CFG; |
1437 | offset = 20; |
1438 | mask = 0xFFF; |
1439 | break; |
1440 | /* |
1441 | * No need for a default case, since |
1442 | * ufs_qcom_testbus_cfg_is_ok() checks that the configuration |
1443 | * is legal |
1444 | */ |
1445 | } |
1446 | mask <<= offset; |
1447 | ufshcd_rmwl(hba: host->hba, TEST_BUS_SEL, |
1448 | val: (u32)host->testbus.select_major << 19, |
1449 | reg: REG_UFS_CFG1); |
1450 | ufshcd_rmwl(hba: host->hba, mask, |
1451 | val: (u32)host->testbus.select_minor << offset, |
1452 | reg); |
1453 | ufs_qcom_enable_test_bus(host); |
1454 | /* |
1455 | * Make sure the test bus configuration is |
1456 | * committed before returning. |
1457 | */ |
1458 | mb(); |
1459 | |
1460 | return 0; |
1461 | } |
1462 | |
1463 | static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) |
1464 | { |
1465 | u32 reg; |
1466 | struct ufs_qcom_host *host; |
1467 | |
1468 | host = ufshcd_get_variant(hba); |
1469 | |
1470 | ufshcd_dump_regs(hba, offset: REG_UFS_SYS1CLK_1US, len: 16 * 4, |
1471 | prefix: "HCI Vendor Specific Registers " ); |
1472 | |
1473 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_UFS_DBG_RD_REG_OCSC); |
1474 | ufshcd_dump_regs(hba, offset: reg, len: 44 * 4, prefix: "UFS_UFS_DBG_RD_REG_OCSC " ); |
1475 | |
1476 | reg = ufshcd_readl(hba, REG_UFS_CFG1); |
1477 | reg |= UTP_DBG_RAMS_EN; |
1478 | ufshcd_writel(hba, reg, REG_UFS_CFG1); |
1479 | |
1480 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_UFS_DBG_RD_EDTL_RAM); |
1481 | ufshcd_dump_regs(hba, offset: reg, len: 32 * 4, prefix: "UFS_UFS_DBG_RD_EDTL_RAM " ); |
1482 | |
1483 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_UFS_DBG_RD_DESC_RAM); |
1484 | ufshcd_dump_regs(hba, offset: reg, len: 128 * 4, prefix: "UFS_UFS_DBG_RD_DESC_RAM " ); |
1485 | |
1486 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_UFS_DBG_RD_PRDT_RAM); |
1487 | ufshcd_dump_regs(hba, offset: reg, len: 64 * 4, prefix: "UFS_UFS_DBG_RD_PRDT_RAM " ); |
1488 | |
1489 | /* clear bit 17 - UTP_DBG_RAMS_EN */ |
1490 | ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, val: 0, reg: REG_UFS_CFG1); |
1491 | |
1492 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_UAWM); |
1493 | ufshcd_dump_regs(hba, offset: reg, len: 4 * 4, prefix: "UFS_DBG_RD_REG_UAWM " ); |
1494 | |
1495 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_UARM); |
1496 | ufshcd_dump_regs(hba, offset: reg, len: 4 * 4, prefix: "UFS_DBG_RD_REG_UARM " ); |
1497 | |
1498 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_TXUC); |
1499 | ufshcd_dump_regs(hba, offset: reg, len: 48 * 4, prefix: "UFS_DBG_RD_REG_TXUC " ); |
1500 | |
1501 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_RXUC); |
1502 | ufshcd_dump_regs(hba, offset: reg, len: 27 * 4, prefix: "UFS_DBG_RD_REG_RXUC " ); |
1503 | |
1504 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_DFC); |
1505 | ufshcd_dump_regs(hba, offset: reg, len: 19 * 4, prefix: "UFS_DBG_RD_REG_DFC " ); |
1506 | |
1507 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_TRLUT); |
1508 | ufshcd_dump_regs(hba, offset: reg, len: 34 * 4, prefix: "UFS_DBG_RD_REG_TRLUT " ); |
1509 | |
1510 | reg = ufs_qcom_get_debug_reg_offset(host, reg: UFS_DBG_RD_REG_TMRLUT); |
1511 | ufshcd_dump_regs(hba, offset: reg, len: 9 * 4, prefix: "UFS_DBG_RD_REG_TMRLUT " ); |
1512 | } |
1513 | |
1514 | /** |
1515 | * ufs_qcom_device_reset() - toggle the (optional) device reset line |
1516 | * @hba: per-adapter instance |
1517 | * |
1518 | * Toggles the (optional) reset line to reset the attached device. |
1519 | */ |
1520 | static int ufs_qcom_device_reset(struct ufs_hba *hba) |
1521 | { |
1522 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1523 | |
1524 | /* reset gpio is optional */ |
1525 | if (!host->device_reset) |
1526 | return -EOPNOTSUPP; |
1527 | |
1528 | /* |
1529 | * The UFS device shall detect reset pulses of 1us, sleep for 10us to |
1530 | * be on the safe side. |
1531 | */ |
1532 | ufs_qcom_device_reset_ctrl(hba, asserted: true); |
1533 | usleep_range(min: 10, max: 15); |
1534 | |
1535 | ufs_qcom_device_reset_ctrl(hba, asserted: false); |
1536 | usleep_range(min: 10, max: 15); |
1537 | |
1538 | return 0; |
1539 | } |
1540 | |
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/* Tune devfreq simple-ondemand: 60ms delayed polling, 70%/5% thresholds. */
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					struct devfreq_dev_profile *p,
					struct devfreq_simple_ondemand_data *d)
{
	p->polling_ms = 60;
	p->timer = DEVFREQ_TIMER_DELAYED;
	d->upthreshold = 70;
	d->downdifferential = 5;
}
#else
/* Stub used when the simple-ondemand devfreq governor is not built. */
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
					struct devfreq_dev_profile *p,
					struct devfreq_simple_ondemand_data *data)
{
}
#endif
1558 | |
1559 | static void ufs_qcom_reinit_notify(struct ufs_hba *hba) |
1560 | { |
1561 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1562 | |
1563 | phy_power_off(phy: host->generic_phy); |
1564 | } |
1565 | |
/* Resources (names must match the DT/ACPI "reg-names" entries). */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
	/* UFS host controller register space */
	{.name = "ufs_mem" ,},
	/* MCQ register space */
	{.name = "mcq" ,},
	/* Submission Queue DAO */
	{.name = "mcq_sqd" ,},
	/* Submission Queue Interrupt Status */
	{.name = "mcq_sqis" ,},
	/* Completion Queue DAO */
	{.name = "mcq_cqd" ,},
	/* Completion Queue Interrupt Status */
	{.name = "mcq_cqis" ,},
	/* MCQ vendor specific */
	{.name = "mcq_vs" ,},
};
1581 | |
1582 | static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba) |
1583 | { |
1584 | struct platform_device *pdev = to_platform_device(hba->dev); |
1585 | struct ufshcd_res_info *res; |
1586 | struct resource *res_mem, *res_mcq; |
1587 | int i, ret; |
1588 | |
1589 | memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info)); |
1590 | |
1591 | for (i = 0; i < RES_MAX; i++) { |
1592 | res = &hba->res[i]; |
1593 | res->resource = platform_get_resource_byname(pdev, |
1594 | IORESOURCE_MEM, |
1595 | res->name); |
1596 | if (!res->resource) { |
1597 | dev_info(hba->dev, "Resource %s not provided\n" , res->name); |
1598 | if (i == RES_UFS) |
1599 | return -ENODEV; |
1600 | continue; |
1601 | } else if (i == RES_UFS) { |
1602 | res_mem = res->resource; |
1603 | res->base = hba->mmio_base; |
1604 | continue; |
1605 | } |
1606 | |
1607 | res->base = devm_ioremap_resource(dev: hba->dev, res: res->resource); |
1608 | if (IS_ERR(ptr: res->base)) { |
1609 | dev_err(hba->dev, "Failed to map res %s, err=%d\n" , |
1610 | res->name, (int)PTR_ERR(res->base)); |
1611 | ret = PTR_ERR(ptr: res->base); |
1612 | res->base = NULL; |
1613 | return ret; |
1614 | } |
1615 | } |
1616 | |
1617 | /* MCQ resource provided in DT */ |
1618 | res = &hba->res[RES_MCQ]; |
1619 | /* Bail if MCQ resource is provided */ |
1620 | if (res->base) |
1621 | goto out; |
1622 | |
1623 | /* Explicitly allocate MCQ resource from ufs_mem */ |
1624 | res_mcq = devm_kzalloc(dev: hba->dev, size: sizeof(*res_mcq), GFP_KERNEL); |
1625 | if (!res_mcq) |
1626 | return -ENOMEM; |
1627 | |
1628 | res_mcq->start = res_mem->start + |
1629 | MCQ_SQATTR_OFFSET(hba->mcq_capabilities); |
1630 | res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1; |
1631 | res_mcq->flags = res_mem->flags; |
1632 | res_mcq->name = "mcq" ; |
1633 | |
1634 | ret = insert_resource(parent: &iomem_resource, new: res_mcq); |
1635 | if (ret) { |
1636 | dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n" , |
1637 | ret); |
1638 | return ret; |
1639 | } |
1640 | |
1641 | res->base = devm_ioremap_resource(dev: hba->dev, res: res_mcq); |
1642 | if (IS_ERR(ptr: res->base)) { |
1643 | dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n" , |
1644 | (int)PTR_ERR(res->base)); |
1645 | ret = PTR_ERR(ptr: res->base); |
1646 | goto ioremap_err; |
1647 | } |
1648 | |
1649 | out: |
1650 | hba->mcq_base = res->base; |
1651 | return 0; |
1652 | ioremap_err: |
1653 | res->base = NULL; |
1654 | remove_resource(old: res_mcq); |
1655 | return ret; |
1656 | } |
1657 | |
1658 | static int ufs_qcom_op_runtime_config(struct ufs_hba *hba) |
1659 | { |
1660 | struct ufshcd_res_info *mem_res, *sqdao_res; |
1661 | struct ufshcd_mcq_opr_info_t *opr; |
1662 | int i; |
1663 | |
1664 | mem_res = &hba->res[RES_UFS]; |
1665 | sqdao_res = &hba->res[RES_MCQ_SQD]; |
1666 | |
1667 | if (!mem_res->base || !sqdao_res->base) |
1668 | return -EINVAL; |
1669 | |
1670 | for (i = 0; i < OPR_MAX; i++) { |
1671 | opr = &hba->mcq_opr[i]; |
1672 | opr->offset = sqdao_res->resource->start - |
1673 | mem_res->resource->start + 0x40 * i; |
1674 | opr->stride = 0x100; |
1675 | opr->base = sqdao_res->base + 0x40 * i; |
1676 | } |
1677 | |
1678 | return 0; |
1679 | } |
1680 | |
/* Report the Maximum Active Commands (MAC) the controller supports. */
static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
{
	/* Qualcomm HC supports up to 64 */
	return MAX_SUPP_MAC;
}
1686 | |
1687 | static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba, |
1688 | unsigned long *ocqs) |
1689 | { |
1690 | struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS]; |
1691 | |
1692 | if (!mcq_vs_res->base) |
1693 | return -EINVAL; |
1694 | |
1695 | *ocqs = readl(addr: mcq_vs_res->base + UFS_MEM_CQIS_VS); |
1696 | |
1697 | return 0; |
1698 | } |
1699 | |
/* MSI write callback: hand the composed message to the MCQ ESI config helper. */
static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufshcd_mcq_config_esi(hba, msg);
}
1707 | |
1708 | static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) |
1709 | { |
1710 | struct msi_desc *desc = data; |
1711 | struct device *dev = msi_desc_to_dev(desc); |
1712 | struct ufs_hba *hba = dev_get_drvdata(dev); |
1713 | u32 id = desc->msi_index; |
1714 | struct ufs_hw_queue *hwq = &hba->uhq[id]; |
1715 | |
1716 | ufshcd_mcq_write_cqis(hba, val: 0x1, i: id); |
1717 | ufshcd_mcq_poll_cqe_lock(hba, hwq); |
1718 | |
1719 | return IRQ_HANDLED; |
1720 | } |
1721 | |
1722 | static int ufs_qcom_config_esi(struct ufs_hba *hba) |
1723 | { |
1724 | struct ufs_qcom_host *host = ufshcd_get_variant(hba); |
1725 | struct msi_desc *desc; |
1726 | struct msi_desc *failed_desc = NULL; |
1727 | int nr_irqs, ret; |
1728 | |
1729 | if (host->esi_enabled) |
1730 | return 0; |
1731 | |
1732 | /* |
1733 | * 1. We only handle CQs as of now. |
1734 | * 2. Poll queues do not need ESI. |
1735 | */ |
1736 | nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; |
1737 | ret = platform_device_msi_init_and_alloc_irqs(dev: hba->dev, nvec: nr_irqs, |
1738 | write_msi_msg: ufs_qcom_write_msi_msg); |
1739 | if (ret) { |
1740 | dev_err(hba->dev, "Failed to request Platform MSI %d\n" , ret); |
1741 | return ret; |
1742 | } |
1743 | |
1744 | msi_lock_descs(dev: hba->dev); |
1745 | msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { |
1746 | ret = devm_request_irq(dev: hba->dev, irq: desc->irq, |
1747 | handler: ufs_qcom_mcq_esi_handler, |
1748 | IRQF_SHARED, devname: "qcom-mcq-esi" , dev_id: desc); |
1749 | if (ret) { |
1750 | dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n" , |
1751 | __func__, desc->irq, ret); |
1752 | failed_desc = desc; |
1753 | break; |
1754 | } |
1755 | } |
1756 | msi_unlock_descs(dev: hba->dev); |
1757 | |
1758 | if (ret) { |
1759 | /* Rewind */ |
1760 | msi_lock_descs(dev: hba->dev); |
1761 | msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { |
1762 | if (desc == failed_desc) |
1763 | break; |
1764 | devm_free_irq(dev: hba->dev, irq: desc->irq, dev_id: hba); |
1765 | } |
1766 | msi_unlock_descs(dev: hba->dev); |
1767 | platform_device_msi_free_irqs_all(dev: hba->dev); |
1768 | } else { |
1769 | if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && |
1770 | host->hw_ver.step == 0) |
1771 | ufshcd_rmwl(hba, ESI_VEC_MASK, |
1772 | FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), |
1773 | reg: REG_UFS_CFG3); |
1774 | ufshcd_mcq_enable_esi(hba); |
1775 | host->esi_enabled = true; |
1776 | } |
1777 | |
1778 | return ret; |
1779 | } |
1780 | |
1781 | /* |
1782 | * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations |
1783 | * |
1784 | * The variant operations configure the necessary controller and PHY |
1785 | * handshake during initialization. |
1786 | */ |
1787 | static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { |
1788 | .name = "qcom" , |
1789 | .init = ufs_qcom_init, |
1790 | .exit = ufs_qcom_exit, |
1791 | .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version, |
1792 | .clk_scale_notify = ufs_qcom_clk_scale_notify, |
1793 | .setup_clocks = ufs_qcom_setup_clocks, |
1794 | .hce_enable_notify = ufs_qcom_hce_enable_notify, |
1795 | .link_startup_notify = ufs_qcom_link_startup_notify, |
1796 | .pwr_change_notify = ufs_qcom_pwr_change_notify, |
1797 | .apply_dev_quirks = ufs_qcom_apply_dev_quirks, |
1798 | .suspend = ufs_qcom_suspend, |
1799 | .resume = ufs_qcom_resume, |
1800 | .dbg_register_dump = ufs_qcom_dump_dbg_regs, |
1801 | .device_reset = ufs_qcom_device_reset, |
1802 | .config_scaling_param = ufs_qcom_config_scaling_param, |
1803 | .program_key = ufs_qcom_ice_program_key, |
1804 | .reinit_notify = ufs_qcom_reinit_notify, |
1805 | .mcq_config_resource = ufs_qcom_mcq_config_resource, |
1806 | .get_hba_mac = ufs_qcom_get_hba_mac, |
1807 | .op_runtime_config = ufs_qcom_op_runtime_config, |
1808 | .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, |
1809 | .config_esi = ufs_qcom_config_esi, |
1810 | }; |
1811 | |
1812 | /** |
1813 | * ufs_qcom_probe - probe routine of the driver |
1814 | * @pdev: pointer to Platform device handle |
1815 | * |
1816 | * Return: zero for success and non-zero for failure. |
1817 | */ |
1818 | static int ufs_qcom_probe(struct platform_device *pdev) |
1819 | { |
1820 | int err; |
1821 | struct device *dev = &pdev->dev; |
1822 | |
1823 | /* Perform generic probe */ |
1824 | err = ufshcd_pltfrm_init(pdev, vops: &ufs_hba_qcom_vops); |
1825 | if (err) |
1826 | return dev_err_probe(dev, err, fmt: "ufshcd_pltfrm_init() failed\n" ); |
1827 | |
1828 | return 0; |
1829 | } |
1830 | |
1831 | /** |
1832 | * ufs_qcom_remove - set driver_data of the device to NULL |
1833 | * @pdev: pointer to platform device handle |
1834 | * |
1835 | * Always returns 0 |
1836 | */ |
1837 | static void ufs_qcom_remove(struct platform_device *pdev) |
1838 | { |
1839 | struct ufs_hba *hba = platform_get_drvdata(pdev); |
1840 | |
1841 | pm_runtime_get_sync(dev: &(pdev)->dev); |
1842 | ufshcd_remove(hba); |
1843 | platform_device_msi_free_irqs_all(dev: hba->dev); |
1844 | } |
1845 | |
/* Device-tree match table; ACPI systems match via ufs_qcom_acpi_match. */
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
	{ .compatible = "qcom,ufshc" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
1851 | |
#ifdef CONFIG_ACPI
/* ACPI ID for the Qualcomm UFS host controller. */
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
	{ "QCOM24A5" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
1859 | |
/* All PM callbacks delegate to the generic ufshcd core helpers. */
static const struct dev_pm_ops ufs_qcom_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare = ufshcd_suspend_prepare,
	.complete = ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_system_suspend,
	.resume = ufshcd_system_resume,
	.freeze = ufshcd_system_freeze,
	.restore = ufshcd_system_restore,
	.thaw = ufshcd_system_thaw,
#endif
};
1872 | |
/* Platform driver glue; matched by DT compatible or ACPI ID above. */
static struct platform_driver ufs_qcom_pltform = {
	.probe = ufs_qcom_probe,
	.remove_new = ufs_qcom_remove,
	.driver = {
		.name = "ufshcd-qcom" ,
		.pm = &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
		.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2" );
1886 | |