// SPDX-License-Identifier: GPL-2.0
/*
 * CLx support
 *
 * Copyright (C) 2020 - 2023, Intel Corporation
 * Authors: Gil Fine <gil.fine@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/module.h>

#include "tb.h"

static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
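/*
 * Note: the parameter is read-only at runtime (0444), so CLx can only be
 * turned off when the driver loads, e.g. with thunderbolt.clx=0 on the
 * kernel command line (assuming the driver is built as the "thunderbolt"
 * module).
 */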

static const char *clx_name(unsigned int clx)
{
	switch (clx) {
	case TB_CL0S | TB_CL1 | TB_CL2:
		return "CL0s/CL1/CL2";
	case TB_CL1 | TB_CL2:
		return "CL1/CL2";
	case TB_CL0S | TB_CL2:
		return "CL0s/CL2";
	case TB_CL0S | TB_CL1:
		return "CL0s/CL1";
	case TB_CL0S:
		return "CL0s";
	case 0:
		return "disabled";
	default:
		return "unknown";
	}
}

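/* Set or clear the PM secondary bit (LANE_ADP_CS_1_PMS) of the port's lane adapter */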
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, true);
}

static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return tb_port_pm_secondary_set(port, false);
}

/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
	u32 val, mask = 0;
	bool ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_0_CL1_SUPPORT;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

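/* Enable or disable the given CL states on the port's lane adapter */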
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
	u32 phy, mask = 0;
	int ret;

	if (clx & TB_CL0S)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE;
	if (clx & TB_CL1)
		mask |= LANE_ADP_CS_1_CL1_ENABLE;
	if (clx & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;

	if (!mask)
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, false);
}

static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
	return tb_port_clx_set(port, clx, true);
}

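/*
 * Returns the mask of CL states currently enabled on the port, 0 if the
 * port does not support CLx, or a negative errno if reading the lane
 * adapter configuration fails.
 */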
static int tb_port_clx(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (val & LANE_ADP_CS_1_CL0S_ENABLE)
		ret |= TB_CL0S;
	if (val & LANE_ADP_CS_1_CL1_ENABLE)
		ret |= TB_CL1;
	if (val & LANE_ADP_CS_1_CL2_ENABLE)
		ret |= TB_CL2;

	return ret;
}

/**
 * tb_port_clx_is_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx: Mask of CL states to check
 *
 * Returns true if any of the given CL states is enabled for @port.
 */
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
	return !!(tb_port_clx(port) & clx);
}

/**
 * tb_switch_clx_is_supported() - Is CLx supported on this type of router
 * @sw: The router to check CLx support for
 */
static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
	if (!clx_enabled)
		return false;

	if (sw->quirks & QUIRK_NO_CLX)
		return false;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms
	 * before Alder Lake.
	 */
	if (tb_switch_is_tiger_lake(sw))
		return false;

	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

/**
 * tb_switch_clx_init() - Initialize router CL states
 * @sw: Router
 *
 * Can be called for any router. Initializes the current CL state by
 * reading it from the hardware.
 *
 * Returns %0 in case of success and negative errno in case of failure.
 */
int tb_switch_clx_init(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	unsigned int clx, tmp;

	if (tb_switch_is_icm(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_clx_is_supported(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	clx = tb_port_clx(up);
	tmp = tb_port_clx(down);
	if (clx != tmp)
		tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
			   clx, tmp);

	tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));

	sw->clx = clx;
	return 0;
}

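/*
 * Resolve PM secondary for the link: set the PMS bit on the router's
 * upstream lane adapter and clear it on the parent's downstream adapter.
 */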
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}

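/*
 * Mask low-power (CLx) objections coming from the unused dual-lane port
 * of a Titan Ridge device router and unmask them for the upstream port.
 */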
static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Among pre-USB4 devices only Titan Ridge supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	/*
	 * Titan Ridge has only two dual-lane Thunderbolt ports:
	 * Port A consists of lane adapters 1,2 and
	 * Port B consists of lane adapters 3,4.
	 * If the upstream port is A (lanes 1,2), we mask objections from
	 * port B (lanes 3,4) and unmask objections from port A, and vice
	 * versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}

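/*
 * CL1 requires CL0s to be enabled as well, so for example a mask of plain
 * TB_CL1 is rejected while TB_CL0S | TB_CL1 is accepted.
 */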
static bool validate_mask(unsigned int clx)
{
	/* Previous states need to be enabled */
	if (clx & TB_CL1)
		return (clx & TB_CL0S) == TB_CL0S;
	return true;
}

/**
 * tb_switch_clx_enable() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * CLx is enabled only if both sides of the link support CLx, the link is
 * not configured as two single-lane links, and the link is not an
 * inter-domain link. The complete set of conditions is described in CM
 * Guide 1.0 section 8.1.
 *
 * Returns %0 on success or an error code on failure.
 */
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
	bool up_clx_support, down_clx_support;
	struct tb_switch *parent_sw;
	struct tb_port *up, *down;
	int ret;

	if (!clx || sw->clx == clx)
		return 0;

	if (!validate_mask(clx))
		return -EINVAL;

	parent_sw = tb_switch_parent(sw);
	if (!parent_sw)
		return 0;

	if (!tb_switch_clx_is_supported(parent_sw) ||
	    !tb_switch_clx_is_supported(sw))
		return 0;

	/* Only support CL2 for v2 routers */
	if ((clx & TB_CL2) &&
	    (usb4_switch_version(parent_sw) < 2 ||
	     usb4_switch_version(sw) < 2))
		return -EOPNOTSUPP;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx |= clx;

	tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
	return 0;
}

/**
 * tb_switch_clx_disable() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 *
 * Disables all CL states of the given router. Can be called on any
 * router; if the states were not enabled already this does nothing.
 *
 * Returns the CL states that were disabled or negative errno in case of
 * failure.
 */
int tb_switch_clx_disable(struct tb_switch *sw)
{
	unsigned int clx = sw->clx;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_clx_is_supported(sw))
		return 0;

	if (!clx)
		return 0;

	if (sw->is_unplugged)
		return clx;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = 0;

	tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
	return clx;
}