// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"
/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well, so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires
		 * power cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeouts for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot trigger
	 * PME themselves. To be on the safe side keep the root port in
	 * D0 during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but they are
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if the query succeeds assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through the DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}
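
/*
 * A rough sketch of how the helpers above compose into the overall
 * upgrade flow (illustrative only; the real entry points are the sysfs
 * attributes and tb_switch_nvm_add() below):
 *
 *	ret = tb_nvm_write_buf(sw->nvm, 0, image, image_size);
 *	if (!ret)
 *		ret = nvm_validate_and_write(sw);
 *	if (!ret)
 *		ret = nvm_authenticate(sw, false);
 *
 * After authentication the router is power cycled (DMA port) or
 * re-enumerated (USB4) before the new image is taken into use.
 */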

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
 * of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}
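
/*
 * Example (a minimal sketch; the zero offset and 64-byte length are
 * arbitrary illustration values, not defined by this driver):
 *
 *	u8 data[SZ_64];
 *	int ret = tb_switch_nvm_read(sw, 0, data, sizeof(data));
 *	if (ret)
 *		return ret;
 */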

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait for
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
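
/*
 * Typical use during enumeration looks roughly like this (a sketch;
 * error handling elided):
 *
 *	if (tb_wait_for_port(port, false) <= 0)
 *		continue;	// nothing usable connected here
 *	// port is connected and the link is up
 */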

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
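	/*
	 * Never remove more credits than are currently allocated: a
	 * negative @credits is clamped to at most the current NFC
	 * credit count.
	 */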
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for the connection manager (CM).
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable lane 0 and 1 adapters.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable lane 0 and 1 adapters.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

static int tb_port_reset(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		return port->cap_usb4 ? usb4_port_reset(port) : 0;
	return tb_lc_reset_port(port);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * The NHI can use HopIDs 1-max. For other adapters HopIDs 0-7
	 * are reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}
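
/*
 * Allocation and release form a pair (a sketch; passing a negative
 * max_hopid asks for any HopID the adapter supports):
 *
 *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */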

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->out_hopids, hopid);
}

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
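	/*
	 * The route string stores one byte per hop, so masking with
	 * (1 << depth * 8) - 1 compares only the part of the two routes
	 * that is shared up to the parent's depth.
	 */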
	u64 mask = (1ULL << parent->config.depth * 8) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
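
/*
 * The canonical way to consume this is a walk loop along these lines
 * (a sketch; tb.h also provides a tb_for_each_port_on_path() style
 * helper for this pattern):
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)) != NULL) {
 *		// visit p
 *	}
 */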

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns the link generation as a number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if the given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after the
 * target link width has been set (tb_port_set_link_width()). Note in
 * most cases one should use tb_port_lane_bonding_enable() instead to
 * enable lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so the caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in the expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane1;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * avoids the lane adapter re-entering the bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}
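
/*
 * The full bonding sequence then composes the helpers in this file
 * roughly as follows (a sketch; the "up"/"down" ports and the 100 ms
 * timeout are illustrative, and error handling is elided):
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	if (!ret)
 *		ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_DUAL,
 *						  100);
 *	if (!ret)
 *		ret = tb_port_update_credits(down);
 */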

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it did. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get a port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;

	if (!port->dual_link_port)
		return 0;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has the HPD bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs the specified Hop IDs for the DP IN/OUT port. Can be called
 * for USB4 router DP adapters too but does not program the values as
 * the fields are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
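
/*
 * Once the Hop IDs are in place the path is turned on with
 * tb_dp_port_enable(), so a caller would do roughly this (a sketch;
 * the Hop ID values are illustrative):
 *
 *	ret = tb_dp_port_set_hops(port, 8, 9, 10);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */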

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once the Hop IDs are programmed, DP paths can be enabled or disabled
 * by calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, "  Config:\n");
	tb_dbg(tb,
	       "   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64)regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

static int tb_switch_reset_host(struct tb_switch *sw)
{
	if (sw->generation > 1) {
		struct tb_port *port;

		tb_switch_for_each_port(sw, port) {
			int i, ret;

			/*
			 * For lane adapters we issue downstream port
			 * reset and clear up path config spaces.
			 *
			 * For protocol adapters we disable the path and
			 * clear path config space one by one (from 8 to
			 * Max Input HopID of the adapter).
			 */
			if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
				ret = tb_port_reset(port);
				if (ret)
					return ret;
			} else if (tb_port_is_usb3_down(port) ||
				   tb_port_is_usb3_up(port)) {
				tb_usb3_port_enable(port, false);
			} else if (tb_port_is_dpin(port) ||
				   tb_port_is_dpout(port)) {
				tb_dp_port_enable(port, false);
			} else if (tb_port_is_pcie_down(port) ||
				   tb_port_is_pcie_up(port)) {
				tb_pci_port_enable(port, false);
			} else {
				continue;
			}

			/* Cleanup path config space of protocol adapter */
			for (i = TB_PATH_MIN_HOPID;
			     i <= port->config.max_in_hop_id; i++) {
				ret = tb_path_deactivate_hop(port, i);
				if (ret)
					return ret;
			}
		}
	} else {
		struct tb_cfg_result res;

		/* Thunderbolt 1 uses the "reset" config space packet */
		res.err = tb_sw_write(sw, ((u32 *)&sw->config) + 2,
				      TB_CFG_SWITCH, 2, 2);
		if (res.err)
			return res.err;
		res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
		if (res.err > 0)
			return -EIO;
		else if (res.err < 0)
			return res.err;
	}

	return 0;
}

static int tb_switch_reset_device(struct tb_switch *sw)
{
	return tb_port_reset(tb_switch_downstream_port(sw));
}

static bool tb_switch_enumerated(struct tb_switch *sw)
{
	u32 val;
	int ret;

	/*
	 * Read directly from the hardware because we use this also
	 * during system sleep where sw->config.enabled is already set
	 * by us.
	 */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
	if (ret)
		return false;

	return !!(val & ROUTER_CS_3_V);
}

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up path config
 * spaces accordingly. For device routers issues downstream port reset
 * through the parent router, so as a side effect there will be an
 * unplug soon after this is finished.
 *
 * If the router is not enumerated does nothing.
 *
 * Returns %0 on success or negative errno in case of failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	int ret;

	/*
	 * We cannot access the port config spaces unless the router is
	 * already enumerated. If the router is not enumerated it is
	 * equal to being reset so we can skip that here.
	 */
	if (!tb_switch_enumerated(sw))
		return 0;

	tb_sw_dbg(sw, "resetting\n");

	if (tb_route(sw))
		ret = tb_switch_reset_device(sw);
	else
		ret = tb_switch_reset_host(sw);

	if (ret)
		tb_sw_warn(sw, "failed to reset\n");

	return ret;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the given
 * value. Returns %0 in case of success, %-ETIMEDOUT if the @value was
 * not reached within the given timeout or a negative errno in case of
 * failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
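
/*
 * For example, waiting up to 500 ms for a ready bit to be set would
 * look like this (a sketch; READY_OFFSET and READY_BIT are illustrative
 * names, not registers defined by this driver):
 *
 *	ret = tb_switch_wait_for_bit(sw, READY_OFFSET, READY_BIT,
 *				     READY_BIT, 500);
 */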

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *)&sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
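		/*
		 * The 0x7c bits (2-6) appear to gate the individual
		 * plug event sources: clearing them here enables the
		 * events, and the deactivate branch below sets the
		 * same mask back.
		 */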
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}
1778 | |
1779 | static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) |
1780 | { |
1781 | char envp_string[13]; |
1782 | int ret = -EINVAL; |
1783 | char *envp[] = { envp_string, NULL }; |
1784 | |
	if (!mutex_trylock(&sw->tb->lock))
1786 | return restart_syscall(); |
1787 | |
1788 | if (!!sw->authorized == !!val) |
1789 | goto unlock; |
1790 | |
1791 | switch (val) { |
1792 | /* Disapprove switch */ |
1793 | case 0: |
1794 | if (tb_route(sw)) { |
			ret = disapprove_switch(&sw->dev, NULL);
1796 | goto unlock; |
1797 | } |
1798 | break; |
1799 | |
1800 | /* Approve switch */ |
1801 | case 1: |
1802 | if (sw->key) |
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1812 | break; |
1813 | |
1814 | default: |
1815 | break; |
1816 | } |
1817 | |
1818 | if (!ret) { |
1819 | sw->authorized = val; |
1820 | /* |
1821 | * Notify status change to the userspace, informing the new |
1822 | * value of /sys/bus/thunderbolt/devices/.../authorized. |
1823 | */ |
		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

unlock:
	mutex_unlock(&sw->tb->lock);
1830 | return ret; |
1831 | } |
1832 | |
1833 | static ssize_t authorized_store(struct device *dev, |
1834 | struct device_attribute *attr, |
1835 | const char *buf, size_t count) |
1836 | { |
1837 | struct tb_switch *sw = tb_to_switch(dev); |
1838 | unsigned int val; |
1839 | ssize_t ret; |
1840 | |
	ret = kstrtouint(buf, 0, &val);
1842 | if (ret) |
1843 | return ret; |
1844 | if (val > 2) |
1845 | return -EINVAL; |
1846 | |
	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
1851 | |
1852 | return ret ? ret : count; |
1853 | } |
1854 | static DEVICE_ATTR_RW(authorized); |
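
/*
 * Illustrative userspace sketch (not part of the driver): the values
 * accepted through the authorized attribute follow from
 * tb_switch_set_authorized() above -- 0 disapproves, 1 approves and 2
 * challenges using the stored key. The device path is hypothetical.
 */
#if 0
#include <stdio.h>

static int example_authorize_device(void)
{
	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/authorized", "w");

	if (!f)
		return -1;
	/* Write 1 to approve; 2 would do a challenge/response instead */
	fprintf(f, "1\n");
	return fclose(f);
}
#endif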
1855 | |
1856 | static ssize_t boot_show(struct device *dev, struct device_attribute *attr, |
1857 | char *buf) |
1858 | { |
1859 | struct tb_switch *sw = tb_to_switch(dev); |
1860 | |
	return sysfs_emit(buf, "%u\n", sw->boot);
1862 | } |
1863 | static DEVICE_ATTR_RO(boot); |
1864 | |
1865 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
1866 | char *buf) |
1867 | { |
1868 | struct tb_switch *sw = tb_to_switch(dev); |
1869 | |
	return sysfs_emit(buf, "%#x\n", sw->device);
1871 | } |
1872 | static DEVICE_ATTR_RO(device); |
1873 | |
1874 | static ssize_t |
1875 | device_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
1876 | { |
1877 | struct tb_switch *sw = tb_to_switch(dev); |
1878 | |
	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
1880 | } |
1881 | static DEVICE_ATTR_RO(device_name); |
1882 | |
1883 | static ssize_t |
1884 | generation_show(struct device *dev, struct device_attribute *attr, char *buf) |
1885 | { |
1886 | struct tb_switch *sw = tb_to_switch(dev); |
1887 | |
	return sysfs_emit(buf, "%u\n", sw->generation);
1889 | } |
1890 | static DEVICE_ATTR_RO(generation); |
1891 | |
1892 | static ssize_t key_show(struct device *dev, struct device_attribute *attr, |
1893 | char *buf) |
1894 | { |
1895 | struct tb_switch *sw = tb_to_switch(dev); |
1896 | ssize_t ret; |
1897 | |
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->key)
		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
	else
		ret = sysfs_emit(buf, "\n");

	mutex_unlock(&sw->tb->lock);
1907 | return ret; |
1908 | } |
1909 | |
1910 | static ssize_t key_store(struct device *dev, struct device_attribute *attr, |
1911 | const char *buf, size_t count) |
1912 | { |
1913 | struct tb_switch *sw = tb_to_switch(dev); |
1914 | u8 key[TB_SWITCH_KEY_SIZE]; |
1915 | ssize_t ret = count; |
1916 | bool clear = false; |
1917 | |
	if (!strcmp(buf, "\n"))
		clear = true;
	else if (hex2bin(key, buf, sizeof(key)))
		return -EINVAL;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (sw->authorized) {
		ret = -EBUSY;
	} else {
		kfree(sw->key);
		if (clear) {
			sw->key = NULL;
		} else {
			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1934 | if (!sw->key) |
1935 | ret = -ENOMEM; |
1936 | } |
1937 | } |
1938 | |
	mutex_unlock(&sw->tb->lock);
1940 | return ret; |
1941 | } |
1942 | static DEVICE_ATTR(key, 0600, key_show, key_store); |
1943 | |
1944 | static ssize_t speed_show(struct device *dev, struct device_attribute *attr, |
1945 | char *buf) |
1946 | { |
1947 | struct tb_switch *sw = tb_to_switch(dev); |
1948 | |
	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
1950 | } |
1951 | |
1952 | /* |
1953 | * Currently all lanes must run at the same speed but we expose here |
1954 | * both directions to allow possible asymmetric links in the future. |
1955 | */ |
1956 | static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); |
1957 | static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); |
1958 | |
1959 | static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, |
1960 | char *buf) |
1961 | { |
1962 | struct tb_switch *sw = tb_to_switch(dev); |
1963 | unsigned int width; |
1964 | |
1965 | switch (sw->link_width) { |
1966 | case TB_LINK_WIDTH_SINGLE: |
1967 | case TB_LINK_WIDTH_ASYM_TX: |
1968 | width = 1; |
1969 | break; |
1970 | case TB_LINK_WIDTH_DUAL: |
1971 | width = 2; |
1972 | break; |
1973 | case TB_LINK_WIDTH_ASYM_RX: |
1974 | width = 3; |
1975 | break; |
1976 | default: |
1977 | WARN_ON_ONCE(1); |
1978 | return -EINVAL; |
1979 | } |
1980 | |
	return sysfs_emit(buf, "%u\n", width);
1982 | } |
1983 | static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); |
1984 | |
1985 | static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, |
1986 | char *buf) |
1987 | { |
1988 | struct tb_switch *sw = tb_to_switch(dev); |
1989 | unsigned int width; |
1990 | |
1991 | switch (sw->link_width) { |
1992 | case TB_LINK_WIDTH_SINGLE: |
1993 | case TB_LINK_WIDTH_ASYM_RX: |
1994 | width = 1; |
1995 | break; |
1996 | case TB_LINK_WIDTH_DUAL: |
1997 | width = 2; |
1998 | break; |
1999 | case TB_LINK_WIDTH_ASYM_TX: |
2000 | width = 3; |
2001 | break; |
2002 | default: |
2003 | WARN_ON_ONCE(1); |
2004 | return -EINVAL; |
2005 | } |
2006 | |
	return sysfs_emit(buf, "%u\n", width);
2008 | } |
2009 | static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); |
2010 | |
2011 | static ssize_t nvm_authenticate_show(struct device *dev, |
2012 | struct device_attribute *attr, char *buf) |
2013 | { |
2014 | struct tb_switch *sw = tb_to_switch(dev); |
2015 | u32 status; |
2016 | |
	nvm_get_auth_status(sw, &status);
	return sysfs_emit(buf, "%#x\n", status);
2019 | } |
2020 | |
2021 | static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, |
2022 | bool disconnect) |
2023 | { |
2024 | struct tb_switch *sw = tb_to_switch(dev); |
2025 | int val, ret; |
2026 | |
	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
2030 | ret = restart_syscall(); |
2031 | goto exit_rpm; |
2032 | } |
2033 | |
2034 | if (sw->no_nvm_upgrade) { |
2035 | ret = -EOPNOTSUPP; |
2036 | goto exit_unlock; |
2037 | } |
2038 | |
2039 | /* If NVMem devices are not yet added */ |
2040 | if (!sw->nvm) { |
2041 | ret = -EAGAIN; |
2042 | goto exit_unlock; |
2043 | } |
2044 | |
	ret = kstrtoint(buf, 10, &val);
2046 | if (ret) |
2047 | goto exit_unlock; |
2048 | |
2049 | /* Always clear the authentication status */ |
2050 | nvm_clear_auth_status(sw); |
2051 | |
2052 | if (val > 0) { |
2053 | if (val == AUTHENTICATE_ONLY) { |
2054 | if (disconnect) |
2055 | ret = -EINVAL; |
2056 | else |
				ret = nvm_authenticate(sw, true);
2058 | } else { |
2059 | if (!sw->nvm->flushed) { |
2060 | if (!sw->nvm->buf) { |
2061 | ret = -EINVAL; |
2062 | goto exit_unlock; |
2063 | } |
2064 | |
2065 | ret = nvm_validate_and_write(sw); |
2066 | if (ret || val == WRITE_ONLY) |
2067 | goto exit_unlock; |
2068 | } |
2069 | if (val == WRITE_AND_AUTHENTICATE) { |
2070 | if (disconnect) |
2071 | ret = tb_lc_force_power(sw); |
2072 | else |
					ret = nvm_authenticate(sw, false);
2074 | } |
2075 | } |
2076 | } |
2077 | |
2078 | exit_unlock: |
	mutex_unlock(&sw->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
2083 | |
2084 | return ret; |
2085 | } |
2086 | |
2087 | static ssize_t nvm_authenticate_store(struct device *dev, |
2088 | struct device_attribute *attr, const char *buf, size_t count) |
2089 | { |
	int ret = nvm_authenticate_sysfs(dev, buf, false);
2091 | if (ret) |
2092 | return ret; |
2093 | return count; |
2094 | } |
2095 | static DEVICE_ATTR_RW(nvm_authenticate); |
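
/*
 * Illustrative sketch (not part of the driver): the values accepted
 * here map to the constants tested in nvm_authenticate_sysfs() above --
 * WRITE_ONLY flushes the pending image, WRITE_AND_AUTHENTICATE flushes
 * and authenticates it, and AUTHENTICATE_ONLY skips the flush. Writing
 * "1" is assumed to mean WRITE_AND_AUTHENTICATE.
 */
#if 0
static ssize_t example_flush_and_authenticate(struct device *dev)
{
	/* Same path a sysfs write of "1" takes, without disconnect */
	return nvm_authenticate_sysfs(dev, "1", false);
}
#endif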
2096 | |
2097 | static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, |
2098 | struct device_attribute *attr, char *buf) |
2099 | { |
2100 | return nvm_authenticate_show(dev, attr, buf); |
2101 | } |
2102 | |
2103 | static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, |
2104 | struct device_attribute *attr, const char *buf, size_t count) |
2105 | { |
2106 | int ret; |
2107 | |
	ret = nvm_authenticate_sysfs(dev, buf, true);
2109 | return ret ? ret : count; |
2110 | } |
2111 | static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); |
2112 | |
2113 | static ssize_t nvm_version_show(struct device *dev, |
2114 | struct device_attribute *attr, char *buf) |
2115 | { |
2116 | struct tb_switch *sw = tb_to_switch(dev); |
2117 | int ret; |
2118 | |
	if (!mutex_trylock(&sw->tb->lock))
2120 | return restart_syscall(); |
2121 | |
2122 | if (sw->safe_mode) |
2123 | ret = -ENODATA; |
2124 | else if (!sw->nvm) |
2125 | ret = -EAGAIN; |
2126 | else |
		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);

	mutex_unlock(&sw->tb->lock);
2130 | |
2131 | return ret; |
2132 | } |
2133 | static DEVICE_ATTR_RO(nvm_version); |
2134 | |
2135 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
2136 | char *buf) |
2137 | { |
2138 | struct tb_switch *sw = tb_to_switch(dev); |
2139 | |
	return sysfs_emit(buf, "%#x\n", sw->vendor);
2141 | } |
2142 | static DEVICE_ATTR_RO(vendor); |
2143 | |
2144 | static ssize_t |
2145 | vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
2146 | { |
2147 | struct tb_switch *sw = tb_to_switch(dev); |
2148 | |
	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
2150 | } |
2151 | static DEVICE_ATTR_RO(vendor_name); |
2152 | |
2153 | static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, |
2154 | char *buf) |
2155 | { |
2156 | struct tb_switch *sw = tb_to_switch(dev); |
2157 | |
	return sysfs_emit(buf, "%pUb\n", sw->uuid);
2159 | } |
2160 | static DEVICE_ATTR_RO(unique_id); |
2161 | |
2162 | static struct attribute *switch_attrs[] = { |
2163 | &dev_attr_authorized.attr, |
2164 | &dev_attr_boot.attr, |
2165 | &dev_attr_device.attr, |
2166 | &dev_attr_device_name.attr, |
2167 | &dev_attr_generation.attr, |
2168 | &dev_attr_key.attr, |
2169 | &dev_attr_nvm_authenticate.attr, |
2170 | &dev_attr_nvm_authenticate_on_disconnect.attr, |
2171 | &dev_attr_nvm_version.attr, |
2172 | &dev_attr_rx_speed.attr, |
2173 | &dev_attr_rx_lanes.attr, |
2174 | &dev_attr_tx_speed.attr, |
2175 | &dev_attr_tx_lanes.attr, |
2176 | &dev_attr_vendor.attr, |
2177 | &dev_attr_vendor_name.attr, |
2178 | &dev_attr_unique_id.attr, |
2179 | NULL, |
2180 | }; |
2181 | |
2182 | static umode_t switch_attr_is_visible(struct kobject *kobj, |
2183 | struct attribute *attr, int n) |
2184 | { |
2185 | struct device *dev = kobj_to_dev(kobj); |
2186 | struct tb_switch *sw = tb_to_switch(dev); |
2187 | |
2188 | if (attr == &dev_attr_authorized.attr) { |
2189 | if (sw->tb->security_level == TB_SECURITY_NOPCIE || |
2190 | sw->tb->security_level == TB_SECURITY_DPONLY) |
2191 | return 0; |
2192 | } else if (attr == &dev_attr_device.attr) { |
2193 | if (!sw->device) |
2194 | return 0; |
2195 | } else if (attr == &dev_attr_device_name.attr) { |
2196 | if (!sw->device_name) |
2197 | return 0; |
2198 | } else if (attr == &dev_attr_vendor.attr) { |
2199 | if (!sw->vendor) |
2200 | return 0; |
2201 | } else if (attr == &dev_attr_vendor_name.attr) { |
2202 | if (!sw->vendor_name) |
2203 | return 0; |
2204 | } else if (attr == &dev_attr_key.attr) { |
2205 | if (tb_route(sw) && |
2206 | sw->tb->security_level == TB_SECURITY_SECURE && |
2207 | sw->security_level == TB_SECURITY_SECURE) |
2208 | return attr->mode; |
2209 | return 0; |
2210 | } else if (attr == &dev_attr_rx_speed.attr || |
2211 | attr == &dev_attr_rx_lanes.attr || |
2212 | attr == &dev_attr_tx_speed.attr || |
2213 | attr == &dev_attr_tx_lanes.attr) { |
2214 | if (tb_route(sw)) |
2215 | return attr->mode; |
2216 | return 0; |
2217 | } else if (attr == &dev_attr_nvm_authenticate.attr) { |
2218 | if (nvm_upgradeable(sw)) |
2219 | return attr->mode; |
2220 | return 0; |
2221 | } else if (attr == &dev_attr_nvm_version.attr) { |
2222 | if (nvm_readable(sw)) |
2223 | return attr->mode; |
2224 | return 0; |
2225 | } else if (attr == &dev_attr_boot.attr) { |
2226 | if (tb_route(sw)) |
2227 | return attr->mode; |
2228 | return 0; |
2229 | } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { |
2230 | if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) |
2231 | return attr->mode; |
2232 | return 0; |
2233 | } |
2234 | |
2235 | return sw->safe_mode ? 0 : attr->mode; |
2236 | } |
2237 | |
2238 | static const struct attribute_group switch_group = { |
2239 | .is_visible = switch_attr_is_visible, |
2240 | .attrs = switch_attrs, |
2241 | }; |
2242 | |
2243 | static const struct attribute_group *switch_groups[] = { |
2244 | &switch_group, |
2245 | NULL, |
2246 | }; |
2247 | |
2248 | static void tb_switch_release(struct device *dev) |
2249 | { |
2250 | struct tb_switch *sw = tb_to_switch(dev); |
2251 | struct tb_port *port; |
2252 | |
	dma_port_free(sw->dma_port);

	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	}

	kfree(sw->uuid);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
	kfree(sw->ports);
	kfree(sw->drom);
	kfree(sw->key);
	kfree(sw);
2267 | } |
2268 | |
2269 | static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) |
2270 | { |
2271 | const struct tb_switch *sw = tb_to_switch(dev); |
2272 | const char *type; |
2273 | |
2274 | if (tb_switch_is_usb4(sw)) { |
		if (add_uevent_var(env, "USB4_VERSION=%u.0",
2276 | usb4_switch_version(sw))) |
2277 | return -ENOMEM; |
2278 | } |
2279 | |
2280 | if (!tb_route(sw)) { |
2281 | type = "host" ; |
2282 | } else { |
2283 | const struct tb_port *port; |
2284 | bool hub = false; |
2285 | |
2286 | /* Device is hub if it has any downstream ports */ |
2287 | tb_switch_for_each_port(sw, port) { |
2288 | if (!port->disabled && !tb_is_upstream_port(port) && |
2289 | tb_port_is_null(port)) { |
2290 | hub = true; |
2291 | break; |
2292 | } |
2293 | } |
2294 | |
		type = hub ? "hub" : "device";
	}

	if (add_uevent_var(env, "USB4_TYPE=%s", type))
2299 | return -ENOMEM; |
2300 | return 0; |
2301 | } |
2302 | |
2303 | /* |
2304 | * Currently only need to provide the callbacks. Everything else is handled |
2305 | * in the connection manager. |
2306 | */ |
2307 | static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) |
2308 | { |
2309 | struct tb_switch *sw = tb_to_switch(dev); |
2310 | const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; |
2311 | |
2312 | if (cm_ops->runtime_suspend_switch) |
2313 | return cm_ops->runtime_suspend_switch(sw); |
2314 | |
2315 | return 0; |
2316 | } |
2317 | |
2318 | static int __maybe_unused tb_switch_runtime_resume(struct device *dev) |
2319 | { |
2320 | struct tb_switch *sw = tb_to_switch(dev); |
2321 | const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; |
2322 | |
2323 | if (cm_ops->runtime_resume_switch) |
2324 | return cm_ops->runtime_resume_switch(sw); |
2325 | return 0; |
2326 | } |
2327 | |
2328 | static const struct dev_pm_ops tb_switch_pm_ops = { |
2329 | SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, |
2330 | NULL) |
2331 | }; |
2332 | |
2333 | const struct device_type tb_switch_type = { |
2334 | .name = "thunderbolt_device" , |
2335 | .release = tb_switch_release, |
2336 | .uevent = tb_switch_uevent, |
2337 | .pm = &tb_switch_pm_ops, |
2338 | }; |
2339 | |
2340 | static int tb_switch_get_generation(struct tb_switch *sw) |
2341 | { |
2342 | if (tb_switch_is_usb4(sw)) |
2343 | return 4; |
2344 | |
2345 | if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { |
2346 | switch (sw->config.device_id) { |
2347 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
2348 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: |
2349 | case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: |
2350 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: |
2351 | case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: |
2352 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: |
2353 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: |
2354 | case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: |
2355 | return 1; |
2356 | |
2357 | case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: |
2358 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: |
2359 | case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: |
2360 | return 2; |
2361 | |
2362 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: |
2363 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: |
2364 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: |
2365 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: |
2366 | case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: |
2367 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: |
2368 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: |
2369 | case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: |
2370 | case PCI_DEVICE_ID_INTEL_ICL_NHI0: |
2371 | case PCI_DEVICE_ID_INTEL_ICL_NHI1: |
2372 | return 3; |
2373 | } |
2374 | } |
2375 | |
2376 | /* |
2377 | * For unknown switches assume generation to be 1 to be on the |
2378 | * safe side. |
2379 | */ |
2380 | tb_sw_warn(sw, "unsupported switch device id %#x\n" , |
2381 | sw->config.device_id); |
2382 | return 1; |
2383 | } |
2384 | |
2385 | static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) |
2386 | { |
2387 | int max_depth; |
2388 | |
2389 | if (tb_switch_is_usb4(sw) || |
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2391 | max_depth = USB4_SWITCH_MAX_DEPTH; |
2392 | else |
2393 | max_depth = TB_SWITCH_MAX_DEPTH; |
2394 | |
2395 | return depth > max_depth; |
2396 | } |
2397 | |
2398 | /** |
2399 | * tb_switch_alloc() - allocate a switch |
2400 | * @tb: Pointer to the owning domain |
2401 | * @parent: Parent device for this switch |
2402 | * @route: Route string for this switch |
2403 | * |
2404 | * Allocates and initializes a switch. Will not upload configuration to |
2405 | * the switch. For that you need to call tb_switch_configure() |
2406 | * separately. The returned switch should be released by calling |
2407 | * tb_switch_put(). |
2408 | * |
2409 | * Return: Pointer to the allocated switch or ERR_PTR() in case of |
2410 | * failure. |
2411 | */ |
2412 | struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, |
2413 | u64 route) |
2414 | { |
2415 | struct tb_switch *sw; |
2416 | int upstream_port; |
2417 | int i, ret, depth; |
2418 | |
2419 | /* Unlock the downstream port so we can access the switch below */ |
2420 | if (route) { |
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;

		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
2426 | } |
2427 | |
2428 | depth = tb_route_length(route); |
2429 | |
	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);

	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);

	sw->tb = tb;
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2440 | if (ret) |
2441 | goto err_free_sw_ports; |
2442 | |
2443 | sw->generation = tb_switch_get_generation(sw); |
2444 | |
2445 | tb_dbg(tb, "current switch config:\n" ); |
2446 | tb_dump_switch(tb, sw); |
2447 | |
2448 | /* configure switch */ |
2449 | sw->config.upstream_port_number = upstream_port; |
2450 | sw->config.depth = depth; |
2451 | sw->config.route_hi = upper_32_bits(route); |
2452 | sw->config.route_lo = lower_32_bits(route); |
2453 | sw->config.enabled = 0; |
2454 | |
2455 | /* Make sure we do not exceed maximum topology limit */ |
2456 | if (tb_switch_exceeds_max_depth(sw, depth)) { |
2457 | ret = -EADDRNOTAVAIL; |
2458 | goto err_free_sw_ports; |
2459 | } |
2460 | |
2461 | /* initialize ports */ |
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2463 | GFP_KERNEL); |
2464 | if (!sw->ports) { |
2465 | ret = -ENOMEM; |
2466 | goto err_free_sw_ports; |
2467 | } |
2468 | |
2469 | for (i = 0; i <= sw->config.max_port_number; i++) { |
2470 | /* minimum setup for tb_find_cap and tb_drom_read to work */ |
2471 | sw->ports[i].sw = sw; |
2472 | sw->ports[i].port = i; |
2473 | |
2474 | /* Control port does not need HopID allocation */ |
2475 | if (i) { |
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
2478 | } |
2479 | } |
2480 | |
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (ret > 0)
		sw->cap_plug_events = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
	if (ret > 0)
		sw->cap_vsec_tmu = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	if (ret > 0)
		sw->cap_lc = ret;

	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	if (ret > 0)
		sw->cap_lp = ret;
2496 | |
2497 | /* Root switch is always authorized */ |
2498 | if (!route) |
2499 | sw->authorized = true; |
2500 | |
	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2507 | |
2508 | return sw; |
2509 | |
2510 | err_free_sw_ports: |
	kfree(sw->ports);
	kfree(sw);

	return ERR_PTR(ret);
2515 | } |
2516 | |
2517 | /** |
2518 | * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode |
2519 | * @tb: Pointer to the owning domain |
2520 | * @parent: Parent device for this switch |
2521 | * @route: Route string for this switch |
2522 | * |
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port before it is
 * flashed with a valid NVM firmware.
2526 | * |
2527 | * The returned switch must be released by calling tb_switch_put(). |
2528 | * |
2529 | * Return: Pointer to the allocated switch or ERR_PTR() in case of failure |
2530 | */ |
2531 | struct tb_switch * |
2532 | tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) |
2533 | { |
2534 | struct tb_switch *sw; |
2535 | |
	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return ERR_PTR(-ENOMEM);
2539 | |
2540 | sw->tb = tb; |
2541 | sw->config.depth = tb_route_length(route); |
2542 | sw->config.route_hi = upper_32_bits(route); |
2543 | sw->config.route_lo = lower_32_bits(route); |
2544 | sw->safe_mode = true; |
2545 | |
	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2552 | |
2553 | return sw; |
2554 | } |
2555 | |
2556 | /** |
2557 | * tb_switch_configure() - Uploads configuration to the switch |
2558 | * @sw: Switch to configure |
2559 | * |
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called on the same switch again after
 * resume from low power states to re-initialize it.
2564 | * |
2565 | * Return: %0 in case of success and negative errno in case of failure |
2566 | */ |
2567 | int tb_switch_configure(struct tb_switch *sw) |
2568 | { |
2569 | struct tb *tb = sw->tb; |
2570 | u64 route; |
2571 | int ret; |
2572 | |
2573 | route = tb_route(sw); |
2574 | |
2575 | tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n" , |
2576 | sw->config.enabled ? "restoring" : "initializing" , route, |
2577 | tb_route_length(route), sw->config.upstream_port_number); |
2578 | |
2579 | sw->config.enabled = 1; |
2580 | |
2581 | if (tb_switch_is_usb4(sw)) { |
2582 | /* |
2583 | * For USB4 devices, we need to program the CM version |
2584 | * accordingly so that it knows to expose all the |
		 * additional capabilities. Program it according to the USB4
		 * version to avoid changing the behaviour of existing (v1) routers.
2587 | */ |
2588 | if (usb4_switch_version(sw) < 2) |
2589 | sw->config.cmuv = ROUTER_CS_4_CMUV_V1; |
2590 | else |
2591 | sw->config.cmuv = ROUTER_CS_4_CMUV_V2; |
2592 | sw->config.plug_events_delay = 0xa; |
2593 | |
2594 | /* Enumerate the switch */ |
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 4);
2597 | if (ret) |
2598 | return ret; |
2599 | |
2600 | ret = usb4_switch_setup(sw); |
2601 | } else { |
2602 | if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) |
2603 | tb_sw_warn(sw, "unknown switch vendor id %#x\n" , |
2604 | sw->config.vendor_id); |
2605 | |
2606 | if (!sw->cap_plug_events) { |
2607 | tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n" ); |
2608 | return -ENODEV; |
2609 | } |
2610 | |
2611 | /* Enumerate the switch */ |
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
				  ROUTER_CS_1, 3);
2614 | } |
2615 | if (ret) |
2616 | return ret; |
2617 | |
	return tb_plug_events_active(sw, true);
2619 | } |
2620 | |
2621 | /** |
2622 | * tb_switch_configuration_valid() - Set the tunneling configuration to be valid |
2623 | * @sw: Router to configure |
2624 | * |
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
2627 | * |
 * Returns %0 on success and negative errno otherwise.
2629 | */ |
2630 | int tb_switch_configuration_valid(struct tb_switch *sw) |
2631 | { |
2632 | if (tb_switch_is_usb4(sw)) |
2633 | return usb4_switch_configuration_valid(sw); |
2634 | return 0; |
2635 | } |
2636 | |
2637 | static int tb_switch_set_uuid(struct tb_switch *sw) |
2638 | { |
2639 | bool uid = false; |
2640 | u32 uuid[4]; |
2641 | int ret; |
2642 | |
2643 | if (sw->uuid) |
2644 | return 0; |
2645 | |
2646 | if (tb_switch_is_usb4(sw)) { |
		ret = usb4_switch_read_uid(sw, &sw->uid);
2648 | if (ret) |
2649 | return ret; |
2650 | uid = true; |
2651 | } else { |
2652 | /* |
2653 | * The newer controllers include fused UUID as part of |
2654 | * link controller specific registers |
2655 | */ |
2656 | ret = tb_lc_read_uuid(sw, uuid); |
2657 | if (ret) { |
2658 | if (ret != -EINVAL) |
2659 | return ret; |
2660 | uid = true; |
2661 | } |
2662 | } |
2663 | |
2664 | if (uid) { |
2665 | /* |
2666 | * ICM generates UUID based on UID and fills the upper |
2667 | * two words with ones. This is not strictly following |
2668 | * UUID format but we want to be compatible with it so |
2669 | * we do the same here. |
2670 | */ |
2671 | uuid[0] = sw->uid & 0xffffffff; |
2672 | uuid[1] = (sw->uid >> 32) & 0xffffffff; |
2673 | uuid[2] = 0xffffffff; |
2674 | uuid[3] = 0xffffffff; |
2675 | } |
2676 | |
	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2678 | if (!sw->uuid) |
2679 | return -ENOMEM; |
2680 | return 0; |
2681 | } |
2682 | |
2683 | static int tb_switch_add_dma_port(struct tb_switch *sw) |
2684 | { |
2685 | u32 status; |
2686 | int ret; |
2687 | |
2688 | switch (sw->generation) { |
2689 | case 2: |
2690 | /* Only root switch can be upgraded */ |
2691 | if (tb_route(sw)) |
2692 | return 0; |
2693 | |
2694 | fallthrough; |
2695 | case 3: |
2696 | case 4: |
2697 | ret = tb_switch_set_uuid(sw); |
2698 | if (ret) |
2699 | return ret; |
2700 | break; |
2701 | |
2702 | default: |
2703 | /* |
2704 | * DMA port is the only thing available when the switch |
2705 | * is in safe mode. |
2706 | */ |
2707 | if (!sw->safe_mode) |
2708 | return 0; |
2709 | break; |
2710 | } |
2711 | |
2712 | if (sw->no_nvm_upgrade) |
2713 | return 0; |
2714 | |
2715 | if (tb_switch_is_usb4(sw)) { |
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
2717 | if (ret) |
2718 | return ret; |
2719 | |
2720 | if (status) { |
2721 | tb_sw_info(sw, "switch flash authentication failed\n" ); |
2722 | nvm_set_auth_status(sw, status); |
2723 | } |
2724 | |
2725 | return 0; |
2726 | } |
2727 | |
2728 | /* Root switch DMA port requires running firmware */ |
2729 | if (!tb_route(sw) && !tb_switch_is_icm(sw)) |
2730 | return 0; |
2731 | |
2732 | sw->dma_port = dma_port_alloc(sw); |
2733 | if (!sw->dma_port) |
2734 | return 0; |
2735 | |
2736 | /* |
	 * If there is a status already set then authentication failed
	 * when dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is unblock runtime PM of the root port.
2741 | */ |
	nvm_get_auth_status(sw, &status);
2743 | if (status) { |
2744 | if (!tb_route(sw)) |
2745 | nvm_authenticate_complete_dma_port(sw); |
2746 | return 0; |
2747 | } |
2748 | |
2749 | /* |
2750 | * Check status of the previous flash authentication. If there |
2751 | * is one we need to power cycle the switch in any case to make |
2752 | * it functional again. |
2753 | */ |
	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2755 | if (ret <= 0) |
2756 | return ret; |
2757 | |
2758 | /* Now we can allow root port to suspend again */ |
2759 | if (!tb_route(sw)) |
2760 | nvm_authenticate_complete_dma_port(sw); |
2761 | |
2762 | if (status) { |
2763 | tb_sw_info(sw, "switch flash authentication failed\n" ); |
2764 | nvm_set_auth_status(sw, status); |
2765 | } |
2766 | |
2767 | tb_sw_info(sw, "power cycling the switch now\n" ); |
2768 | dma_port_power_cycle(dma: sw->dma_port); |
2769 | |
2770 | /* |
	 * We return an error here, which causes adding the switch to fail.
	 * It should appear back after the power cycle is complete.
2773 | */ |
2774 | return -ESHUTDOWN; |
2775 | } |
2776 | |
2777 | static void tb_switch_default_link_ports(struct tb_switch *sw) |
2778 | { |
2779 | int i; |
2780 | |
2781 | for (i = 1; i <= sw->config.max_port_number; i++) { |
2782 | struct tb_port *port = &sw->ports[i]; |
2783 | struct tb_port *subordinate; |
2784 | |
2785 | if (!tb_port_is_null(port)) |
2786 | continue; |
2787 | |
2788 | /* Check for the subordinate port */ |
2789 | if (i == sw->config.max_port_number || |
		    !tb_port_is_null(&sw->ports[i + 1]))
2791 | continue; |
2792 | |
2793 | /* Link them if not already done so (by DROM) */ |
2794 | subordinate = &sw->ports[i + 1]; |
2795 | if (!port->dual_link_port && !subordinate->dual_link_port) { |
2796 | port->link_nr = 0; |
2797 | port->dual_link_port = subordinate; |
2798 | subordinate->link_nr = 1; |
2799 | subordinate->dual_link_port = port; |
2800 | |
2801 | tb_sw_dbg(sw, "linked ports %d <-> %d\n" , |
2802 | port->port, subordinate->port); |
2803 | } |
2804 | } |
2805 | } |
2806 | |
2807 | static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) |
2808 | { |
2809 | const struct tb_port *up = tb_upstream_port(sw); |
2810 | |
2811 | if (!up->dual_link_port || !up->dual_link_port->remote) |
2812 | return false; |
2813 | |
2814 | if (tb_switch_is_usb4(sw)) |
2815 | return usb4_switch_lane_bonding_possible(sw); |
2816 | return tb_lc_lane_bonding_possible(sw); |
2817 | } |
2818 | |
2819 | static int tb_switch_update_link_attributes(struct tb_switch *sw) |
2820 | { |
2821 | struct tb_port *up; |
2822 | bool change = false; |
2823 | int ret; |
2824 | |
2825 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
2826 | return 0; |
2827 | |
2828 | up = tb_upstream_port(sw); |
2829 | |
	ret = tb_port_get_link_speed(up);
2831 | if (ret < 0) |
2832 | return ret; |
2833 | if (sw->link_speed != ret) |
2834 | change = true; |
2835 | sw->link_speed = ret; |
2836 | |
	ret = tb_port_get_link_width(up);
2838 | if (ret < 0) |
2839 | return ret; |
2840 | if (sw->link_width != ret) |
2841 | change = true; |
2842 | sw->link_width = ret; |
2843 | |
2844 | /* Notify userspace that there is possible link attribute change */ |
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2847 | |
2848 | return 0; |
2849 | } |
2850 | |
2851 | /* Must be called after tb_switch_update_link_attributes() */ |
2852 | static void tb_switch_link_init(struct tb_switch *sw) |
2853 | { |
2854 | struct tb_port *up, *down; |
2855 | bool bonded; |
2856 | |
2857 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
2858 | return; |
2859 | |
2860 | tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n" , sw->link_speed); |
2861 | tb_sw_dbg(sw, "current link width %s\n" , tb_width_name(sw->link_width)); |
2862 | |
2863 | bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; |
2864 | |
2865 | /* |
2866 | * Gen 4 links come up as bonded so update the port structures |
2867 | * accordingly. |
2868 | */ |
2869 | up = tb_upstream_port(sw); |
2870 | down = tb_switch_downstream_port(sw); |
2871 | |
2872 | up->bonded = bonded; |
2873 | if (up->dual_link_port) |
2874 | up->dual_link_port->bonded = bonded; |
	tb_port_update_credits(up);
2876 | |
2877 | down->bonded = bonded; |
2878 | if (down->dual_link_port) |
2879 | down->dual_link_port->bonded = bonded; |
	tb_port_update_credits(down);

	if (tb_port_get_link_generation(up) < 4)
2883 | return; |
2884 | |
2885 | /* |
2886 | * Set the Gen 4 preferred link width. This is what the router |
2887 | * prefers when the link is brought up. If the router does not |
2888 | * support asymmetric link configuration, this also will be set |
2889 | * to TB_LINK_WIDTH_DUAL. |
2890 | */ |
2891 | sw->preferred_link_width = sw->link_width; |
2892 | tb_sw_dbg(sw, "preferred link width %s\n" , |
2893 | tb_width_name(sw->preferred_link_width)); |
2894 | } |
2895 | |
2896 | /** |
2897 | * tb_switch_lane_bonding_enable() - Enable lane bonding |
2898 | * @sw: Switch to enable lane bonding |
2899 | * |
2900 | * Connection manager can call this function to enable lane bonding of a |
2901 | * switch. If conditions are correct and both switches support the feature, |
 * the lanes are bonded. It is safe to call this for any switch.
2903 | */ |
2904 | static int tb_switch_lane_bonding_enable(struct tb_switch *sw) |
2905 | { |
2906 | struct tb_port *up, *down; |
2907 | unsigned int width; |
2908 | int ret; |
2909 | |
2910 | if (!tb_switch_lane_bonding_possible(sw)) |
2911 | return 0; |
2912 | |
2913 | up = tb_upstream_port(sw); |
2914 | down = tb_switch_downstream_port(sw); |
2915 | |
	if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
	    !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
2918 | return 0; |
2919 | |
2920 | /* |
	 * Both lanes need to be in CL0. Here we assume lane 0 is already in
2922 | * CL0 and check just for lane 1. |
2923 | */ |
	if (tb_wait_for_port(down->dual_link_port, false) <= 0)
2925 | return -ENOTCONN; |
2926 | |
	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
2937 | return ret; |
2938 | } |
2939 | |
	/* Any of these widths means the link is bonded */
2941 | width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | |
2942 | TB_LINK_WIDTH_ASYM_RX; |
2943 | |
	return tb_port_wait_for_link_width(down, width, 100);
2945 | } |
2946 | |
2947 | /** |
2948 | * tb_switch_lane_bonding_disable() - Disable lane bonding |
2949 | * @sw: Switch whose lane bonding to disable |
2950 | * |
2951 | * Disables lane bonding between @sw and parent. This can be called even |
2952 | * if lanes were not bonded originally. |
2953 | */ |
2954 | static int tb_switch_lane_bonding_disable(struct tb_switch *sw) |
2955 | { |
2956 | struct tb_port *up, *down; |
2957 | int ret; |
2958 | |
2959 | up = tb_upstream_port(sw); |
2960 | if (!up->bonded) |
2961 | return 0; |
2962 | |
2963 | /* |
2964 | * If the link is Gen 4 there is no way to switch the link to |
2965 | * two single lane links so avoid that here. Also don't bother |
2966 | * if the link is not up anymore (sw is unplugged). |
2967 | */ |
	ret = tb_port_get_link_generation(up);
2969 | if (ret < 0) |
2970 | return ret; |
2971 | if (ret >= 4) |
2972 | return -EOPNOTSUPP; |
2973 | |
2974 | down = tb_switch_downstream_port(sw); |
	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);
2977 | |
2978 | /* |
2979 | * It is fine if we get other errors as the router might have |
2980 | * been unplugged. |
2981 | */ |
	return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
2983 | } |
2984 | |
2985 | /* Note updating sw->link_width done in tb_switch_update_link_attributes() */ |
2986 | static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) |
2987 | { |
2988 | struct tb_port *up, *down, *port; |
2989 | enum tb_link_width down_width; |
2990 | int ret; |
2991 | |
2992 | up = tb_upstream_port(sw); |
2993 | down = tb_switch_downstream_port(sw); |
2994 | |
2995 | if (width == TB_LINK_WIDTH_ASYM_TX) { |
2996 | down_width = TB_LINK_WIDTH_ASYM_RX; |
2997 | port = down; |
2998 | } else { |
2999 | down_width = TB_LINK_WIDTH_ASYM_TX; |
3000 | port = up; |
3001 | } |
3002 | |
	ret = tb_port_set_link_width(up, width);
3004 | if (ret) |
3005 | return ret; |
3006 | |
	ret = tb_port_set_link_width(down, down_width);
3008 | if (ret) |
3009 | return ret; |
3010 | |
3011 | /* |
3012 | * Initiate the change in the router that one of its TX lanes is |
3013 | * changing to RX but do so only if there is an actual change. |
3014 | */ |
3015 | if (sw->link_width != width) { |
3016 | ret = usb4_port_asym_start(port); |
3017 | if (ret) |
3018 | return ret; |
3019 | |
		ret = tb_port_wait_for_link_width(up, width, 100);
3021 | if (ret) |
3022 | return ret; |
3023 | } |
3024 | |
3025 | return 0; |
3026 | } |
3027 | |
3028 | /* Note updating sw->link_width done in tb_switch_update_link_attributes() */ |
3029 | static int tb_switch_asym_disable(struct tb_switch *sw) |
3030 | { |
3031 | struct tb_port *up, *down; |
3032 | int ret; |
3033 | |
3034 | up = tb_upstream_port(sw); |
3035 | down = tb_switch_downstream_port(sw); |
3036 | |
	ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
3038 | if (ret) |
3039 | return ret; |
3040 | |
	ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
3042 | if (ret) |
3043 | return ret; |
3044 | |
3045 | /* |
3046 | * Initiate the change in the router that has three TX lanes and |
3047 | * is changing one of its TX lanes to RX but only if there is a |
3048 | * change in the link width. |
3049 | */ |
3050 | if (sw->link_width > TB_LINK_WIDTH_DUAL) { |
3051 | if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) |
			ret = usb4_port_asym_start(up);
		else
			ret = usb4_port_asym_start(down);
3055 | if (ret) |
3056 | return ret; |
3057 | |
		ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
3059 | if (ret) |
3060 | return ret; |
3061 | } |
3062 | |
3063 | return 0; |
3064 | } |
3065 | |
3066 | /** |
3067 | * tb_switch_set_link_width() - Configure router link width |
3068 | * @sw: Router to configure |
3069 | * @width: The new link width |
3070 | * |
 * Set device router link width to @width from the router upstream port
 * perspective. Also supports asymmetric links if both sides of the link
 * support it.
3074 | * |
3075 | * Does nothing for host router. |
3076 | * |
3077 | * Returns %0 in case of success, negative errno otherwise. |
3078 | */ |
3079 | int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) |
3080 | { |
3081 | struct tb_port *up, *down; |
3082 | int ret = 0; |
3083 | |
3084 | if (!tb_route(sw)) |
3085 | return 0; |
3086 | |
3087 | up = tb_upstream_port(sw); |
3088 | down = tb_switch_downstream_port(sw); |
3089 | |
3090 | switch (width) { |
3091 | case TB_LINK_WIDTH_SINGLE: |
3092 | ret = tb_switch_lane_bonding_disable(sw); |
3093 | break; |
3094 | |
3095 | case TB_LINK_WIDTH_DUAL: |
3096 | if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || |
3097 | sw->link_width == TB_LINK_WIDTH_ASYM_RX) { |
3098 | ret = tb_switch_asym_disable(sw); |
3099 | if (ret) |
3100 | break; |
3101 | } |
3102 | ret = tb_switch_lane_bonding_enable(sw); |
3103 | break; |
3104 | |
3105 | case TB_LINK_WIDTH_ASYM_TX: |
3106 | case TB_LINK_WIDTH_ASYM_RX: |
3107 | ret = tb_switch_asym_enable(sw, width); |
3108 | break; |
3109 | } |
3110 | |
3111 | switch (ret) { |
3112 | case 0: |
3113 | break; |
3114 | |
3115 | case -ETIMEDOUT: |
3116 | tb_sw_warn(sw, "timeout changing link width\n" ); |
3117 | return ret; |
3118 | |
3119 | case -ENOTCONN: |
3120 | case -EOPNOTSUPP: |
3121 | case -ENODEV: |
3122 | return ret; |
3123 | |
3124 | default: |
3125 | tb_sw_dbg(sw, "failed to change link width: %d\n" , ret); |
3126 | return ret; |
3127 | } |
3128 | |
	tb_port_update_credits(down);
	tb_port_update_credits(up);
3131 | |
3132 | tb_switch_update_link_attributes(sw); |
3133 | |
3134 | tb_sw_dbg(sw, "link width set to %s\n" , tb_width_name(width)); |
3135 | return ret; |
3136 | } |
3137 | |
3138 | /** |
3139 | * tb_switch_configure_link() - Set link configured |
3140 | * @sw: Switch whose link is configured |
3141 | * |
3142 | * Sets the link upstream from @sw configured (from both ends) so that |
3143 | * it will not be disconnected when the domain exits sleep. Can be |
3144 | * called for any switch. |
3145 | * |
3146 | * It is recommended that this is called after lane bonding is enabled. |
3147 | * |
3148 | * Returns %0 on success and negative errno in case of error. |
3149 | */ |
3150 | int tb_switch_configure_link(struct tb_switch *sw) |
3151 | { |
3152 | struct tb_port *up, *down; |
3153 | int ret; |
3154 | |
3155 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
3156 | return 0; |
3157 | |
3158 | up = tb_upstream_port(sw); |
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
3163 | if (ret) |
3164 | return ret; |
3165 | |
3166 | down = up->remote; |
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
3170 | } |
3171 | |
3172 | /** |
3173 | * tb_switch_unconfigure_link() - Unconfigure link |
3174 | * @sw: Switch whose link is unconfigured |
3175 | * |
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
3178 | */ |
3179 | void tb_switch_unconfigure_link(struct tb_switch *sw) |
3180 | { |
3181 | struct tb_port *up, *down; |
3182 | |
3183 | if (!tb_route(sw) || tb_switch_is_icm(sw)) |
3184 | return; |
3185 | |
3186 | /* |
3187 | * Unconfigure downstream port so that wake-on-connect can be |
3188 | * configured after router unplug. No need to unconfigure upstream port |
3189 | * since its router is unplugged. |
3190 | */ |
3191 | up = tb_upstream_port(sw); |
3192 | down = up->remote; |
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
3197 | |
3198 | if (sw->is_unplugged) |
3199 | return; |
3200 | |
3201 | up = tb_upstream_port(sw); |
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);
3206 | } |
3207 | |
3208 | static void tb_switch_credits_init(struct tb_switch *sw) |
3209 | { |
3210 | if (tb_switch_is_icm(sw)) |
3211 | return; |
3212 | if (!tb_switch_is_usb4(sw)) |
3213 | return; |
3214 | if (usb4_switch_credits_init(sw)) |
3215 | tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n" ); |
3216 | } |
3217 | |
3218 | static int tb_switch_port_hotplug_enable(struct tb_switch *sw) |
3219 | { |
3220 | struct tb_port *port; |
3221 | |
3222 | if (tb_switch_is_icm(sw)) |
3223 | return 0; |
3224 | |
3225 | tb_switch_for_each_port(sw, port) { |
3226 | int res; |
3227 | |
3228 | if (!port->cap_usb4) |
3229 | continue; |
3230 | |
3231 | res = usb4_port_hotplug_enable(port); |
3232 | if (res) |
3233 | return res; |
3234 | } |
3235 | return 0; |
3236 | } |
3237 | |
3238 | /** |
3239 | * tb_switch_add() - Add a switch to the domain |
3240 | * @sw: Switch to add |
3241 | * |
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
3244 | * they can be used to connect other switches. The switch will be |
3245 | * exposed to the userspace when this function successfully returns. To |
3246 | * remove and release the switch, call tb_switch_remove(). |
3247 | * |
3248 | * Return: %0 in case of success and negative errno in case of failure |
3249 | */ |
3250 | int tb_switch_add(struct tb_switch *sw) |
3251 | { |
3252 | int i, ret; |
3253 | |
3254 | /* |
3255 | * Initialize DMA control port now before we read DROM. Recent |
3256 | * host controllers have more complete DROM on NVM that includes |
3257 | * vendor and model identification strings which we then expose |
3258 | * to the userspace. NVM can be accessed through DMA |
3259 | * configuration based mailbox. |
3260 | */ |
3261 | ret = tb_switch_add_dma_port(sw); |
3262 | if (ret) { |
3263 | dev_err(&sw->dev, "failed to add DMA port\n" ); |
3264 | return ret; |
3265 | } |
3266 | |
3267 | if (!sw->safe_mode) { |
3268 | tb_switch_credits_init(sw); |
3269 | |
3270 | /* read drom */ |
3271 | ret = tb_drom_read(sw); |
3272 | if (ret) |
3273 | dev_warn(&sw->dev, "reading DROM failed: %d\n" , ret); |
3274 | tb_sw_dbg(sw, "uid: %#llx\n" , sw->uid); |
3275 | |
3276 | ret = tb_switch_set_uuid(sw); |
3277 | if (ret) { |
3278 | dev_err(&sw->dev, "failed to set UUID\n" ); |
3279 | return ret; |
3280 | } |
3281 | |
3282 | for (i = 0; i <= sw->config.max_port_number; i++) { |
3283 | if (sw->ports[i].disabled) { |
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
3285 | continue; |
3286 | } |
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
3290 | return ret; |
3291 | } |
3292 | } |
3293 | |
3294 | tb_check_quirks(sw); |
3295 | |
3296 | tb_switch_default_link_ports(sw); |
3297 | |
3298 | ret = tb_switch_update_link_attributes(sw); |
3299 | if (ret) |
3300 | return ret; |
3301 | |
3302 | tb_switch_link_init(sw); |
3303 | |
3304 | ret = tb_switch_clx_init(sw); |
3305 | if (ret) |
3306 | return ret; |
3307 | |
3308 | ret = tb_switch_tmu_init(sw); |
3309 | if (ret) |
3310 | return ret; |
3311 | } |
3312 | |
3313 | ret = tb_switch_port_hotplug_enable(sw); |
3314 | if (ret) |
3315 | return ret; |
3316 | |
	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
3320 | return ret; |
3321 | } |
3322 | |
3323 | if (tb_route(sw)) { |
3324 | dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n" , |
3325 | sw->vendor, sw->device); |
3326 | if (sw->vendor_name && sw->device_name) |
3327 | dev_info(&sw->dev, "%s %s\n" , sw->vendor_name, |
3328 | sw->device_name); |
3329 | } |
3330 | |
3331 | ret = usb4_switch_add_ports(sw); |
3332 | if (ret) { |
3333 | dev_err(&sw->dev, "failed to add USB4 ports\n" ); |
3334 | goto err_del; |
3335 | } |
3336 | |
3337 | ret = tb_switch_nvm_add(sw); |
3338 | if (ret) { |
3339 | dev_err(&sw->dev, "failed to add NVM devices\n" ); |
3340 | goto err_ports; |
3341 | } |
3342 | |
3343 | /* |
3344 | * Thunderbolt routers do not generate wakeups themselves but |
3345 | * they forward wakeups from tunneled protocols, so enable it |
3346 | * here. |
3347 | */ |
	device_init_wakeup(&sw->dev, true);
3349 | |
	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
3357 | } |
3358 | |
3359 | tb_switch_debugfs_init(sw); |
3360 | return 0; |
3361 | |
3362 | err_ports: |
3363 | usb4_switch_remove_ports(sw); |
3364 | err_del: |
	device_del(&sw->dev);
3366 | |
3367 | return ret; |
3368 | } |
3369 | |
3370 | /** |
3371 | * tb_switch_remove() - Remove and release a switch |
3372 | * @sw: Switch to remove |
3373 | * |
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected below
3376 | * this switch, they will be removed as well. |
3377 | */ |
3378 | void tb_switch_remove(struct tb_switch *sw) |
3379 | { |
3380 | struct tb_port *port; |
3381 | |
3382 | tb_switch_debugfs_remove(sw); |
3383 | |
3384 | if (sw->rpm) { |
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
3387 | } |
3388 | |
3389 | /* port 0 is the switch itself and never has a remote */ |
3390 | tb_switch_for_each_port(sw, port) { |
3391 | if (tb_port_has_remote(port)) { |
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
3396 | port->xdomain = NULL; |
3397 | } |
3398 | |
3399 | /* Remove any downstream retimers */ |
3400 | tb_retimer_remove_all(port); |
3401 | } |
3402 | |
3403 | if (!sw->is_unplugged) |
		tb_plug_events_active(sw, false);
3405 | |
3406 | tb_switch_nvm_remove(sw); |
3407 | usb4_switch_remove_ports(sw); |
3408 | |
3409 | if (tb_route(sw)) |
3410 | dev_info(&sw->dev, "device disconnected\n" ); |
3411 | device_unregister(dev: &sw->dev); |
3412 | } |
3413 | |
3414 | /** |
3415 | * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches |
3416 | * @sw: Router to mark unplugged |
3417 | */ |
3418 | void tb_sw_set_unplugged(struct tb_switch *sw) |
3419 | { |
3420 | struct tb_port *port; |
3421 | |
3422 | if (sw == sw->tb->root_switch) { |
3423 | tb_sw_WARN(sw, "cannot unplug root switch\n" ); |
3424 | return; |
3425 | } |
3426 | if (sw->is_unplugged) { |
3427 | tb_sw_WARN(sw, "is_unplugged already set\n" ); |
3428 | return; |
3429 | } |
3430 | sw->is_unplugged = true; |
3431 | tb_switch_for_each_port(sw, port) { |
3432 | if (tb_port_has_remote(port)) |
			tb_sw_set_unplugged(port->remote->sw);
3434 | else if (port->xdomain) |
3435 | port->xdomain->is_unplugged = true; |
3436 | } |
3437 | } |
3438 | |
3439 | static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) |
3440 | { |
3441 | if (flags) |
3442 | tb_sw_dbg(sw, "enabling wakeup: %#x\n" , flags); |
3443 | else |
3444 | tb_sw_dbg(sw, "disabling wakeup\n" ); |
3445 | |
3446 | if (tb_switch_is_usb4(sw)) |
3447 | return usb4_switch_set_wake(sw, flags); |
3448 | return tb_lc_set_wake(sw, flags); |
3449 | } |
3450 | |
3451 | static void tb_switch_check_wakes(struct tb_switch *sw) |
3452 | { |
	if (device_may_wakeup(&sw->dev)) {
3454 | if (tb_switch_is_usb4(sw)) |
3455 | usb4_switch_check_wakes(sw); |
3456 | } |
3457 | } |
3458 | |
3459 | /** |
3460 | * tb_switch_resume() - Resume a switch after sleep |
3461 | * @sw: Switch to resume |
3462 | * @runtime: Is this resume from runtime suspend or system sleep |
3463 | * |
3464 | * Resumes and re-enumerates router (and all its children), if still plugged |
3465 | * after suspend. Don't enumerate device router whose UID was changed during |
 * suspend. If this is resume from system sleep, notifies the PM core about
 * the wakes that occurred during suspend. Disables all wakes, except the
 * USB4 wake of the upstream port, which shall always stay enabled for USB4
 * routers.
3469 | */ |
3470 | int tb_switch_resume(struct tb_switch *sw, bool runtime) |
3471 | { |
3472 | struct tb_port *port; |
3473 | int err; |
3474 | |
3475 | tb_sw_dbg(sw, "resuming switch\n" ); |
3476 | |
3477 | /* |
3478 | * Check for UID of the connected switches except for root |
3479 | * switch which we assume cannot be removed. |
3480 | */ |
3481 | if (tb_route(sw)) { |
3482 | u64 uid; |
3483 | |
3484 | /* |
3485 | * Check first that we can still read the switch config |
3486 | * space. It may be that there is now another domain |
3487 | * connected. |
3488 | */ |
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
3492 | return err; |
3493 | } |
3494 | |
3495 | /* We don't have any way to confirm this was the same device */ |
3496 | if (!sw->uid) |
3497 | return -ENODEV; |
3498 | |
3499 | if (tb_switch_is_usb4(sw)) |
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
3505 | return err; |
3506 | } |
3507 | if (sw->uid != uid) { |
3508 | tb_sw_info(sw, |
3509 | "changed while suspended (uid %#llx -> %#llx)\n" , |
3510 | sw->uid, uid); |
3511 | return -ENODEV; |
3512 | } |
3513 | } |
3514 | |
3515 | err = tb_switch_configure(sw); |
3516 | if (err) |
3517 | return err; |
3518 | |
3519 | if (!runtime) |
3520 | tb_switch_check_wakes(sw); |
3521 | |
3522 | /* Disable wakes */ |
	tb_switch_set_wake(sw, 0);
3524 | |
3525 | err = tb_switch_tmu_init(sw); |
3526 | if (err) |
3527 | return err; |
3528 | |
3529 | /* check for surviving downstream switches */ |
3530 | tb_switch_for_each_port(sw, port) { |
3531 | if (!tb_port_is_null(port)) |
3532 | continue; |
3533 | |
3534 | if (!tb_port_resume(port)) |
3535 | continue; |
3536 | |
		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
3542 | else if (port->xdomain) |
3543 | port->xdomain->is_unplugged = true; |
3544 | } else { |
3545 | /* |
3546 | * Always unlock the port so the downstream |
3547 | * switch/domain is accessible. |
3548 | */ |
3549 | if (tb_port_unlock(port)) |
3550 | tb_port_warn(port, "failed to unlock port\n" ); |
3551 | if (port->remote && |
3552 | tb_switch_resume(sw: port->remote->sw, runtime)) { |
3553 | tb_port_warn(port, |
3554 | "lost during suspend, disconnecting\n" ); |
3555 | tb_sw_set_unplugged(sw: port->remote->sw); |
3556 | } |
3557 | } |
3558 | } |
3559 | return 0; |
3560 | } |
3561 | |
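/*
 * Usage sketch (illustrative only, not part of the driver): a connection
 * manager resume path typically re-enumerates the whole topology by
 * resuming the root router; the recursion above walks the children. The
 * example_cm_resume() wrapper below is hypothetical:
 *
 *	static int example_cm_resume(struct tb *tb)
 *	{
 *		// false: resume from system sleep, so wakes that occurred
 *		// during suspend are reported to the PM core
 *		return tb_switch_resume(tb->root_switch, false);
 *	}
 */
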
/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to
 * the value of @runtime and then sets the sleep bit for the router. If
 * @sw is the host router, the domain is ready to go to sleep once this
 * function returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 */
	tb_switch_clx_disable(sw);

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

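/*
 * Usage sketch (illustrative only): suspending the root router recurses
 * to all children and arms wakes before the domain goes to sleep. The
 * example_cm_suspend() wrapper is hypothetical:
 *
 *	static void example_cm_suspend(struct tb *tb, bool runtime)
 *	{
 *		// With runtime == true all plug/protocol wakes are armed
 *		// so the domain can wake itself on hotplug
 *		tb_switch_suspend(tb->root_switch, runtime);
 *	}
 */
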
/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using
 * switch-specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}

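/*
 * Typical call flow for the three DP resource helpers above (illustrative
 * only, error handling elided): query first, claim the resource before
 * setting up the tunnel, and release it at teardown:
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		// ... establish the DP tunnel from @in ...
 *		// and once the tunnel is torn down:
 *		tb_switch_dealloc_dp_resource(sw, in);
 *	}
 */
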
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

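/*
 * Usage sketch for the lookups above (illustrative only): each successful
 * find takes a reference that must be dropped with tb_switch_put():
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		// ... use the switch ...
 *		tb_switch_put(sw);
 *	}
 */
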
/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}

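/*
 * For example (illustrative only), finding the first PCIe downstream
 * adapter of a router:
 *
 *	struct tb_port *down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
 *
 *	if (down)
 *		tb_port_dbg(down, "first PCIe downstream adapter\n");
 */
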
/*
 * Can be used to read/write a specified PCIe bridge for any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For the Titan Ridge switch to enter the CLx state, its PCIe bridges
 * shall enable entry to the PCIe L1 state. Shall be called after the
 * upstream PCIe tunnel was configured. Due to an Intel platform
 * limitation, shall be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 only for the first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}

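/*
 * Usage sketch (illustrative only): called right after the PCIe tunnel
 * toward a Titan Ridge device has been activated:
 *
 *	ret = tb_switch_pcie_l1_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable PCIe L1\n");
 */
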
/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge
 * performs special flows that make the xHCI functional for any device
 * connected to the type-C port. Call only after the PCIe tunnel has
 * been established. The function only does the connect if not done
 * already so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	struct tb_port *port1, *port3;
	int ret;

	if (sw->generation != 3)
		return 0;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		bool usb_port1, usb_port3, xhci_port1, xhci_port3;

		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}

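/*
 * Usage sketch (illustrative only): once the PCIe tunnel is up the
 * connection manager can hand the type-C ports over to the internal
 * xHCI, and undo it before the tunnel goes away:
 *
 *	if (tb_switch_xhci_connect(sw))
 *		tb_sw_warn(sw, "failed to connect xHCI\n");
 *	// ... later, before tearing down the PCIe tunnel ...
 *	tb_switch_xhci_disconnect(sw);
 */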