// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_SHORT_TIMEOUT		100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT		1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT		10000	/* ms */
#define XDOMAIN_RETRIES			10
#define XDOMAIN_DEFAULT_MAX_HOPID	15

enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};

static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

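		/* Make sure the frame is large enough for the expected response */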
		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
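		/* The remote sets the highest bit of route_hi in its response, mask it out */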
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

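	/* err == 1 means the remote domain replied with an error packet */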
	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

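/*
 * Fill in the common XDomain header. The length field counts dwords
 * following the basic XDomain header, and the sequence number is a
 * two-bit field, which is why callers pass "retry % 4" for it.
 */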
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
				      u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

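	/* Fetch the property block in chunks until all data_len dwords are in */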
	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * On the first pass, allocate a block that has enough
		 * space for the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, which we might add
	 * support for later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}

static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}

static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));

	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

/* Can be called from state_work */
static void __stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}

static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->state_work);
	__stop_handshake(xd);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

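	/* Mask off the topmost route bit and pull out the 2-bit sequence number */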
	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		/*
		 * If we've stopped the discovery with an error such as
		 * timing out, we will restart the handshake now that we
		 * received UUID request from the remote host.
		 */
		if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
			dev_dbg(&xd->dev, "restarting handshake\n");
			start_handshake(xd);
		}
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_free(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

const struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_alloc(&xd->service_ids, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_xdomain_downstream_port(xd);

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to read remote UUID\n");
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}

static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw,
					       &sls, &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_port *port = tb_xdomain_downstream_port(xd);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

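	/* Map the requested width to the lane adapter target link width encoding */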
	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed to request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}

static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	unsigned int width, width_mask;
	struct tb_port *port;
	int ret;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = TB_LINK_WIDTH_SINGLE;
		width_mask = width;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = TB_LINK_WIDTH_DUAL;
		width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_xdomain_downstream_port(xd);

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width to both lane adapters and wait for the
	 * link to transition bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width_mask);
		return ret;
	}

	port->bonded = width > TB_LINK_WIDTH_SINGLE;
	port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}

static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		}
		/* Give up now */
		dev_err(&xd->dev, "failed to read XDomain properties from %pUb\n",
			xd->remote_uuid);

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		/*
		 * Now disable lane 1 if bonding was not enabled. Do
		 * this only if bonding was possible at the beginning
		 * (that is, we are the connection manager and there are
		 * two lanes).
		 */
		if (xd->bonding_possible) {
			struct tb_port *port;

			port = tb_xdomain_downstream_port(xd);
			if (!port->bonded)
				tb_port_disable(port->dual_link_port);
		}

		dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
			xd->link_speed);
		dev_dbg(&xd->dev, "current link width %s\n",
			tb_width_name(xd->link_width));

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);

		tb_xdomain_debugfs_init(xd);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}

static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_UUID;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS2;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

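/*
 * Lane bonding is initiated by exactly one end of the link: the domain
 * with the lower UUID sends the link state change request while the one
 * with the higher UUID waits for that request to arrive.
 */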
static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
	} else {
		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
	}

	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_PROPERTIES;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
	xd->properties_changed_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_failed(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_ERROR;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

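/*
 * Discovery state machine: INIT -> UUID -> optional lane bonding states
 * -> PROPERTIES -> ENUMERATED. Transient failures retry the current
 * state, permanent ones end up in ERROR which stops the handshake.
 */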
1566 | static void tb_xdomain_state_work(struct work_struct *work) |
1567 | { |
1568 | struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work); |
1569 | int ret, state = xd->state; |
1570 | |
1571 | if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT || |
1572 | state > XDOMAIN_STATE_ERROR)) |
1573 | return; |
1574 | |
1575 | dev_dbg(&xd->dev, "running state %s\n" , state_names[state]); |
1576 | |
1577 | switch (state) { |
1578 | case XDOMAIN_STATE_INIT: |
1579 | if (xd->needs_uuid) { |
1580 | tb_xdomain_queue_uuid(xd); |
1581 | } else { |
1582 | tb_xdomain_queue_properties_changed(xd); |
1583 | tb_xdomain_queue_properties(xd); |
1584 | } |
1585 | break; |
1586 | |
1587 | case XDOMAIN_STATE_UUID: |
1588 | ret = tb_xdomain_get_uuid(xd); |
1589 | if (ret) { |
1590 | if (ret == -EAGAIN) |
1591 | goto retry_state; |
1592 | tb_xdomain_failed(xd); |
1593 | } else { |
1594 | tb_xdomain_queue_properties_changed(xd); |
1595 | if (xd->bonding_possible) |
1596 | tb_xdomain_queue_link_status(xd); |
1597 | else |
1598 | tb_xdomain_queue_properties(xd); |
1599 | } |
1600 | break; |
1601 | |
1602 | case XDOMAIN_STATE_LINK_STATUS: |
1603 | ret = tb_xdomain_get_link_status(xd); |
1604 | if (ret) { |
1605 | if (ret == -EAGAIN) |
1606 | goto retry_state; |
1607 | |
1608 | /* |
1609 | * If any of the lane bonding states fail we skip |
1610 | * bonding completely and try to continue from |
1611 | * reading properties. |
1612 | */ |
1613 | tb_xdomain_queue_properties(xd); |
1614 | } else { |
1615 | tb_xdomain_queue_bonding(xd); |
1616 | } |
1617 | break; |
1618 | |
1619 | case XDOMAIN_STATE_LINK_STATE_CHANGE: |
1620 | ret = tb_xdomain_link_state_change(xd, width: 2); |
1621 | if (ret) { |
1622 | if (ret == -EAGAIN) |
1623 | goto retry_state; |
1624 | tb_xdomain_queue_properties(xd); |
1625 | } else { |
1626 | tb_xdomain_queue_link_status2(xd); |
1627 | } |
1628 | break; |
1629 | |
1630 | case XDOMAIN_STATE_LINK_STATUS2: |
1631 | ret = tb_xdomain_get_link_status(xd); |
1632 | if (ret) { |
1633 | if (ret == -EAGAIN) |
1634 | goto retry_state; |
1635 | tb_xdomain_queue_properties(xd); |
1636 | } else { |
1637 | tb_xdomain_queue_bonding_uuid_low(xd); |
1638 | } |
1639 | break; |
1640 | |
1641 | case XDOMAIN_STATE_BONDING_UUID_LOW: |
1642 | tb_xdomain_lane_bonding_enable(xd); |
1643 | tb_xdomain_queue_properties(xd); |
1644 | break; |
1645 | |
1646 | case XDOMAIN_STATE_BONDING_UUID_HIGH: |
1647 | if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN) |
1648 | goto retry_state; |
1649 | tb_xdomain_queue_properties(xd); |
1650 | break; |
1651 | |
1652 | case XDOMAIN_STATE_PROPERTIES: |
1653 | ret = tb_xdomain_get_properties(xd); |
1654 | if (ret) { |
1655 | if (ret == -EAGAIN) |
1656 | goto retry_state; |
1657 | tb_xdomain_failed(xd); |
1658 | } else { |
1659 | xd->state = XDOMAIN_STATE_ENUMERATED; |
1660 | } |
1661 | break; |
1662 | |
1663 | case XDOMAIN_STATE_ENUMERATED: |
1664 | tb_xdomain_queue_properties(xd); |
1665 | break; |
1666 | |
1667 | case XDOMAIN_STATE_ERROR: |
1668 | dev_dbg(&xd->dev, "discovery failed, stopping handshake\n" ); |
1669 | __stop_handshake(xd); |
1670 | break; |
1671 | |
1672 | default: |
1673 | dev_warn(&xd->dev, "unexpected state %d\n" , state); |
1674 | break; |
1675 | } |
1676 | |
1677 | return; |
1678 | |
1679 | retry_state: |
1680 | queue_delayed_work(wq: xd->tb->wq, dwork: &xd->state_work, |
1681 | delay: msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); |
1682 | } |
1683 | |
1684 | static void tb_xdomain_properties_changed(struct work_struct *work) |
1685 | { |
1686 | struct tb_xdomain *xd = container_of(work, typeof(*xd), |
1687 | properties_changed_work.work); |
1688 | int ret; |
1689 | |
1690 | dev_dbg(&xd->dev, "sending properties changed notification\n" ); |
1691 | |
1692 | ret = tb_xdp_properties_changed_request(ctl: xd->tb->ctl, route: xd->route, |
1693 | retry: xd->properties_changed_retries, uuid: xd->local_uuid); |
1694 | if (ret) { |
1695 | if (xd->properties_changed_retries-- > 0) { |
1696 | dev_dbg(&xd->dev, |
1697 | "failed to send properties changed notification, retrying\n" ); |
1698 | queue_delayed_work(wq: xd->tb->wq, |
1699 | dwork: &xd->properties_changed_work, |
1700 | delay: msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); |
1701 | } |
1702 | dev_err(&xd->dev, "failed to send properties changed notification\n" ); |
1703 | return; |
1704 | } |
1705 | |
1706 | xd->properties_changed_retries = XDOMAIN_RETRIES; |
1707 | } |
1708 | |
1709 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
1710 | char *buf) |
1711 | { |
1712 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1713 | |
1714 | return sysfs_emit(buf, fmt: "%#x\n" , xd->device); |
1715 | } |
1716 | static DEVICE_ATTR_RO(device); |
1717 | |
1718 | static ssize_t |
1719 | device_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
1720 | { |
1721 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1722 | int ret; |
1723 | |
1724 | if (mutex_lock_interruptible(&xd->lock)) |
1725 | return -ERESTARTSYS; |
1726 | ret = sysfs_emit(buf, fmt: "%s\n" , xd->device_name ?: "" ); |
1727 | mutex_unlock(lock: &xd->lock); |
1728 | |
1729 | return ret; |
1730 | } |
1731 | static DEVICE_ATTR_RO(device_name); |
1732 | |
1733 | static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr, |
1734 | char *buf) |
1735 | { |
1736 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1737 | |
1738 | return sysfs_emit(buf, fmt: "%d\n" , xd->remote_max_hopid); |
1739 | } |
1740 | static DEVICE_ATTR_RO(maxhopid); |
1741 | |
1742 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
1743 | char *buf) |
1744 | { |
1745 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1746 | |
1747 | return sysfs_emit(buf, fmt: "%#x\n" , xd->vendor); |
1748 | } |
1749 | static DEVICE_ATTR_RO(vendor); |
1750 | |
1751 | static ssize_t |
1752 | vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) |
1753 | { |
1754 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1755 | int ret; |
1756 | |
1757 | if (mutex_lock_interruptible(&xd->lock)) |
1758 | return -ERESTARTSYS; |
1759 | ret = sysfs_emit(buf, fmt: "%s\n" , xd->vendor_name ?: "" ); |
1760 | mutex_unlock(lock: &xd->lock); |
1761 | |
1762 | return ret; |
1763 | } |
1764 | static DEVICE_ATTR_RO(vendor_name); |
1765 | |
1766 | static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, |
1767 | char *buf) |
1768 | { |
1769 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1770 | |
1771 | return sysfs_emit(buf, fmt: "%pUb\n" , xd->remote_uuid); |
1772 | } |
1773 | static DEVICE_ATTR_RO(unique_id); |
1774 | |
1775 | static ssize_t speed_show(struct device *dev, struct device_attribute *attr, |
1776 | char *buf) |
1777 | { |
1778 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1779 | |
1780 | return sysfs_emit(buf, fmt: "%u.0 Gb/s\n" , xd->link_speed); |
1781 | } |
1782 | |
1783 | static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); |
1784 | static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); |
1785 | |
1786 | static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, |
1787 | char *buf) |
1788 | { |
1789 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1790 | unsigned int width; |
1791 | |
1792 | switch (xd->link_width) { |
1793 | case TB_LINK_WIDTH_SINGLE: |
1794 | case TB_LINK_WIDTH_ASYM_TX: |
1795 | width = 1; |
1796 | break; |
1797 | case TB_LINK_WIDTH_DUAL: |
1798 | width = 2; |
1799 | break; |
1800 | case TB_LINK_WIDTH_ASYM_RX: |
1801 | width = 3; |
1802 | break; |
1803 | default: |
1804 | WARN_ON_ONCE(1); |
1805 | return -EINVAL; |
1806 | } |
1807 | |
1808 | return sysfs_emit(buf, fmt: "%u\n" , width); |
1809 | } |
1810 | static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); |
1811 | |
1812 | static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, |
1813 | char *buf) |
1814 | { |
1815 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1816 | unsigned int width; |
1817 | |
1818 | switch (xd->link_width) { |
1819 | case TB_LINK_WIDTH_SINGLE: |
1820 | case TB_LINK_WIDTH_ASYM_RX: |
1821 | width = 1; |
1822 | break; |
1823 | case TB_LINK_WIDTH_DUAL: |
1824 | width = 2; |
1825 | break; |
1826 | case TB_LINK_WIDTH_ASYM_TX: |
1827 | width = 3; |
1828 | break; |
1829 | default: |
1830 | WARN_ON_ONCE(1); |
1831 | return -EINVAL; |
1832 | } |
1833 | |
1834 | return sysfs_emit(buf, fmt: "%u\n" , width); |
1835 | } |
1836 | static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); |
1837 | |
1838 | static struct attribute *xdomain_attrs[] = { |
1839 | &dev_attr_device.attr, |
1840 | &dev_attr_device_name.attr, |
1841 | &dev_attr_maxhopid.attr, |
1842 | &dev_attr_rx_lanes.attr, |
1843 | &dev_attr_rx_speed.attr, |
1844 | &dev_attr_tx_lanes.attr, |
1845 | &dev_attr_tx_speed.attr, |
1846 | &dev_attr_unique_id.attr, |
1847 | &dev_attr_vendor.attr, |
1848 | &dev_attr_vendor_name.attr, |
1849 | NULL, |
1850 | }; |
1851 | |
1852 | static const struct attribute_group xdomain_attr_group = { |
1853 | .attrs = xdomain_attrs, |
1854 | }; |
1855 | |
1856 | static const struct attribute_group *xdomain_attr_groups[] = { |
1857 | &xdomain_attr_group, |
1858 | NULL, |
1859 | }; |
1860 | |
1861 | static void tb_xdomain_release(struct device *dev) |
1862 | { |
1863 | struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); |
1864 | |
	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
1878 | } |
1879 | |
1880 | static int __maybe_unused tb_xdomain_suspend(struct device *dev) |
1881 | { |
	stop_handshake(tb_to_xdomain(dev));
1883 | return 0; |
1884 | } |
1885 | |
1886 | static int __maybe_unused tb_xdomain_resume(struct device *dev) |
1887 | { |
	start_handshake(tb_to_xdomain(dev));
1889 | return 0; |
1890 | } |
1891 | |
1892 | static const struct dev_pm_ops tb_xdomain_pm_ops = { |
1893 | SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume) |
1894 | }; |
1895 | |
1896 | const struct device_type tb_xdomain_type = { |
1897 | .name = "thunderbolt_xdomain" , |
1898 | .release = tb_xdomain_release, |
1899 | .pm = &tb_xdomain_pm_ops, |
1900 | }; |
1901 | EXPORT_SYMBOL_GPL(tb_xdomain_type); |
1902 | |
1903 | static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down) |
1904 | { |
1905 | if (!down->dual_link_port) |
1906 | return; |
1907 | |
1908 | /* |
1909 | * Gen 4 links come up already as bonded so only update the port |
1910 | * structures here. |
1911 | */ |
	if (tb_port_get_link_generation(down) >= 4) {
1913 | down->bonded = true; |
1914 | down->dual_link_port->bonded = true; |
1915 | } else { |
1916 | xd->bonding_possible = true; |
1917 | } |
1918 | } |
1919 | |
1920 | static void tb_xdomain_link_exit(struct tb_xdomain *xd) |
1921 | { |
1922 | struct tb_port *down = tb_xdomain_downstream_port(xd); |
1923 | |
1924 | if (!down->dual_link_port) |
1925 | return; |
1926 | |
	if (tb_port_get_link_generation(down) >= 4) {
1928 | down->bonded = false; |
1929 | down->dual_link_port->bonded = false; |
1930 | } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) { |
1931 | /* |
1932 | * Just return port structures back to way they were and |
1933 | * update credits. No need to update userspace because |
1934 | * the XDomain is removed soon anyway. |
1935 | */ |
		tb_port_lane_bonding_disable(down);
		tb_port_update_credits(down);
	} else {
		/*
		 * Re-enable the lane 1 adapter we disabled at the end
		 * of tb_xdomain_get_properties(). The early return
		 * above guarantees dual_link_port is non-NULL here.
		 */
		tb_port_enable(down->dual_link_port);
1944 | } |
1945 | } |
1946 | |
1947 | /** |
1948 | * tb_xdomain_alloc() - Allocate new XDomain object |
1949 | * @tb: Domain where the XDomain belongs |
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
1952 | * @route: Route string used to reach the other domain |
1953 | * @local_uuid: Our local domain UUID |
1954 | * @remote_uuid: UUID of the other domain (optional) |
1955 | * |
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
1958 | */ |
1959 | struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, |
1960 | u64 route, const uuid_t *local_uuid, |
1961 | const uuid_t *remote_uuid) |
1962 | { |
	struct tb_switch *parent_sw = tb_to_switch(parent);
1964 | struct tb_xdomain *xd; |
1965 | struct tb_port *down; |
1966 | |
1967 | /* Make sure the downstream domain is accessible */ |
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);
1970 | |
	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
1972 | if (!xd) |
1973 | return NULL; |
1974 | |
1975 | xd->tb = tb; |
1976 | xd->route = route; |
1977 | xd->local_max_hopid = down->config.max_in_hop_id; |
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
1981 | mutex_init(&xd->lock); |
1982 | INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work); |
1983 | INIT_DELAYED_WORK(&xd->properties_changed_work, |
1984 | tb_xdomain_properties_changed); |
1985 | |
	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
1987 | if (!xd->local_uuid) |
1988 | goto err_free; |
1989 | |
1990 | if (remote_uuid) { |
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
1992 | GFP_KERNEL); |
1993 | if (!xd->remote_uuid) |
1994 | goto err_free_local_uuid; |
1995 | } else { |
1996 | xd->needs_uuid = true; |
1997 | |
1998 | tb_xdomain_link_init(xd, down); |
1999 | } |
2000 | |
	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
2003 | xd->dev.bus = &tb_bus_type; |
2004 | xd->dev.type = &tb_xdomain_type; |
2005 | xd->dev.groups = xdomain_attr_groups; |
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
2007 | |
2008 | dev_dbg(&xd->dev, "local UUID %pUb\n" , local_uuid); |
2009 | if (remote_uuid) |
2010 | dev_dbg(&xd->dev, "remote UUID %pUb\n" , remote_uuid); |
2011 | |
2012 | /* |
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
2015 | */ |
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);
2019 | |
2020 | return xd; |
2021 | |
2022 | err_free_local_uuid: |
	kfree(xd->local_uuid);
2024 | err_free: |
	kfree(xd);
2026 | |
2027 | return NULL; |
2028 | } |
2029 | |
2030 | /** |
2031 | * tb_xdomain_add() - Add XDomain to the bus |
2032 | * @xd: XDomain to add |
2033 | * |
2034 | * This function starts XDomain discovery protocol handshake and |
2035 | * eventually adds the XDomain to the bus. After calling this function |
2036 | * the caller needs to call tb_xdomain_remove() in order to remove and |
 * release the object regardless of whether the handshake succeeded.
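 *
 * A minimal lifecycle sketch (illustrative only; in practice the
 * connection manager drives this, and error handling is elided):
 *
 *	xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, NULL);
 *	if (xd) {
 *		tb_xdomain_add(xd);
 *		...
 *		tb_xdomain_remove(xd);
 *	}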
2038 | */ |
2039 | void tb_xdomain_add(struct tb_xdomain *xd) |
2040 | { |
2041 | /* Start exchanging properties with the other host */ |
2042 | start_handshake(xd); |
2043 | } |
2044 | |
2045 | static int unregister_service(struct device *dev, void *data) |
2046 | { |
2047 | device_unregister(dev); |
2048 | return 0; |
2049 | } |
2050 | |
2051 | /** |
2052 | * tb_xdomain_remove() - Remove XDomain from the bus |
2053 | * @xd: XDomain to remove |
2054 | * |
2055 | * This will stop all ongoing configuration work and remove the XDomain |
2056 | * along with any services from the bus. When the last reference to @xd |
2057 | * is released the object will be released as well. |
2058 | */ |
2059 | void tb_xdomain_remove(struct tb_xdomain *xd) |
2060 | { |
2061 | tb_xdomain_debugfs_remove(xd); |
2062 | |
2063 | stop_handshake(xd); |
2064 | |
	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
2066 | |
2067 | tb_xdomain_link_exit(xd); |
2068 | |
2069 | /* |
2070 | * Undo runtime PM here explicitly because it is possible that |
2071 | * the XDomain was never added to the bus and thus device_del() |
2072 | * is not called for it (device_del() would handle this otherwise). |
2073 | */ |
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);
2077 | |
	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
2083 | } |
2084 | } |
2085 | |
2086 | /** |
2087 | * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain |
2088 | * @xd: XDomain connection |
2089 | * |
2090 | * Lane bonding is disabled by default for XDomains. This function tries |
2091 | * to enable bonding by first enabling the port and waiting for the CL0 |
2092 | * state. |
2093 | * |
2094 | * Return: %0 in case of success and negative errno in case of error. |
2095 | */ |
2096 | int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd) |
2097 | { |
2098 | unsigned int width_mask; |
2099 | struct tb_port *port; |
2100 | int ret; |
2101 | |
2102 | port = tb_xdomain_downstream_port(xd); |
2103 | if (!port->dual_link_port) |
2104 | return -ENODEV; |
2105 | |
	ret = tb_port_enable(port->dual_link_port);
2107 | if (ret) |
2108 | return ret; |
2109 | |
	ret = tb_wait_for_port(port->dual_link_port, true);
2111 | if (ret < 0) |
2112 | return ret; |
2113 | if (!ret) |
2114 | return -ENOTCONN; |
2115 | |
2116 | ret = tb_port_lane_bonding_enable(port); |
2117 | if (ret) { |
2118 | tb_port_warn(port, "failed to enable lane bonding\n" ); |
2119 | return ret; |
2120 | } |
2121 | |
	/* Any of these widths means the link is bonded */
	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
		     TB_LINK_WIDTH_ASYM_RX;

	ret = tb_port_wait_for_link_width(port, width_mask,
					  XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
2130 | return ret; |
2131 | } |
2132 | |
2133 | tb_port_update_credits(port); |
2134 | tb_xdomain_update_link_attributes(xd); |
2135 | |
2136 | dev_dbg(&xd->dev, "lane bonding enabled\n" ); |
2137 | return 0; |
2138 | } |
2139 | EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable); |
2140 | |
2141 | /** |
2142 | * tb_xdomain_lane_bonding_disable() - Disable lane bonding |
2143 | * @xd: XDomain connection |
2144 | * |
2145 | * Lane bonding is disabled by default for XDomains. If bonding has been |
2146 | * enabled, this function can be used to disable it. |
2147 | */ |
2148 | void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd) |
2149 | { |
2150 | struct tb_port *port; |
2151 | |
2152 | port = tb_xdomain_downstream_port(xd); |
2153 | if (port->dual_link_port) { |
2154 | int ret; |
2155 | |
2156 | tb_port_lane_bonding_disable(port); |
		ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE,
						  100);
		if (ret == -ETIMEDOUT)
			tb_port_warn(port, "timeout disabling lane bonding\n");
		tb_port_disable(port->dual_link_port);
		tb_port_update_credits(port);
2162 | tb_xdomain_update_link_attributes(xd); |
2163 | |
2164 | dev_dbg(&xd->dev, "lane bonding disabled\n" ); |
2165 | } |
2166 | } |
2167 | EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable); |
2168 | |
2169 | /** |
2170 | * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling |
2171 | * @xd: XDomain connection |
2172 | * @hopid: Preferred HopID or %-1 for next available |
2173 | * |
2174 | * Returns allocated HopID or negative errno. Specifically returns |
2175 | * %-ENOSPC if there are no more available HopIDs. Returned HopID is |
2176 | * guaranteed to be within range supported by the input lane adapter. |
2177 | * Call tb_xdomain_release_in_hopid() to release the allocated HopID. |
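 *
 * A short usage sketch (illustrative only; error handling elided):
 *
 *	int hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_xdomain_release_in_hopid(xd, hopid);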
2178 | */ |
2179 | int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid) |
2180 | { |
2181 | if (hopid < 0) |
2182 | hopid = TB_PATH_MIN_HOPID; |
2183 | if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid) |
2184 | return -EINVAL; |
2185 | |
	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
2187 | GFP_KERNEL); |
2188 | } |
2189 | EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid); |
2190 | |
2191 | /** |
2192 | * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling |
2193 | * @xd: XDomain connection |
2194 | * @hopid: Preferred HopID or %-1 for next available |
2195 | * |
2196 | * Returns allocated HopID or negative errno. Specifically returns |
2197 | * %-ENOSPC if there are no more available HopIDs. Returned HopID is |
2198 | * guaranteed to be within range supported by the output lane adapter. |
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
2200 | */ |
2201 | int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid) |
2202 | { |
2203 | if (hopid < 0) |
2204 | hopid = TB_PATH_MIN_HOPID; |
2205 | if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid) |
2206 | return -EINVAL; |
2207 | |
	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
2209 | GFP_KERNEL); |
2210 | } |
2211 | EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid); |
2212 | |
2213 | /** |
2214 | * tb_xdomain_release_in_hopid() - Release input HopID |
2215 | * @xd: XDomain connection |
2216 | * @hopid: HopID to release |
2217 | */ |
2218 | void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid) |
2219 | { |
	ida_free(&xd->in_hopids, hopid);
2221 | } |
2222 | EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid); |
2223 | |
2224 | /** |
2225 | * tb_xdomain_release_out_hopid() - Release output HopID |
2226 | * @xd: XDomain connection |
2227 | * @hopid: HopID to release |
2228 | */ |
2229 | void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid) |
2230 | { |
	ida_free(&xd->out_hopids, hopid);
2232 | } |
2233 | EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid); |
2234 | |
2235 | /** |
2236 | * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection |
2237 | * @xd: XDomain connection |
2238 | * @transmit_path: HopID we are using to send out packets |
2239 | * @transmit_ring: DMA ring used to send out packets |
2240 | * @receive_path: HopID the other end is using to send packets to us |
2241 | * @receive_ring: DMA ring used to receive packets from @receive_path |
2242 | * |
 * The function enables the DMA paths so that after successful return
 * the caller can send and receive packets using the high-speed DMA
 * paths. If a transmit or receive path is not needed, pass %-1 for
 * those parameters.
2247 | * |
2248 | * Return: %0 in case of success and negative errno in case of error |
2249 | */ |
2250 | int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path, |
2251 | int transmit_ring, int receive_path, |
2252 | int receive_ring) |
2253 | { |
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
2255 | transmit_ring, receive_path, |
2256 | receive_ring); |
2257 | } |
2258 | EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths); |
2259 | |
2260 | /** |
2261 | * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection |
2262 | * @xd: XDomain connection |
2263 | * @transmit_path: HopID we are using to send out packets |
2264 | * @transmit_ring: DMA ring used to send out packets |
2265 | * @receive_path: HopID the other end is using to send packets to us |
2266 | * @receive_ring: DMA ring used to receive packets from @receive_path |
2267 | * |
 * This does the opposite of tb_xdomain_enable_paths(). After calling
 * this the caller is not expected to use the rings anymore. Passing %-1
2270 | * as path/ring parameter means don't care. Normally the callers should |
2271 | * pass the same values here as they do when paths are enabled. |
2272 | * |
2273 | * Return: %0 in case of success and negative errno in case of error |
2274 | */ |
2275 | int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path, |
2276 | int transmit_ring, int receive_path, |
2277 | int receive_ring) |
2278 | { |
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
2280 | transmit_ring, receive_path, |
2281 | receive_ring); |
2282 | } |
2283 | EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths); |
2284 | |
2285 | struct tb_xdomain_lookup { |
2286 | const uuid_t *uuid; |
2287 | u8 link; |
2288 | u8 depth; |
2289 | u64 route; |
2290 | }; |
2291 | |
2292 | static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw, |
2293 | const struct tb_xdomain_lookup *lookup) |
2294 | { |
2295 | struct tb_port *port; |
2296 | |
2297 | tb_switch_for_each_port(sw, port) { |
2298 | struct tb_xdomain *xd; |
2299 | |
2300 | if (port->xdomain) { |
2301 | xd = port->xdomain; |
2302 | |
2303 | if (lookup->uuid) { |
2304 | if (xd->remote_uuid && |
				    uuid_equal(xd->remote_uuid, lookup->uuid))
2306 | return xd; |
2307 | } else { |
2308 | if (lookup->link && lookup->link == xd->link && |
2309 | lookup->depth == xd->depth) |
2310 | return xd; |
2311 | if (lookup->route && lookup->route == xd->route) |
2312 | return xd; |
2313 | } |
2314 | } else if (tb_port_has_remote(port)) { |
			xd = switch_find_xdomain(port->remote->sw, lookup);
2316 | if (xd) |
2317 | return xd; |
2318 | } |
2319 | } |
2320 | |
2321 | return NULL; |
2322 | } |
2323 | |
2324 | /** |
2325 | * tb_xdomain_find_by_uuid() - Find an XDomain by UUID |
2326 | * @tb: Domain where the XDomain belongs to |
2327 | * @uuid: UUID to look for |
2328 | * |
2329 | * Finds XDomain by walking through the Thunderbolt topology below @tb. |
2330 | * The returned XDomain will have its reference count increased so the |
2331 | * caller needs to call tb_xdomain_put() when it is done with the |
2332 | * object. |
2333 | * |
2334 | * This will find all XDomains including the ones that are not yet added |
2335 | * to the bus (handshake is still in progress). |
2336 | * |
2337 | * The caller needs to hold @tb->lock. |
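 *
 * Typical use (a sketch; note that the returned reference must be
 * dropped with tb_xdomain_put()):
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}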
2338 | */ |
2339 | struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid) |
2340 | { |
2341 | struct tb_xdomain_lookup lookup; |
2342 | struct tb_xdomain *xd; |
2343 | |
2344 | memset(&lookup, 0, sizeof(lookup)); |
2345 | lookup.uuid = uuid; |
2346 | |
	xd = switch_find_xdomain(tb->root_switch, &lookup);
2348 | return tb_xdomain_get(xd); |
2349 | } |
2350 | EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid); |
2351 | |
2352 | /** |
2353 | * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth |
2354 | * @tb: Domain where the XDomain belongs to |
2355 | * @link: Root switch link number |
2356 | * @depth: Depth in the link |
2357 | * |
2358 | * Finds XDomain by walking through the Thunderbolt topology below @tb. |
2359 | * The returned XDomain will have its reference count increased so the |
2360 | * caller needs to call tb_xdomain_put() when it is done with the |
2361 | * object. |
2362 | * |
2363 | * This will find all XDomains including the ones that are not yet added |
2364 | * to the bus (handshake is still in progress). |
2365 | * |
2366 | * The caller needs to hold @tb->lock. |
2367 | */ |
2368 | struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, |
2369 | u8 depth) |
2370 | { |
2371 | struct tb_xdomain_lookup lookup; |
2372 | struct tb_xdomain *xd; |
2373 | |
2374 | memset(&lookup, 0, sizeof(lookup)); |
2375 | lookup.link = link; |
2376 | lookup.depth = depth; |
2377 | |
	xd = switch_find_xdomain(tb->root_switch, &lookup);
2379 | return tb_xdomain_get(xd); |
2380 | } |
2381 | |
2382 | /** |
2383 | * tb_xdomain_find_by_route() - Find an XDomain by route string |
2384 | * @tb: Domain where the XDomain belongs to |
2385 | * @route: XDomain route string |
2386 | * |
2387 | * Finds XDomain by walking through the Thunderbolt topology below @tb. |
2388 | * The returned XDomain will have its reference count increased so the |
2389 | * caller needs to call tb_xdomain_put() when it is done with the |
2390 | * object. |
2391 | * |
2392 | * This will find all XDomains including the ones that are not yet added |
2393 | * to the bus (handshake is still in progress). |
2394 | * |
2395 | * The caller needs to hold @tb->lock. |
2396 | */ |
2397 | struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route) |
2398 | { |
2399 | struct tb_xdomain_lookup lookup; |
2400 | struct tb_xdomain *xd; |
2401 | |
2402 | memset(&lookup, 0, sizeof(lookup)); |
2403 | lookup.route = route; |
2404 | |
	xd = switch_find_xdomain(tb->root_switch, &lookup);
2406 | return tb_xdomain_get(xd); |
2407 | } |
2408 | EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route); |
2409 | |
2410 | bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, |
2411 | const void *buf, size_t size) |
2412 | { |
2413 | const struct tb_protocol_handler *handler, *tmp; |
2414 | const struct tb_xdp_header *hdr = buf; |
2415 | unsigned int length; |
2416 | int ret = 0; |
2417 | |
	/* We expect the packet to be at least the size of the header */
2419 | length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK; |
2420 | if (length != size / 4 - sizeof(hdr->xd_hdr) / 4) |
2421 | return true; |
2422 | if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4) |
2423 | return true; |
2424 | |
2425 | /* |
2426 | * Handle XDomain discovery protocol packets directly here. For |
2427 | * other protocols (based on their UUID) we call registered |
2428 | * handlers in turn. |
2429 | */ |
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
2431 | if (type == TB_CFG_PKG_XDOMAIN_REQ) |
2432 | return tb_xdp_schedule_request(tb, hdr, size); |
2433 | return false; |
2434 | } |
2435 | |
2436 | mutex_lock(&xdomain_lock); |
2437 | list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) { |
		if (!uuid_equal(&hdr->uuid, handler->uuid))
2439 | continue; |
2440 | |
		mutex_unlock(&xdomain_lock);
2442 | ret = handler->callback(buf, size, handler->data); |
2443 | mutex_lock(&xdomain_lock); |
2444 | |
2445 | if (ret) |
2446 | break; |
2447 | } |
	mutex_unlock(&xdomain_lock);
2449 | |
2450 | return ret > 0; |
2451 | } |
2452 | |
2453 | static int update_xdomain(struct device *dev, void *data) |
2454 | { |
2455 | struct tb_xdomain *xd; |
2456 | |
2457 | xd = tb_to_xdomain(dev); |
2458 | if (xd) { |
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
2461 | } |
2462 | |
2463 | return 0; |
2464 | } |
2465 | |
2466 | static void update_all_xdomains(void) |
2467 | { |
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
2469 | } |
2470 | |
2471 | static bool remove_directory(const char *key, const struct tb_property_dir *dir) |
2472 | { |
2473 | struct tb_property *p; |
2474 | |
	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
2477 | if (p && p->value.dir == dir) { |
		tb_property_remove(p);
2479 | return true; |
2480 | } |
2481 | return false; |
2482 | } |
2483 | |
2484 | /** |
2485 | * tb_register_property_dir() - Register property directory to the host |
2486 | * @key: Key (name) of the directory to add |
2487 | * @dir: Directory to add |
2488 | * |
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
2491 | * notified so they can re-read properties of this host if they are |
2492 | * interested. |
2493 | * |
2494 | * Return: %0 on success and negative errno on failure |
2495 | */ |
2496 | int tb_register_property_dir(const char *key, struct tb_property_dir *dir) |
2497 | { |
2498 | int ret; |
2499 | |
2500 | if (WARN_ON(!xdomain_property_dir)) |
2501 | return -EAGAIN; |
2502 | |
2503 | if (!key || strlen(key) > 8) |
2504 | return -EINVAL; |
2505 | |
2506 | mutex_lock(&xdomain_lock); |
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
2509 | ret = -EEXIST; |
2510 | goto err_unlock; |
2511 | } |
2512 | |
	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
2514 | if (ret) |
2515 | goto err_unlock; |
2516 | |
2517 | xdomain_property_block_gen++; |
2518 | |
	mutex_unlock(&xdomain_lock);
2520 | update_all_xdomains(); |
2521 | return 0; |
2522 | |
2523 | err_unlock: |
	mutex_unlock(&xdomain_lock);
2525 | return ret; |
2526 | } |
2527 | EXPORT_SYMBOL_GPL(tb_register_property_dir); |
2528 | |
2529 | /** |
2530 | * tb_unregister_property_dir() - Removes property directory from host |
2531 | * @key: Key (name) of the directory |
2532 | * @dir: Directory to remove |
2533 | * |
2534 | * This will remove the existing directory from this host and notify the |
2535 | * connected hosts about the change. |
2536 | */ |
2537 | void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir) |
2538 | { |
	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		xdomain_property_block_gen++;
	mutex_unlock(&xdomain_lock);

	update_all_xdomains();
2548 | } |
2549 | EXPORT_SYMBOL_GPL(tb_unregister_property_dir); |
2550 | |
2551 | int tb_xdomain_init(void) |
2552 | { |
2553 | xdomain_property_dir = tb_property_create_dir(NULL); |
2554 | if (!xdomain_property_dir) |
2555 | return -ENOMEM; |
2556 | |
2557 | /* |
2558 | * Initialize standard set of properties without any service |
2559 | * directories. Those will be added by service drivers |
2560 | * themselves when they are loaded. |
2561 | * |
2562 | * Rest of the properties are filled dynamically based on these |
2563 | * when the P2P connection is made. |
2564 | */ |
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
2570 | |
2571 | xdomain_property_block_gen = get_random_u32(); |
2572 | return 0; |
2573 | } |
2574 | |
2575 | void tb_xdomain_exit(void) |
2576 | { |
	tb_property_free_dir(xdomain_property_dir);
2578 | } |
2579 | |