1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Data Object Exchange |
4 | * PCIe r6.0, sec 6.30 DOE |
5 | * |
6 | * Copyright (C) 2021 Huawei |
7 | * Jonathan Cameron <Jonathan.Cameron@huawei.com> |
8 | * |
9 | * Copyright (C) 2022 Intel Corporation |
10 | * Ira Weiny <ira.weiny@intel.com> |
11 | */ |
12 | |
13 | #define dev_fmt(fmt) "DOE: " fmt |
14 | |
15 | #include <linux/bitfield.h> |
16 | #include <linux/delay.h> |
17 | #include <linux/jiffies.h> |
18 | #include <linux/mutex.h> |
19 | #include <linux/pci.h> |
20 | #include <linux/pci-doe.h> |
21 | #include <linux/workqueue.h> |
22 | |
23 | #include "pci.h" |
24 | |
25 | #define PCI_DOE_PROTOCOL_DISCOVERY 0 |
26 | |
27 | /* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */ |
28 | #define PCI_DOE_TIMEOUT HZ |
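/* Poll at 1/128 of the 1 s timeout, i.e. roughly every 8 ms */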
29 | #define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128) |
30 | |
31 | #define PCI_DOE_FLAG_CANCEL 0 |
32 | #define PCI_DOE_FLAG_DEAD 1 |
33 | |
34 | /* Max data object length is 2^18 dwords */ |
35 | #define PCI_DOE_MAX_LENGTH (1 << 18) |
36 | |
37 | /** |
38 | * struct pci_doe_mb - State for a single DOE mailbox |
39 | * |
40 | * This state is used to manage a single DOE mailbox capability. All fields |
 * should be considered opaque to consumers; the structure is passed into
 * the helpers below after being created by pci_doe_create_mb().
43 | * |
44 | * @pdev: PCI device this mailbox belongs to |
45 | * @cap_offset: Capability offset |
46 | * @prots: Array of protocols supported (encoded as long values) |
47 | * @wq: Wait queue for work item |
 * @work_queue: Queue of pci_doe_task work items
49 | * @flags: Bit array of PCI_DOE_FLAG_* flags |
50 | */ |
51 | struct pci_doe_mb { |
52 | struct pci_dev *pdev; |
53 | u16 cap_offset; |
54 | struct xarray prots; |
55 | |
56 | wait_queue_head_t wq; |
57 | struct workqueue_struct *work_queue; |
58 | unsigned long flags; |
59 | }; |
60 | |
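/* Identifies a data object protocol: PCI-SIG Vendor ID plus object type */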
61 | struct pci_doe_protocol { |
62 | u16 vid; |
63 | u8 type; |
64 | }; |
65 | |
66 | /** |
67 | * struct pci_doe_task - represents a single query/response |
68 | * |
69 | * @prot: DOE Protocol |
70 | * @request_pl: The request payload |
71 | * @request_pl_sz: Size of the request payload (bytes) |
72 | * @response_pl: The response payload |
73 | * @response_pl_sz: Size of the response payload (bytes) |
 * @rv: Return value: length of received response (bytes) or negative errno
75 | * @complete: Called when task is complete |
76 | * @private: Private data for the consumer |
77 | * @work: Used internally by the mailbox |
78 | * @doe_mb: Used internally by the mailbox |
79 | */ |
80 | struct pci_doe_task { |
81 | struct pci_doe_protocol prot; |
82 | const __le32 *request_pl; |
83 | size_t request_pl_sz; |
84 | __le32 *response_pl; |
85 | size_t response_pl_sz; |
86 | int rv; |
87 | void (*complete)(struct pci_doe_task *task); |
88 | void *private; |
89 | |
90 | /* initialized by pci_doe_submit_task() */ |
91 | struct work_struct work; |
92 | struct pci_doe_mb *doe_mb; |
93 | }; |
94 | |
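/*
 * Sleep for up to @timeout jiffies. Returns -EIO if the mailbox was
 * cancelled in the meantime, 0 if the full interval elapsed.
 */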
95 | static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout) |
96 | { |
97 | if (wait_event_timeout(doe_mb->wq, |
98 | test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags), |
99 | timeout)) |
100 | return -EIO; |
101 | return 0; |
102 | } |
103 | |
104 | static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val) |
105 | { |
106 | struct pci_dev *pdev = doe_mb->pdev; |
107 | int offset = doe_mb->cap_offset; |
108 | |
	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
110 | } |
111 | |
112 | static int pci_doe_abort(struct pci_doe_mb *doe_mb) |
113 | { |
114 | struct pci_dev *pdev = doe_mb->pdev; |
115 | int offset = doe_mb->cap_offset; |
116 | unsigned long timeout_jiffies; |
117 | |
	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);
119 | |
120 | timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; |
121 | pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT); |
122 | |
123 | do { |
124 | int rc; |
125 | u32 val; |
126 | |
127 | rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL); |
128 | if (rc) |
129 | return rc; |
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
131 | |
132 | /* Abort success! */ |
133 | if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) && |
134 | !FIELD_GET(PCI_DOE_STATUS_BUSY, val)) |
135 | return 0; |
136 | |
137 | } while (!time_after(jiffies, timeout_jiffies)); |
138 | |
139 | /* Abort has timed out and the MB is dead */ |
	pci_err(pdev, "[%x] ABORT timed out\n", offset);
141 | return -EIO; |
142 | } |
143 | |
144 | static int pci_doe_send_req(struct pci_doe_mb *doe_mb, |
145 | struct pci_doe_task *task) |
146 | { |
147 | struct pci_dev *pdev = doe_mb->pdev; |
148 | int offset = doe_mb->cap_offset; |
149 | size_t length, remainder; |
150 | u32 val; |
151 | int i; |
152 | |
153 | /* |
154 | * Check the DOE busy bit is not set. If it is set, this could indicate |
	 * someone other than Linux (e.g. firmware) is using the mailbox. Note
	 * that firmware and the OS are expected to negotiate access rights
	 * via a yet-to-be-defined method.
158 | */ |
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
160 | if (FIELD_GET(PCI_DOE_STATUS_BUSY, val)) |
161 | return -EBUSY; |
162 | |
163 | if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) |
164 | return -EIO; |
165 | |
166 | /* Length is 2 DW of header + length of payload in DW */ |
167 | length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32)); |
168 | if (length > PCI_DOE_MAX_LENGTH) |
169 | return -EIO; |
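	/* A Length field of 0 encodes the maximum of 2^18 dwords */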
170 | if (length == PCI_DOE_MAX_LENGTH) |
171 | length = 0; |
172 | |
173 | /* Write DOE Header */ |
174 | val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) | |
175 | FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type); |
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
178 | FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, |
179 | length)); |
180 | |
181 | /* Write payload */ |
182 | for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++) |
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
184 | le32_to_cpu(task->request_pl[i])); |
185 | |
186 | /* Write last payload dword */ |
187 | remainder = task->request_pl_sz % sizeof(__le32); |
188 | if (remainder) { |
189 | val = 0; |
190 | memcpy(&val, &task->request_pl[i], remainder); |
191 | le32_to_cpus(&val); |
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
193 | } |
194 | |
195 | pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO); |
196 | |
197 | return 0; |
198 | } |
199 | |
200 | static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb) |
201 | { |
202 | struct pci_dev *pdev = doe_mb->pdev; |
203 | int offset = doe_mb->cap_offset; |
204 | u32 val; |
205 | |
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
207 | if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) |
208 | return true; |
209 | return false; |
210 | } |
211 | |
212 | static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) |
213 | { |
214 | size_t length, payload_length, remainder, received; |
215 | struct pci_dev *pdev = doe_mb->pdev; |
216 | int offset = doe_mb->cap_offset; |
217 | int i = 0; |
218 | u32 val; |
219 | |
220 | /* Read the first dword to get the protocol */ |
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
222 | if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) || |
223 | (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) { |
		dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
225 | doe_mb->cap_offset, task->prot.vid, task->prot.type, |
226 | FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val), |
227 | FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val)); |
228 | return -EIO; |
229 | } |
230 | |
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	/* Read the second dword to get the length */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
235 | |
236 | length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val); |
237 | /* A value of 0x0 indicates max data object length */ |
238 | if (!length) |
239 | length = PCI_DOE_MAX_LENGTH; |
240 | if (length < 2) |
241 | return -EIO; |
242 | |
243 | /* First 2 dwords have already been read */ |
244 | length -= 2; |
245 | received = task->response_pl_sz; |
246 | payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32)); |
247 | remainder = task->response_pl_sz % sizeof(__le32); |
248 | |
249 | /* remainder signifies number of data bytes in last payload dword */ |
250 | if (!remainder) |
251 | remainder = sizeof(__le32); |
252 | |
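	/* The device may respond with fewer dwords than the caller's buffer holds */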
253 | if (length < payload_length) { |
254 | received = length * sizeof(__le32); |
255 | payload_length = length; |
256 | remainder = sizeof(__le32); |
257 | } |
258 | |
259 | if (payload_length) { |
260 | /* Read all payload dwords except the last */ |
261 | for (; i < payload_length - 1; i++) { |
			pci_read_config_dword(pdev, offset + PCI_DOE_READ,
					      &val);
264 | task->response_pl[i] = cpu_to_le32(val); |
			pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
266 | } |
267 | |
268 | /* Read last payload dword */ |
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
270 | cpu_to_le32s(&val); |
271 | memcpy(&task->response_pl[i], &val, remainder); |
272 | /* Prior to the last ack, ensure Data Object Ready */ |
273 | if (!pci_doe_data_obj_ready(doe_mb)) |
274 | return -EIO; |
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
276 | i++; |
277 | } |
278 | |
279 | /* Flush excess length */ |
280 | for (; i < length; i++) { |
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
283 | } |
284 | |
	/* Final check for errors raised since Data Object Ready was set */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
287 | if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) |
288 | return -EIO; |
289 | |
290 | return received; |
291 | } |
292 | |
293 | static void signal_task_complete(struct pci_doe_task *task, int rv) |
294 | { |
295 | task->rv = rv; |
	destroy_work_on_stack(&task->work);
297 | task->complete(task); |
298 | } |
299 | |
300 | static void signal_task_abort(struct pci_doe_task *task, int rv) |
301 | { |
302 | struct pci_doe_mb *doe_mb = task->doe_mb; |
303 | struct pci_dev *pdev = doe_mb->pdev; |
304 | |
305 | if (pci_doe_abort(doe_mb)) { |
306 | /* |
307 | * If the device can't process an abort; set the mailbox dead |
308 | * - no more submissions |
309 | */ |
310 | pci_err(pdev, "[%x] Abort failed marking mailbox dead\n" , |
311 | doe_mb->cap_offset); |
312 | set_bit(PCI_DOE_FLAG_DEAD, addr: &doe_mb->flags); |
313 | } |
314 | signal_task_complete(task, rv); |
315 | } |
316 | |
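/*
 * Worker for a single task: send the request, poll for Data Object Ready
 * within the 1 second timeout, then read back the response.
 */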
317 | static void doe_statemachine_work(struct work_struct *work) |
318 | { |
319 | struct pci_doe_task *task = container_of(work, struct pci_doe_task, |
320 | work); |
321 | struct pci_doe_mb *doe_mb = task->doe_mb; |
322 | struct pci_dev *pdev = doe_mb->pdev; |
323 | int offset = doe_mb->cap_offset; |
324 | unsigned long timeout_jiffies; |
325 | u32 val; |
326 | int rc; |
327 | |
328 | if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) { |
		signal_task_complete(task, -EIO);
330 | return; |
331 | } |
332 | |
333 | /* Send request */ |
334 | rc = pci_doe_send_req(doe_mb, task); |
335 | if (rc) { |
336 | /* |
337 | * The specification does not provide any guidance on how to |
338 | * resolve conflicting requests from other entities. |
339 | * Furthermore, it is likely that busy will not be detected |
340 | * most of the time. Flag any detection of status busy with an |
341 | * error. |
342 | */ |
343 | if (rc == -EBUSY) |
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
347 | return; |
348 | } |
349 | |
350 | timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; |
351 | /* Poll for response */ |
352 | retry_resp: |
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
356 | return; |
357 | } |
358 | |
359 | if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) { |
360 | if (time_after(jiffies, timeout_jiffies)) { |
			signal_task_abort(task, -EIO);
362 | return; |
363 | } |
364 | rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL); |
365 | if (rc) { |
			signal_task_abort(task, rc);
367 | return; |
368 | } |
369 | goto retry_resp; |
370 | } |
371 | |
372 | rc = pci_doe_recv_resp(doe_mb, task); |
373 | if (rc < 0) { |
		signal_task_abort(task, rc);
375 | return; |
376 | } |
377 | |
	signal_task_complete(task, rc);
379 | } |
380 | |
381 | static void pci_doe_task_complete(struct pci_doe_task *task) |
382 | { |
383 | complete(task->private); |
384 | } |
385 | |
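/*
 * Query one entry of the mailbox's discovery table. On success, *index is
 * advanced to the next entry; an index of 0 signals the end of the table.
 */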
386 | static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid, |
387 | u8 *protocol) |
388 | { |
389 | u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX, |
390 | *index); |
391 | __le32 request_pl_le = cpu_to_le32(request_pl); |
392 | __le32 response_pl_le; |
393 | u32 response_pl; |
394 | int rc; |
395 | |
396 | rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY, |
		     &request_pl_le, sizeof(request_pl_le),
		     &response_pl_le, sizeof(response_pl_le));
399 | if (rc < 0) |
400 | return rc; |
401 | |
402 | if (rc != sizeof(response_pl_le)) |
403 | return -EIO; |
404 | |
405 | response_pl = le32_to_cpu(response_pl_le); |
406 | *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl); |
407 | *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL, |
408 | response_pl); |
409 | *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX, |
410 | response_pl); |
411 | |
412 | return 0; |
413 | } |
414 | |
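/* Pack the 16-bit Vendor ID and 8-bit type into a single XArray value entry */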
415 | static void *pci_doe_xa_prot_entry(u16 vid, u8 prot) |
416 | { |
	return xa_mk_value((vid << 8) | prot);
418 | } |
419 | |
420 | static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb) |
421 | { |
422 | u8 index = 0; |
423 | u8 xa_idx = 0; |
424 | |
425 | do { |
426 | int rc; |
427 | u16 vid; |
428 | u8 prot; |
429 | |
		rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
431 | if (rc) |
432 | return rc; |
433 | |
434 | pci_dbg(doe_mb->pdev, |
435 | "[%x] Found protocol %d vid: %x prot: %x\n" , |
436 | doe_mb->cap_offset, xa_idx, vid, prot); |
437 | |
		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
440 | if (rc) |
441 | return rc; |
442 | } while (index); |
443 | |
444 | return 0; |
445 | } |
446 | |
447 | static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb) |
448 | { |
449 | /* Stop all pending work items from starting */ |
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
451 | |
452 | /* Cancel an in progress work item, if necessary */ |
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
454 | wake_up(&doe_mb->wq); |
455 | } |
456 | |
457 | /** |
458 | * pci_doe_create_mb() - Create a DOE mailbox object |
459 | * |
460 | * @pdev: PCI device to create the DOE mailbox for |
461 | * @cap_offset: Offset of the DOE mailbox |
462 | * |
463 | * Create a single mailbox object to manage the mailbox protocol at the |
464 | * cap_offset specified. |
465 | * |
466 | * RETURNS: created mailbox object on success |
467 | * ERR_PTR(-errno) on failure |
468 | */ |
469 | static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev, |
470 | u16 cap_offset) |
471 | { |
472 | struct pci_doe_mb *doe_mb; |
473 | int rc; |
474 | |
	doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);
478 | |
479 | doe_mb->pdev = pdev; |
480 | doe_mb->cap_offset = cap_offset; |
481 | init_waitqueue_head(&doe_mb->wq); |
	xa_init(&doe_mb->prots);
483 | |
	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
485 | dev_bus_name(&pdev->dev), |
486 | pci_name(pdev), |
487 | doe_mb->cap_offset); |
488 | if (!doe_mb->work_queue) { |
489 | pci_err(pdev, "[%x] failed to allocate work queue\n" , |
490 | doe_mb->cap_offset); |
491 | rc = -ENOMEM; |
492 | goto err_free; |
493 | } |
494 | |
495 | /* Reset the mailbox by issuing an abort */ |
496 | rc = pci_doe_abort(doe_mb); |
497 | if (rc) { |
498 | pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n" , |
499 | doe_mb->cap_offset, rc); |
500 | goto err_destroy_wq; |
501 | } |
502 | |
503 | /* |
504 | * The state machine and the mailbox should be in sync now; |
505 | * Use the mailbox to query protocols. |
506 | */ |
507 | rc = pci_doe_cache_protocols(doe_mb); |
508 | if (rc) { |
509 | pci_err(pdev, "[%x] failed to cache protocols : %d\n" , |
510 | doe_mb->cap_offset, rc); |
511 | goto err_cancel; |
512 | } |
513 | |
514 | return doe_mb; |
515 | |
516 | err_cancel: |
517 | pci_doe_cancel_tasks(doe_mb); |
518 | xa_destroy(&doe_mb->prots); |
519 | err_destroy_wq: |
	destroy_workqueue(doe_mb->work_queue);
err_free:
	kfree(doe_mb);
	return ERR_PTR(rc);
524 | } |
525 | |
526 | /** |
527 | * pci_doe_destroy_mb() - Destroy a DOE mailbox object |
528 | * |
529 | * @doe_mb: DOE mailbox |
530 | * |
531 | * Destroy all internal data structures created for the DOE mailbox. |
532 | */ |
533 | static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb) |
534 | { |
535 | pci_doe_cancel_tasks(doe_mb); |
536 | xa_destroy(&doe_mb->prots); |
	destroy_workqueue(doe_mb->work_queue);
	kfree(doe_mb);
539 | } |
540 | |
541 | /** |
542 | * pci_doe_supports_prot() - Return if the DOE instance supports the given |
543 | * protocol |
544 | * @doe_mb: DOE mailbox capability to query |
545 | * @vid: Protocol Vendor ID |
546 | * @type: Protocol type |
547 | * |
548 | * RETURNS: True if the DOE mailbox supports the protocol specified |
549 | */ |
550 | static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type) |
551 | { |
552 | unsigned long index; |
553 | void *entry; |
554 | |
555 | /* The discovery protocol must always be supported */ |
556 | if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY) |
557 | return true; |
558 | |
559 | xa_for_each(&doe_mb->prots, index, entry) |
		if (entry == pci_doe_xa_prot_entry(vid, type))
561 | return true; |
562 | |
563 | return false; |
564 | } |
565 | |
566 | /** |
567 | * pci_doe_submit_task() - Submit a task to be processed by the state machine |
568 | * |
569 | * @doe_mb: DOE mailbox capability to submit to |
570 | * @task: task to be queued |
571 | * |
572 | * Submit a DOE task (request/response) to the DOE mailbox to be processed. |
573 | * Returns upon queueing the task object. If the queue is full this function |
574 | * will sleep until there is room in the queue. |
575 | * |
576 | * task->complete will be called when the state machine is done processing this |
577 | * task. |
578 | * |
579 | * @task must be allocated on the stack. |
580 | * |
581 | * Excess data will be discarded. |
582 | * |
583 | * RETURNS: 0 when task has been successfully queued, -ERRNO on error |
584 | */ |
585 | static int pci_doe_submit_task(struct pci_doe_mb *doe_mb, |
586 | struct pci_doe_task *task) |
587 | { |
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
589 | return -EINVAL; |
590 | |
591 | if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) |
592 | return -EIO; |
593 | |
594 | task->doe_mb = doe_mb; |
595 | INIT_WORK_ONSTACK(&task->work, doe_statemachine_work); |
	queue_work(doe_mb->work_queue, &task->work);
597 | return 0; |
598 | } |
599 | |
600 | /** |
601 | * pci_doe() - Perform Data Object Exchange |
602 | * |
603 | * @doe_mb: DOE Mailbox |
604 | * @vendor: Vendor ID |
605 | * @type: Data Object Type |
606 | * @request: Request payload |
607 | * @request_sz: Size of request payload (bytes) |
608 | * @response: Response payload |
609 | * @response_sz: Size of response payload (bytes) |
610 | * |
611 | * Submit @request to @doe_mb and store the @response. |
612 | * The DOE exchange is performed synchronously and may therefore sleep. |
613 | * |
614 | * Payloads are treated as opaque byte streams which are transmitted verbatim, |
615 | * without byte-swapping. If payloads contain little-endian register values, |
616 | * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu(). |
617 | * |
618 | * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0 |
619 | * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords. The last |
620 | * (partial) dword is copied with byte granularity and padded with zeroes if |
621 | * necessary. Callers are thus relieved of using dword-sized bounce buffers. |
622 | * |
623 | * RETURNS: Length of received response or negative errno. |
624 | * Received data in excess of @response_sz is discarded. |
625 | * The length may be smaller than @response_sz and the caller |
626 | * is responsible for checking that. |
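 *
 * A minimal usage sketch, mirroring the discovery exchange this file
 * performs internally (index 0 queries the first supported protocol)::
 *
 *	__le32 req = cpu_to_le32(0);
 *	__le32 rsp;
 *	int rc;
 *
 *	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG,
 *		     PCI_DOE_PROTOCOL_DISCOVERY,
 *		     &req, sizeof(req), &rsp, sizeof(rsp));
 *	if (rc != sizeof(rsp))
 *		rc = rc < 0 ? rc : -EIO;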
627 | */ |
628 | int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type, |
629 | const void *request, size_t request_sz, |
630 | void *response, size_t response_sz) |
631 | { |
632 | DECLARE_COMPLETION_ONSTACK(c); |
633 | struct pci_doe_task task = { |
634 | .prot.vid = vendor, |
635 | .prot.type = type, |
636 | .request_pl = request, |
637 | .request_pl_sz = request_sz, |
638 | .response_pl = response, |
639 | .response_pl_sz = response_sz, |
640 | .complete = pci_doe_task_complete, |
641 | .private = &c, |
642 | }; |
643 | int rc; |
644 | |
	rc = pci_doe_submit_task(doe_mb, &task);
646 | if (rc) |
647 | return rc; |
648 | |
649 | wait_for_completion(&c); |
650 | |
651 | return task.rv; |
652 | } |
653 | EXPORT_SYMBOL_GPL(pci_doe); |
654 | |
655 | /** |
656 | * pci_find_doe_mailbox() - Find Data Object Exchange mailbox |
657 | * |
658 | * @pdev: PCI device |
659 | * @vendor: Vendor ID |
660 | * @type: Data Object Type |
661 | * |
662 | * Find first DOE mailbox of a PCI device which supports the given protocol. |
663 | * |
664 | * RETURNS: Pointer to the DOE mailbox or NULL if none was found. |
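 *
 * A brief usage sketch; MY_VID and MY_TYPE are hypothetical stand-ins for
 * a real protocol's Vendor ID and object type::
 *
 *	struct pci_doe_mb *mb = pci_find_doe_mailbox(pdev, MY_VID, MY_TYPE);
 *
 *	if (mb)
 *		rc = pci_doe(mb, MY_VID, MY_TYPE, &req, sizeof(req),
 *			     &rsp, sizeof(rsp));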
665 | */ |
666 | struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor, |
667 | u8 type) |
668 | { |
669 | struct pci_doe_mb *doe_mb; |
670 | unsigned long index; |
671 | |
672 | xa_for_each(&pdev->doe_mbs, index, doe_mb) |
		if (pci_doe_supports_prot(doe_mb, vendor, type))
674 | return doe_mb; |
675 | |
676 | return NULL; |
677 | } |
678 | EXPORT_SYMBOL_GPL(pci_find_doe_mailbox); |
679 | |
680 | void pci_doe_init(struct pci_dev *pdev) |
681 | { |
682 | struct pci_doe_mb *doe_mb; |
683 | u16 offset = 0; |
684 | int rc; |
685 | |
	xa_init(&pdev->doe_mbs);
687 | |
	while ((offset = pci_find_next_ext_capability(pdev, offset,
689 | PCI_EXT_CAP_ID_DOE))) { |
		doe_mb = pci_doe_create_mb(pdev, offset);
		if (IS_ERR(doe_mb)) {
			pci_err(pdev, "[%x] failed to create mailbox: %ld\n",
693 | offset, PTR_ERR(doe_mb)); |
694 | continue; |
695 | } |
696 | |
		rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL);
698 | if (rc) { |
699 | pci_err(pdev, "[%x] failed to insert mailbox: %d\n" , |
700 | offset, rc); |
701 | pci_doe_destroy_mb(doe_mb); |
702 | } |
703 | } |
704 | } |
705 | |
706 | void pci_doe_destroy(struct pci_dev *pdev) |
707 | { |
708 | struct pci_doe_mb *doe_mb; |
709 | unsigned long index; |
710 | |
711 | xa_for_each(&pdev->doe_mbs, index, doe_mb) |
712 | pci_doe_destroy_mb(doe_mb); |
713 | |
714 | xa_destroy(&pdev->doe_mbs); |
715 | } |
716 | |
717 | void pci_doe_disconnected(struct pci_dev *pdev) |
718 | { |
719 | struct pci_doe_mb *doe_mb; |
720 | unsigned long index; |
721 | |
722 | xa_for_each(&pdev->doe_mbs, index, doe_mb) |
723 | pci_doe_cancel_tasks(doe_mb); |
724 | } |
725 | |