// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static struct class *pci_epc_class;

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (IS_ERR_OR_NULL(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
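
/*
 * Usage sketch (illustrative only, not taken from an in-tree driver): an
 * endpoint function driver can look up its controller by name and must drop
 * the reference with pci_epc_put() when done.  The controller name
 * "example-pcie-ep" is a made-up placeholder.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("example-pcie-ep");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	... use the controller ...
 *
 *	pci_epc_put(epc);
 */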

/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from which to search for an unreserved BAR
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * by an endpoint function.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	int i;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
		bar++;

	for (i = bar; i < PCI_STD_NUM_BARS; i++) {
		/* If the BAR is not reserved, return it. */
		if (epc_features->bar[i].type != BAR_RESERVED)
			return i;
	}

	return NO_BAR;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
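
/*
 * Usage sketch (illustrative only): walking all unreserved BARs described by
 * a pci_epc_features structure.  "features" is assumed to have been obtained
 * from pci_epc_get_features() below.
 *
 *	enum pci_barno bar;
 *
 *	for (bar = pci_epc_get_first_free_bar(features); bar != NO_BAR;
 *	     bar = pci_epc_get_next_free_bar(features, bar + 1))
 *		pr_info("BAR%d is usable\n", bar);
 */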

/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	      virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
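
/*
 * Usage sketch (illustrative only): query the features of physical function
 * 0 (no virtual function) and pick the first BAR the function driver may
 * use.  "epc" is assumed to be a valid controller obtained earlier.
 *
 *	const struct pci_epc_features *features;
 *	enum pci_barno reg_bar;
 *
 *	features = pci_epc_get_features(epc, 0, 0);
 *	if (!features)
 *		return -EOPNOTSUPP;
 *
 *	reg_bar = pci_epc_get_first_free_bar(features);
 *	if (reg_bar == NO_BAR)
 *		return -EINVAL;
 */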

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the link of the EPC device that has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the link of *this* EPC device has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; INTX, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
 *
 * Invoke to raise an INTX, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      unsigned int type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
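
/*
 * Usage sketch (illustrative only): raise MSI vector 1 towards the host from
 * physical function 0.  PCI_IRQ_MSI is assumed to be the interrupt type
 * constant used with this API in this kernel version.
 *
 *	ret = pci_epc_raise_irq(epc, 0, 0, PCI_IRQ_MSI, 1);
 *	if (ret)
 *		dev_err(&epc->dev, "failed to raise MSI\n");
 */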

/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number with range (1-N)
 * @entry_size: size of the outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt_num'
 * @msi_addr_offset: offset of the MSI address from the aligned outbound
 *		     address to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
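
/*
 * Usage sketch (illustrative only): advertise up to 4 MSI vectors during
 * function setup, then later read back how many vectors the host actually
 * enabled (pci_epc_get_msi() returns 0 while MSI is still disabled).
 *
 *	ret = pci_epc_set_msi(epc, 0, 0, 4);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	nr_irqs = pci_epc_get_msi(epc, 0, 0);
 *	if (!nr_irqs)
 *		dev_info(&epc->dev, "host has not enabled MSI yet\n");
 */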

/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
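
/*
 * Usage sketch (illustrative only): request 8 MSI-X vectors with the MSI-X
 * table at offset 0 of BAR_1.  The BAR number and offset are arbitrary
 * example values and must match where the function driver really places its
 * MSI-X table.
 *
 *	ret = pci_epc_set_msix(epc, 0, 0, 8, BAR_1, 0);
 *	if (ret)
 *		return ret;
 */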

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map the CPU address to a PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
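
/*
 * Usage sketch (illustrative only): map a host PCI address into the
 * controller's outbound window, write to it, then tear the mapping down.
 * "phys_addr" and "virt_addr" are assumed to come from
 * pci_epc_mem_alloc_addr() in the EPC memory helpers; "host_pci_addr" and
 * "buf" are hypothetical.
 *
 *	ret = pci_epc_map_addr(epc, 0, 0, phys_addr, host_pci_addr, SZ_4K);
 *	if (ret)
 *		return ret;
 *
 *	memcpy_toio(virt_addr, buf, SZ_4K);
 *
 *	pci_epc_unmap_addr(epc, 0, 0, phys_addr);
 */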

/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
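
/*
 * Usage sketch (illustrative only): describe a 1 MiB, 64-bit memory BAR
 * backed by memory the function driver allocated earlier (for example with
 * pci_epf_alloc_space()).  "bar_phys" is a hypothetical physical address of
 * that allocation.
 *
 *	struct pci_epf_bar epf_bar = {
 *		.phys_addr	= bar_phys,
 *		.size		= SZ_1M,
 *		.barno		= BAR_0,
 *		.flags		= PCI_BASE_ADDRESS_MEM_TYPE_64,
 *	};
 *
 *	ret = pci_epc_set_bar(epc, 0, 0, &epf_bar);
 */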

/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
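
/*
 * Usage sketch (illustrative only): program a minimal configuration header
 * for physical function 0.  The vendor and device IDs below are placeholder
 * values, not assigned IDs.
 *
 *	struct pci_epf_header header = {
 *		.vendorid	= 0x1234,
 *		.deviceid	= 0x5678,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, 0, 0, &header);
 */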

/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (IS_ERR_OR_NULL(epc) || !epf)
		return;

	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
	}

	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	epf->epc = NULL;
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
 *			connection with the Root Complex.
 * @epc: the EPC device which has dropped the link with the host
 *
 * Invoke to notify the EPF device that the EPC device has dropped the
 * connection with the Root Complex.
 */
void pci_epc_linkdown(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_down)
			epf->event_ops->link_down(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkdown);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's core initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
 *			  the BME event from the Root Complex
 * @epc: the EPC device that received the BME event
 *
 * Invoke to notify the EPF device that the EPC device has received the Bus
 * Master Enable (BME) event from the Root Complex.
 */
void pci_epc_bme_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (IS_ERR_OR_NULL(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->bme)
			epf->event_ops->bme(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
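
/*
 * Usage sketch (illustrative only): the notifiers above reach an endpoint
 * function driver through the event callbacks it registers in its struct
 * pci_epc_event_ops (declared in <linux/pci-epf.h>).  The foo_* handlers
 * below are hypothetical driver functions.
 *
 *	static const struct pci_epc_event_ops foo_event_ops = {
 *		.core_init	= foo_core_init,
 *		.link_up	= foo_link_up,
 *		.link_down	= foo_link_down,
 *		.bme		= foo_bme,
 *	};
 *
 *	...
 *	epf->event_ops = &foo_event_ops;
 */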

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * While at that, it also associates the device with the pci_epc using devres.
 * On driver detach, release function is invoked on the devres data,
 * then, devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
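
/*
 * Usage sketch (illustrative only): an endpoint controller driver's probe
 * path normally reaches this function through the devm_pci_epc_create()
 * wrapper in <linux/pci-epc.h>, which supplies THIS_MODULE as @owner.
 * "foo_epc_ops" is a hypothetical struct pci_epc_ops instance.
 *
 *	epc = devm_pci_epc_create(dev, &foo_epc_ops);
 *	if (IS_ERR(epc)) {
 *		dev_err(dev, "failed to create epc device\n");
 *		return PTR_ERR(epc);
 *	}
 */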

static int __init pci_epc_init(void)
{
	pci_epc_class = class_create("pci_epc");
	if (IS_ERR(pci_epc_class)) {
		pr_err("failed to create pci epc class --> %ld\n",
		       PTR_ERR(pci_epc_class));
		return PTR_ERR(pci_epc_class);
	}

	return 0;
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
