1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Port for PPC64 David Engebretsen, IBM Corp. |
4 | * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. |
5 | * |
6 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM |
7 | * Rework, based on alpha PCI code. |
8 | */ |
9 | |
10 | #undef DEBUG |
11 | |
12 | #include <linux/kernel.h> |
13 | #include <linux/pci.h> |
14 | #include <linux/string.h> |
15 | #include <linux/init.h> |
16 | #include <linux/export.h> |
17 | #include <linux/mm.h> |
18 | #include <linux/list.h> |
19 | #include <linux/syscalls.h> |
20 | #include <linux/irq.h> |
21 | #include <linux/vmalloc.h> |
22 | #include <linux/of.h> |
23 | |
24 | #include <asm/processor.h> |
25 | #include <asm/io.h> |
26 | #include <asm/pci-bridge.h> |
27 | #include <asm/byteorder.h> |
28 | #include <asm/machdep.h> |
29 | #include <asm/ppc-pci.h> |
30 | |
31 | /* pci_io_base -- the base address from which io bars are offsets. |
32 | * This is the lowest I/O base address (so bar values are always positive), |
33 | * and it *must* be the start of ISA space if an ISA bus exists because |
34 | * ISA drivers use hard coded offsets. If no ISA bus exists nothing |
35 | * is mapped on the first 64K of IO space |
36 | */ |
37 | unsigned long pci_io_base; |
38 | EXPORT_SYMBOL(pci_io_base); |
39 | |
40 | static int __init pcibios_init(void) |
41 | { |
42 | struct pci_controller *hose, *tmp; |
43 | |
44 | printk(KERN_INFO "PCI: Probing PCI hardware\n" ); |
45 | |
46 | /* For now, override phys_mem_access_prot. If we need it,g |
47 | * later, we may move that initialization to each ppc_md |
48 | */ |
49 | ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; |
50 | |
51 | /* On ppc64, we always enable PCI domains and we keep domain 0 |
52 | * backward compatible in /proc for video cards |
53 | */ |
54 | pci_add_flags(flags: PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0); |
55 | |
56 | /* Scan all of the recorded PCI controllers. */ |
57 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) |
58 | pcibios_scan_phb(hose); |
59 | |
60 | /* Call common code to handle resource allocation */ |
61 | pcibios_resource_survey(); |
62 | |
63 | /* Add devices. */ |
64 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) |
65 | pci_bus_add_devices(hose->bus); |
66 | |
67 | /* Call machine dependent fixup */ |
68 | if (ppc_md.pcibios_fixup) |
69 | ppc_md.pcibios_fixup(); |
70 | |
71 | printk(KERN_DEBUG "PCI: Probing PCI hardware done\n" ); |
72 | |
73 | return 0; |
74 | } |
75 | |
76 | subsys_initcall_sync(pcibios_init); |
77 | |
78 | int pcibios_unmap_io_space(struct pci_bus *bus) |
79 | { |
80 | struct pci_controller *hose; |
81 | |
82 | WARN_ON(bus == NULL); |
83 | |
84 | /* If this is not a PHB, we only flush the hash table over |
85 | * the area mapped by this bridge. We don't play with the PTE |
86 | * mappings since we might have to deal with sub-page alignments |
87 | * so flushing the hash table is the only sane way to make sure |
88 | * that no hash entries are covering that removed bridge area |
89 | * while still allowing other busses overlapping those pages |
90 | * |
91 | * Note: If we ever support P2P hotplug on Book3E, we'll have |
92 | * to do an appropriate TLB flush here too |
93 | */ |
94 | if (bus->self) { |
95 | #ifdef CONFIG_PPC_BOOK3S_64 |
96 | struct resource *res = bus->resource[0]; |
97 | #endif |
98 | |
99 | pr_debug("IO unmapping for PCI-PCI bridge %s\n" , |
100 | pci_name(bus->self)); |
101 | |
102 | #ifdef CONFIG_PPC_BOOK3S_64 |
103 | __flush_hash_table_range(res->start + _IO_BASE, |
104 | res->end + _IO_BASE + 1); |
105 | #endif |
106 | return 0; |
107 | } |
108 | |
109 | /* Get the host bridge */ |
110 | hose = pci_bus_to_host(bus); |
111 | |
112 | pr_debug("IO unmapping for PHB %pOF\n" , hose->dn); |
113 | pr_debug(" alloc=0x%p\n" , hose->io_base_alloc); |
114 | |
115 | iounmap(addr: hose->io_base_alloc); |
116 | return 0; |
117 | } |
118 | EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); |
119 | |
120 | void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size) |
121 | { |
122 | struct vm_struct *area; |
123 | unsigned long addr; |
124 | |
125 | WARN_ON_ONCE(paddr & ~PAGE_MASK); |
126 | WARN_ON_ONCE(size & ~PAGE_MASK); |
127 | |
128 | /* |
129 | * Let's allocate some IO space for that guy. We don't pass VM_IOREMAP |
130 | * because we don't care about alignment tricks that the core does in |
131 | * that case. Maybe we should due to stupid card with incomplete |
132 | * address decoding but I'd rather not deal with those outside of the |
133 | * reserved 64K legacy region. |
134 | */ |
135 | area = __get_vm_area_caller(size, VM_IOREMAP, start: PHB_IO_BASE, end: PHB_IO_END, |
136 | caller: __builtin_return_address(0)); |
137 | if (!area) |
138 | return NULL; |
139 | |
140 | addr = (unsigned long)area->addr; |
141 | if (ioremap_page_range(addr, end: addr + size, phys_addr: paddr, |
142 | pgprot_noncached(PAGE_KERNEL))) { |
143 | vunmap_range(addr, end: addr + size); |
144 | return NULL; |
145 | } |
146 | |
147 | return (void __iomem *)addr; |
148 | } |
149 | EXPORT_SYMBOL_GPL(ioremap_phb); |
150 | |
151 | static int pcibios_map_phb_io_space(struct pci_controller *hose) |
152 | { |
153 | unsigned long phys_page; |
154 | unsigned long size_page; |
155 | unsigned long io_virt_offset; |
156 | |
157 | phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); |
158 | size_page = ALIGN(hose->pci_io_size, PAGE_SIZE); |
159 | |
160 | /* Make sure IO area address is clear */ |
161 | hose->io_base_alloc = NULL; |
162 | |
163 | /* If there's no IO to map on that bus, get away too */ |
164 | if (hose->pci_io_size == 0 || hose->io_base_phys == 0) |
165 | return 0; |
166 | |
167 | /* Let's allocate some IO space for that guy. We don't pass |
168 | * VM_IOREMAP because we don't care about alignment tricks that |
169 | * the core does in that case. Maybe we should due to stupid card |
170 | * with incomplete address decoding but I'd rather not deal with |
171 | * those outside of the reserved 64K legacy region. |
172 | */ |
173 | hose->io_base_alloc = ioremap_phb(phys_page, size_page); |
174 | if (!hose->io_base_alloc) |
175 | return -ENOMEM; |
176 | hose->io_base_virt = hose->io_base_alloc + |
177 | hose->io_base_phys - phys_page; |
178 | |
179 | pr_debug("IO mapping for PHB %pOF\n" , hose->dn); |
180 | pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n" , |
181 | hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); |
182 | pr_debug(" size=0x%016llx (alloc=0x%016lx)\n" , |
183 | hose->pci_io_size, size_page); |
184 | |
185 | /* Fixup hose IO resource */ |
186 | io_virt_offset = pcibios_io_space_offset(hose); |
187 | hose->io_resource.start += io_virt_offset; |
188 | hose->io_resource.end += io_virt_offset; |
189 | |
190 | pr_debug(" hose->io_resource=%pR\n" , &hose->io_resource); |
191 | |
192 | return 0; |
193 | } |
194 | |
195 | int pcibios_map_io_space(struct pci_bus *bus) |
196 | { |
197 | WARN_ON(bus == NULL); |
198 | |
199 | /* If this not a PHB, nothing to do, page tables still exist and |
200 | * thus HPTEs will be faulted in when needed |
201 | */ |
202 | if (bus->self) { |
203 | pr_debug("IO mapping for PCI-PCI bridge %s\n" , |
204 | pci_name(bus->self)); |
205 | pr_debug(" virt=0x%016llx...0x%016llx\n" , |
206 | bus->resource[0]->start + _IO_BASE, |
207 | bus->resource[0]->end + _IO_BASE); |
208 | return 0; |
209 | } |
210 | |
211 | return pcibios_map_phb_io_space(hose: pci_bus_to_host(bus)); |
212 | } |
213 | EXPORT_SYMBOL_GPL(pcibios_map_io_space); |
214 | |
/* Arch hook called at PHB setup time: map the controller's IO window. */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
	pcibios_map_phb_io_space(hose);
}
219 | |
220 | #define IOBASE_BRIDGE_NUMBER 0 |
221 | #define IOBASE_MEMORY 1 |
222 | #define IOBASE_IO 2 |
223 | #define IOBASE_ISA_IO 3 |
224 | #define IOBASE_ISA_MEM 4 |
225 | |
226 | SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus, |
227 | unsigned long, in_devfn) |
228 | { |
229 | struct pci_controller* hose; |
230 | struct pci_bus *tmp_bus, *bus = NULL; |
231 | struct device_node *hose_node; |
232 | |
233 | /* Argh ! Please forgive me for that hack, but that's the |
234 | * simplest way to get existing XFree to not lockup on some |
235 | * G5 machines... So when something asks for bus 0 io base |
236 | * (bus 0 is HT root), we return the AGP one instead. |
237 | */ |
238 | if (in_bus == 0 && of_machine_is_compatible(compat: "MacRISC4" )) { |
239 | struct device_node *agp; |
240 | |
241 | agp = of_find_compatible_node(NULL, NULL, compat: "u3-agp" ); |
242 | if (agp) |
243 | in_bus = 0xf0; |
244 | of_node_put(node: agp); |
245 | } |
246 | |
247 | /* That syscall isn't quite compatible with PCI domains, but it's |
248 | * used on pre-domains setup. We return the first match |
249 | */ |
250 | |
251 | list_for_each_entry(tmp_bus, &pci_root_buses, node) { |
252 | if (in_bus >= tmp_bus->number && |
253 | in_bus <= tmp_bus->busn_res.end) { |
254 | bus = tmp_bus; |
255 | break; |
256 | } |
257 | } |
258 | if (bus == NULL || bus->dev.of_node == NULL) |
259 | return -ENODEV; |
260 | |
261 | hose_node = bus->dev.of_node; |
262 | hose = PCI_DN(hose_node)->phb; |
263 | |
264 | switch (which) { |
265 | case IOBASE_BRIDGE_NUMBER: |
266 | return (long)hose->first_busno; |
267 | case IOBASE_MEMORY: |
268 | return (long)hose->mem_offset[0]; |
269 | case IOBASE_IO: |
270 | return (long)hose->io_base_phys; |
271 | case IOBASE_ISA_IO: |
272 | return (long)isa_io_base; |
273 | case IOBASE_ISA_MEM: |
274 | return -EINVAL; |
275 | } |
276 | |
277 | return -EOPNOTSUPP; |
278 | } |
279 | |
#ifdef CONFIG_NUMA
/* Return the NUMA node of the host controller that owns @bus. */
int pcibus_to_node(struct pci_bus *bus)
{
	return pci_bus_to_host(bus)->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
288 | |
#ifdef CONFIG_PPC_PMAC
/*
 * Look up the PCI bus number and devfn recorded for device-tree node
 * @np. Returns 0 and fills *bus / *devfn, or -ENODEV when the node
 * has no attached pci_dn.
 */
int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
{
	struct pci_dn *pdn = PCI_DN(np);

	if (!pdn)
		return -ENODEV;

	*bus = pdn->busno;
	*devfn = pdn->devfn;
	return 0;
}
#endif
299 | |