// SPDX-License-Identifier: GPL-2.0-or-later
/*
** ccio-dma.c:
** DMA management routines for first generation cache-coherent machines.
** Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
**
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Ryan Bradetich
** (c) Copyright 2000 Hewlett-Packard Company
**
** "Real Mode" operation refers to U2/Uturn chip operation.
** U2/Uturn were designed to perform coherency checks w/o using
** the I/O MMU - basically what x86 does.
**
** Drawbacks of using Real Mode are:
**   o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
**   o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
**   o Ability to do scatter/gather in HW is lost.
**   o Doesn't work under PCX-U/U+ machines since they didn't follow
**     the coherency design originally worked out. Only PCX-W does.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>          /* for L1_CACHE_BYTES */
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>       /* for register_module() */
#include <asm/parisc-device.h>

#include "iommu.h"

/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance. */
#undef CCIO_COLLECT_STATS
#endif

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080 /* IO_CONTROL to turn on CCIO        */
#define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes     */
#define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */

struct ioa_registers {
        /* Runway Supervisory Set */
        int32_t  unused1[12];
        uint32_t io_command;            /* Offset 12 */
        uint32_t io_status;             /* Offset 13 */
        uint32_t io_control;            /* Offset 14 */
        int32_t  unused2[1];

        /* Runway Auxiliary Register Set */
        uint32_t io_err_resp;           /* Offset  0 */
        uint32_t io_err_info;           /* Offset  1 */
        uint32_t io_err_req;            /* Offset  2 */
        uint32_t io_err_resp_hi;        /* Offset  3 */
        uint32_t io_tlb_entry_m;        /* Offset  4 */
        uint32_t io_tlb_entry_l;        /* Offset  5 */
        uint32_t unused3[1];
        uint32_t io_pdir_base;          /* Offset  7 */
        uint32_t io_io_low_hv;          /* Offset  8 */
        uint32_t io_io_high_hv;         /* Offset  9 */
        uint32_t unused4[1];
        uint32_t io_chain_id_mask;      /* Offset 11 */
        uint32_t unused5[2];
        uint32_t io_io_low;             /* Offset 14 */
        uint32_t io_io_high;            /* Offset 15 */
};

/*
** IOA Registers
** -------------
**
** Runway IO_CONTROL Register (+0x38)
**
** The Runway IO_CONTROL register controls the forwarding of transactions.
**
** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 |
** |    HV    |  TLB  | reserved  | HV | mode  | reserved  |
**
** o mode field indicates the address translation of transactions
**   forwarded from Runway to GSC+:
**
**      Mode Name       Value   Definition
**      Off (default)     0     Opaque to matching addresses.
**      Include           1     Transparent for matching addresses.
**      Peek              3     Map matching addresses.
**
**      + "Off" mode: Runway transactions which match the I/O range
**        specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored.
**      + "Include" mode: all addresses within the I/O range specified
**        by the IO_IO_LOW and IO_IO_HIGH registers are transparently
**        forwarded. This is the I/O Adapter's normal operating mode.
**      + "Peek" mode: used during system configuration to initialize the
**        GSC+ bus. Runway Write_Shorts in the address range specified by
**        IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter
**        *AND* the GSC+ address is remapped to the Broadcast Physical
**        Address space by setting the 14 high order address bits of the
**        32 bit GSC+ address to ones.
**
** o TLB field affects transactions which are forwarded from GSC+ to Runway.
**   "Real" mode is the poweron default.
**
**      TLB Mode  Value  Description
**      Real        0    No TLB translation. Address is directly mapped and the
**                       virtual address is composed of selected physical bits.
**      Error       1    Software fills the TLB manually.
**      Normal      2    IOA fetches IO TLB misses from IO PDIR (in host memory).
**
**
** IO_IO_LOW_HV  +0x60 (HV dependent)
** IO_IO_HIGH_HV +0x64 (HV dependent)
** IO_IO_LOW     +0x78 (Architected register)
** IO_IO_HIGH    +0x7c (Architected register)
**
** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the
** I/O Adapter address space, respectively.
**
** | 0 ... 7  | 8 ... 15 | 16 ... 31 |
** | 11111111 | 11111111 |  address  |
**
** Each LOW/HIGH pair describes a disjoint address space region
** (2 per GSC+ port). Each incoming Runway transaction address is compared
** with both sets of LOW/HIGH registers. If the address is in the range
** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH, the
** transaction is forwarded to the respective GSC+ bus.
** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying
** an address space region.
**
** In order for a Runway address to reside within GSC+ extended address space:
**      Runway Address [0:7]   must identically compare to 8'b11111111
**      Runway Address [8:11]  must be equal to IO_IO_LOW(_HV)[16:19]
**      Runway Address [12:23] must be greater than or equal to
**                 IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
**      Runway Address [24:39] is not used in the comparison.
**
** When the Runway transaction is forwarded to GSC+, the GSC+ address is
** as follows:
**      GSC+ Address[0:3]       4'b1111
**      GSC+ Address[4:29]      Runway Address[12:37]
**      GSC+ Address[30:31]     2'b00
**
** All 4 Low/High registers must be initialized (by PDC) once the lower bus
** is interrogated and address space is defined. The operating system will
** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following
** the PDC initialization. However, the hardware version dependent IO_IO_LOW
** and IO_IO_HIGH registers should not be subsequently altered by the OS.
**
** Writes to both sets of registers will take effect immediately, bypassing
** the queues, which ensures that subsequent Runway transactions are checked
** against the updated bounds values. However reads are queued, introducing
** the possibility of a read being bypassed by a subsequent write to the same
** register. This sequence can be avoided by having software wait for read
** returns before issuing subsequent writes.
*/
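
/*
** Worked example of the range check (hypothetical register values, not
** real firmware settings): let IO_IO_LOW = 0xfffff000 and
** IO_IO_HIGH = 0xfffff800, i.e. "address" fields IO_IO_LOW[16:31] = 0xf000
** and IO_IO_HIGH[16:31] = 0xf800. A 40-bit Runway address is then
** forwarded iff
**      Runway[0:7]  == 8'b11111111,
**      Runway[8:11] == IO_IO_LOW[16:19] == 0xf, and
**      IO_IO_LOW[20:31] == 0x000 <= Runway[12:23] < 0x800 == IO_IO_HIGH[20:31].
** The forwarded GSC+ address is {4'b1111, Runway[12:37], 2'b00}, per the
** mapping above.
*/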

struct ioc {
        struct ioa_registers __iomem *ioc_regs;  /* I/O MMU base address */
        u8  *res_map;                   /* resource map, bit == pdir entry */
        __le64 *pdir_base;              /* physical base address */
        u32 pdir_size;                  /* bytes, function of IOV Space size */
        u32 res_hint;                   /* next available IOVP -
                                           circular search */
        u32 res_size;                   /* size of resource map in bytes */
        spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
        unsigned long avg_search[CCIO_SEARCH_SAMPLE];
        unsigned long avg_idx;          /* current index into avg_search */
        unsigned long used_pages;
        unsigned long msingle_calls;
        unsigned long msingle_pages;
        unsigned long msg_calls;
        unsigned long msg_pages;
        unsigned long usingle_calls;
        unsigned long usingle_pages;
        unsigned long usg_calls;
        unsigned long usg_pages;
#endif
        unsigned short cujo20_bug;

        /* STUFF We don't need in performance path */
        u32 chainid_shift;              /* specify bit location of chain_id */
        struct ioc *next;               /* Linked list of discovered iocs */
        const char *name;               /* device name from firmware */
        unsigned int hw_path;           /* the hardware path this ioc is associated with */
        struct pci_dev *fake_pci_dev;   /* the fake pci_dev for non-pci devs */
        struct resource mmio_region[2]; /* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;

/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
* This was copied from sba_iommu.c. Don't try to unify
* the two resource managers unless a way to have different
* allocation policies is also adjusted. We'd like to avoid
* I/O TLB thrashing by having resource allocation policy
* match the I/O TLB replacement policy.
*
***************************************************************/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
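
/*
** Worked example of the macros above (assumes 4kB pages, IOVP_SHIFT == 12):
** for iova 0x00123456,
**      CCIO_IOVP(0x00123456)        == 0x00123000  (offset bits cleared)
**      offset                       == 0x456
**      PDIR_INDEX(0x00123000)       == 0x123       (I/O Pdir entry number)
**      MKIOVP(0x123)                == 0x00123000
**      CCIO_IOVA(0x00123000, 0x456) == 0x00123456
*/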

/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
       for (; res_ptr < res_end; ++res_ptr) { \
               int ret;\
               unsigned int idx;\
               idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
               ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
               if ((0 == (*res_ptr & mask)) && !ret) { \
                       *res_ptr |= mask; \
                       res_idx = idx;\
                       ioc->res_hint = res_idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size)  \
       u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
       CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
       res_ptr = (u##size *)&(ioc)->res_map[0]; \
       CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);

/*
** Find available bit in this ioa's resource map.
** Use a "circular" search:
**   o Most IOVA's are "temporary" - avg search time should be small.
**   o keep a history of what happened for debugging
**   o KISS.
**
** Perf optimizations:
**   o search for log2(size) bits at a time.
**   o search for available resource bits using byte/word/whatever.
**   o use different search for "large" (eg > 4 pages) or "very large"
**     (eg > 16 pages) mappings.
*/
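
/*
** Example of the granularity this search buys (assumes 4kB pages):
** a request for 3 pages is satisfied with the 8-bit search and mask 0xff,
** so the whole resource-map byte - all 8 pdir entries behind it - is
** handed to the mapping. ccio_alloc_range() then converts the byte index
** back to a pdir index via "res_idx << 3".
*/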

/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The PCI device.
 * @size: The requested number of bytes to be mapped into the
 * I/O Pdir...
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the requested size.
 */
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
        unsigned int pages_needed = size >> IOVP_SHIFT;
        unsigned int res_idx;
        unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
        unsigned long cr_start = mfctl(16);
#endif

        BUG_ON(pages_needed == 0);
        BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

        DBG_RES("%s() size: %zu pages_needed %d\n",
                __func__, size, pages_needed);

        /*
        ** "seek and ye shall find"...praying never hurts either...
        ** ggg sacrifices another 710 to the computer gods.
        */

        boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

        if (pages_needed <= 8) {
                /*
                 * LAN traffic will not thrash the TLB IFF the same NIC
                 * uses 8 adjacent pages to map separate payload data.
                 * ie the same byte in the resource bit map.
                 */
#if 0
                /* FIXME: bit search should shift its way through
                 * an unsigned long - not byte at a time. As it is now,
                 * we effectively allocate this byte to this mapping.
                 */
                unsigned long mask = ~(~0UL >> pages_needed);
                CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
                CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
        } else if (pages_needed <= 16) {
                CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
        } else if (pages_needed <= 32) {
                CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
        } else if (pages_needed <= 64) {
                CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
        } else {
                panic("%s: %s() Too many pages to map. pages_needed: %u\n",
                      __FILE__, __func__, pages_needed);
        }

        panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
              __func__);

resource_found:

        DBG_RES("%s() res_idx %d res_hint: %d\n",
                __func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
        {
                unsigned long cr_end = mfctl(16);
                unsigned long tmp = cr_end - cr_start;
                /* check for roll over */
                cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
        }
        ioc->avg_search[ioc->avg_idx++] = cr_start;
        ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
        ioc->used_pages += pages_needed;
#endif
        /*
        ** return the bit address.
        */
        return res_idx << 3;
}

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
        BUG_ON((*res_ptr & mask) != mask); \
        *res_ptr &= ~(mask);

/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 * I/O Pdir.
 *
 * This function frees the resources allocated for the iova.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
        unsigned long iovp = CCIO_IOVP(iova);
        unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

        BUG_ON(pages_mapped == 0);
        BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
        BUG_ON(pages_mapped > BITS_PER_LONG);

        DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
                __func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
        ioc->used_pages -= pages_mapped;
#endif

        if(pages_mapped <= 8) {
#if 0
                /* see matching comments in alloc_range */
                unsigned long mask = ~(~0UL >> pages_mapped);
                CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
                CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
        } else if(pages_mapped <= 16) {
                CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
        } else if(pages_mapped <= 32) {
                CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
        } else if(pages_mapped <= 64) {
                CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
        } else {
                panic("%s:%s() Too many pages to unmap.\n", __FILE__,
                      __func__);
        }
}

/****************************************************************
**
** CCIO dma_ops support routines
**
*****************************************************************/

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
** DMA "Page Type" and Hints
**   o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
**     set for subcacheline DMA transfers since we don't want to damage the
**     other part of a cacheline.
**   o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
**     This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
**     data can avoid this if the mapping covers full cache lines.
**   o STOP_MOST is needed for atomicity across cachelines.
**     Apparently only "some EISA devices" need this.
**     Using CONFIG_ISA is a hack. Only the IOA with EISA under it needs
**     to use this hint iff the EISA device needs this feature.
**     According to the U2 ERS, STOP_MOST enabled pages hurt performance.
**   o PREFETCH should *not* be set for cases like Multiple PCI devices
**     behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
**     device can be fetched and multiple DMA streams will thrash the
**     prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
**     and Invalidation of Prefetch Entries".
**
** FIXME: the default hints need to be per GSC device - not global.
**
** HP-UX dorks: linux device driver programming model is totally different
**    than HP-UX's. HP-UX always sets HINT_PREFETCH since its drivers
**    do special things to work on non-coherent platforms...linux has to
**    be much more careful with this.
*/
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL  /* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL  /* LSL support */
#else
#define HINT_STOP_MOST  0x00UL  /* only needed for "some EISA devices" */
#endif
#define HINT_UDPATE_ENB 0x08UL  /* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL  /* for outbound pages which are not SAFE */


/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
        [DMA_BIDIRECTIONAL]     = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
        [DMA_TO_DEVICE]         = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
        [DMA_FROM_DEVICE]       = HINT_STOP_MOST | IOPDIR_VALID,
};

/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @pba: The physical address.
 * @hints: The DMA Hint.
 *
 * Given a physical address (pba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|    12 bits     |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+--------+----------+-------+------+------+-----+-------+
 * |         Phys          |  Rsvd  | Prefetch |Update | Rsvd | Lock |Safe | Valid |
 * |         20:39         |        |  Enable  |Enable |      |Enable| DMA |       |
 * |        20 bits        | 5 bits |  1 bit   | 1 bit |2 bits|1 bit |1 bit| 1 bit |
 * +-----------------------+--------+----------+-------+------+------+-----+-------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction. The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
static void
ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba,
                   unsigned long hints)
{
        register unsigned long pa;
        register unsigned long ci; /* coherent index */

        /* We currently only support kernel addresses */
        BUG_ON(sid != KERNEL_SPACE);

        /*
        ** WORD 1 - low order word
        ** "hints" parm includes the VALID bit!
        ** "dep" clobbers the physical address offset bits as well.
        */
        pa = pba;
        asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
        ((u32 *)pdir_ptr)[1] = (u32) pa;

        /*
        ** WORD 0 - high order word
        */

#ifdef __LP64__
        /*
        ** get bits 12:15 of physical address
        ** shift bits 16:31 of physical address
        ** and deposit them
        */
        asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
        asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
        asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
        pa = 0;
#endif
        /*
        ** get CPU coherency index bits
        ** Grab virtual index [0:11]
        ** Deposit virt_idx bits into I/O PDIR word
        */
        asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba)));
        asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
        asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

        ((u32 *)pdir_ptr)[0] = (u32) pa;


        /* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
        **        PCX-U/U+ do. (eg C200/C240)
        **        PCX-T'? Don't know. (eg C110 or similar K-class)
        **
        ** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
        **
        ** "Since PCX-U employs an offset hash that is incompatible with
        ** the real mode coherence index generation of U2, the PDIR entry
        ** must be flushed to memory to retain coherence."
        */
        asm_io_fdc(pdir_ptr);
        asm_io_sync();
}
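
/*
** Worked example of WORD 1 above (illustrative, 32-bit kernel): for
** pba == 0x12345000 and hints == (HINT_SAFE_DMA | IOPDIR_VALID) == 0x03,
** the depw deposits the 12 hint bits over the (zero) page offset bits,
** giving WORD 1 == 0x12345003. On 32-bit kernels WORD 0 carries only the
** virtual index, since pa is zeroed before the lci/extru/depw sequence.
*/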

/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
        u32 chain_size = 1 << ioc->chainid_shift;

        iovp &= IOVP_MASK;      /* clear offset bits, just want pagenum */
        byte_cnt += chain_size;

        while(byte_cnt > chain_size) {
                WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
                iovp += chain_size;
                byte_cnt -= chain_size;
        }
}
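
/*
** Example (illustrative): with chainid_shift == 20, chain_size is 1MB.
** Purging iovp 0x00400000 with byte_cnt == 3MB issues CMD_TLB_PURGE for
** 0x00400000, 0x00500000 and 0x00600000 - the "+= chain_size" before the
** loop makes the body run once per chain_size chunk covered.
*/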

/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshold it might be "cheaper" to just blow
 *        away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 *        PDIR entry - just once for each possible TLB entry.
 *        (We do need to make I/O PDIR entries invalid regardless).
 *
 * FIXME: Can we change byte_cnt to pages_mapped?
 */
static void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
        u32 iovp = (u32)CCIO_IOVP(iova);
        size_t saved_byte_cnt;

        /* round up to nearest page size */
        saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

        while(byte_cnt > 0) {
                /* invalidate one page at a time */
                unsigned int idx = PDIR_INDEX(iovp);
                char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

                BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
                pdir_ptr[7] = 0;        /* clear only VALID bit */
                /*
                ** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
                **        PCX-U/U+ do. (eg C200/C240)
                ** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
                */
                asm_io_fdc(pdir_ptr);

                iovp += IOVP_SIZE;
                byte_cnt -= IOVP_SIZE;
        }

        asm_io_sync();
        ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}

/****************************************************************
**
** CCIO dma_ops
**
*****************************************************************/

/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The PCI device.
 * @mask: A bit mask describing the DMA address range of the device.
 */
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
        if(dev == NULL) {
                printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
                BUG();
                return 0;
        }

        /* only support 32-bit or better devices (ie PCI/GSC) */
        return (int)(mask >= 0xffffffffUL);
}

/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The PCI device.
 * @addr: The physical address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function.
 */
static dma_addr_t
ccio_map_single(struct device *dev, phys_addr_t addr, size_t size,
                enum dma_data_direction direction)
{
        int idx;
        struct ioc *ioc;
        unsigned long flags;
        dma_addr_t iovp;
        dma_addr_t offset;
        __le64 *pdir_start;
        unsigned long hint = hint_lookup[(int)direction];

        BUG_ON(!dev);
        ioc = GET_IOC(dev);
        if (!ioc)
                return DMA_MAPPING_ERROR;

        BUG_ON(size <= 0);

        /* save offset bits */
        offset = offset_in_page(addr);

        /* round up to nearest IOVP_SIZE */
        size = ALIGN(size + offset, IOVP_SIZE);
        spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
        ioc->msingle_calls++;
        ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

        idx = ccio_alloc_range(ioc, dev, size);
        iovp = (dma_addr_t)MKIOVP(idx);

        pdir_start = &(ioc->pdir_base[idx]);

        DBG_RUN("%s() %pa -> %#lx size: %zu\n",
                __func__, &addr, (long)(iovp | offset), size);

        /* If not cacheline aligned, force SAFE_DMA on the whole mess */
        if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES))
                hint |= HINT_SAFE_DMA;

        while(size > 0) {
                ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint);

                DBG_RUN(" pdir %p %08x%08x\n",
                        pdir_start,
                        (u32) (((u32 *) pdir_start)[0]),
                        (u32) (((u32 *) pdir_start)[1]));
                ++pdir_start;
                addr += IOVP_SIZE;
                size -= IOVP_SIZE;
        }

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        /* form complete address */
        return CCIO_IOVA(iovp, offset);
}


static dma_addr_t
ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        if (unlikely(attrs & DMA_ATTR_MMIO))
                return DMA_MAPPING_ERROR;

        return ccio_map_single(dev, phys, size, direction);
}


/**
 * ccio_unmap_phys - Unmap an address range from the IOMMU.
 * @dev: The PCI device.
 * @iova: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: attributes
 */
static void
ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        struct ioc *ioc;
        unsigned long flags;
        dma_addr_t offset = iova & ~IOVP_MASK;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);
        if (!ioc) {
                WARN_ON(!ioc);
                return;
        }

        DBG_RUN("%s() iovp %#lx/%zx\n",
                __func__, (long)iova, size);

        iova ^= offset; /* clear offset bits */
        size += offset;
        size = ALIGN(size, IOVP_SIZE);

        spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
        ioc->usingle_calls++;
        ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

        ccio_mark_invalid(ioc, iova, size);
        ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
        spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 * @flag: allocation flags
 * @attrs: attributes
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
           unsigned long attrs)
{
        void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
        if(!hwdev) {
                /* only support PCI */
                *dma_handle = 0;
                return 0;
        }
#endif
        ret = (void *) __get_free_pages(flag, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size,
                                              DMA_BIDIRECTIONAL);
        }

        return ret;
}

/**
 * ccio_free - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
 * @dma_handle: The device address returned from the ccio_alloc_consistent.
 * @attrs: attributes
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
          dma_addr_t dma_handle, unsigned long attrs)
{
        ccio_unmap_phys(dev, dma_handle, size, 0, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: attributes
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
            enum dma_data_direction direction, unsigned long attrs)
{
        struct ioc *ioc;
        int coalesced, filled = 0;
        unsigned long flags;
        unsigned long hint = hint_lookup[(int)direction];
        unsigned long prev_len = 0, current_len = 0;
        int i;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);
        if (!ioc)
                return -EINVAL;

        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sg_dma_address(sglist) = ccio_map_single(dev,
                                sg_phys(sglist), sglist->length,
                                direction);
                sg_dma_len(sglist) = sglist->length;
                return 1;
        }

        for(i = 0; i < nents; i++)
                prev_len += sglist[i].length;

        spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
        ioc->msg_calls++;
#endif

        /*
        ** First coalesce the chunks and allocate I/O pdir space
        **
        ** If this is one DMA stream, we can properly map using the
        ** correct virtual address associated with each DMA page.
        ** w/o this association, we wouldn't have coherent DMA!
        ** Access to the virtual address is what forces a two pass algorithm.
        */
        coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

        /*
        ** Program the I/O Pdir
        **
        ** map the virtual addresses to the I/O Pdir
        **   o dma_address will contain the pdir index
        **   o dma_len will contain the number of bytes to map
        **   o page/offset contain the virtual address.
        */
        filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

        spin_unlock_irqrestore(&ioc->res_lock, flags);

        BUG_ON(coalesced != filled);

        DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

        for (i = 0; i < filled; i++)
                current_len += sg_dma_len(sglist + i);

        BUG_ON(current_len != prev_len);

        return filled;
}

/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: attributes
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
              enum dma_data_direction direction, unsigned long attrs)
{
        struct ioc *ioc;

        BUG_ON(!dev);
        ioc = GET_IOC(dev);
        if (!ioc) {
                WARN_ON(!ioc);
                return;
        }

        DBG_RUN_SG("%s() START %d entries, %p,%x\n",
                __func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
        ioc->usg_calls++;
#endif

        while (nents && sg_dma_len(sglist)) {

#ifdef CCIO_COLLECT_STATS
                ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
                ccio_unmap_phys(dev, sg_dma_address(sglist),
                                sg_dma_len(sglist), direction, 0);
                ++sglist;
                nents--;
        }

        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static const struct dma_map_ops ccio_ops = {
        .dma_supported =        ccio_dma_supported,
        .alloc =                ccio_alloc,
        .free =                 ccio_free,
        .map_phys =             ccio_map_phys,
        .unmap_phys =           ccio_unmap_phys,
        .map_sg =               ccio_map_sg,
        .unmap_sg =             ccio_unmap_sg,
        .get_sgtable =          dma_common_get_sgtable,
        .alloc_pages_op =       dma_common_alloc_pages,
        .free_pages =           dma_common_free_pages,
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
        struct ioc *ioc = ioc_list;

        while (ioc != NULL) {
                unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
                unsigned long avg = 0, min, max;
                int j;
#endif

                seq_printf(m, "%s\n", ioc->name);

                seq_printf(m, "Cujo 2.0 bug    : %s\n",
                           (ioc->cujo20_bug ? "yes" : "no"));

                seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
                           total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
                seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
                           total_pages - ioc->used_pages, ioc->used_pages,
                           (int)(ioc->used_pages * 100 / total_pages));
#endif

                seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
                           ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
                min = max = ioc->avg_search[0];
                for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
                        avg += ioc->avg_search[j];
                        if(ioc->avg_search[j] > max)
                                max = ioc->avg_search[j];
                        if(ioc->avg_search[j] < min)
                                min = ioc->avg_search[j];
                }
                avg /= CCIO_SEARCH_SAMPLE;
                seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
                           min, avg, max);

                seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
                           ioc->msingle_calls, ioc->msingle_pages,
                           (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

                /* KLUGE - unmap_sg calls unmap_phys for each mapped page */
                min = ioc->usingle_calls - ioc->usg_calls;
                max = ioc->usingle_pages - ioc->usg_pages;
                seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
                           min, max, (int)((max * 1000)/min));

                seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
                           ioc->msg_calls, ioc->msg_pages,
                           (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

                seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
                           ioc->usg_calls, ioc->usg_pages,
                           (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif  /* CCIO_COLLECT_STATS */

                ioc = ioc->next;
        }

        return 0;
}

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
        struct ioc *ioc = ioc_list;

        while (ioc != NULL) {
                seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
                             ioc->res_size, false);
                seq_putc(m, '\n');
                ioc = ioc->next;
                break; /* XXX - remove me */
        }

        return 0;
}
#endif /* CONFIG_PROC_FS */

/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provided hardware path.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
        int i;
        struct ioc *ioc;

        ioc = ioc_list;
        for (i = 0; i < ioc_count; i++) {
                if (ioc->hw_path == hw_path)
                        return ioc;

                ioc = ioc->next;
        }

        return NULL;
}

/**
 * ccio_get_iommu - Find the iommu which controls this device
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMU's and returns
 * the appropriate IOMMU for the device based on its hardware path.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
        dev = find_pa_parent_type(dev, HPHW_IOA);
        if (!dev)
                return NULL;

        return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP    0x10000000      /* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages. To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
        unsigned int idx;
        struct parisc_device *dev = parisc_parent(cujo);
        struct ioc *ioc = ccio_get_iommu(dev);
        u8 *res_ptr;

        ioc->cujo20_bug = 1;
        res_ptr = ioc->res_map;
        idx = PDIR_INDEX(iovp) >> 3;

        while (idx < ioc->res_size) {
                res_ptr[idx] |= 0xff;
                idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
        }
}
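
/*
** Example of the step size above (assumes 4kB pages):
** PDIR_INDEX(CUJO_20_STEP) is 0x10000000 >> 12 == 0x10000 pdir entries,
** i.e. 0x2000 resource-map bytes. Each marked byte covers 8 pdir entries,
** so 32kB of IOVA space is reserved at every 256MB step above the bad iovp.
*/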

#if 0
/* GRANT - is this needed for U2 or not? */

/*
** Get the size of the I/O TLB for this I/O MMU.
**
** If spa_shift is non-zero (ie probably U2),
** then calculate the I/O TLB size using spa_shift.
**
** Otherwise we are supposed to get the IODC entry point ENTRY TLB
** and execute it. However, both U2 and Uturn firmware supplies spa_shift.
** I think only Java (K/D/R-class too?) systems don't do this.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
        if (dev->spa_shift == 0) {
                panic("%s() : Can't determine I/O TLB size.\n", __func__);
        }
        return (1 << dev->spa_shift);
}
#else

/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT 8
#define CCIO_CHAINID_MASK 0xff
#endif /* 0 */

/* We *can't* support JAVA (T600). Venture there at your own risk. */
static const struct parisc_device_id ccio_tbl[] __initconst = {
        { HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
        { HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
        { 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver __refdata = {
        .name = "ccio",
        .id_table = ccio_tbl,
        .probe = ccio_probe,
};

/**
 * ccio_ioc_init - Initialize the I/O Controller
 * @ioc: The I/O Controller.
 *
 * Initialize the I/O Controller which includes setting up the
 * I/O Page Directory, the resource map, and initializing the
 * U2/Uturn chip into virtual mode.
 */
static void __init
ccio_ioc_init(struct ioc *ioc)
{
        int i;
        unsigned int iov_order;
        u32 iova_space_size;

        /*
        ** Determine IOVA Space size from memory size.
        **
        ** Ideally, PCI drivers would register the maximum number
        ** of DMA they can have outstanding for each device they
        ** own. Next best thing would be to guess how much DMA
        ** can be outstanding based on PCI Class/sub-class. Both
        ** methods still require some "extra" to support PCI
        ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
        */

        iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver));

        /* limit IOVA space size to 1MB-1GB */

        if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
                iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
        } else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
                iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
        }
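
        /*
        ** Worked example (illustrative, assumes 4kB pages and one ccio):
        ** with 512MB of RAM, totalram_pages() is 131072, so iova_space_size
        ** starts at 131072 pages (512MB) and survives the 1MB-1GB clamp.
        ** The sizes derived below then come out to pdir_size =
        ** (512MB / 4kB) * 8 = 1MB and res_size = (1MB / 8) / 8 = 16kB.
        */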

        /*
        ** iova space must be log2() in size.
        ** thus, pdir/res_map will also be log2().
        */

        /* We could use larger page sizes in order to *decrease* the number
        ** of mappings needed. (ie 8k pages means 1/2 the mappings).
        **
        ** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either
        ** since the pages must also be physically contiguous - typically
        ** this is the case under linux."
        */

        iov_order = get_order(iova_space_size << PAGE_SHIFT);

        /* iova_space_size is now bytes, not pages */
        iova_space_size = 1 << (iov_order + PAGE_SHIFT);

        ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

        BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);   /* max pdir size <= 8MB */

        /* Verify it's a power of two */
        BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

        DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
                 __func__, ioc->ioc_regs,
                 (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
                 iova_space_size>>20,
                 iov_order + PAGE_SHIFT);

        ioc->pdir_base = (__le64 *)__get_free_pages(GFP_KERNEL,
                                                    get_order(ioc->pdir_size));
        if(NULL == ioc->pdir_base) {
                panic("%s() could not allocate I/O Page Table\n", __func__);
        }
        memset(ioc->pdir_base, 0, ioc->pdir_size);

        BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
        DBG_INIT(" base %p\n", ioc->pdir_base);

        /* resource map size dictated by pdir_size */
        ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
        DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

        ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
                                              get_order(ioc->res_size));
        if(NULL == ioc->res_map) {
                panic("%s() could not allocate resource map\n", __func__);
        }
        memset(ioc->res_map, 0, ioc->res_size);

        /* Initialize the res_hint to 16 */
        ioc->res_hint = 16;

        /* Initialize the spinlock */
        spin_lock_init(&ioc->res_lock);

        /*
        ** Chainid is the upper most bits of an IOVP used to determine
        ** which TLB entry an IOVP will use.
        */
        ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
        DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
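
        /*
        ** Continuing the 512MB example above: get_order(512MB) is 17 with
        ** 4kB pages, so chainid_shift = 17 + 12 - 8 = 21. The chain id is
        ** (iovp >> 21) & 0xff, and each of Uturn's 256 TLB entries covers
        ** a 2MB "chain" of IOVA space.
        */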

        /*
        ** Initialize IOA hardware
        */
        WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
                  &ioc->ioc_regs->io_chain_id_mask);

        WRITE_U32(virt_to_phys(ioc->pdir_base),
                  &ioc->ioc_regs->io_pdir_base);

        /*
        ** Go to "Virtual Mode"
        */
        WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

        /*
        ** Initialize all I/O TLB entries to 0 (Valid bit off).
        */
        WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
        WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

        for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
                WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
                          &ioc->ioc_regs->io_command);
        }
}

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
        int result;

        res->parent = NULL;
        res->flags = IORESOURCE_MEM;
        /*
         * The ((signed) ...) casts are required on a 64-bit kernel because
         * we only want to sign extend the lower 16 bits of the register.
         * The upper 16 bits of the range registers are hardcoded to 0xffff.
         */
        res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
        res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
        res->name = name;
        /*
         * Check if this MMIO range is disabled
         */
        if (res->end + 1 == res->start)
                return;

        /* On some platforms (e.g. K-Class), we have already registered
         * resources for devices reported by firmware. Some are children
         * of ccio.
         * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
         */
        result = insert_resource(&iomem_resource, res);
        if (result < 0) {
                printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
                       __func__, (unsigned long)res->start, (unsigned long)res->end);
        }
}
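
/*
** Example of the sign extension above (hypothetical register value):
** READ_U32() == 0xfffff000 gives ((signed) 0xfffff000 << 16) == 0xf0000000
** as a 32-bit value; the widening to unsigned long then sign-extends it to
** 0xfffffffff0000000 on a 64-bit kernel, which is the desired behavior.
*/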

static int __init ccio_init_resources(struct ioc *ioc)
{
        struct resource *res = ioc->mmio_region;
        char *name = kmalloc(14, GFP_KERNEL);
        if (unlikely(!name))
                return -ENOMEM;
        snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

        ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
        ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
        return 0;
}

static int new_ioc_area(struct resource *res, unsigned long size,
                unsigned long min, unsigned long max, unsigned long align)
{
        if (max <= min)
                return -EBUSY;

        res->start = (max - size + 1) &~ (align - 1);
        res->end = res->start + size;

        /* We might be trying to expand the MMIO range to include
         * a child device that has already registered its MMIO space.
         * Use "insert" instead of request_resource().
         */
        if (!insert_resource(&iomem_resource, res))
                return 0;

        return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
                unsigned long min, unsigned long max, unsigned long align)
{
        unsigned long start, len;

        if (!res->parent)
                return new_ioc_area(res, size, min, max, align);

        start = (res->start - size) &~ (align - 1);
        len = res->end - start + 1;
        if (start >= min) {
                if (!adjust_resource(res, start, len))
                        return 0;
        }

        start = res->start;
        len = ((size + res->end + align) &~ (align - 1)) - start;
        if (start + len <= max) {
                if (!adjust_resource(res, start, len))
                        return 0;
        }

        return -EBUSY;
}
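
/*
** Example of the downward expansion (illustrative numbers): for an existing
** range [0xf0800000, 0xf0ffffff] with size == 0x100000 and align == 0x100000,
** the first attempt computes start = (0xf0800000 - 0x100000) & ~0xfffff ==
** 0xf0700000 and asks adjust_resource() to grow the resource to
** [0xf0700000, 0xf0ffffff].
*/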

/*
 * Dino calls this function. Beware that we may get called on systems
 * which have no IOC (725, B180, C160L, etc) but do have a Dino.
 * So it's legal to find no parent IOC.
 *
 * Some other issues: one of the resources in the ioc may be unassigned.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
                struct resource *res, unsigned long size,
                unsigned long min, unsigned long max, unsigned long align)
{
        struct resource *parent = &iomem_resource;
        struct ioc *ioc = ccio_get_iommu(dev);
        if (!ioc)
                goto out;

        parent = ioc->mmio_region;
        if (parent->parent &&
            !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
                return 0;

        if ((parent + 1)->parent &&
            !allocate_resource(parent + 1, res, size, min, max, align,
                               NULL, NULL))
                return 0;

        if (!expand_ioc_area(parent, size, min, max, align)) {
                __raw_writel(((parent->start)>>16) | 0xffff0000,
                             &ioc->ioc_regs->io_io_low);
                __raw_writel(((parent->end)>>16) | 0xffff0000,
                             &ioc->ioc_regs->io_io_high);
        } else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
                parent++;
                __raw_writel(((parent->start)>>16) | 0xffff0000,
                             &ioc->ioc_regs->io_io_low_hv);
                __raw_writel(((parent->end)>>16) | 0xffff0000,
                             &ioc->ioc_regs->io_io_high_hv);
        } else {
                return -EBUSY;
        }

 out:
        return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
                struct resource *res)
{
        struct resource *parent;
        struct ioc *ioc = ccio_get_iommu(dev);

        if (!ioc) {
                parent = &iomem_resource;
        } else if ((ioc->mmio_region->start <= res->start) &&
                   (res->end <= ioc->mmio_region->end)) {
                parent = ioc->mmio_region;
        } else if (((ioc->mmio_region + 1)->start <= res->start) &&
                   (res->end <= (ioc->mmio_region + 1)->end)) {
                parent = ioc->mmio_region + 1;
        } else {
                return -EBUSY;
        }

        /* "transparent" bus bridges need to register the MMIO resources
         * firmware assigned them, e.g. children of hppb.c (e.g. K-class)
         * registered their resources in the PDC "bus walk" (See
         * arch/parisc/kernel/inventory.c).
         */
        return insert_resource(parent, res);
}

/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int __init ccio_probe(struct parisc_device *dev)
{
        int i;
        struct ioc *ioc, **ioc_p = &ioc_list;
        struct pci_hba_data *hba;

        ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
        if (ioc == NULL) {
                printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
                return -ENOMEM;
        }

        ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

        printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
               (unsigned long)dev->hpa.start);

        for (i = 0; i < ioc_count; i++) {
                ioc_p = &(*ioc_p)->next;
        }
        *ioc_p = ioc;

        ioc->hw_path = dev->hw_path;
        ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
        if (!ioc->ioc_regs) {
                kfree(ioc);
                return -ENOMEM;
        }
        ccio_ioc_init(ioc);
        if (ccio_init_resources(ioc)) {
                iounmap(ioc->ioc_regs);
                kfree(ioc);
                return -ENOMEM;
        }
        hppa_dma_ops = &ccio_ops;

        hba = kzalloc(sizeof(*hba), GFP_KERNEL);
        /* if this fails, no I/O cards will work, so may as well bug */
        BUG_ON(hba == NULL);

        hba->iommu = ioc;
        dev->dev.platform_data = hba;

#ifdef CONFIG_PROC_FS
        if (ioc_count == 0) {
                struct proc_dir_entry *runway;

                runway = proc_mkdir("bus/runway", NULL);
                if (runway) {
                        proc_create_single(MODULE_NAME, 0, runway,
                                           ccio_proc_info);
                        proc_create_single(MODULE_NAME"-bitmap", 0, runway,
                                           ccio_proc_bitmap_info);
                }
        }
#endif
        ioc_count++;
        return 0;
}

/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */
static int __init ccio_init(void)
{
        return register_parisc_driver(&ccio_driver);
}
arch_initcall(ccio_init);
1587
