/*
** ccio-dma.c:
**	DMA management routines for first generation cache-coherent machines.
**	Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
**
**	(c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Ryan Bradetich
**	(c) Copyright 2000 Hewlett-Packard Company
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
**  "Real Mode" operation refers to U2/Uturn chip operation.
**  U2/Uturn were designed to perform coherency checks w/o using
**  the I/O MMU - basically what x86 does.
**
**  Philipp Rumpf has a "Real Mode" driver for PCX-W machines at:
**      CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
**      cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
**
**  I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c.
**
**  Drawbacks of using Real Mode are:
**	o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
**	o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
**	o Ability to do scatter/gather in HW is lost.
**	o Doesn't work under PCX-U/U+ machines since they didn't follow
**	  the coherency design originally worked out. Only PCX-W does.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>		/* for L1_CACHE_BYTES */
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>	/* for register_module() */
#include <asm/parisc-device.h>

/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance. */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>		/* for proc_runway_root */

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080 /* IO_CONTROL to turn on CCIO        */
#define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes     */
#define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t	   unused1[12];
	uint32_t   io_command;		/* Offset 12 */
	uint32_t   io_status;		/* Offset 13 */
	uint32_t   io_control;		/* Offset 14 */
	int32_t	   unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t   io_err_resp;		/* Offset  0 */
	uint32_t   io_err_info;		/* Offset  1 */
	uint32_t   io_err_req;		/* Offset  2 */
	uint32_t   io_err_resp_hi;	/* Offset  3 */
	uint32_t   io_tlb_entry_m;	/* Offset  4 */
	uint32_t   io_tlb_entry_l;	/* Offset  5 */
	uint32_t   unused3[1];
	uint32_t   io_pdir_base;	/* Offset  7 */
	uint32_t   io_io_low_hv;	/* Offset  8 */
	uint32_t   io_io_high_hv;	/* Offset  9 */
	uint32_t   unused4[1];
	uint32_t   io_chain_id_mask;	/* Offset 11 */
	uint32_t   unused5[2];
	uint32_t   io_io_low;		/* Offset 14 */
	uint32_t   io_io_high;		/* Offset 15 */
};
138
139 /*
140 ** IOA Registers
141 ** -------------
142 **
143 ** Runway IO_CONTROL Register (+0x38)
144 **
145 ** The Runway IO_CONTROL register controls the forwarding of transactions.
146 **
147 ** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 |
148 ** | HV | TLB | reserved | HV | mode | reserved |
149 **
150 ** o mode field indicates the address translation of transactions
151 ** forwarded from Runway to GSC+:
152 ** Mode Name Value Definition
153 ** Off (default) 0 Opaque to matching addresses.
154 ** Include 1 Transparent for matching addresses.
155 ** Peek 3 Map matching addresses.
156 **
157 ** + "Off" mode: Runway transactions which match the I/O range
158 ** specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored.
159 ** + "Include" mode: all addresses within the I/O range specified
160 ** by the IO_IO_LOW and IO_IO_HIGH registers are transparently
161 ** forwarded. This is the I/O Adapter's normal operating mode.
162 ** + "Peek" mode: used during system configuration to initialize the
163 ** GSC+ bus. Runway Write_Shorts in the address range specified by
164 ** IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter
165 ** *AND* the GSC+ address is remapped to the Broadcast Physical
166 ** Address space by setting the 14 high order address bits of the
167 ** 32 bit GSC+ address to ones.
168 **
169 ** o TLB field affects transactions which are forwarded from GSC+ to Runway.
170 ** "Real" mode is the poweron default.
171 **
172 ** TLB Mode Value Description
173 ** Real 0 No TLB translation. Address is directly mapped and the
174 ** virtual address is composed of selected physical bits.
175 ** Error 1 Software fills the TLB manually.
176 ** Normal 2 IOA fetches IO TLB misses from IO PDIR (in host memory).
177 **
178 **
179 ** IO_IO_LOW_HV +0x60 (HV dependent)
180 ** IO_IO_HIGH_HV +0x64 (HV dependent)
181 ** IO_IO_LOW +0x78 (Architected register)
182 ** IO_IO_HIGH +0x7c (Architected register)
183 **
184 ** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the
185 ** I/O Adapter address space, respectively.
186 **
187 ** 0 ... 7 | 8 ... 15 | 16 ... 31 |
188 ** 11111111 | 11111111 | address |
189 **
190 ** Each LOW/HIGH pair describes a disjoint address space region.
191 ** (2 per GSC+ port). Each incoming Runway transaction address is compared
192 ** with both sets of LOW/HIGH registers. If the address is in the range
193 ** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH the transaction
194 ** for forwarded to the respective GSC+ bus.
195 ** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying
196 ** an address space region.
197 **
198 ** In order for a Runway address to reside within GSC+ extended address space:
199 ** Runway Address [0:7] must identically compare to 8'b11111111
200 ** Runway Address [8:11] must be equal to IO_IO_LOW(_HV)[16:19]
201 ** Runway Address [12:23] must be greater than or equal to
202 ** IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
203 ** Runway Address [24:39] is not used in the comparison.
204 **
205 ** When the Runway transaction is forwarded to GSC+, the GSC+ address is
206 ** as follows:
207 ** GSC+ Address[0:3] 4'b1111
208 ** GSC+ Address[4:29] Runway Address[12:37]
209 ** GSC+ Address[30:31] 2'b00
210 **
211 ** All 4 Low/High registers must be initialized (by PDC) once the lower bus
212 ** is interrogated and address space is defined. The operating system will
213 ** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following
214 ** the PDC initialization. However, the hardware version dependent IO_IO_LOW
215 ** and IO_IO_HIGH registers should not be subsequently altered by the OS.
216 **
217 ** Writes to both sets of registers will take effect immediately, bypassing
218 ** the queues, which ensures that subsequent Runway transactions are checked
219 ** against the updated bounds values. However reads are queued, introducing
220 ** the possibility of a read being bypassed by a subsequent write to the same
221 ** register. This sequence can be avoided by having software wait for read
222 ** returns before issuing subsequent writes.
223 */
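
/*
** Illustrative sketch (not part of the driver; a hedged example only):
** how an incoming 40-bit Runway address is checked against the
** IO_IO_LOW/IO_IO_HIGH window and remapped to a 32-bit GSC+ address,
** per the rules above. Bit numbering follows the MSB == bit 0 convention
** used in this file; the helper names are hypothetical.
*/
#if 0
static int runway_hits_gsc_range(u64 rw, u32 io_io_low, u32 io_io_high)
{
	if (((rw >> 32) & 0xff) != 0xff)	/* Runway[0:7] == 11111111 */
		return 0;
	if (((rw >> 28) & 0xf) != ((io_io_low >> 12) & 0xf))
		return 0;			/* Runway[8:11] == IO_IO_LOW[16:19] */
	return (((rw >> 16) & 0xfff) >= (io_io_low & 0xfff)) &&
	       (((rw >> 16) & 0xfff) <  (io_io_high & 0xfff));
}

static u32 runway_to_gsc(u64 rw)
{
	/* GSC+[0:3]=1111, GSC+[4:29]=Runway[12:37], GSC+[30:31]=00 */
	return 0xf0000000U | (((u32)(rw >> 2) & 0x3ffffff) << 2);
}
#endif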

struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* I/O MMU base address */
	u8  *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address */
	u32 pdir_size;			/* bytes, function of IOV Space size */
	u32 res_hint;			/* next available IOVP -
					   circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;

	/* STUFF We don't need in performance path */
	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* Linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;

/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
*   This was copied from sba_iommu.c. Don't try to unify
*   the two resource managers unless a way to have different
*   allocation policies is also adjusted. We'd like to avoid
*   I/O TLB thrashing by having resource allocation policy
*   match the I/O TLB replacement policy.
*
***************************************************************/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
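
/*
** Worked example of the macros above (illustrative only, assuming the
** usual 4k page size): splitting an IOVA into its IOVP and offset and
** recombining them is lossless.
*/
#if 0
	dma_addr_t iova   = 0x00345678;              /* example IOVA        */
	dma_addr_t iovp   = CCIO_IOVP(iova);         /* 0x00345000          */
	unsigned long off = iova & ~IOVP_MASK;       /* 0x678               */
	unsigned int  idx = PDIR_INDEX(iovp);        /* 0x345 == pdir entry */

	BUG_ON(CCIO_IOVA(MKIOVP(idx), off) != iova); /* round trip holds    */
#endif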

/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) { \
		int ret; \
		unsigned int idx; \
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size); \
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx; \
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioc)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size);

/*
** Find available bit in this ioa's resource map.
** Use a "circular" search:
**   o Most IOVA's are "temporary" - avg search time should be small.
**   o keep a history of what happened for debugging
**   o KISS.
**
** Perf optimizations:
**   o search for log2(size) bits at a time.
**   o search for available resource bits using byte/word/whatever.
**   o use different search for "large" (eg > 4 pages) or "very large"
**     (eg > 16 pages) mappings.
*/

/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device for which the range is being allocated; its DMA
 *	segment boundary limits the search.
 * @size: The size of the DMA region, in bytes; it is rounded up to
 *	whole I/O pages and mapped into the I/O Pdir.
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the requested size.
 */
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %d pages_needed %d\n",
		__func__, size, pages_needed);

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	if (pages_needed <= 8) {
		/*
		 * LAN traffic will not thrash the TLB IFF the same NIC
		 * uses 8 adjacent pages to map separate payload data.
		 * ie the same byte in the resource bit map.
		 */
#if 0
		/* FIXME: bit search should shift its way through
		 * an unsigned long - not byte at a time. As it is now,
		 * we effectively allocate this byte to this mapping.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif
	/*
	** return the bit address.
	*/
	return res_idx << 3;
}
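
/*
** Illustrative example (not part of the driver): how the value returned
** by ccio_alloc_range() is consumed. The return value is a *bit* index
** into res_map, which doubles as a pdir-entry index; with the current
** byte-granular search for small requests it is always a multiple of 8.
*/
#if 0
	int idx = ccio_alloc_range(ioc, dev, 3 * IOVP_SIZE);
	u64 *pdir_start = &ioc->pdir_base[idx];	/* first I/O Pdir entry */
	dma_addr_t iovp = MKIOVP(idx);		/* idx << IOVP_SHIFT    */
#endif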

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);

/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 * I/O Pdir.
 *
 * This function frees the resources allocated for the iova.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s():  res_idx: %d pages_mapped %d\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if (pages_mapped <= 8) {
#if 0
		/* see matching comments in alloc_range */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if (pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if (pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}

/****************************************************************
**
**          CCIO dma_ops support routines
**
*****************************************************************/

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
** DMA "Page Type" and Hints
** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
**   set for subcacheline DMA transfers since we don't want to damage the
**   other part of a cacheline.
** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
**   This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
**   data can avoid this if the mapping covers full cache lines.
** o STOP_MOST is needed for atomicity across cachelines.
**   Apparently only "some EISA devices" need this.
**   Using CONFIG_EISA is a hack. Only the IOA with EISA under it needs
**   to use this hint, and only if the EISA device needs this feature.
**   According to the U2 ERS, STOP_MOST enabled pages hurt performance.
** o PREFETCH should *not* be set for cases like Multiple PCI devices
**   behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
**   device can be fetched and multiple DMA streams will thrash the
**   prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
**   and Invalidation of Prefetch Entries".
**
** FIXME: the default hints need to be per GSC device - not global.
**
** HP-UX dorks: linux device driver programming model is totally different
**    than HP-UX's. HP-UX always sets HINT_PREFETCH since its drivers
**    do special things to work on non-coherent platforms...linux has to
**    be much more careful with this.
*/
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST  0x00UL	/* only needed for "some EISA devices" */
#endif
#define HINT_UDPATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL	/* for outbound pages which are not SAFE */


/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};

/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @vba: The virtual address.
 * @hints: The DMA Hint.
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|   12 bits      |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+-----------------------------------------------+
 * |      Phys    |  Rsvd  | Prefetch |Update |Rsvd  |Lock  |Safe  |Valid  |
 * |     20:39    |        | Enable   |Enable |      |Enable|DMA   |       |
 * |    20 bits   | 5 bits | 1 bit    |1 bit  |2 bits|1 bit |1 bit |1 bit  |
 * +-----------------------+-----------------------------------------------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction. The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci; /* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	mtsp(sid, 1);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;


	/* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	**        PCX-T'? Don't know. (eg C110 or similar K-class)
	**
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	** Hopefully we can patch (NOP) these out at boot time somehow.
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	** the real mode coherence index generation of U2, the PDIR entry
	** must be flushed to memory to retain coherence."
	*/
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
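
/*
** Illustrative plain-C sketch (hedged; NOT what the driver executes) of
** the bit layout the assembly above deposits, for readers who don't
** speak PA-RISC asm. It cannot replace the real code: the virtual index
** ('vi') can only be produced by the LCI instruction, so it is taken as
** an opaque input here, and the entry would still need the FDC/SYNC
** flush shown above.
*/
#if 0
static void pdir_entry_sketch(u64 *pdir_ptr, unsigned long pa,
			      unsigned long vi, unsigned long hints)
{
	/* WORD 1: phys bits, page offset replaced by hint/valid bits */
	((u32 *)pdir_ptr)[1] = ((u32)pa & ~0xfffU) | (u32)hints;
	/* WORD 0: high phys nibble | 12-bit virtual index | next 16 phys bits */
	((u32 *)pdir_ptr)[0] = (((u32)(pa >> 48) & 0xf) << 28) |
			       (((u32)vi & 0xfff) << 16) |
			       ((u32)(pa >> 32) & 0xffff);
}
#endif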

/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while (byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
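
/*
** Worked example (illustrative): with chainid_shift == 22, chain_size
** is 4MB. Purging 16KB at some iovp issues CMD_TLB_PURGE exactly once,
** for the 4MB chain containing it; priming byte_cnt with chain_size
** guarantees at least one purge even for sub-chain ranges, and a range
** straddling two chains gets two purges.
*/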

/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshold it might be "cheaper" to just blow
 *        away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 *        PDIR entry - just once for each possible TLB entry.
 *        (We do need to mark I/O PDIR entries invalid regardless).
 *
 * FIXME: Can we change byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

	while (byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */
		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**   PCX-U/U+ do. (eg C200/C240)
		** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
		**
		** Hopefully someone figures out how to patch (NOP) the
		** FDC/SYNC out at boot time.
		*/
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));

		iovp += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm volatile("sync");
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}

/****************************************************************
**
**          CCIO dma_ops
**
*****************************************************************/

/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The PCI device.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * This function implements the pci_dma_supported function.
 */
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit devices (ie PCI/GSC) */
	return (int)(mask == 0xffffffffUL);
}

/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function.
 */
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	BUG_ON(size <= 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%x\n",
		__func__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if ((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while (size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}
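
/*
** Usage sketch (illustrative): drivers never call ccio_map_single()
** directly. They use the generic DMA API, which parisc dispatches
** through hppa_dma_ops (set in ccio_probe() below) to the routines in
** this file. 'dev', 'buf' and 'len' are hypothetical.
*/
#if 0
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... program the device with 'handle', wait for the DMA ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
#endif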

/**
 * ccio_unmap_single - Unmap an address range from the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_single function.
 */
static void
ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		  enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__func__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}

/**
 * ccio_free_consistent - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
 * @dma_handle: The device address returned from the ccio_alloc_consistent.
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	ccio_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				(void *)sg_virt_addr(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for (i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}

/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
		__func__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while (sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_single(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static struct hppa_dma_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc_consistent =	ccio_alloc_consistent,
	.alloc_noncoherent =	ccio_alloc_consistent,
	.free_consistent =	ccio_free_consistent,
	.map_single =		ccio_map_single,
	.unmap_single =		ccio_unmap_single,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,	/* NOP for U2/Uturn */
	.dma_sync_single_for_device =	NULL,	/* NOP for U2/Uturn */
	.dma_sync_sg_for_cpu =		NULL,	/* ditto */
	.dma_sync_sg_for_device =	NULL,	/* ditto */
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
	int len = 0;
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
		unsigned long avg = 0, min, max;
		int j;
#endif

		len += seq_printf(m, "%s\n", ioc->name);

		len += seq_printf(m, "Cujo 2.0 bug    : %s\n",
				  (ioc->cujo20_bug ? "yes" : "no"));

		len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
				  total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
		len += seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
				  total_pages - ioc->used_pages, ioc->used_pages,
				  (int)(ioc->used_pages * 100 / total_pages));
#endif

		len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
				  ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
		min = max = ioc->avg_search[0];
		for (j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if (ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if (ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
				  min, avg, max);

		len += seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
				  ioc->msingle_calls, ioc->msingle_pages,
				  (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_single for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		len += seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
				  min, max, (int)((max * 1000)/min));

		len += seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
				  ioc->msg_calls, ioc->msg_pages,
				  (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

		len += seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
				  ioc->usg_calls, ioc->usg_pages,
				  (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif	/* CCIO_COLLECT_STATS */

		ioc = ioc->next;
	}

	return 0;
}

static int ccio_proc_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_info, NULL);
}

static const struct file_operations ccio_proc_info_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	int len = 0;
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		u32 *res_ptr = (u32 *)ioc->res_map;
		int j;

		for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
			if ((j & 7) == 0)
				len += seq_puts(m, "\n   ");
			len += seq_printf(m, "%08x", *res_ptr);
			res_ptr++;
		}
		len += seq_puts(m, "\n\n");
		ioc = ioc->next;
		break; /* XXX - remove me */
	}

	return 0;
}

static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_bitmap_info, NULL);
}

static const struct file_operations ccio_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */

/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provided hardware path.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}

/**
 * ccio_get_iommu - Find the iommu which controls this device
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMUs and returns
 * the appropriate IOMMU for the device based on its hardware path.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP	0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages. To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}
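
/*
** Worked example (illustrative): with 4k pages, PDIR_INDEX(CUJO_20_STEP)
** is 0x10000, so the loop walks res_map in 0x2000-byte strides. Each
** res_ptr[idx] |= 0xff reserves one byte of the bitmap, i.e. 8 pdir
** entries, so the first 8 entries of every 64k-entry block starting at
** the bad iovp stay permanently marked in use.
*/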

#if 0
/* GRANT - is this needed for U2 or not? */

/*
** Get the size of the I/O TLB for this I/O MMU.
**
** If spa_shift is non-zero (ie probably U2),
** then calculate the I/O TLB size using spa_shift.
**
** Otherwise we are supposed to get the IODC entry point ENTRY TLB
** and execute it. However, both U2 and Uturn firmware supplies spa_shift.
** I think only Java (K/D/R-class too?) systems don't do this.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else

/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif /* 0 */

/* We *can't* support JAVA (T600). Venture there at your own risk. */
static const struct parisc_device_id ccio_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver = {
	.name = "ccio",
	.id_table = ccio_tbl,
	.probe = ccio_probe,
};

/**
 * ccio_ioc_init - Initialize the I/O Controller
 * @ioc: The I/O Controller.
 *
 * Initialize the I/O Controller which includes setting up the
 * I/O Page Directory, the resource map, and initializing the
 * U2/Uturn chip into virtual mode.
 */
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class.  Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/

	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */

	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/

	/* We could use larger page sizes in order to *decrease* the number
	** of mappings needed.  (ie 8k pages means 1/2 the mappings).
	**
	** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either
	**   since the pages must also be physically contiguous - typically
	**   this is the case under linux."
	*/

	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);   /* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if (NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if (NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for (i = 1 << CCIO_CHAINID_SHIFT; i; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}
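
/*
** Sizing example (illustrative, assuming 4k pages): a single-IOC box
** with 1GB of RAM gets a 1GB IOVA space (the clamp above). That yields
** a 2MB pdir (256k entries * 8 bytes), a 32KB res_map (256k entries /
** 8 bits per byte), and chainid_shift = 18 + 12 - 8 = 22, so IOVP bits
** [29:22] (LSB numbering) select one of Uturn's 256 TLB chains.
*/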

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * The (signed) casts are required for the 64-bit kernel because
	 * we only want to sign extend the lower 16 bits of the register.
	 * The upper 16 bits of range registers are hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.
	 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
		       __func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}

static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);

	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
}

static int new_ioc_area(struct resource *res, unsigned long size,
			unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that has already registered its MMIO space.
	 * Use "insert" instead of request_resource().
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
			   unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}

/*
 * Dino calls this function.  Beware that we may get called on systems
 * which have no IOC (725, B180, C160L, etc) but do have a Dino.
 * So it's legal to find no parent IOC.
 *
 * Some other issues: one of the resources in the ioc may be unassigned.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
		   (res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
		   (res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "transparent" bus bridges need to register MMIO resources
	 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
	 * registered their resources in the PDC "bus walk" (See
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}

/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return 1;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
	       (unsigned long)dev->hpa.start);

	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;
	dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);

	/* if this fails, no I/O cards will work, so may as well bug */
	BUG_ON(dev->dev.platform_data == NULL);
	HBA_DATA(dev->dev.platform_data)->iommu = ioc;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		proc_create(MODULE_NAME, 0, proc_runway_root,
			    &ccio_proc_info_fops);
		proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
			    &ccio_proc_bitmap_fops);
	}
#endif
	ioc_count++;

	parisc_has_iommu();
	return 0;
}

/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}
