/*
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *       things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *       pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *       So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *       remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);
/* This tracks the next virtual address to use for DVMA mappings. */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and the interrupt controller before kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);

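/*
 * A minimal usage sketch (hypothetical driver, not part of this file):
 * map a device's registers, poke one, then tear the mapping down.
 * phys_base, regs_len and CTRL_REG are made-up placeholders.
 *
 *	void __iomem *regs = ioremap(phys_base, regs_len);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */
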
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
				     unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL)
		name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 * Allocate a virtual range from sparc_iomap and map the physical range into it.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}

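/*
 * A minimal usage sketch (hypothetical SBus driver, not part of this file):
 * allocate a consistent control block, hand its bus address to the device,
 * and release it on teardown. 'op' is the driver's of_device and the
 * PAGE_SIZE length is a made-up placeholder.
 *
 *	u32 dvma;
 *	void *cb = sbus_alloc_consistent(&op->dev, PAGE_SIZE, &dvma);
 *	if (cb == NULL)
 *		return -ENOMEM;
 *	// ... program the device with 'dvma', use 'cb' from the CPU ...
 *	sbus_free_consistent(&op->dev, PAGE_SIZE, cb, dvma);
 */
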
/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}

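/*
 * A minimal usage sketch (hypothetical, not part of this file): stream a
 * kernel buffer to a device. The device owns the buffer between map and
 * unmap, so the CPU must not touch it in that window. 'dir' stands for
 * whatever direction value the caller's SBus DMA API uses.
 *
 *	dma_addr_t ba = sbus_map_single(dev, buf, len, dir);
 *	if (ba == 0)
 *		return -ENOMEM;
 *	// ... start the transfer, wait for completion ...
 *	sbus_unmap_single(dev, ba, len, dir);
 */
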
int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va);	/* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
EXPORT_SYMBOL(pci_free_consistent);

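/*
 * A minimal usage sketch (hypothetical PCI driver, not part of this file):
 * a descriptor ring allocated at probe time and freed at remove time.
 * RING_BYTES is a made-up placeholder.
 *
 *	dma_addr_t ring_ba;
 *	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_ba);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	// ... write ring_ba into a device register, use 'ring' from the CPU ...
 *	pci_free_consistent(pdev, RING_BYTES, ring, ring_ba);
 */
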
/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
EXPORT_SYMBOL(pci_map_single);

/* Unmap a single streaming mode DMA translation. The dma_addr and size
 * must match what was provided for in a previous pci_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_unmap_single);

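/*
 * A minimal usage sketch (hypothetical, not part of this file): stream one
 * buffer to a device and reclaim it afterwards. 'buf' and 'len' are
 * placeholders supplied by the caller.
 *
 *	dma_addr_t ba = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *	// ... hand 'ba' to the device, wait for the transfer to finish ...
 *	pci_unmap_single(pdev, ba, len, PCI_DMA_TODEVICE);
 */
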
/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(pci_map_page);

void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
EXPORT_SYMBOL(pci_unmap_page);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements.
 * (for example via virtual mapping capabilities)
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
EXPORT_SYMBOL(pci_map_sg);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_unmap_sg);

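/*
 * A minimal usage sketch (hypothetical, not part of this file): map a
 * scatterlist for a device, program each mapped entry, then unmap.
 * 'sgl' and 'nents' are placeholders supplied by the caller.
 *
 *	struct scatterlist *sg;
 *	int i, count = pci_map_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
 *	for_each_sg(sgl, sg, count, i) {
 *		// program sg->dma_address / sg->dma_length into the device
 *	}
 *	// ... run the transfer ...
 *	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
 */
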
/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_single_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_device);

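/*
 * A minimal usage sketch (hypothetical, not part of this file): peek at a
 * still-mapped receive buffer from the CPU, then return ownership to the
 * device. 'ba' and 'len' come from the earlier pci_map_single() call.
 *
 *	pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	// ... the CPU may now safely read the buffer ...
 *	pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	// ... the device owns the buffer again ...
 */
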
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL)
			nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}