1 /*
2  *	linux/arch/alpha/kernel/pci_iommu.c
3  */
4 
5 #include <linux/kernel.h>
6 #include <linux/mm.h>
7 #include <linux/pci.h>
8 #include <linux/slab.h>
9 #include <linux/bootmem.h>
10 #include <linux/scatterlist.h>
11 #include <linux/log2.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/iommu-helper.h>
14 
15 #include <asm/io.h>
16 #include <asm/hwrpb.h>
17 
18 #include "proto.h"
19 #include "pci_impl.h"
20 
21 
22 #define DEBUG_ALLOC 0
23 #if DEBUG_ALLOC > 0
24 # define DBGA(args...)		printk(KERN_DEBUG args)
25 #else
26 # define DBGA(args...)
27 #endif
28 #if DEBUG_ALLOC > 1
29 # define DBGA2(args...)		printk(KERN_DEBUG args)
30 #else
31 # define DBGA2(args...)
32 #endif
33 
34 #define DEBUG_NODIRECT 0
35 
36 #define ISA_DMA_MASK		0x00ffffff
37 
38 static inline unsigned long
39 mk_iommu_pte(unsigned long paddr)
40 {
41 	return (paddr >> (PAGE_SHIFT-1)) | 1;
42 }
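
/* Note on the encoding: the window pte is the page frame number shifted
   left by one with the valid bit set in bit 0, i.e.
   ((paddr >> PAGE_SHIFT) << 1) | 1.  As a worked example, assuming 8KB
   pages (PAGE_SHIFT == 13), physical address 0x10000 is frame 8 and
   encodes as 0x11.  */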
43 
44 /* Return the minimum of MAX or the first power of two larger
45    than main memory.  */
46 
47 unsigned long
48 size_for_memory(unsigned long max)
49 {
50 	unsigned long mem = max_low_pfn << PAGE_SHIFT;
51 	if (mem < max)
52 		max = roundup_pow_of_two(mem);
53 	return max;
54 }
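
/* Worked example (hypothetical sizes): with 384MB of main memory and a
   MAX of 1GB, mem < max, so the result is roundup_pow_of_two(384MB),
   i.e. 512MB; with 2GB of memory and the same MAX, the result is 1GB.  */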
55 
56 struct pci_iommu_arena * __init
57 iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
58 		     unsigned long window_size, unsigned long align)
59 {
60 	unsigned long mem_size;
61 	struct pci_iommu_arena *arena;
62 
63 	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
64 
65 	/* Note that the TLB lookup logic uses bitwise concatenation,
66 	   not addition, so the required arena alignment is based on
67 	   the size of the window.  Retain the align parameter so that
68 	   particular systems can over-align the arena.  */
69 	if (align < mem_size)
70 		align = mem_size;
71 
72 
73 #ifdef CONFIG_DISCONTIGMEM
74 
75 	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
76 	if (!NODE_DATA(nid) || !arena) {
77 		printk("%s: couldn't allocate arena from node %d\n"
78 		       "    falling back to system-wide allocation\n",
79 		       __func__, nid);
80 		arena = alloc_bootmem(sizeof(*arena));
81 	}
82 
83 	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
84 	if (!NODE_DATA(nid) || !arena->ptes) {
85 		printk("%s: couldn't allocate arena ptes from node %d\n"
86 		       "    falling back to system-wide allocation\n",
87 		       __func__, nid);
88 		arena->ptes = __alloc_bootmem(mem_size, align, 0);
89 	}
90 
91 #else /* CONFIG_DISCONTIGMEM */
92 
93 	arena = alloc_bootmem(sizeof(*arena));
94 	arena->ptes = __alloc_bootmem(mem_size, align, 0);
95 
96 #endif /* CONFIG_DISCONTIGMEM */
97 
98 	spin_lock_init(&arena->lock);
99 	arena->hose = hose;
100 	arena->dma_base = base;
101 	arena->size = window_size;
102 	arena->next_entry = 0;
103 
104 	/* Align allocations to a multiple of a page size.  Not needed
105 	   unless there are chip bugs.  */
106 	arena->align_entry = 1;
107 
108 	return arena;
109 }
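
/* Sizing note: the table needs one sizeof(unsigned long) == 8 byte pte
   per window page, which is what the
   window_size / (PAGE_SIZE / sizeof(unsigned long)) expression above
   computes.  As a worked example, assuming 8KB pages, a hypothetical
   1GB window needs 128K ptes, i.e. a 1MB table, and per the alignment
   rule above that table must then be at least 1MB aligned.  */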
110 
111 struct pci_iommu_arena * __init
112 iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
113 		unsigned long window_size, unsigned long align)
114 {
115 	return iommu_arena_new_node(0, hose, base, window_size, align);
116 }
117 
118 /* Must be called with the arena lock held */
119 static long
120 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
121 		       long n, long mask)
122 {
123 	unsigned long *ptes;
124 	long i, p, nent;
125 	int pass = 0;
126 	unsigned long base;
127 	unsigned long boundary_size;
128 
129 	base = arena->dma_base >> PAGE_SHIFT;
130 	if (dev) {
131 		boundary_size = dma_get_seg_boundary(dev) + 1;
132 		boundary_size >>= PAGE_SHIFT;
133 	} else {
134 		boundary_size = 1UL << (32 - PAGE_SHIFT);
135 	}
136 
137 	/* Search forward for the first mask-aligned sequence of N free ptes */
138 	ptes = arena->ptes;
139 	nent = arena->size >> PAGE_SHIFT;
140 	p = ALIGN(arena->next_entry, mask + 1);
141 	i = 0;
142 
143 again:
144 	while (i < n && p+i < nent) {
145 		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
146 			p = ALIGN(p + 1, mask + 1);
147 			goto again;
148 		}
149 
150 		if (ptes[p+i])
151 			p = ALIGN(p + i + 1, mask + 1), i = 0;
152 		else
153 			i = i + 1;
154 	}
155 
156 	if (i < n) {
157 		if (pass < 1) {
158 			/*
159 			 * Reached the end.  Flush the TLB and restart
160 			 * the search from the beginning.
161 			*/
162 			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
163 
164 			pass++;
165 			p = 0;
166 			i = 0;
167 			goto again;
168 		} else
169 			return -1;
170 	}
171 
172 	/* Success. It's the responsibility of the caller to mark them
173 	   in use before releasing the lock */
174 	return p;
175 }
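
/* Allocation strategy, in brief: candidate offsets are kept
   (mask+1)-aligned via ALIGN(), rejected if the run of N ptes would
   cross the device's DMA segment boundary (iommu_is_span_boundary),
   and the search wraps around at most once, flushing the
   scatter-gather TLB before the second pass so that entries freed
   since the last wrap can be reused.  For example, an alignment of 8
   entries gives mask 7, so runs start on 8-pte boundaries (64KB with
   8KB pages), which is what the ISA-bridge case in pci_map_single_1()
   relies on.  */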
176 
177 static long
178 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
179 		  unsigned int align)
180 {
181 	unsigned long flags;
182 	unsigned long *ptes;
183 	long i, p, mask;
184 
185 	spin_lock_irqsave(&arena->lock, flags);
186 
187 	/* Search for N empty ptes */
188 	ptes = arena->ptes;
189 	mask = max(align, arena->align_entry) - 1;
190 	p = iommu_arena_find_pages(dev, arena, n, mask);
191 	if (p < 0) {
192 		spin_unlock_irqrestore(&arena->lock, flags);
193 		return -1;
194 	}
195 
196 	/* Success.  Mark them all in use, ie not zero and invalid
197 	   for the iommu tlb that could load them from under us.
198 	   The chip specific bits will fill this in with something
199 	   kosher when we return.  */
200 	for (i = 0; i < n; ++i)
201 		ptes[p+i] = IOMMU_INVALID_PTE;
202 
203 	arena->next_entry = p + n;
204 	spin_unlock_irqrestore(&arena->lock, flags);
205 
206 	return p;
207 }
208 
209 static void
210 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
211 {
212 	unsigned long *p;
213 	long i;
214 
215 	p = arena->ptes + ofs;
216 	for (i = 0; i < n; ++i)
217 		p[i] = 0;
218 }
219 
220 /* True if the machine supports DAC addressing, and DEV can
221    make use of it given MASK.  */
222 static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
223 
224 /* Map a single buffer of the indicated size for PCI DMA in streaming
225    mode.  The 32-bit PCI bus mastering address to use is returned.
226    Once the device is given the dma address, the device owns this memory
227    until either pci_unmap_single or pci_dma_sync_single is performed.  */
228 
229 static dma_addr_t
230 pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
231 		 int dac_allowed)
232 {
233 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
234 	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
235 	struct pci_iommu_arena *arena;
236 	long npages, dma_ofs, i;
237 	unsigned long paddr;
238 	dma_addr_t ret;
239 	unsigned int align = 0;
240 	struct device *dev = pdev ? &pdev->dev : NULL;
241 
242 	paddr = __pa(cpu_addr);
243 
244 #if !DEBUG_NODIRECT
245 	/* First check to see if we can use the direct map window.  */
246 	if (paddr + size + __direct_map_base - 1 <= max_dma
247 	    && paddr + size <= __direct_map_size) {
248 		ret = paddr + __direct_map_base;
249 
250 		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
251 		      cpu_addr, size, ret, __builtin_return_address(0));
252 
253 		return ret;
254 	}
255 #endif
256 
257 	/* Next, use DAC if selected earlier.  */
258 	if (dac_allowed) {
259 		ret = paddr + alpha_mv.pci_dac_offset;
260 
261 		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
262 		      cpu_addr, size, ret, __builtin_return_address(0));
263 
264 		return ret;
265 	}
266 
267 	/* If the machine doesn't define a pci_tbi routine, we have to
268 	   assume it doesn't support sg mapping, and, since we tried to
269 	   use direct_map above, it now must be considered an error. */
270 	if (! alpha_mv.mv_pci_tbi) {
271 		static int been_here = 0; /* Only print the message once. */
272 		if (!been_here) {
273 		    printk(KERN_WARNING "pci_map_single: no HW sg\n");
274 		    been_here = 1;
275 		}
276 		return 0;
277 	}
278 
279 	arena = hose->sg_pci;
280 	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
281 		arena = hose->sg_isa;
282 
283 	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
284 
285 	/* Force allocation to 64KB boundary for ISA bridges. */
286 	if (pdev && pdev == isa_bridge)
287 		align = 8;
288 	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
289 	if (dma_ofs < 0) {
290 		printk(KERN_WARNING "pci_map_single failed: "
291 		       "could not allocate dma page tables\n");
292 		return 0;
293 	}
294 
295 	paddr &= PAGE_MASK;
296 	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
297 		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
298 
299 	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
300 	ret += (unsigned long)cpu_addr & ~PAGE_MASK;
301 
302 	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
303 	      cpu_addr, size, npages, ret, __builtin_return_address(0));
304 
305 	return ret;
306 }
307 
308 dma_addr_t
309 pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
310 {
311 	int dac_allowed;
312 
313 	if (dir == PCI_DMA_NONE)
314 		BUG();
315 
316 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
317 	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
318 }
319 EXPORT_SYMBOL(pci_map_single);
320 
321 dma_addr_t
322 pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
323 	     size_t size, int dir)
324 {
325 	int dac_allowed;
326 
327 	if (dir == PCI_DMA_NONE)
328 		BUG();
329 
330 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
331 	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
332 				size, dac_allowed);
333 }
334 EXPORT_SYMBOL(pci_map_page);
335 
336 /* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
337    SIZE must match what was provided for in a previous pci_map_single
338    call.  All other usages are undefined.  After this call, reads by
339    the cpu to the buffer are guaranteed to see whatever the device
340    wrote there.  */
341 
342 void
343 pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
344 		 int direction)
345 {
346 	unsigned long flags;
347 	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
348 	struct pci_iommu_arena *arena;
349 	long dma_ofs, npages;
350 
351 	if (direction == PCI_DMA_NONE)
352 		BUG();
353 
354 	if (dma_addr >= __direct_map_base
355 	    && dma_addr < __direct_map_base + __direct_map_size) {
356 		/* Nothing to do.  */
357 
358 		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
359 		      dma_addr, size, __builtin_return_address(0));
360 
361 		return;
362 	}
363 
364 	if (dma_addr > 0xffffffff) {
365 		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
366 		      dma_addr, size, __builtin_return_address(0));
367 		return;
368 	}
369 
370 	arena = hose->sg_pci;
371 	if (!arena || dma_addr < arena->dma_base)
372 		arena = hose->sg_isa;
373 
374 	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
375 	if (dma_ofs * PAGE_SIZE >= arena->size) {
376 		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
377 		       " base %lx size %x\n", dma_addr, arena->dma_base,
378 		       arena->size);
379 		return;
380 		BUG();
381 	}
382 
383 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
384 
385 	spin_lock_irqsave(&arena->lock, flags);
386 
387 	iommu_arena_free(arena, dma_ofs, npages);
388 
389         /* If we're freeing ptes above the `next_entry' pointer (they
390            may have snuck back into the TLB since the last wrap flush),
391            we need to flush the TLB before reallocating the latter.  */
392 	if (dma_ofs >= arena->next_entry)
393 		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
394 
395 	spin_unlock_irqrestore(&arena->lock, flags);
396 
397 	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
398 	      dma_addr, size, npages, __builtin_return_address(0));
399 }
400 EXPORT_SYMBOL(pci_unmap_single);
401 
402 void
403 pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
404 	       size_t size, int direction)
405 {
406 	pci_unmap_single(pdev, dma_addr, size, direction);
407 }
408 EXPORT_SYMBOL(pci_unmap_page);
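
/* Illustrative streaming-DMA usage (a sketch, not part of this file;
   "pdev", "buf" and "len" are hypothetical):

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (!bus)
		goto map_failed;
	... point the device at "bus" and wait for the transfer ...
	pci_unmap_single(pdev, bus, len, PCI_DMA_FROMDEVICE);

   A zero return indicates that no usable mapping could be made.  */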
409 
410 /* Allocate and map kernel buffer using consistent mode DMA for PCI
411    device.  Returns non-NULL cpu-view pointer to the buffer if
412    successful and sets *DMA_ADDRP to the pci side dma address as well,
413    else DMA_ADDRP is undefined.  */
414 
415 void *
416 __pci_alloc_consistent(struct pci_dev *pdev, size_t size,
417 		       dma_addr_t *dma_addrp, gfp_t gfp)
418 {
419 	void *cpu_addr;
420 	long order = get_order(size);
421 
422 	gfp &= ~GFP_DMA;
423 
424 try_again:
425 	cpu_addr = (void *)__get_free_pages(gfp, order);
426 	if (! cpu_addr) {
427 		printk(KERN_INFO "pci_alloc_consistent: "
428 		       "get_free_pages failed from %p\n",
429 			__builtin_return_address(0));
430 		/* ??? Really atomic allocation?  Otherwise we could play
431 		   with vmalloc and sg if we can't find contiguous memory.  */
432 		return NULL;
433 	}
434 	memset(cpu_addr, 0, size);
435 
436 	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
437 	if (*dma_addrp == 0) {
438 		free_pages((unsigned long)cpu_addr, order);
439 		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
440 			return NULL;
441 		/* The address doesn't fit required mask and we
442 		   do not have iommu. Try again with GFP_DMA. */
443 		gfp |= GFP_DMA;
444 		goto try_again;
445 	}
446 
447 	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
448 	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
449 
450 	return cpu_addr;
451 }
452 EXPORT_SYMBOL(__pci_alloc_consistent);
453 
454 /* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
455    be values that were returned from pci_alloc_consistent.  SIZE must
456    be the same as what was passed into pci_alloc_consistent.
457    References to the memory and mappings associated with CPU_ADDR or
458    DMA_ADDR past this call are illegal.  */
459 
460 void
461 pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
462 		    dma_addr_t dma_addr)
463 {
464 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
465 	free_pages((unsigned long)cpu_addr, get_order(size));
466 
467 	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
468 	      dma_addr, size, __builtin_return_address(0));
469 }
470 EXPORT_SYMBOL(pci_free_consistent);
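
/* Illustrative consistent-DMA usage (a sketch, not part of this file;
   "pdev" and RING_BYTES are hypothetical):

	dma_addr_t ring_bus;
	void *ring = __pci_alloc_consistent(pdev, RING_BYTES, &ring_bus,
					    GFP_KERNEL);
	if (!ring)
		goto no_mem;
	... share "ring" with the device via "ring_bus" ...
	pci_free_consistent(pdev, RING_BYTES, ring, ring_bus);
 */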
471 
472 /* Classify the elements of the scatterlist.  Write dma_address
473    of each element with:
474 	0   : Followers all physically adjacent.
475 	1   : Followers all virtually adjacent.
476 	-1  : Not leader, physically adjacent to previous.
477 	-2  : Not leader, virtually adjacent to previous.
478    Write dma_length of each leader with the combined lengths of
479    the mergeable followers.  */
480 
481 #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
482 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
483 
484 static void
485 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
486 	    int virt_ok)
487 {
488 	unsigned long next_paddr;
489 	struct scatterlist *leader;
490 	long leader_flag, leader_length;
491 	unsigned int max_seg_size;
492 
493 	leader = sg;
494 	leader_flag = 0;
495 	leader_length = leader->length;
496 	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
497 
498 	/* We will not merge sg entries without a device. */
499 	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
500 	for (++sg; sg < end; ++sg) {
501 		unsigned long addr, len;
502 		addr = SG_ENT_PHYS_ADDRESS(sg);
503 		len = sg->length;
504 
505 		if (leader_length + len > max_seg_size)
506 			goto new_segment;
507 
508 		if (next_paddr == addr) {
509 			sg->dma_address = -1;
510 			leader_length += len;
511 		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
512 			sg->dma_address = -2;
513 			leader_flag = 1;
514 			leader_length += len;
515 		} else {
516 new_segment:
517 			leader->dma_address = leader_flag;
518 			leader->dma_length = leader_length;
519 			leader = sg;
520 			leader_flag = 0;
521 			leader_length = len;
522 		}
523 
524 		next_paddr = addr + len;
525 	}
526 
527 	leader->dma_address = leader_flag;
528 	leader->dma_length = leader_length;
529 }
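
/* Worked example (hypothetical list): take three entries where the
   second starts physically right where the first ends and the third
   begins on a page boundary (with the first two also ending on one)
   but is not physically adjacent.  With virt_ok set and max_seg_size
   not exceeded, the first entry becomes the leader with dma_address 1
   and dma_length equal to the sum of all three lengths, the second is
   marked -1 (physically adjacent follower) and the third -2 (virtually
   adjacent follower).  */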
530 
531 /* Given a scatterlist leader, choose an allocation method and fill
532    in the blanks.  */
533 
534 static int
535 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
536 	struct scatterlist *out, struct pci_iommu_arena *arena,
537 	dma_addr_t max_dma, int dac_allowed)
538 {
539 	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
540 	long size = leader->dma_length;
541 	struct scatterlist *sg;
542 	unsigned long *ptes;
543 	long npages, dma_ofs, i;
544 
545 #if !DEBUG_NODIRECT
546 	/* If everything is physically contiguous, and the addresses
547 	   fall into the direct-map window, use it.  */
548 	if (leader->dma_address == 0
549 	    && paddr + size + __direct_map_base - 1 <= max_dma
550 	    && paddr + size <= __direct_map_size) {
551 		out->dma_address = paddr + __direct_map_base;
552 		out->dma_length = size;
553 
554 		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
555 		     __va(paddr), size, out->dma_address);
556 
557 		return 0;
558 	}
559 #endif
560 
561 	/* If physically contiguous and DAC is available, use it.  */
562 	if (leader->dma_address == 0 && dac_allowed) {
563 		out->dma_address = paddr + alpha_mv.pci_dac_offset;
564 		out->dma_length = size;
565 
566 		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
567 		     __va(paddr), size, out->dma_address);
568 
569 		return 0;
570 	}
571 
572 	/* Otherwise, we'll use the iommu to make the pages virtually
573 	   contiguous.  */
574 
575 	paddr &= ~PAGE_MASK;
576 	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
577 	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
578 	if (dma_ofs < 0) {
579 		/* If we attempted a direct map above but failed, die.  */
580 		if (leader->dma_address == 0)
581 			return -1;
582 
583 		/* Otherwise, break up the remaining virtually contiguous
584 		   hunks into individual direct maps and retry.  */
585 		sg_classify(dev, leader, end, 0);
586 		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
587 	}
588 
589 	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
590 	out->dma_length = size;
591 
592 	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
593 	     __va(paddr), size, out->dma_address, npages);
594 
595 	/* All virtually contiguous.  We need to find the length of each
596 	   physically contiguous subsegment to fill in the ptes.  */
597 	ptes = &arena->ptes[dma_ofs];
598 	sg = leader;
599 	do {
600 #if DEBUG_ALLOC > 0
601 		struct scatterlist *last_sg = sg;
602 #endif
603 
604 		size = sg->length;
605 		paddr = SG_ENT_PHYS_ADDRESS(sg);
606 
607 		while (sg+1 < end && (int) sg[1].dma_address == -1) {
608 			size += sg[1].length;
609 			sg++;
610 		}
611 
612 		npages = iommu_num_pages(paddr, size, PAGE_SIZE);
613 
614 		paddr &= PAGE_MASK;
615 		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
616 			*ptes++ = mk_iommu_pte(paddr);
617 
618 #if DEBUG_ALLOC > 0
619 		DBGA("    (%ld) [%p,%x] np %ld\n",
620 		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
621 		     last_sg->length, npages);
622 		while (++last_sg <= sg) {
623 			DBGA("        (%ld) [%p,%x] cont\n",
624 			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
625 			     last_sg->length);
626 		}
627 #endif
628 	} while (++sg < end && (int) sg->dma_address < 0);
629 
630 	return 1;
631 }
632 
633 int
634 pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
635 	   int direction)
636 {
637 	struct scatterlist *start, *end, *out;
638 	struct pci_controller *hose;
639 	struct pci_iommu_arena *arena;
640 	dma_addr_t max_dma;
641 	int dac_allowed;
642 	struct device *dev;
643 
644 	if (direction == PCI_DMA_NONE)
645 		BUG();
646 
647 	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
648 
649 	dev = pdev ? &pdev->dev : NULL;
650 
651 	/* Fast path single entry scatterlists.  */
652 	if (nents == 1) {
653 		sg->dma_length = sg->length;
654 		sg->dma_address
655 		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
656 				     sg->length, dac_allowed);
657 		return sg->dma_address != 0;
658 	}
659 
660 	start = sg;
661 	end = sg + nents;
662 
663 	/* First, prepare information about the entries.  */
664 	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
665 
666 	/* Second, figure out where we're going to map things.  */
667 	if (alpha_mv.mv_pci_tbi) {
668 		hose = pdev ? pdev->sysdata : pci_isa_hose;
669 		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
670 		arena = hose->sg_pci;
671 		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
672 			arena = hose->sg_isa;
673 	} else {
674 		max_dma = -1;
675 		arena = NULL;
676 		hose = NULL;
677 	}
678 
679 	/* Third, iterate over the scatterlist leaders and allocate
680 	   dma space as needed.  */
681 	for (out = sg; sg < end; ++sg) {
682 		if ((int) sg->dma_address < 0)
683 			continue;
684 		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
685 			goto error;
686 		out++;
687 	}
688 
689 	/* Mark the end of the list for pci_unmap_sg.  */
690 	if (out < end)
691 		out->dma_length = 0;
692 
693 	if (out - start == 0)
694 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
695 	DBGA("pci_map_sg: %ld entries\n", out - start);
696 
697 	return out - start;
698 
699  error:
700 	printk(KERN_WARNING "pci_map_sg failed: "
701 	       "could not allocate dma page tables\n");
702 
703 	/* Some allocation failed while mapping the scatterlist
704 	   entries.  Unmap them now.  */
705 	if (out > start)
706 		pci_unmap_sg(pdev, start, out - start, direction);
707 	return 0;
708 }
709 EXPORT_SYMBOL(pci_map_sg);
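
/* Illustrative scatter-gather usage (a sketch, not part of this file;
   "pdev", "sglist" and "nents" are hypothetical):

	int i, mapped = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
	if (!mapped)
		goto map_failed;
	for (i = 0; i < mapped; i++)
		... use sg_dma_address(sglist + i) and sg_dma_len(sglist + i) ...
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);

   Note that pci_unmap_sg() takes the original nents, not the possibly
   smaller count returned by pci_map_sg().  */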
710 
711 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
712    rules concerning calls here are the same as for pci_unmap_single()
713    above.  */
714 
715 void
716 pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
717 	     int direction)
718 {
719 	unsigned long flags;
720 	struct pci_controller *hose;
721 	struct pci_iommu_arena *arena;
722 	struct scatterlist *end;
723 	dma_addr_t max_dma;
724 	dma_addr_t fbeg, fend;
725 
726 	if (direction == PCI_DMA_NONE)
727 		BUG();
728 
729 	if (! alpha_mv.mv_pci_tbi)
730 		return;
731 
732 	hose = pdev ? pdev->sysdata : pci_isa_hose;
733 	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
734 	arena = hose->sg_pci;
735 	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
736 		arena = hose->sg_isa;
737 
738 	fbeg = -1, fend = 0;
739 
740 	spin_lock_irqsave(&arena->lock, flags);
741 
742 	for (end = sg + nents; sg < end; ++sg) {
743 		dma64_addr_t addr;
744 		size_t size;
745 		long npages, ofs;
746 		dma_addr_t tend;
747 
748 		addr = sg->dma_address;
749 		size = sg->dma_length;
750 		if (!size)
751 			break;
752 
753 		if (addr > 0xffffffff) {
754 			/* It's a DAC address -- nothing to do.  */
755 			DBGA("    (%ld) DAC [%lx,%lx]\n",
756 			      sg - end + nents, addr, size);
757 			continue;
758 		}
759 
760 		if (addr >= __direct_map_base
761 		    && addr < __direct_map_base + __direct_map_size) {
762 			/* Nothing to do.  */
763 			DBGA("    (%ld) direct [%lx,%lx]\n",
764 			      sg - end + nents, addr, size);
765 			continue;
766 		}
767 
768 		DBGA("    (%ld) sg [%lx,%lx]\n",
769 		     sg - end + nents, addr, size);
770 
771 		npages = iommu_num_pages(addr, size, PAGE_SIZE);
772 		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
773 		iommu_arena_free(arena, ofs, npages);
774 
775 		tend = addr + size - 1;
776 		if (fbeg > addr) fbeg = addr;
777 		if (fend < tend) fend = tend;
778 	}
779 
780         /* If we're freeing ptes above the `next_entry' pointer (they
781            may have snuck back into the TLB since the last wrap flush),
782            we need to flush the TLB before reallocating the latter.  */
783 	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
784 		alpha_mv.mv_pci_tbi(hose, fbeg, fend);
785 
786 	spin_unlock_irqrestore(&arena->lock, flags);
787 
788 	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
789 }
790 EXPORT_SYMBOL(pci_unmap_sg);
791 
792 
793 /* Return whether the given PCI device DMA address mask can be
794    supported properly.  */
795 
796 int
797 pci_dma_supported(struct pci_dev *pdev, u64 mask)
798 {
799 	struct pci_controller *hose;
800 	struct pci_iommu_arena *arena;
801 
802 	/* If there exists a direct map, and the mask fits either
803 	   the entire direct mapped space or the total system memory as
804 	   shifted by the map base */
805 	if (__direct_map_size != 0
806 	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
807 		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
808 		return 1;
809 
810 	/* Check that we have a scatter-gather arena that fits.  */
811 	hose = pdev ? pdev->sysdata : pci_isa_hose;
812 	arena = hose->sg_isa;
813 	if (arena && arena->dma_base + arena->size - 1 <= mask)
814 		return 1;
815 	arena = hose->sg_pci;
816 	if (arena && arena->dma_base + arena->size - 1 <= mask)
817 		return 1;
818 
819 	/* As last resort try ZONE_DMA.  */
820 	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
821 		return 1;
822 
823 	return 0;
824 }
825 EXPORT_SYMBOL(pci_dma_supported);
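
/* Worked example (hypothetical machine): with a direct-map window at
   __direct_map_base 1GB covering __direct_map_size 1GB, the first test
   accepts a 31-bit mask, since base + size - 1 is 0x7fffffff.  A
   24-bit ISA-style mask would instead have to be covered by one of the
   scatter-gather arenas or, as a last resort, by ZONE_DMA.  */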
826 
827 
828 /*
829  * AGP GART extensions to the IOMMU
830  */
831 int
832 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
833 {
834 	unsigned long flags;
835 	unsigned long *ptes;
836 	long i, p;
837 
838 	if (!arena) return -EINVAL;
839 
840 	spin_lock_irqsave(&arena->lock, flags);
841 
842 	/* Search for N empty ptes.  */
843 	ptes = arena->ptes;
844 	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
845 	if (p < 0) {
846 		spin_unlock_irqrestore(&arena->lock, flags);
847 		return -1;
848 	}
849 
850 	/* Success.  Mark them all reserved (ie not zero and invalid)
851 	   for the iommu tlb that could load them from under us.
852 	   They will be filled in with valid bits by _bind() */
853 	for (i = 0; i < pg_count; ++i)
854 		ptes[p+i] = IOMMU_RESERVED_PTE;
855 
856 	arena->next_entry = p + pg_count;
857 	spin_unlock_irqrestore(&arena->lock, flags);
858 
859 	return p;
860 }
861 
862 int
863 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
864 {
865 	unsigned long *ptes;
866 	long i;
867 
868 	if (!arena) return -EINVAL;
869 
870 	ptes = arena->ptes;
871 
872 	/* Make sure they're all reserved first... */
873 	for(i = pg_start; i < pg_start + pg_count; i++)
874 		if (ptes[i] != IOMMU_RESERVED_PTE)
875 			return -EBUSY;
876 
877 	iommu_arena_free(arena, pg_start, pg_count);
878 	return 0;
879 }
880 
881 int
882 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
883 	   unsigned long *physaddrs)
884 {
885 	unsigned long flags;
886 	unsigned long *ptes;
887 	long i, j;
888 
889 	if (!arena) return -EINVAL;
890 
891 	spin_lock_irqsave(&arena->lock, flags);
892 
893 	ptes = arena->ptes;
894 
895 	for(j = pg_start; j < pg_start + pg_count; j++) {
896 		if (ptes[j] != IOMMU_RESERVED_PTE) {
897 			spin_unlock_irqrestore(&arena->lock, flags);
898 			return -EBUSY;
899 		}
900 	}
901 
902 	for(i = 0, j = pg_start; i < pg_count; i++, j++)
903 		ptes[j] = mk_iommu_pte(physaddrs[i]);
904 
905 	spin_unlock_irqrestore(&arena->lock, flags);
906 
907 	return 0;
908 }
909 
910 int
911 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
912 {
913 	unsigned long *p;
914 	long i;
915 
916 	if (!arena) return -EINVAL;
917 
918 	p = arena->ptes + pg_start;
919 	for(i = 0; i < pg_count; i++)
920 		p[i] = IOMMU_RESERVED_PTE;
921 
922 	return 0;
923 }
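
/* Illustrative GART lifecycle (a sketch, not part of this file;
   "arena", "npages" and "physaddrs" are hypothetical):

	long ofs = iommu_reserve(arena, npages, 0);
	if (ofs < 0)
		goto no_room;
	iommu_bind(arena, ofs, npages, physaddrs);
	... use the GART window ...
	iommu_unbind(arena, ofs, npages);
	iommu_release(arena, ofs, npages);
 */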
924 
925 /* True if the machine supports DAC addressing, and DEV can
926    make use of it given MASK.  */
927 
928 static int
929 pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
930 {
931 	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
932 	int ok = 1;
933 
934 	/* If this is not set, the machine doesn't support DAC at all.  */
935 	if (dac_offset == 0)
936 		ok = 0;
937 
938 	/* The device has to be able to address our DAC bit.  */
939 	if ((dac_offset & dev->dma_mask) != dac_offset)
940 		ok = 0;
941 
942 	/* If both conditions above are met, we are fine. */
943 	DBGA("pci_dac_dma_supported %s from %p\n",
944 	     ok ? "yes" : "no", __builtin_return_address(0));
945 
946 	return ok;
947 }
948 
949 /* Helper for generic DMA-mapping functions. */
950 
951 struct pci_dev *
952 alpha_gendev_to_pci(struct device *dev)
953 {
954 	if (dev && dev->bus == &pci_bus_type)
955 		return to_pci_dev(dev);
956 
957 	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
958 	   BUG() otherwise. */
959 	BUG_ON(!isa_bridge);
960 
961 	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
962 	   bridge is bus master then). */
963 	if (!dev || !dev->dma_mask || !*dev->dma_mask)
964 		return isa_bridge;
965 
966 	/* For EISA bus masters, return isa_bridge (it might have smaller
967 	   dma_mask due to wiring limitations). */
968 	if (*dev->dma_mask >= isa_bridge->dma_mask)
969 		return isa_bridge;
970 
971 	/* This assumes ISA bus master with dma_mask 0xffffff. */
972 	return NULL;
973 }
974 EXPORT_SYMBOL(alpha_gendev_to_pci);
975 
976 int
977 dma_set_mask(struct device *dev, u64 mask)
978 {
979 	if (!dev->dma_mask ||
980 	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
981 		return -EIO;
982 
983 	*dev->dma_mask = mask;
984 
985 	return 0;
986 }
987 EXPORT_SYMBOL(dma_set_mask);
988