/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

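/*
 * Probe one "iommu" node: map its control registers, enable the
 * 256MB DVMA window, and allocate the page table together with the
 * bitmap that tracks which IOPTEs are in use.
 */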
static void __init sbus_iommu_init(struct of_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
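	/* Implementation and version live in the top byte of the
	 * control register; set the range to 256MB and enable.
	 */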
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

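/*
 * Set up an IOMMU instance for every "iommu" node in the device
 * tree and propagate the archdata to the devices behind it.
 */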
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct of_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while(start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while(start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

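/*
 * Map npages contiguous pages into the DVMA window and return the
 * bus address of the first one.  The page color handed to the bit
 * map allocator keeps DVMA and physical addresses congruent.
 */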
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

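/*
 * Map a kernel virtual range for a single transfer: round the range
 * out to whole pages, map them, then add the intra-page offset back
 * into the returned bus address.
 */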
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

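/*
 * The _noflush/_gflush/_pflush variants below differ only in how the
 * CPU caches are flushed before the mapping is made: not at all
 * (I/O coherent chips), everything at once, or page by page.
 */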
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while(page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

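/*
 * Tear down a mapping made by iommu_get_one(): clear the IOPTEs,
 * invalidate the corresponding IOTLB entries, and return the range
 * to the bitmap.
 */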
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
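		/* Poison the handle so stale uses stand out. */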
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
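/*
 * Establish a consistent DMA area: remap the kernel pages with the
 * precomputed dvma_prot protection in the CPU page tables and map
 * them into the DVMA window, colored by physical address.
 */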
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

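/* Undo iommu_map_dma_area(): clear the IOPTEs and free the range. */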
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

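/* With an IOMMU there is nothing to wire down; these no-ops merely
 * fill the mmu_lockarea/mmu_unlockarea slots.
 */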
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

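/*
 * Patch the mmu_* entry points to the iommu_* implementations above,
 * picking the flush strategy that matches the CPU's cache behavior.
 */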
void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is given */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
472