// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

#include "mm_32.h"

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

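/*
 * Each XPT entry maps one page of DVMA space: MKIOPTE() stores the
 * physical address shifted right by 4 bits together with the cacheable,
 * writable and valid permission bits, so consecutive physical pages
 * differ by 0x100 in the encoded entry.
 */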
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static const struct dma_map_ops iounit_dma_ops;

static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t __iomem *xpt;
	iopte_t __iomem *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

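	/*
	 * The allocation bitmap is split into three ranges (BMAP1, BMAP2 and
	 * BMAPM); limit[] records their boundaries and rotor[] keeps a
	 * per-range next-fit position for iounit_get_area().
	 */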
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	for (; xpt < xptend; xpt++)
		sbus_writel(0, xpt);

	op->dev.dma_ops = &iounit_dma_ops;
}

static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

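	/* Every sun4d SBus interface ("sbi") node gets its own IO-UNIT. */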
	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

/*
 * Find a run of free XPT entries covering [vaddr, vaddr + size), program
 * them and return the resulting DVMA address.  The caller must hold
 * iounit->lock.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/*
	 * 'i' packs the order in which the three bitmap ranges are tried,
	 * one range index per nibble, least significant nibble first:
	 * single-page requests start in range 1, two-page requests in
	 * range 2, anything larger in range 3.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
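	/* Bit 'scan' is free; make sure the following npages - 1 bits are too. */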
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
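	/*
	 * 'scan' now points at the start of the free run.  Program one XPT
	 * entry per page; the encoded physical address advances by 0x100
	 * (one page shifted right by 4) per iteration.
	 */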
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, enum dma_data_direction dir,
		unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	struct scatterlist *sg;
	unsigned long flags;
	int i;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
	return nents;
}

static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
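	/* Reuse 'len' as the page count and 'vaddr' as the first bitmap index. */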
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags, vaddr, len;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

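	/*
	 * Map each page at its DVMA address twice: in the kernel page tables,
	 * so the CPU can reach the buffer through the DVMA alias returned to
	 * the caller, and in the IO-UNIT XPT, so the device can.
	 */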
	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
#endif

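/*
 * Installed per device in iounit_iommu_init() via op->dev.dma_ops, so SBus
 * drivers reach these routines through the generic DMA API.
 */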
static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc			= iounit_alloc,
	.free			= iounit_free,
#endif
	.map_page		= iounit_map_page,
	.unmap_page		= iounit_unmap_page,
	.map_sg			= iounit_map_sg,
	.unmap_sg		= iounit_unmap_sg,
};