/*
 * IOMMU for IPMMU/IPMMUI
 * Copyright (C) 2012  Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/dma-iommu.h>
#include "shmobile-ipmmu.h"

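/*
 * Page table geometry: the IPMMU walks an ARM short-descriptor style
 * two-level table.  The first-level (L1) table holds 4-byte entries, each
 * covering a 1 MiB section, and its size is board-configurable; each 1 KiB
 * second-level (L2) table holds 256 4-byte entries covering 4 KiB pages.
 */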
#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
#define L1_LEN (L1_SIZE / 4)
#define L1_ALIGN L1_SIZE
#define L2_SIZE SZ_1K
#define L2_LEN (L2_SIZE / 4)
#define L2_ALIGN L2_SIZE

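/*
 * A page table is tracked as a pair: the CPU pointer used to update the
 * entries, and the DMA address handed to the IPMMU hardware for walking.
 */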
struct shmobile_iommu_domain_pgtable {
	uint32_t *pgtable;
	dma_addr_t handle;
};

struct shmobile_iommu_archdata {
	struct list_head attached_list;
	struct dma_iommu_mapping *iommu_mapping;
	spinlock_t attach_lock;
	struct shmobile_iommu_domain *attached;
	int num_attached_devices;
	struct shmobile_ipmmu *ipmmu;
};

struct shmobile_iommu_domain {
	struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
	spinlock_t map_lock;
	spinlock_t attached_list_lock;
	struct list_head attached_list;
};

static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;

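/*
 * Allocate a zeroed page table from the given slab cache and map it for
 * device access: the CPU writes entries through pgtable->pgtable, the
 * hardware walks the table through pgtable->handle.
 */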
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
			 struct kmem_cache *cache, size_t size)
{
	pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
	if (!pgtable->pgtable)
		return -ENOMEM;
	pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
					 DMA_TO_DEVICE);
	return 0;
}

static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
			 struct kmem_cache *cache, size_t size)
{
	dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
	kmem_cache_free(cache, pgtable->pgtable);
}

static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
			     unsigned int index)
{
	return pgtable->pgtable[index];
}

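/*
 * Write 'count' identical entries starting at 'index', then sync the
 * touched range back to memory so the hardware table walker sees the
 * update.
 */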
static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
			  unsigned int index, unsigned int count, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		pgtable->pgtable[index + i] = val;
	dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
				   sizeof(val) * count, DMA_TO_DEVICE);
}

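/*
 * Allocate the per-domain L1 table up front; L2 tables are allocated
 * lazily by l2alloc() the first time a 4 KiB or 64 KiB mapping lands in
 * the corresponding 1 MiB section.
 */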
static int shmobile_iommu_domain_init(struct iommu_domain *domain)
{
	struct shmobile_iommu_domain *sh_domain;
	int i, ret;

	sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
	if (!sh_domain)
		return -ENOMEM;
	ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
	if (ret < 0) {
		kfree(sh_domain);
		return ret;
	}
	for (i = 0; i < L1_LEN; i++)
		sh_domain->l2[i].pgtable = NULL;
	spin_lock_init(&sh_domain->map_lock);
	spin_lock_init(&sh_domain->attached_list_lock);
	INIT_LIST_HEAD(&sh_domain->attached_list);
	domain->priv = sh_domain;
	return 0;
}

static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	int i;

	for (i = 0; i < L1_LEN; i++) {
		if (sh_domain->l2[i].pgtable)
			pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
	}
	pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
	kfree(sh_domain);
	domain->priv = NULL;
}

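/*
 * Attaching points the IPMMU's TLB at the domain's L1 table.  A device's
 * archdata can be attached to only one domain at a time, so attaching to
 * a second domain fails with -EBUSY; repeated attachments to the same
 * domain just bump the device count.
 */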
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
					struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	int ret = -EBUSY;

	if (!archdata)
		return -ENODEV;
	spin_lock(&sh_domain->attached_list_lock);
	spin_lock(&archdata->attach_lock);
	if (archdata->attached != sh_domain) {
		if (archdata->attached)
			goto err;
		ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
			      0);
		ipmmu_tlb_flush(archdata->ipmmu);
		archdata->attached = sh_domain;
		archdata->num_attached_devices = 0;
		list_add(&archdata->attached_list, &sh_domain->attached_list);
	}
	archdata->num_attached_devices++;
	ret = 0;
err:
	spin_unlock(&archdata->attach_lock);
	spin_unlock(&sh_domain->attached_list_lock);
	return ret;
}

static void shmobile_iommu_detach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
	struct shmobile_iommu_domain *sh_domain = domain->priv;

	if (!archdata)
		return;
	spin_lock(&sh_domain->attached_list_lock);
	spin_lock(&archdata->attach_lock);
	archdata->num_attached_devices--;
	if (!archdata->num_attached_devices) {
		ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
		ipmmu_tlb_flush(archdata->ipmmu);
		archdata->attached = NULL;
		list_del(&archdata->attached_list);
	}
	spin_unlock(&archdata->attach_lock);
	spin_unlock(&sh_domain->attached_list_lock);
}

static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
{
	struct shmobile_iommu_archdata *archdata;

	spin_lock(&sh_domain->attached_list_lock);
	list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
		ipmmu_tlb_flush(archdata->ipmmu);
	spin_unlock(&sh_domain->attached_list_lock);
}

static int l2alloc(struct shmobile_iommu_domain *sh_domain,
		   unsigned int l1index)
{
	int ret;

	if (!sh_domain->l2[l1index].pgtable) {
		ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
		if (ret < 0)
			return ret;
	}
	pgtable_write(&sh_domain->l1, l1index, 1,
		      sh_domain->l2[l1index].handle | 0x1);
	return 0;
}

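/*
 * Freeing an L2 table is split in two: l2free() clears the L1 entry and
 * detaches the table while map_lock is held, and l2realfree() releases
 * the memory afterwards, once the lock has been dropped and the TLBs
 * flushed.
 */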
static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
{
	if (l2->pgtable)
		pgtable_free(l2, l2cache, L2_SIZE);
}

static void l2free(struct shmobile_iommu_domain *sh_domain,
		   unsigned int l1index,
		   struct shmobile_iommu_domain_pgtable *l2)
{
	pgtable_write(&sh_domain->l1, l1index, 1, 0);
	if (sh_domain->l2[l1index].pgtable) {
		*l2 = sh_domain->l2[l1index];
		sh_domain->l2[l1index].pgtable = NULL;
	}
}

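/*
 * Install a mapping of 1 MiB (an L1 section entry), 64 KiB (16 identical
 * L2 large-page entries) or 4 KiB (one L2 small-page entry); the IOMMU
 * core splits larger requests into these sizes per pgsize_bitmap.  The
 * OR'd low bits (0xc02, 0xff1, 0xff2) are the descriptor type plus
 * access-permission/attribute bits of the ARM short-descriptor format.
 */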
static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
{
	struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	unsigned int l1index, l2index;
	int ret;

	l1index = iova >> 20;
	switch (size) {
	case SZ_4K:
		l2index = (iova >> 12) & 0xff;
		spin_lock(&sh_domain->map_lock);
		ret = l2alloc(sh_domain, l1index);
		if (!ret)
			pgtable_write(&sh_domain->l2[l1index], l2index, 1,
				      paddr | 0xff2);
		spin_unlock(&sh_domain->map_lock);
		break;
	case SZ_64K:
		l2index = (iova >> 12) & 0xf0;
		spin_lock(&sh_domain->map_lock);
		ret = l2alloc(sh_domain, l1index);
		if (!ret)
			pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
				      paddr | 0xff1);
		spin_unlock(&sh_domain->map_lock);
		break;
	case SZ_1M:
		spin_lock(&sh_domain->map_lock);
		l2free(sh_domain, l1index, &l2);
		pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
		spin_unlock(&sh_domain->map_lock);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}
	if (!ret)
		domain_tlb_flush(sh_domain);
	l2realfree(&l2);
	return ret;
}

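/*
 * Tear down a mapping.  A section-aligned request of at least 1 MiB
 * clears the L1 entry; otherwise the L2 entry type (large or small page)
 * determines how much is unmapped and reported back to the caller.
 */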
static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
				   unsigned long iova, size_t size)
{
	struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	unsigned int l1index, l2index;
	uint32_t l2entry = 0;
	size_t ret = 0;

	l1index = iova >> 20;
	if (!(iova & 0xfffff) && size >= SZ_1M) {
		spin_lock(&sh_domain->map_lock);
		l2free(sh_domain, l1index, &l2);
		spin_unlock(&sh_domain->map_lock);
		ret = SZ_1M;
		goto done;
	}
	l2index = (iova >> 12) & 0xff;
	spin_lock(&sh_domain->map_lock);
	if (sh_domain->l2[l1index].pgtable)
		l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
	switch (l2entry & 3) {
	case 1:
		if (l2index & 0xf)
			break;
		pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
		ret = SZ_64K;
		break;
	case 2:
		pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
		ret = SZ_4K;
		break;
	}
	spin_unlock(&sh_domain->map_lock);
done:
	if (ret)
		domain_tlb_flush(sh_domain);
	l2realfree(&l2);
	return ret;
}

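/*
 * Translate an IOVA by walking the tables in software: try the L2 entry
 * for the address first, then fall back to the L1 section entry.
 * Returns 0 if nothing is mapped at that address.
 */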
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
					       dma_addr_t iova)
{
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	uint32_t l1entry = 0, l2entry = 0;
	unsigned int l1index, l2index;

	l1index = iova >> 20;
	l2index = (iova >> 12) & 0xff;
	spin_lock(&sh_domain->map_lock);
	if (sh_domain->l2[l1index].pgtable)
		l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
	else
		l1entry = pgtable_read(&sh_domain->l1, l1index);
	spin_unlock(&sh_domain->map_lock);
	switch (l2entry & 3) {
	case 1:
		return (l2entry & ~0xffff) | (iova & 0xffff);
	case 2:
		return (l2entry & ~0xfff) | (iova & 0xfff);
	default:
		if ((l1entry & 3) == 2)
			return (l1entry & ~0xfffff) | (iova & 0xfffff);
		return 0;
	}
}

static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
{
	unsigned int i, n = ipmmu->num_dev_names;

	for (i = 0; i < n; i++) {
		if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
			return 1;
	}
	return 0;
}

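/*
 * Devices are matched by name against the list the platform code
 * registered with the IPMMU.  All matching devices share a single
 * dma_iommu_mapping, created on first use, that covers the whole IOVA
 * range the L1 table can address (L1_LEN MiB).
 */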
static int shmobile_iommu_add_device(struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
	struct dma_iommu_mapping *mapping;

	if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
		return 0;
	mapping = archdata->iommu_mapping;
	if (!mapping) {
		mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						   L1_LEN << 20, 0);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);
		archdata->iommu_mapping = mapping;
	}
	dev->archdata.iommu = archdata;
	if (arm_iommu_attach_device(dev, mapping))
		pr_err("arm_iommu_attach_device failed\n");
	return 0;
}

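/*
 * pgsize_bitmap advertises the three mapping sizes handled by
 * shmobile_iommu_map() and shmobile_iommu_unmap() above.
 */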
static struct iommu_ops shmobile_iommu_ops = {
	.domain_init = shmobile_iommu_domain_init,
	.domain_destroy = shmobile_iommu_domain_destroy,
	.attach_dev = shmobile_iommu_attach_device,
	.detach_dev = shmobile_iommu_detach_device,
	.map = shmobile_iommu_map,
	.unmap = shmobile_iommu_unmap,
	.iova_to_phys = shmobile_iommu_iova_to_phys,
	.add_device = shmobile_iommu_add_device,
	.pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
};

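/*
 * Called by the platform-specific IPMMU code (see shmobile-ipmmu.h):
 * create the slab caches backing the page tables, aligned as the
 * hardware requires, and register the ops on the platform bus.
 */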
int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
	static struct shmobile_iommu_archdata *archdata;

	l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
				    L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!l1cache)
		return -ENOMEM;
	l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
				    L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!l2cache) {
		kmem_cache_destroy(l1cache);
		return -ENOMEM;
	}
	archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
	if (!archdata) {
		kmem_cache_destroy(l1cache);
		kmem_cache_destroy(l2cache);
		return -ENOMEM;
	}
	spin_lock_init(&archdata->attach_lock);
	archdata->attached = NULL;
	/* kmalloc() does not zero the struct; add_device reads this field */
	archdata->iommu_mapping = NULL;
	archdata->ipmmu = ipmmu;
	ipmmu_archdata = archdata;
	bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
	return 0;
}