/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 * Author: Varun Sethi <varun.sethi@freescale.com>
 *
 */

#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__

#include "fsl_pamu_domain.h"

#include <sysdev/fsl_pci.h>

/*
 * Global spinlock that needs to be held while
 * configuring PAMU.
 */
static DEFINE_SPINLOCK(iommu_lock);

static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);

static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct fsl_dma_domain, iommu_domain);
}

static int __init iommu_init_mempool(void)
{
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
						  sizeof(struct fsl_dma_domain),
						  0,
						  SLAB_HWCACHE_ALIGN,
						  NULL);
	if (!fsl_pamu_domain_cache) {
		pr_debug("Couldn't create fsl iommu_domain cache\n");
		return -ENOMEM;
	}

	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
						sizeof(struct device_domain_info),
						0,
						SLAB_HWCACHE_ALIGN,
						NULL);
	if (!iommu_devinfo_cache) {
		pr_debug("Couldn't create devinfo cache\n");
		kmem_cache_destroy(fsl_pamu_domain_cache);
		return -ENOMEM;
	}

	return 0;
}

static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
	u32 win_cnt = dma_domain->win_cnt;
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;

	geom = &dma_domain->iommu_domain.geometry;

	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
		return 0;
	}

	if (win_cnt > 1) {
		u64 subwin_size;
		dma_addr_t subwin_iova;
		u32 wnd;

		subwin_size = dma_domain->geom_size >> ilog2(win_cnt);
		subwin_iova = iova & ~(subwin_size - 1);
		wnd = (subwin_iova - geom->aperture_start) >> ilog2(subwin_size);
		win_ptr = &dma_domain->win_arr[wnd];
	}

	if (win_ptr->valid)
		return win_ptr->paddr + (iova & (win_ptr->size - 1));

	return 0;
}
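
/*
 * Worked example for get_phys_addr() above (illustrative values, not
 * taken from this driver): with a 1 GiB geometry starting at iova 0 and
 * win_cnt = 4, subwin_size is 256 MiB (0x10000000). For iova 0x30400000,
 * subwin_iova is 0x30000000, so wnd = 3, and the returned address is
 * win_arr[3].paddr plus the 0x400000 offset into that subwindow.
 */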

static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
	struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
	int i, ret = 0;	/* initialized: no valid subwindow means nothing to map */
	unsigned long rpn, flags;

	for (i = 0; i < dma_domain->win_cnt; i++) {
		if (sub_win_ptr[i].valid) {
			rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
						 sub_win_ptr[i].size,
						 ~(u32)0,
						 rpn,
						 dma_domain->snoop_id,
						 dma_domain->stash_id,
						 (i > 0) ? 1 : 0,
						 sub_win_ptr[i].prot);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_config_ppaace(liodn, wnd_addr,
				 wnd->size,
				 ~(u32)0,
				 wnd->paddr >> PAMU_PAGE_SHIFT,
				 dma_domain->snoop_id, dma_domain->stash_id,
				 0, wnd->prot);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret)
		pr_debug("PAACE configuration failed for liodn %d\n", liodn);

	return ret;
}

/* Map the DMA window corresponding to the LIODN */
static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
{
	if (dma_domain->win_cnt > 1)
		return map_subwins(liodn, dma_domain);
	else
		return map_win(liodn, dma_domain);
}

/* Update window/subwindow mapping for the LIODN */
static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[wnd_nr];
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (dma_domain->win_cnt > 1) {
		ret = pamu_config_spaace(liodn, dma_domain->win_cnt, wnd_nr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id,
					 dma_domain->stash_id,
					 (wnd_nr > 0) ? 1 : 0,
					 wnd->prot);
		if (ret)
			pr_debug("Subwindow reconfiguration failed for liodn %d\n",
				 liodn);
	} else {
		phys_addr_t wnd_addr;

		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;

		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
					 ~(u32)0,
					 wnd->paddr >> PAMU_PAGE_SHIFT,
					 dma_domain->snoop_id, dma_domain->stash_id,
					 0, wnd->prot);
		if (ret)
			pr_debug("Window reconfiguration failed for liodn %d\n",
				 liodn);
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
			      u32 val)
{
	int ret = 0, i;
	unsigned long flags;

	spin_lock_irqsave(&iommu_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
			 liodn);
		spin_unlock_irqrestore(&iommu_lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < dma_domain->win_cnt; i++) {
		ret = pamu_update_paace_stash(liodn, i, val);
		if (ret) {
			pr_debug("Failed to update SPAACE %d field for liodn %d\n",
				 i, liodn);
			spin_unlock_irqrestore(&iommu_lock, flags);
			return ret;
		}
	}

	spin_unlock_irqrestore(&iommu_lock, flags);

	return ret;
}

/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
			  struct fsl_dma_domain *dma_domain,
			  struct iommu_domain_geometry *geom_attr,
			  u32 win_cnt)
{
	phys_addr_t window_addr, window_size;
	phys_addr_t subwin_size;
	int ret = 0, i;
	u32 omi_index = ~(u32)0;
	unsigned long flags;

	/*
	 * Configure the omi_index at the geometry setup time.
	 * This is a static value which depends on the type of
	 * device and would not change thereafter.
	 */
	get_ome_index(&omi_index, dev);

	window_addr = geom_attr->aperture_start;
	window_size = dma_domain->geom_size;

	spin_lock_irqsave(&iommu_lock, flags);
	ret = pamu_disable_liodn(liodn);
	if (!ret)
		ret = pamu_config_ppaace(liodn, window_addr, window_size, omi_index,
					 0, dma_domain->snoop_id,
					 dma_domain->stash_id, win_cnt, 0);
	spin_unlock_irqrestore(&iommu_lock, flags);
	if (ret) {
		pr_debug("PAACE configuration failed for liodn %d, win_cnt=%d\n",
			 liodn, win_cnt);
		return ret;
	}

	if (win_cnt > 1) {
		subwin_size = window_size >> ilog2(win_cnt);
		for (i = 0; i < win_cnt; i++) {
			spin_lock_irqsave(&iommu_lock, flags);
			ret = pamu_disable_spaace(liodn, i);
			if (!ret)
				ret = pamu_config_spaace(liodn, win_cnt, i,
							 subwin_size, omi_index,
							 0, dma_domain->snoop_id,
							 dma_domain->stash_id,
							 0, 0);
			spin_unlock_irqrestore(&iommu_lock, flags);
			if (ret) {
				pr_debug("SPAACE configuration failed for liodn %d\n",
					 liodn);
				return ret;
			}
		}
	}

	return ret;
}

static int check_size(u64 size, dma_addr_t iova)
{
	/*
	 * Size must be a power of two and at least be equal
	 * to PAMU page size.
	 */
	if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
		pr_debug("Size too small or not a power of two\n");
		return -EINVAL;
	}

	/* iova must be aligned to the window size */
	if (iova & (size - 1)) {
		pr_debug("Address is not aligned with window size\n");
		return -EINVAL;
	}

	return 0;
}
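
/*
 * For example (illustrative values): size = 0x100000 (1 MiB) with
 * iova = 0x300000 passes both checks; size = 0x180000 fails the
 * power-of-two check, and iova = 0x280000 is rejected because it is
 * not aligned to a 1 MiB window.
 */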

static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
{
	struct fsl_dma_domain *domain;

	domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->stash_id = ~(u32)0;
	domain->snoop_id = ~(u32)0;
	domain->win_cnt = pamu_get_max_subwin_cnt();
	domain->geom_size = 0;

	INIT_LIST_HEAD(&domain->devices);

	spin_lock_init(&domain->domain_lock);

	return domain;
}

static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
{
	unsigned long flags;

	list_del(&info->link);
	spin_lock_irqsave(&iommu_lock, flags);
	if (win_cnt > 1)
		pamu_free_subwins(info->liodn);
	pamu_disable_liodn(info->liodn);
	spin_unlock_irqrestore(&iommu_lock, flags);
	spin_lock_irqsave(&device_domain_lock, flags);
	info->dev->archdata.iommu_domain = NULL;
	kmem_cache_free(iommu_devinfo_cache, info);
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static void detach_device(struct device *dev, struct fsl_dma_domain *dma_domain)
{
	struct device_domain_info *info, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Remove the device from the domain device list */
	list_for_each_entry_safe(info, tmp, &dma_domain->devices, link) {
		if (!dev || (info->dev == dev))
			remove_device_ref(info, dma_domain->win_cnt);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct device *dev)
{
	struct device_domain_info *info, *old_domain_info;
	unsigned long flags;

	spin_lock_irqsave(&device_domain_lock, flags);
	/*
	 * Check here if the device is already attached to domain or not.
	 * If the device is already attached to a domain detach it.
	 */
	old_domain_info = dev->archdata.iommu_domain;
	if (old_domain_info && old_domain_info->domain != dma_domain) {
		spin_unlock_irqrestore(&device_domain_lock, flags);
		detach_device(dev, old_domain_info->domain);
		spin_lock_irqsave(&device_domain_lock, flags);
	}

	info = kmem_cache_zalloc(iommu_devinfo_cache, GFP_ATOMIC);
	if (!info) {
		/* Allocation failed; leave the device unattached. */
		spin_unlock_irqrestore(&device_domain_lock, flags);
		return;
	}

	info->dev = dev;
	info->liodn = liodn;
	info->domain = dma_domain;

	list_add(&info->link, &dma_domain->devices);
	/*
	 * In case of devices with multiple LIODNs just store
	 * the info for the first LIODN as all
	 * LIODNs share the same domain
	 */
	if (!dev->archdata.iommu_domain)
		dev->archdata.iommu_domain = info;
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	return get_phys_addr(dma_domain, iova);
}

static bool fsl_pamu_capable(enum iommu_cap cap)
{
	return cap == IOMMU_CAP_CACHE_COHERENCY;
}

static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);

	dma_domain->enabled = 0;
	dma_domain->mapped = 0;

	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}

static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
	struct fsl_dma_domain *dma_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
		return NULL;
	}
	/* default geometry 64 GB i.e. maximum system address */
	dma_domain->iommu_domain.geometry.aperture_start = 0;
	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
	dma_domain->iommu_domain.geometry.force_aperture = true;

	return &dma_domain->iommu_domain;
}

/* Configure geometry settings for all LIODNs associated with domain */
static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
				    struct iommu_domain_geometry *geom_attr,
				    u32 win_cnt)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
				     geom_attr, win_cnt);
		if (ret)
			break;
	}

	return ret;
}

/* Update stash destination for all LIODNs associated with the domain */
static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn_stash(info->liodn, dma_domain, val);
		if (ret)
			break;
	}

	return ret;
}

/* Update domain mappings for all LIODNs associated with the domain */
static int update_domain_mapping(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = update_liodn(info->liodn, dma_domain, wnd_nr);
		if (ret)
			break;
	}
	return ret;
}

static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
{
	struct device_domain_info *info;
	int ret = 0;

	list_for_each_entry(info, &dma_domain->devices, link) {
		if (dma_domain->win_cnt == 1 && dma_domain->enabled) {
			ret = pamu_disable_liodn(info->liodn);
			if (!ret)
				dma_domain->enabled = 0;
		} else {
			ret = pamu_disable_spaace(info->liodn, wnd_nr);
		}
	}

	return ret;
}

static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return;
	}

	if (dma_domain->win_arr[wnd_nr].valid) {
		ret = disable_domain_win(dma_domain, wnd_nr);
		if (!ret) {
			dma_domain->win_arr[wnd_nr].valid = 0;
			dma_domain->mapped--;
		}
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}

static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
	unsigned long flags;
	u64 win_size;

	if (prot & IOMMU_READ)
		pamu_prot |= PAACE_AP_PERMS_QUERY;
	if (prot & IOMMU_WRITE)
		pamu_prot |= PAACE_AP_PERMS_UPDATE;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (!dma_domain->win_arr) {
		pr_debug("Number of windows not configured\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	if (wnd_nr >= dma_domain->win_cnt) {
		pr_debug("Invalid window index\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
	if (size > win_size) {
		pr_debug("Invalid window size\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	if (dma_domain->win_cnt == 1) {
		if (dma_domain->enabled) {
			pr_debug("Disable the window before updating the mapping\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EBUSY;
		}

		ret = check_size(size, domain->geometry.aperture_start);
		if (ret) {
			pr_debug("Aperture start not aligned to the size\n");
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -EINVAL;
		}
	}

	wnd = &dma_domain->win_arr[wnd_nr];
	if (!wnd->valid) {
		wnd->paddr = paddr;
		wnd->size = size;
		wnd->prot = pamu_prot;

		ret = update_domain_mapping(dma_domain, wnd_nr);
		if (!ret) {
			wnd->valid = 1;
			dma_domain->mapped++;
		}
	} else {
		pr_debug("Disable the window before updating the mapping\n");
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
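
/*
 * Typical caller flow (a hedged sketch of how an unmanaged-domain user
 * such as VFIO might drive this driver through the generic IOMMU API of
 * this era; the actual call sites live outside this file, and geom,
 * w_count, paddr and size are placeholder values):
 *
 *	domain = iommu_domain_alloc(bus);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, &geom);
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &w_count);
 *	iommu_attach_device(domain, dev);
 *	iommu_domain_window_enable(domain, 0, paddr, size,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	enable = 1;
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_ENABLE, &enable);
 */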

/*
 * Attach the LIODN to the DMA domain and configure the geometry
 * and window mappings.
 */
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				struct device *dev, const u32 *liodn,
				int num)
{
	unsigned long flags;
	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	for (i = 0; i < num; i++) {
		/* Ensure that LIODN value is valid */
		if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
			pr_debug("Invalid liodn %d, attach device failed for %s\n",
				 liodn[i], dev->of_node->full_name);
			ret = -EINVAL;
			break;
		}

		attach_device(dma_domain, liodn[i], dev);
		/*
		 * Check if geometry has already been configured
		 * for the domain. If yes, set the geometry for
		 * the LIODN.
		 */
		if (dma_domain->win_arr) {
			u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;

			ret = pamu_set_liodn(liodn[i], dev, dma_domain,
					     &domain->geometry, win_cnt);
			if (ret)
				break;
			if (dma_domain->mapped) {
				/*
				 * Create window/subwindow mapping for
				 * the LIODN.
				 */
				ret = map_liodn(liodn[i], dma_domain);
				if (ret)
					break;
			}
		}
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}

static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while attaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (liodn) {
		liodn_cnt = len / sizeof(u32);
		ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
	} else {
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
		ret = -EINVAL;
	}

	return ret;
}

static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
	struct pci_controller *pci_ctl;

	/*
	 * Use LIODN of the PCI controller while detaching a
	 * PCI device.
	 */
	if (dev_is_pci(dev)) {
		pdev = to_pci_dev(dev);
		pci_ctl = pci_bus_to_host(pdev->bus);
		/*
		 * make dev point to pci controller device
		 * so we can get the LIODN programmed by
		 * u-boot.
		 */
		dev = pci_ctl->parent;
	}

	prop = of_get_property(dev->of_node, "fsl,liodn", &len);
	if (prop)
		detach_device(dev, dma_domain);
	else
		pr_debug("missing fsl,liodn property at %s\n",
			 dev->of_node->full_name);
}

static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
	struct iommu_domain_geometry *geom_attr = data;
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	dma_addr_t geom_size;
	unsigned long flags;

	geom_size = geom_attr->aperture_end - geom_attr->aperture_start + 1;
	/*
	 * Sanity check the geometry size. Also, we do not support
	 * DMA outside of the geometry.
	 */
	if (check_size(geom_size, geom_attr->aperture_start) ||
	    !geom_attr->force_aperture) {
		pr_debug("Invalid PAMU geometry attributes\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Copy the domain geometry information */
	memcpy(&domain->geometry, geom_attr,
	       sizeof(struct iommu_domain_geometry));
	dma_domain->geom_size = geom_size;

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

/* Set the domain stash attribute */
static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
{
	struct pamu_stash_attribute *stash_attr = data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	memcpy(&dma_domain->dma_stash, stash_attr,
	       sizeof(struct pamu_stash_attribute));

	dma_domain->stash_id = get_stash_id(stash_attr->cache,
					    stash_attr->cpu);
	if (dma_domain->stash_id == ~(u32)0) {
		pr_debug("Invalid stash attributes\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = update_domain_stash(dma_domain, dma_domain->stash_id);

	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
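
/*
 * A minimal usage sketch (values assumed for illustration): stash a
 * domain's DMA data into CPU 0's L1 cache. struct pamu_stash_attribute
 * and PAMU_ATTR_CACHE_L1 come from asm/fsl_pamu_stash.h.
 *
 *	struct pamu_stash_attribute stash = {
 *		.cpu	= 0,
 *		.cache	= PAMU_ATTR_CACHE_L1,
 *	};
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_FSL_PAMU_STASH, &stash);
 */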

/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
	struct device_domain_info *info;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);

	if (enable && !dma_domain->mapped) {
		pr_debug("Can't enable DMA domain without valid mapping\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -ENODEV;
	}

	dma_domain->enabled = enable;
	list_for_each_entry(info, &dma_domain->devices, link) {
		ret = (enable) ? pamu_enable_liodn(info->liodn) :
			pamu_disable_liodn(info->liodn);
		if (ret)
			pr_debug("Unable to set dma state for liodn %d\n",
				 info->liodn);
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return 0;
}

static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_GEOMETRY:
		ret = configure_domain_geometry(domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		ret = configure_domain_stash(dma_domain, data);
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		ret = configure_domain_dma_state(dma_domain, *(int *)data);
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;

	switch (attr_type) {
	case DOMAIN_ATTR_FSL_PAMU_STASH:
		memcpy(data, &dma_domain->dma_stash,
		       sizeof(struct pamu_stash_attribute));
		break;
	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
		*(int *)data = dma_domain->enabled;
		break;
	case DOMAIN_ATTR_FSL_PAMUV1:
		*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
		break;
	default:
		pr_debug("Unsupported attribute type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
{
	u32 version;

	/* Check the PCI controller version number by reading the BRR1 register */
	version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
	version &= PCI_FSL_BRR1_VER;
	/* If PCI controller version is >= 0x204 we can partition endpoints */
	return version >= 0x204;
}

/* Get iommu group information from peer devices or devices on the parent bus */
static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
{
	struct pci_dev *tmp;
	struct iommu_group *group;
	struct pci_bus *bus = pdev->bus;

	/*
	 * Traverse the PCI bus device list to get
	 * the shared iommu group.
	 */
	while (bus) {
		list_for_each_entry(tmp, &bus->devices, bus_list) {
			if (tmp == pdev)
				continue;
			group = iommu_group_get(&tmp->dev);
			if (group)
				return group;
		}

		bus = bus->parent;
	}

	return NULL;
}

static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
	struct pci_controller *pci_ctl;
	bool pci_endpt_partitioning;
	struct iommu_group *group = NULL;

	pci_ctl = pci_bus_to_host(pdev->bus);
	pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
	/* We can partition PCIe devices so assign device group to the device */
	if (pci_endpt_partitioning) {
		group = pci_device_group(&pdev->dev);

		/*
		 * The PCIe controller is not a partitionable entity;
		 * free the controller device iommu_group.
		 */
		if (pci_ctl->parent->iommu_group)
			iommu_group_remove_device(pci_ctl->parent);
	} else {
		/*
		 * All devices connected to the controller will share the
		 * PCI controller's device group. If this is the first
		 * device to be probed for the pci controller, copy the
		 * device group information from the PCI controller device
		 * node and remove the PCI controller iommu group.
		 * For subsequent devices, the iommu group information can
		 * be obtained from sibling devices (i.e. from the bus_devices
		 * link list).
		 */
		if (pci_ctl->parent->iommu_group) {
			group = get_device_iommu_group(pci_ctl->parent);
			iommu_group_remove_device(pci_ctl->parent);
		} else {
			group = get_shared_pci_device_group(pdev);
		}
	}

	if (!group)
		group = ERR_PTR(-ENODEV);

	return group;
}

static struct iommu_group *fsl_pamu_device_group(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-ENODEV);
	int len;

	/*
	 * For platform devices we allocate a separate group for
	 * each of the devices.
	 */
	if (dev_is_pci(dev))
		group = get_pci_device_group(to_pci_dev(dev));
	else if (of_get_property(dev->of_node, "fsl,liodn", &len))
		group = get_device_iommu_group(dev);

	return group;
}

static int fsl_pamu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void fsl_pamu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma_domain->domain_lock, flags);
	/* Ensure domain is inactive i.e. DMA should be disabled for the domain */
	if (dma_domain->enabled) {
		pr_debug("Can't set geometry attributes as domain is active\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EBUSY;
	}

	/* Ensure that the geometry has been set for the domain */
	if (!dma_domain->geom_size) {
		pr_debug("Please configure geometry before setting the number of windows\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	/*
	 * Ensure we have a valid window count, i.e. it must not exceed
	 * the maximum permissible limit and must be a power of two.
	 */
	if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
		pr_debug("Invalid window count\n");
		spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
		return -EINVAL;
	}

	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
				       w_count > 1 ? w_count : 0);
	if (!ret) {
		kfree(dma_domain->win_arr);
		dma_domain->win_arr = kcalloc(w_count,
					      sizeof(*dma_domain->win_arr),
					      GFP_ATOMIC);
		if (!dma_domain->win_arr) {
			spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
			return -ENOMEM;
		}
		dma_domain->win_cnt = w_count;
	}
	spin_unlock_irqrestore(&dma_domain->domain_lock, flags);

	return ret;
}
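
/*
 * Sizing example for fsl_pamu_set_windows() above (illustrative): with
 * a 1 GiB geometry, w_count = 4 yields four 256 MiB subwindows;
 * fsl_pamu_window_enable() will then reject any single window larger
 * than geom_size >> ilog2(win_cnt).
 */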

static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);

	return dma_domain->win_cnt;
}

static const struct iommu_ops fsl_pamu_ops = {
	.capable = fsl_pamu_capable,
	.domain_alloc = fsl_pamu_domain_alloc,
	.domain_free = fsl_pamu_domain_free,
	.attach_dev = fsl_pamu_attach_device,
	.detach_dev = fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
	.domain_window_disable = fsl_pamu_window_disable,
	.domain_get_windows = fsl_pamu_get_windows,
	.domain_set_windows = fsl_pamu_set_windows,
	.iova_to_phys = fsl_pamu_iova_to_phys,
	.domain_set_attr = fsl_pamu_set_domain_attr,
	.domain_get_attr = fsl_pamu_get_domain_attr,
	.add_device = fsl_pamu_add_device,
	.remove_device = fsl_pamu_remove_device,
	.device_group = fsl_pamu_device_group,
};

int __init pamu_domain_init(void)
{
	int ret = 0;

	ret = iommu_init_mempool();
	if (ret)
		return ret;

	bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
	bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);

	return ret;
}