// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

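/* IOMMU and streaming buffer control registers are accessed as 64-bit
 * loads and stores to their physical addresses via the MMU-bypass ASI.
 */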
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

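/* Build IOPTE protection bits.  A consistent mapping is valid and
 * cacheable, with the DMA context number encoded starting at bit 47.
 * A streaming mapping additionally routes accesses through the
 * streaming buffer (IOPTE_STBUF).
 */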
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte) \
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

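/* Retarget an IOPTE at the dummy page: only the page frame bits are
 * rewritten, the remaining protection and context bits are preserved.
 */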
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

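/* Set up one sun4u IOMMU instance: the allocation bitmap and pool
 * state, the dummy page that inactive IOPTEs point at, and the TSB
 * (IOMMU page table) itself, which is tsbsize bytes and allocated on
 * numa_node.  Returns 0 on success or -ENOMEM on failure.
 */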
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

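/* Allocate a run of npages consecutive IOPTEs from the table.
 * Returns a pointer to the first IOPTE, or NULL if the table is
 * exhausted.
 */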
static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

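/* Allocate a DMA context number.  Context 0 is reserved to mean "no
 * context" and is the fallback when the bitmap is exhausted, so bit 0
 * is never set or cleared in ctx_bitmap.
 */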
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

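/* Allocate size bytes of zeroed, IO-page-aligned memory and map it
 * through the IOMMU with consistent (non-streaming) IOPTEs.  Returns
 * the CPU virtual address and stores the DMA address in *dma_addrp.
 */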
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

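/* Map a single physically contiguous buffer for streaming DMA.  A
 * context is allocated when the IOMMU supports context flushing,
 * streaming IOPTEs are used when the streaming buffer is enabled, and
 * the returned DMA address preserves the sub-page offset of the buffer.
 */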
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;
}

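/* Force dirty data for this mapping out of the streaming buffer.
 * Prefer a context-based flush when both the streaming buffer and the
 * IOMMU support it, otherwise flush page by page.  Unless the transfer
 * was to the device only, finish by arming the flush flag and spinning
 * (with a bounded timeout) until the hardware sets it.
 */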
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

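/* Tear down a mapping made by dma_4u_map_page(): flush the streaming
 * buffer (unless DMA_ATTR_SKIP_CPU_SYNC was given), point the IOPTEs
 * back at the dummy page, release the context, and free the table
 * range.
 */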
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

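/* Map a scatterlist for streaming DMA.  All entries share one context
 * and one set of protection bits; consecutive entries whose DMA
 * addresses turn out to be contiguous are merged into a single output
 * segment as long as the device's maximum segment size and segment
 * boundary are respected.  Returns the number of mapped segments, or
 * 0 on failure.
 */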
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

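/* The sync-for-cpu operations only need to flush the streaming buffer;
 * when the streaming buffer is not enabled there is nothing to do.
 */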
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

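/* A device can DMA through this IOMMU only if its DMA mask is at least
 * as wide as the IOMMU's own DMA address mask (with an exception for
 * the ALi sound device workaround).
 */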
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;

	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

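/* DMA operations for sun4u systems, where the IOMMU and streaming
 * buffer are programmed directly via PIO.  Installed below as the
 * default dma_ops.
 */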
static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);