/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc.
 * Author: Becky Bruce
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

unsigned int ppc_swiotlb_enable;

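/*
 * Report the widest DMA mask that is actually useful for this device:
 * enough to cover all of RAM (or the direct DMA window, if that is
 * smaller), expressed as an all-ones bitmask.
 */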
static u64 swiotlb_powerpc_get_required(struct device *dev)
{
	u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;

	end = memblock_end_of_DRAM();
	if (max_direct_dma_addr && end > max_direct_dma_addr)
		end = max_direct_dma_addr;
	end += get_dma_offset(dev);

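	/*
	 * Round up to an all-ones mask covering the highest reachable
	 * address, e.g. end = 0x123456789 gives mask = 0x1ffffffff.
	 */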
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM.  Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_map_ops swiotlb_dma_ops = {
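	/* Coherent allocations never live in highmem, so keep the direct ops. */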
	.alloc = __dma_direct_alloc_coherent,
	.free = __dma_direct_free_coherent,
	.mmap = dma_direct_mmap_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.mapping_error = swiotlb_dma_mapping_error,
	.get_required_mask = swiotlb_powerpc_get_required,
};

void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
	struct pci_controller *hose;
	struct dev_archdata *sd;

	hose = pci_bus_to_host(pdev->bus);
	sd = &pdev->dev.archdata;
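	/*
	 * Remember the top of the controller's inbound DMA window;
	 * addresses beyond it may need to be bounced through swiotlb.
	 */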
	sd->max_direct_dma_addr =
		hose->dma_window_base_cur + hose->dma_window_size;
}

static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;
	struct dev_archdata *sd;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	sd = &dev->archdata;
	sd->max_direct_dma_addr = 0;

	/* May need to bounce if the device can't address all of DRAM */
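	/* (mask is 2^n - 1, so mask + 1 is the first unreachable address) */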
	if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
		set_dma_ops(dev, &swiotlb_dma_ops);

	return NOTIFY_DONE;
}

static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};

int __init swiotlb_setup_bus_notifier(void)
{
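	/*
	 * Watch platform device additions so devices that cannot reach
	 * all of RAM are switched over to the swiotlb ops.
	 */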
	bus_register_notifier(&platform_bus_type,
			      &ppc_swiotlb_plat_bus_notifier);
	return 0;
}

void __init swiotlb_detect_4g(void)
{
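	/*
	 * Any RAM above 4GB means 32-bit-only devices may need bouncing,
	 * so enable swiotlb and keep ZONE_DMA32 within the first 4GB.
	 */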
	if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
		ppc_swiotlb_enable = 1;
#ifdef CONFIG_ZONE_DMA32
		limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT);
#endif
	}
}

static int __init check_swiotlb_enabled(void)
{
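	/*
	 * The bounce pool was reserved early in boot; if it turned out
	 * not to be needed, give the memory back.
	 */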
	if (ppc_swiotlb_enable)
		swiotlb_print_info();
	else
		swiotlb_free();

	return 0;
}
subsys_initcall(check_swiotlb_enabled);