/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, iSeries-specific parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/iommu.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/iommu.h>

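/*
 * Build TCE entries for @npages pages starting at table slot @index,
 * mapping the kernel buffer at @uaddr.  Each entry is written through
 * the HvCallXm_setTce hypervisor call; virtual bus and PCI tables use
 * different permission bits.
 */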
static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	u64 rc;
	u64 tce, rpn;

	while (npages--) {
		rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
		tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		if (tbl->it_type == TCE_VB) {
			/* Virtual Bus */
			tce |= TCE_VALID|TCE_ALLIO;
			if (direction != DMA_TO_DEVICE)
				tce |= TCE_VB_WRITE;
		} else {
			/* PCI Bus */
			tce |= TCE_PCI_READ; /* Read allowed */
			if (direction != DMA_TO_DEVICE)
				tce |= TCE_PCI_WRITE;
		}

		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
		if (rc)
			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
					rc);
		index++;
		uaddr += TCE_PAGE_SIZE;
	}
	return 0;
}

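/*
 * Clear @npages TCE entries starting at table slot @index by writing
 * zero TCEs through the hypervisor.
 */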
static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
	u64 rc;

	while (npages--) {
		rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
		if (rc)
			panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
					rc);
		index++;
	}
}

/*
 * Structure passed to HvCallXm_getTceTableParms
 */
struct iommu_table_cb {
	unsigned long	itc_busno;	/* Bus number for this tce table */
	unsigned long	itc_start;	/* Will be NULL for secondary */
	unsigned long	itc_totalsize;	/* Size (in pages) of whole table */
	unsigned long	itc_offset;	/* Index into real tce table of the
					   start of our section */
	unsigned long	itc_size;	/* Size (in pages) of our section */
	unsigned long	itc_index;	/* Index of this tce table */
	unsigned short	itc_maxtables;	/* Max num of tables for partition */
	unsigned char	itc_virtbus;	/* Flag to indicate virtual bus */
	unsigned char	itc_slotno;	/* IOA Tce Slot Index */
	unsigned char	itc_rsvd[4];
};

/*
 * Call Hv with the architected data structure to get TCE table info.
 * Put the returned data into the Linux representation of the TCE
 * table data.
 * The hardware TCE table comes in three flavors:
 * 1. TCE table shared between buses.
 * 2. TCE table per bus.
 * 3. TCE table per IOA.
 */
void iommu_table_getparms_iSeries(unsigned long busno,
				  unsigned char slotno,
				  unsigned char virtbus,
				  struct iommu_table *tbl)
{
	struct iommu_table_cb *parms;

	parms = kzalloc(sizeof(*parms), GFP_KERNEL);
	if (parms == NULL)
		panic("PCI_DMA: TCE Table Allocation failed.");

	parms->itc_busno = busno;
	parms->itc_slotno = slotno;
	parms->itc_virtbus = virtbus;

	HvCallXm_getTceTableParms(iseries_hv_addr(parms));

	if (parms->itc_size == 0)
		panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);

	/* itc_size is in pages worth of table, it_size is in # of entries */
	tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
	tbl->it_busno = parms->itc_busno;
	tbl->it_offset = parms->itc_offset;
	tbl->it_index = parms->itc_index;
	tbl->it_blocksize = 1;
	tbl->it_type = virtbus ? TCE_VB : TCE_PCI;

	kfree(parms);
}

#ifdef CONFIG_PCI
/*
 * This function compares the known tables to find an iommu_table
 * that has already been built for hardware TCEs.
 */
static struct iommu_table *iommu_table_find(struct iommu_table *tbl)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);
		struct iommu_table *it;

		if (pdn == NULL)
			continue;
		it = pdn->iommu_table;
		if ((it != NULL) &&
		    (it->it_type == TCE_PCI) &&
		    (it->it_offset == tbl->it_offset) &&
		    (it->it_index == tbl->it_index) &&
		    (it->it_size == tbl->it_size)) {
			of_node_put(node);
			return it;
		}
	}
	return NULL;
}

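/*
 * Set up the DMA window for a PCI device: get the table parameters
 * from the hypervisor, reuse a matching table that has already been
 * built if one exists, otherwise initialize a new one, and hang the
 * result off the device.
 */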
void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn)
{
	struct iommu_table *tbl;
	struct pci_dn *pdn = PCI_DN(dn);
	const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);

	BUG_ON(lsn == NULL);

	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);

	iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);

	/* Look for existing tce table */
	pdn->iommu_table = iommu_table_find(tbl);
	if (pdn->iommu_table == NULL)
		pdn->iommu_table = iommu_init_table(tbl, -1);
	else
		kfree(tbl);
	pdev->dev.archdata.dma_data = pdn->iommu_table;
}
#endif

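/*
 * TCE tables for the virtual bus: one half of the bus 255 table is
 * dedicated to virtual ethernet, the other half to the remaining
 * VIO devices (see iommu_vio_init() below).
 */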
static struct iommu_table veth_iommu_table;
static struct iommu_table vio_iommu_table;

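/*
 * Allocate consistent memory for hypervisor/VIO use, mapped through
 * the VIO TCE table.
 */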
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
				DMA_32BIT_MASK, flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);

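/*
 * Free memory obtained from iseries_hv_alloc().
 */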
void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(iseries_hv_free);

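/*
 * Map a kernel buffer for DMA through the VIO TCE table and return
 * the resulting DMA address.
 */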
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
			enum dma_data_direction direction)
{
	return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
			      (unsigned long)vaddr % PAGE_SIZE, size,
			      DMA_32BIT_MASK, direction, NULL);
}

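/*
 * Tear down a mapping created by iseries_hv_map().
 */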
void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
}

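/*
 * Fetch the TCE table for the virtual bus (bus 255) and split it in
 * half: the first half is used for virtual ethernet, the second half
 * for the remaining VIO devices.
 */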
void __init iommu_vio_init(void)
{
	iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
	veth_iommu_table.it_size /= 2;
	vio_iommu_table = veth_iommu_table;
	vio_iommu_table.it_offset += veth_iommu_table.it_size;

	if (!iommu_init_table(&veth_iommu_table, -1))
		printk("Virtual Bus VETH TCE table failed.\n");
	if (!iommu_init_table(&vio_iommu_table, -1))
		printk("Virtual Bus VIO TCE table failed.\n");
}

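/*
 * Pick the TCE table a VIO device should use: devices of type
 * "network" (virtual ethernet) get the dedicated veth table,
 * everything else shares the VIO table.
 */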
struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
{
	if (strcmp(dev->type, "network") == 0)
		return &veth_iommu_table;
	return &vio_iommu_table;
}

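/*
 * Install the iSeries TCE build/free callbacks in ppc_md and use the
 * generic IOMMU DMA ops for PCI.
 */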
void iommu_init_early_iSeries(void)
{
	ppc_md.tce_build = tce_build_iSeries;
	ppc_md.tce_free = tce_free_iSeries;

	set_pci_dma_ops(&dma_iommu_ops);
}