/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/module.h>
#include <linux/dma-attrs.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
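/*
 * Note: these helpers assume each scatterlist entry refers to memory with
 * a valid kernel virtual address (true on ia64, where RAM sits in the
 * kernel direct map); SG_ENT_PHYS_ADDRESS() just converts that address
 * to the physical address handed to the bus provider.
 */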

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function. Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}
EXPORT_SYMBOL(sn_dma_supported);

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
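
/*
 * Illustrative sketch (not part of the original file): a PCI driver would
 * normally reach sn_dma_supported()/sn_dma_set_mask() through the generic
 * DMA API rather than calling them directly, e.g. from its probe routine:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -EIO;
 *
 * dma_set_mask() returns 0 on success; on SN platforms the underlying
 * support check ends up in this file.
 */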

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller). See Documentation/DMA-API.txt for
 * more information.
 */
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory, preferring the node the bus is attached to.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_node(node, flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */
	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
EXPORT_SYMBOL(sn_dma_alloc_coherent);
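
/*
 * Illustrative sketch (hypothetical caller, not from this file): a driver
 * allocating a small command ring through the generic API, which lands in
 * sn_dma_alloc_coherent()/sn_dma_free_coherent() here:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(use ring / ring_dma, then)
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */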

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t dma_handle)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);

/**
 * sn_dma_map_single_attrs - map a single page for DMA
 * @dev: device to map for
 * @cpu_addr: kernel virtual address of the region to map
 * @size: size of the region
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region pointed to by @cpu_addr for DMA and return the
 * DMA address.
 *
 * We map this to the one-step pcibr_dmamap_trans interface rather than
 * the two-step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc for the later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are made with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001.)
 *
 * TODO: simplify our interface;
 * figure out how to save the dmamap handle so we can use the two-step
 * interface.
 */
dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
				   size_t size, int direction,
				   struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}
EXPORT_SYMBOL(sn_dma_map_single_attrs);
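
/*
 * Illustrative sketch (hypothetical caller): requesting the write-barrier
 * behavior described above via dma attributes. "buf" and "len" are assumed
 * to come from the caller:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_addr_t dma_addr;
 *
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	dma_addr = dma_map_single_attrs(&pdev->dev, buf, len,
 *					DMA_BIDIRECTIONAL, &attrs);
 *	if (dma_mapping_error(&pdev->dev, dma_addr))
 *		return -ENOMEM;
 */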

/**
 * sn_dma_unmap_single_attrs - unmap a DMA-mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of region
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
			       size_t size, int direction,
			       struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single_attrs);

/**
 * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
 * @dev: device to unmap for
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @direction: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
			   int nhwentries, int direction,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, direction);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}
EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);

/**
 * sn_dma_map_sg_attrs - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @direction: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are made with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001.)
 *
 * Maps each entry of @sgl for DMA.
 */
int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
			int nhwentries, int direction, struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Set up a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;

		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg_attrs(dev, saved_sg, i,
						      direction, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
EXPORT_SYMBOL(sn_dma_map_sg_attrs);
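
/*
 * Illustrative sketch (hypothetical caller): mapping and unmapping a
 * scatterlist through the generic API, which reaches the routines above
 * on SN. "sglist" and "nents" are assumed to come from the caller:
 *
 *	int count = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	(program the device using sg_dma_address()/sg_dma_len(), then)
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 */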

/*
 * SN platforms are always cache coherent, so the DMA sync operations
 * have nothing to do beyond sanity-checking the bus type.
 */
void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);

void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);

void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			    int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);

void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);

int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
EXPORT_SYMBOL(sn_dma_mapping_error);

u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_64BIT_MASK;
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call, which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}
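
/*
 * Illustrative sketch (not from the original file): reading a legacy I/O
 * port through this interface, e.g. the VGA input status register at
 * port 0x3da. "bus" is assumed to be a valid struct pci_bus:
 *
 *	u32 val;
 *
 *	if (sn_pci_legacy_read(bus, 0x3da, &val, 1) != 1)
 *		return -EIO;
 */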

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */
	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call, which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms and should be removed at some point in the future.
	 */
	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}