/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping.  See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(!dev_is_pci(dev));

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.  Returns 1 on success,
 * 0 if the mask cannot be supported.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(!dev_is_pci(dev));

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

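/*
 * Usage sketch (hypothetical caller, not part of this file): note the
 * 1-on-success convention, unlike the generic dma_set_mask():
 *
 *	if (!sn_dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
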
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 * @attrs: optional dma attributes
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.  On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags,
				   unsigned long attrs)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(!dev_is_pci(dev));

	/*
	 * Allocate the memory, preferably on the node closest to the
	 * device's bus.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = __alloc_pages_node(node,
						flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */

	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}

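/*
 * Usage sketch (hypothetical driver code, not from this file): drivers
 * reach this routine through the generic dma_alloc_coherent() wrapper,
 * e.g. to allocate a command ring:
 *
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */
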
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 * @attrs: optional dma attributes
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle, unsigned long attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(!dev_is_pci(dev));

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page containing the region to map
 * @offset: offset into @page of the region to map
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region at @page + @offset for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are made with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save the dmamap handle so we can use the
 *       two step interface.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(!dev_is_pci(dev));

	phys_addr = __pa(cpu_addr);
	if (attrs & DMA_ATTR_WRITE_BARRIER)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}

/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to sync
 * @dma_addr: DMA address to sync
 * @size: size of region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(!dev_is_pci(dev));

	provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    unsigned long attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(!dev_is_pci(dev));

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = 0;
		sg->dma_length = 0;
	}
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Maps each entry of @sgl for DMA.  Returns the number of entries
 * mapped, or 0 if mapping resources were exhausted (in which case any
 * entries mapped so far are unmapped again).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are made with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 unsigned long attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;

	BUG_ON(!dev_is_pci(dev));

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (attrs & DMA_ATTR_WRITE_BARRIER)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
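
/*
 * Usage sketch (hypothetical driver code, not from this file): callers
 * reach sn_dma_map_sg() through the generic dma_map_sg() wrapper, and
 * successfully mapped entries carry their bus address in
 * sg_dma_address():
 *
 *	int n = dma_map_sg(&pdev->dev, table.sgl, table.nents,
 *			   DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 */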
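
/*
 * SN platforms are fully cache coherent, so the sync operations below
 * have nothing to do beyond checking that they were handed a PCI device.
 */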
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(!dev_is_pci(dev));
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(!dev_is_pci(dev));
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(!dev_is_pci(dev));
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(!dev_is_pci(dev));
}

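/*
 * The map routines above flag failure by returning a zero DMA address,
 * so there is no extra per-mapping error state to report here.
 */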
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

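/*
 * SN can generate 64-bit DMA addresses for any device (64-bit
 * translations never fail), so the required mask is the full 64 bits.
 */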
u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

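/*
 * Return an uncached kernel virtual address for the legacy memory space
 * of @bus, or an ERR_PTR-encoded error if the bus is unknown to SN.
 */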
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

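/*
 * Read @size bytes from legacy I/O port @port on @bus.  Returns the
 * number of bytes read on success or a negative errno on failure.
 */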
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call, which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}

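/*
 * Write @size bytes of @val to legacy I/O port @port on @bus.  Returns
 * the number of bytes written on success or a negative errno on failure.
 */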
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
	 * around hw issues at the pci bus level.  SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call, which should
	 * be present in all proms (but which cannot work around PCI chipset
	 * bugs).  This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
 out:
	return ret;
}

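/* DMA operations vector handed to the generic DMA mapping core. */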
static struct dma_map_ops sn_dma_ops = {
	.alloc			= sn_dma_alloc_coherent,
	.free			= sn_dma_free_coherent,
	.map_page		= sn_dma_map_page,
	.unmap_page		= sn_dma_unmap_page,
	.map_sg			= sn_dma_map_sg,
	.unmap_sg		= sn_dma_unmap_sg,
	.sync_single_for_cpu	= sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= sn_dma_sync_sg_for_cpu,
	.sync_single_for_device	= sn_dma_sync_single_for_device,
	.sync_sg_for_device	= sn_dma_sync_sg_for_device,
	.mapping_error		= sn_dma_mapping_error,
	.dma_supported		= sn_dma_supported,
};

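/* Install the SN DMA operations as the platform's dma_map_ops. */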
void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}