/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	memset(ret, 0, size);
	/*
	 * Pages from the page allocator may have data present in
	 * cache. So flush the cache before using uncached memory.
	 */
	dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);
	return ret_nocache;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	if (!dma_release_from_coherent(dev, order, vaddr)) {
		WARN_ON(irqs_disabled());	/* for portability */
		for (k = 0; k < (1 << order); k++)
			__free_pages(pfn_to_page(pfn + k), 0);
		iounmap(vaddr);
	}
}
EXPORT_SYMBOL(dma_free_coherent);
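
/*
 * Illustrative sketch (not part of the original file): how a driver
 * might pair the two exports above.  The device pointer, buffer size
 * and usage are hypothetical.
 */
#if 0	/* example only */
static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Returns an uncached mapping for the CPU; the bus address for
	 * the device is written to ring_dma. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access 'ring' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif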

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
	void *p1addr = vaddr;
#else
	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
#endif

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		__flush_invalidate_region(p1addr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__flush_wback_region(p1addr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__flush_purge_region(p1addr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
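
/*
 * Illustrative sketch (not part of the original file): manual cache
 * maintenance around a DMA transfer on a cached buffer.  The buffer,
 * length and transfer are hypothetical; most drivers go through the
 * dma_map_*() interfaces rather than calling this directly.
 */
#if 0	/* example only */
static void example_sync(struct device *dev, void *buf, size_t len)
{
	/* CPU filled the buffer: write dirty lines back before the device reads it. */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

	/* ... start the transfer, wait for completion ... */

	/* Device filled the buffer: invalidate so the CPU sees the new data. */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}
#endif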

static int __init memchunk_setup(char *str)
{
	return 1; /* accept anything that begins with "memchunk." */
}
__setup("memchunk.", memchunk_setup);

static void __init memchunk_cmdline_override(char *name, unsigned long *sizep)
{
	char *p = boot_command_line;
	int k = strlen(name);

	while ((p = strstr(p, "memchunk."))) {
		p += 9; /* strlen("memchunk.") */
		if (!strncmp(name, p, k) && p[k] == '=') {
			p += k + 1;
			*sizep = memparse(p, NULL);
			pr_info("%s: forcing memory chunk size to 0x%08lx\n",
				name, *sizep);
			break;
		}
	}
}
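
/*
 * Example (derived from the parsing above; the device name "vpu" is
 * hypothetical): booting with "memchunk.vpu=2m" on the kernel command
 * line forces the chunk registered under the name "vpu" to 2 MiB, and
 * "memchunk.vpu=0" disables the allocation entirely (see the !memsize
 * check below).
 */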

int __init platform_resource_setup_memory(struct platform_device *pdev,
					  char *name, unsigned long memsize)
{
	struct resource *r;
	dma_addr_t dma_handle;
	void *buf;

	r = pdev->resource + pdev->num_resources - 1;
	if (r->flags) {
		pr_warning("%s: unable to find empty space for resource\n",
			name);
		return -EINVAL;
	}

	memchunk_cmdline_override(name, &memsize);
	if (!memsize)
		return 0;

	buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
	if (!buf) {
		pr_warning("%s: unable to allocate memory\n", name);
		return -ENOMEM;
	}

	memset(buf, 0, memsize);

	r->flags = IORESOURCE_MEM;
	r->start = dma_handle;
	r->end = r->start + memsize - 1;
	r->name = name;
	return 0;
}
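
/*
 * Illustrative sketch (not part of the original file): typical board
 * setup usage.  The platform device, register window and 1 MiB size are
 * hypothetical; the last resource is left zeroed so the flags check
 * above finds an empty slot to fill in.
 */
#if 0	/* example only */
static struct resource example_resources[2] = {
	[0] = {
		.start	= 0xfe900000,
		.end	= 0xfe9000ff,
		.flags	= IORESOURCE_MEM,
	},
	/* [1] left empty: filled in by platform_resource_setup_memory() */
};

static struct platform_device example_device = {
	.name		= "example",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_resources),
	.resource	= example_resources,
};

static int __init example_devices_setup(void)
{
	platform_resource_setup_memory(&example_device, "example", 1 << 20);
	return platform_device_register(&example_device);
}
device_initcall(example_devices_setup);
#endif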