/*
 * Copyright (C) 2010-2011, 2013-2014, 2016-2017 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else /* pre 2.6.26 the file was in the arch specific location */
#include <asm/semaphore.h>
#endif

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include "ump_kernel_common.h"
#include "ump_kernel_memory_backend.h"


typedef struct os_allocator {
	struct semaphore mutex;
	u32 num_pages_max;       /**< Maximum number of pages to allocate from the OS */
	u32 num_pages_allocated; /**< Number of pages allocated from the OS */
} os_allocator;
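
/*
 * Note: the mutex field is a plain counting semaphore initialised to 1 and
 * used as a binary mutex via down_interruptible()/up(), which appears to
 * predate consistent use of struct mutex in this driver. num_pages_max caps
 * the total number of pages this backend may hand out across all
 * allocations.
 */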

static void os_free(void *ctx, ump_dd_mem *descriptor);
static int os_allocate(void *ctx, ump_dd_mem *descriptor);
static void os_memory_backend_destroy(ump_memory_backend *backend);
static u32 os_stat(struct ump_memory_backend *backend);


/*
 * Create OS memory backend
 */
ump_memory_backend *ump_os_memory_backend_create(const int max_allocation)
{
	ump_memory_backend *backend;
	os_allocator *info;

	info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
	if (NULL == info) {
		return NULL;
	}

	info->num_pages_max = max_allocation >> PAGE_SHIFT;
	info->num_pages_allocated = 0;

	sema_init(&info->mutex, 1);

	backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
	if (NULL == backend) {
		kfree(info);
		return NULL;
	}

	backend->ctx = info;
	backend->allocate = os_allocate;
	backend->release = os_free;
	backend->shutdown = os_memory_backend_destroy;
	backend->stat = os_stat;
	backend->pre_allocate_physical_check = NULL;
	backend->adjust_to_mali_phys = NULL;

	return backend;
}
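
/*
 * Usage sketch (hypothetical caller, for illustration only): device setup
 * code is expected to create the backend with the maximum number of bytes
 * it may draw from the OS, e.g.
 *
 *   ump_memory_backend *backend = ump_os_memory_backend_create(64 * 1024 * 1024);
 *
 * and later release it with backend->shutdown(backend). Note that
 * max_allocation is rounded down to a whole number of pages by the
 * right-shift above.
 */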


/*
 * Destroy specified OS memory backend
 */
static void os_memory_backend_destroy(ump_memory_backend *backend)
{
	os_allocator *info = (os_allocator *)backend->ctx;

	DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));

	kfree(info);
	kfree(backend);
}


/*
 * Allocate UMP memory
 */
static int os_allocate(void *ctx, ump_dd_mem *descriptor)
{
	u32 left;
	os_allocator *info;
	int pages_allocated = 0;
	int is_cached;

	BUG_ON(!descriptor);
	BUG_ON(!ctx);

	info = (os_allocator *)ctx;
	left = descriptor->size_bytes;
	is_cached = descriptor->is_cached;

	if (down_interruptible(&info->mutex)) {
		DBG_MSG(1, ("Failed to get mutex in os_allocate\n"));
		return 0; /* failure */
	}

	descriptor->backend_info = NULL;
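	/*
	 * Round the requested size up to a whole number of pages, then convert
	 * to a page count; equivalent to DIV_ROUND_UP(left, PAGE_SIZE).
	 */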
	descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;

	DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));

	descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
	if (NULL == descriptor->block_array) {
		up(&info->mutex);
		DBG_MSG(1, ("Block array could not be allocated\n"));
		return 0; /* failure */
	}

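	/*
	 * Allocation flags: __GFP_ZERO returns zeroed pages and __GFP_REPEAT
	 * asks the allocator to retry harder, as they existed in the kernels
	 * this driver targets (both flags were removed in later kernels).
	 * __GFP_COLD requests a cache-cold page for the uncached mapping case
	 * below.
	 */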
	while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max)) {
		struct page *new_page;

		if (is_cached) {
			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
		} else {
			new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
		}
		if (NULL == new_page) {
			break;
		}

		/* Ensure page caches are flushed. */
		if (is_cached) {
			descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
		} else {
			descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
			descriptor->block_array[pages_allocated].size = PAGE_SIZE;
		}

		DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));

		if (left < PAGE_SIZE) {
			left = 0;
		} else {
			left -= PAGE_SIZE;
		}

		pages_allocated++;
	}

	DBG_MSG(5, ("Alloc for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated, is_cached));

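	/*
	 * Partial allocation: either the OS ran out of pages or this backend's
	 * page budget was hit. Roll back by unmapping (uncached case) and
	 * freeing every page allocated so far; pfn_to_page() recovers the
	 * struct page from the address stored in the block array, which assumes
	 * the DMA address equals the physical address on these platforms.
	 */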
	if (left) {
		DBG_MSG(1, ("Failed to allocate needed pages\n"));

		while (pages_allocated) {
			pages_allocated--;
			if (!is_cached) {
				dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
			}
			__free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT));
		}

		up(&info->mutex);

		/* Release the block array as well so the failed descriptor does not leak it. */
		vfree(descriptor->block_array);
		descriptor->block_array = NULL;

		return 0; /* failure */
	}

	info->num_pages_allocated += pages_allocated;

	DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));

	up(&info->mutex);

	return 1; /* success */
}


/*
 * Free specified UMP memory
 */
static void os_free(void *ctx, ump_dd_mem *descriptor)
{
	os_allocator *info;
	int i;

	BUG_ON(!ctx);
	BUG_ON(!descriptor);

	info = (os_allocator *)ctx;

	BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);

	if (down_interruptible(&info->mutex)) {
		DBG_MSG(1, ("Failed to get mutex in os_free\n"));
		return;
	}
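
	/*
	 * Only the page accounting is updated under the mutex; the per-page
	 * unmap/free below runs outside it. This is safe provided the
	 * descriptor is no longer shared by the time it is released, which
	 * this backend appears to assume.
	 */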
	DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));

	info->num_pages_allocated -= descriptor->nr_blocks;

	up(&info->mutex);

	for (i = 0; i < descriptor->nr_blocks; i++) {
		DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
		if (!descriptor->is_cached) {
			dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
		__free_page(pfn_to_page(descriptor->block_array[i].addr >> PAGE_SHIFT));
	}

	vfree(descriptor->block_array);
}


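/*
 * Report the number of bytes currently allocated from the OS.
 * _MALI_OSK_MALI_PAGE_SIZE is the Mali page size constant, which is
 * expected to match the CPU PAGE_SIZE (4 KiB) on these platforms.
 */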
static u32 os_stat(struct ump_memory_backend *backend)
{
	os_allocator *info;
	info = (os_allocator *)backend->ctx;
	return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
}