// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "gunyah_vm_mgr_cma_mem: " fmt
#include <linux/anon_inodes.h>
#include <linux/cma.h>
#include <linux/file.h>
#include <linux/miscdevice.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

#include "vm_mgr.h"

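/*
 * struct gunyah_cma - per memory-region CMA pool exposed as a misc device
 * @dev: child device that owns the attached CMA area (dev.cma_area)
 * @file: anon inode file handed to userspace by GH_ANDROID_CREATE_CMA_MEM_FD
 * @page: first page of the current cma_alloc() allocation, NULL if none
 * @miscdev: the /dev node userspace opens to request a memory fd
 * @list: entry in the parent's gunyah_cma_children list
 * @max_size: size of the reserved-memory region, used as the file size
 * @mapped_size: length actually allocated and mapped by the client
 */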
struct gunyah_cma {
	struct device dev;
	struct file *file;
	struct page *page;
	struct miscdevice miscdev;
	struct list_head list;
	unsigned long max_size;
	unsigned long mapped_size;
};

struct gunyah_cma_parent {
	struct list_head gunyah_cma_children;
};

/*
 * gunyah_cma_alloc - Allocate the CMA region backing a guest memory fd.
 * @cma: the gunyah cma memory
 * @len: the size of the cma region
 *
 * Uses cma_alloc() to allocate a contiguous memory region of size @len.
 *
 * Return: 0 on success or an error code.
 */
static int gunyah_cma_alloc(struct gunyah_cma *cma, loff_t len)
{
	pgoff_t pagecount = len >> PAGE_SHIFT;
	unsigned long align = get_order(len);
	loff_t max_size;

	if (cma->page)
		return -EINVAL;

	max_size = i_size_read(file_inode(cma->file));
	if (len > max_size)
		return -EINVAL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma->page = cma_alloc(cma->dev.cma_area, pagecount, align, false);
	if (!cma->page)
		return -ENOMEM;

	if (len < max_size)
		dev_dbg(&cma->dev, "client mapped %lld bytes, less than max %lld bytes\n",
			len, max_size);

	cma->mapped_size = len;

	return 0;
}

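/*
 * gunyah_cma_release - Free the CMA allocation when the memory fd is closed.
 * @inode: inode of the anon file
 * @file: the gunyah cma memory file
 *
 * Return: 0 always.
 */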
static int gunyah_cma_release(struct inode *inode, struct file *file)
{
	struct gunyah_cma *cma = file->private_data;
	unsigned int count = PAGE_ALIGN(cma->mapped_size) >> PAGE_SHIFT;

	if (!cma->page)
		return 0;

	cma_release(cma->dev.cma_area, cma->page, count);
	cma->page = NULL;

	return 0;
}

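/*
 * gunyah_cma_mmap - Allocate the CMA region and map it into the client.
 * @file: the gunyah cma memory file
 * @vma: the client mapping; its length selects the allocation size
 *
 * The allocation happens on the first mmap(): gunyah_cma_alloc() reserves
 * the contiguous region, then every page is inserted into @vma with
 * vm_map_pages_zero().
 *
 * Return: 0 on success or an error code.
 */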
static int gunyah_cma_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gunyah_cma *cma = file->private_data;
	unsigned long len = vma->vm_end - vma->vm_start;
	int nr_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct page **pages;
	int ret, i;

	file_accessed(file);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = gunyah_cma_alloc(cma, len);
	if (ret < 0) {
		kvfree(pages);
		return ret;
	}

	for (i = 0; i < nr_pages; i++)
		pages[i] = nth_page(cma->page, i);

	ret = vm_map_pages_zero(vma, pages, nr_pages);
	if (ret)
		pr_err("Mapping memory failed: %d\n", ret);

	kvfree(pages);
	return ret;
}

static const struct file_operations gunyah_cma_fops = {
	.owner = THIS_MODULE,
	.llseek = generic_file_llseek,
	.mmap = gunyah_cma_mmap,
	.open = generic_file_open,
	.release = gunyah_cma_release,
};
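
/*
 * A minimal userspace sketch of the intended flow (hypothetical device
 * path; the UAPI header defining GH_ANDROID_CREATE_CMA_MEM_FD is assumed):
 *
 *	int devfd = open("/dev/<memory-region-name>", O_RDWR);
 *	int memfd = ioctl(devfd, GH_ANDROID_CREATE_CMA_MEM_FD);
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, memfd, 0);
 *
 * The first mmap() performs the cma_alloc(); releasing the last reference
 * to memfd frees the region via gunyah_cma_release().
 */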
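/*
 * gunyah_cma_reclaim_parcel - Reclaim a shared/lent parcel from the VM.
 * @ghvm: the gunyah vm
 * @vm_parcel: the parcel bookkeeping to reset
 * @b: the binding backed by a gunyah cma memory fd
 *
 * Return: 0 on success or an error code from gunyah_rm_mem_reclaim().
 */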
int gunyah_cma_reclaim_parcel(struct gunyah_vm *ghvm, struct gunyah_vm_parcel *vm_parcel,
			      struct gunyah_vm_binding *b)
{
	struct gunyah_rm_mem_parcel *parcel = &vm_parcel->parcel;
	int ret;

	if (parcel->mem_handle == GUNYAH_MEM_HANDLE_INVAL)
		return 0;

	ret = gunyah_rm_mem_reclaim(ghvm->rm, parcel);
	if (ret) {
		dev_err(ghvm->parent, "Failed to reclaim parcel: %d\n", ret);
		/*
		 * We can't reclaim the pages -- hold onto the pages
		 * forever because we don't know what state the memory
		 * is in.
		 */
		return ret;
	}
	parcel->mem_handle = GUNYAH_MEM_HANDLE_INVAL;
	kfree(parcel->mem_entries);
	kfree(parcel->acl_entries);
	vm_parcel->start = 0;
	vm_parcel->pages = 0;
	b->vm_parcel = NULL;
	fput(b->cma.file);
	return ret;
}

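/*
 * gunyah_cma_share_parcel - Construct and share a parcel backed by CMA pages.
 * @ghvm: the gunyah vm
 * @vm_parcel: the parcel bookkeeping to fill on success
 * @b: the binding naming the cma memory fd, offset and guest address
 * @gfn: starting guest frame number of the range to share
 * @nr: number of pages to share
 *
 * A single physically contiguous mem_entry is built from the binding's CMA
 * allocation and handed to the resource manager via gunyah_rm_mem_share().
 *
 * Return: 0 on success or an error code.
 */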
int gunyah_cma_share_parcel(struct gunyah_vm *ghvm, struct gunyah_vm_parcel *vm_parcel,
			    struct gunyah_vm_binding *b, u64 *gfn, u64 *nr)
{
	struct gunyah_rm_mem_parcel *parcel = &vm_parcel->parcel;
	unsigned long offset;
	struct gunyah_cma *cma;
	struct file *file;
	int ret;

	if ((*nr << PAGE_SHIFT) > b->size)
		return -EINVAL;

	file = fget(b->cma.fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &gunyah_cma_fops) {
		fput(file);
		return -EINVAL;
	}

	cma = file->private_data;
	b->cma.file = file;

	parcel->n_mem_entries = 1;
	parcel->mem_entries = kcalloc(parcel->n_mem_entries, sizeof(parcel->mem_entries[0]),
				      GFP_KERNEL_ACCOUNT);
	if (!parcel->mem_entries) {
		fput(file);
		return -ENOMEM;
	}

	offset = gunyah_gfn_to_gpa(*gfn) - b->guest_phys_addr;
	parcel->mem_entries[0].size = cpu_to_le64(*nr << PAGE_SHIFT);
	/*
	 * @offset and b->cma.offset are byte offsets, so apply them to the
	 * physical address rather than to the struct page pointer (pointer
	 * arithmetic on struct page * advances one page per unit).
	 */
	parcel->mem_entries[0].phys_addr =
		cpu_to_le64(page_to_phys(cma->page) + b->cma.offset + offset);

	ret = gunyah_rm_mem_share(ghvm->rm, parcel);
	if (ret)
		goto free_mem_entries;

	vm_parcel->start = *gfn;
	vm_parcel->pages = *nr;
	b->vm_parcel = vm_parcel;
	return ret;

free_mem_entries:
	kfree(parcel->mem_entries);
	parcel->mem_entries = NULL;
	parcel->n_mem_entries = 0;
	fput(file);
	return ret;
}

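/*
 * gunyah_vm_binding_cma_alloc - Bind a range of guest memory to a cma
 * memory fd.
 * @ghvm: the gunyah vm
 * @cma_map: userspace arguments describing the mapping
 *
 * The binding records the fd, offset, guest physical address, size and
 * share/lend policy, and is inserted into the vm's bindings maple tree
 * keyed by guest frame number.
 *
 * Return: 0 on success or an error code.
 */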
int gunyah_vm_binding_cma_alloc(struct gunyah_vm *ghvm,
				struct gunyah_map_cma_mem_args *cma_map)
{
	struct gunyah_vm_binding *binding;
	struct file *file;
	loff_t max_size;
	int ret = 0;

	if (!cma_map->size || !PAGE_ALIGNED(cma_map->size) ||
	    !PAGE_ALIGNED(cma_map->guest_addr))
		return -EINVAL;

	if (overflows_type(cma_map->guest_addr + cma_map->size, u64))
		return -EOVERFLOW;

	file = fget(cma_map->guest_mem_fd);
	if (!file)
		return -EBADF;

	max_size = i_size_read(file_inode(file));
	if (cma_map->offset + cma_map->size > max_size) {
		fput(file);
		return -EOVERFLOW;
	}
	fput(file);

	binding = kzalloc(sizeof(*binding), GFP_KERNEL_ACCOUNT);
	if (!binding)
		return -ENOMEM;

	binding->mem_type = VM_MEM_CMA;
	binding->cma.fd = cma_map->guest_mem_fd;
	binding->cma.offset = cma_map->offset;
	binding->guest_phys_addr = cma_map->guest_addr;
	binding->label = cma_map->label;
	binding->size = cma_map->size;
	binding->flags = cma_map->flags;
	binding->vm_parcel = NULL;

	if (binding->flags & GUNYAH_MEM_FORCE_LEND)
		binding->share_type = VM_MEM_LEND;
	else
		binding->share_type = VM_MEM_SHARE;

	down_write(&ghvm->bindings_lock);
	ret = mtree_insert_range(&ghvm->bindings,
				 gunyah_gpa_to_gfn(binding->guest_phys_addr),
				 gunyah_gpa_to_gfn(binding->guest_phys_addr + cma_map->size - 1),
				 binding, GFP_KERNEL);

	if (ret != 0)
		kfree(binding);

	up_write(&ghvm->bindings_lock);

	return ret;
}

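/*
 * gunyah_cma_create_mem_fd - Create the anon memory fd handed to userspace.
 * @cma: the gunyah cma pool backing the fd
 *
 * The file size is set to the reserved region's size so clients can
 * discover how much memory may be mapped; the actual cma_alloc() is
 * deferred until the fd is mmap()ed.
 *
 * Return: the new fd on success or an error code.
 */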
static long gunyah_cma_create_mem_fd(struct gunyah_cma *cma)
{
	unsigned long flags = 0;
	struct inode *inode;
	struct file *file;
	int fd, err;

	if (cma->page)
		return -EBUSY;

	flags |= O_CLOEXEC;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	file = anon_inode_create_getfile("[gunyah-cma]", &gunyah_cma_fops,
					 cma, O_RDWR, NULL);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	inode = file->f_inode;
	inode->i_mode |= S_IFREG;
	/* Platform specific size of CMA per VM */
	i_size_write(inode, cma->max_size);

	file->f_flags |= O_LARGEFILE;
	file->f_mapping = inode->i_mapping;
	cma->file = file;
	fd_install(fd, file);

	return fd;
err_put_fd:
	put_unused_fd(fd);
	return err;
}

static long gunyah_cma_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	struct miscdevice *miscdev = filp->private_data;
	struct gunyah_cma *cma = container_of(miscdev, struct gunyah_cma, miscdev);

	switch (cmd) {
	case GH_ANDROID_CREATE_CMA_MEM_FD:
		return gunyah_cma_create_mem_fd(cma);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations gunyah_cma_dev_fops = {
	/* clang-format off */
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gunyah_cma_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= noop_llseek,
	/* clang-format on */
};

static void gunyah_cma_device_release(struct device *dev)
{
	struct gunyah_cma *cma = container_of(dev, struct gunyah_cma, dev);

	kfree(cma);
}

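/*
 * gunyah_cma_probe - Create one CMA pool child per named reserved region.
 *
 * A hypothetical devicetree node matching the properties parsed below
 * (node and region names are illustrative only):
 *
 *	gunyah-vm-mem {
 *		compatible = "gunyah-cma-vm-mem";
 *		memory-region = <&vm_a_mem>, <&vm_b_mem>;
 *		memory-region-names = "vm_a", "vm_b";
 *	};
 *
 * Each name becomes a misc device, and the matching reserved-memory region
 * is attached as that child's CMA area. A failure for one region logs an
 * error and continues with the rest.
 */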
static int gunyah_cma_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	const char **mem_name;
	struct gunyah_cma_parent *pcma;
	int mem_count, i, ret, err = 0;
	struct device_node *mem_node;
	struct reserved_mem *rmem;

	mem_count = of_property_count_strings(node, "memory-region-names");
	if (mem_count <= 0)
		return -EINVAL;

	mem_name = kmalloc_array(mem_count, sizeof(char *), GFP_KERNEL);
	if (!mem_name)
		return -ENOMEM;

	mem_count = of_property_read_string_array(node, "memory-region-names",
						  mem_name, mem_count);
	if (mem_count < 0) {
		err = -EINVAL;
		goto out;
	}

	pcma = devm_kzalloc(dev, sizeof(*pcma), GFP_KERNEL);
	if (!pcma) {
		err = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&pcma->gunyah_cma_children);

	for (i = 0; i < mem_count; i++) {
		struct gunyah_cma *cma;

		rmem = NULL;

		cma = kzalloc(sizeof(*cma), GFP_KERNEL);
		if (!cma) {
			ret = -ENOMEM;
			goto err_continue;
		}

		cma->miscdev.parent = &pdev->dev;
		cma->miscdev.name = mem_name[i];
		cma->miscdev.minor = MISC_DYNAMIC_MINOR;
		cma->miscdev.fops = &gunyah_cma_dev_fops;

		ret = misc_register(&cma->miscdev);
		if (ret) {
			kfree(cma);
			goto err_continue;
		}

		device_initialize(&cma->dev);
		cma->dev.parent = &pdev->dev;
		cma->dev.release = gunyah_cma_device_release;
		cma->dev.init_name = mem_name[i];

		ret = of_reserved_mem_device_init_by_name(&cma->dev,
							  dev->of_node, mem_name[i]);
		if (ret)
			goto err_device;

		mem_node = of_parse_phandle(dev->of_node, "memory-region", i);
		if (mem_node)
			rmem = of_reserved_mem_lookup(mem_node);
		of_node_put(mem_node);
		if (!rmem) {
			dev_err(dev, "Failed to find reserved memory for %s\n", mem_name[i]);
			ret = -EINVAL;
			goto err_device;
		}
		cma->max_size = rmem->size;
		cma->page = NULL;
		list_add(&cma->list, &pcma->gunyah_cma_children);
		dev_dbg(dev, "Created a reserved cma pool for %s\n", mem_name[i]);
		continue;

err_device:
		misc_deregister(&cma->miscdev);
		put_device(&cma->dev);
err_continue:
		dev_err(dev, "Failed to create reserved cma pool for %s: %d\n", mem_name[i], ret);
	}

	platform_set_drvdata(pdev, pcma);

out:
	kfree(mem_name);
	return err;
}

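/* Undo probe: deregister each child's misc device and release its CMA area. */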
static void gunyah_cma_remove(struct platform_device *pdev)
{
	struct gunyah_cma_parent *pcma = platform_get_drvdata(pdev);
	struct gunyah_cma *cma, *iter;

	list_for_each_entry_safe(cma, iter, &pcma->gunyah_cma_children, list) {
		misc_deregister(&cma->miscdev);
		of_reserved_mem_device_release(&cma->dev);
		put_device(&cma->dev);
	}
}

static const struct of_device_id gunyah_cma_match_table[] = {
	{ .compatible = "gunyah-cma-vm-mem" },
	{}
};

static struct platform_driver gunyah_cma_driver = {
	.probe = gunyah_cma_probe,
	.remove_new = gunyah_cma_remove,
	.driver = {
		.name = "gunyah_cma_vm_mem_driver",
		.of_match_table = gunyah_cma_match_table,
	},
};

int gunyah_cma_mem_init(void)
{
	return platform_driver_register(&gunyah_cma_driver);
}

void gunyah_cma_mem_exit(void)
{
	platform_driver_unregister(&gunyah_cma_driver);
}