// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_csf_protected_memory.h"
#include <linux/protected_memory_allocator.h>

#if IS_ENABLED(CONFIG_OF)
#include <linux/of_platform.h>
#endif

/*
 * Look up the protected memory allocator referenced by the GPU node's
 * "protected-memory-allocator" phandle and take a reference on its module
 * so that it cannot be unloaded while the GPU driver may use it.
 */
int kbase_csf_protected_memory_init(struct kbase_device *const kbdev)
{
	int err = 0;

#if IS_ENABLED(CONFIG_OF)
	struct device_node *pma_node = of_parse_phandle(kbdev->dev->of_node,
					"protected-memory-allocator", 0);
	if (!pma_node) {
		dev_info(kbdev->dev, "Protected memory allocator not available\n");
	} else {
		struct platform_device *const pdev =
				of_find_device_by_node(pma_node);

		kbdev->csf.pma_dev = NULL;
		if (!pdev) {
			dev_err(kbdev->dev, "Platform device for Protected memory allocator not found\n");
		} else {
			kbdev->csf.pma_dev = platform_get_drvdata(pdev);
			if (!kbdev->csf.pma_dev) {
				/*
				 * The platform device exists but its driver
				 * has not published its data yet: ask to be
				 * probed again once it has.
				 */
				dev_info(kbdev->dev, "Protected memory allocator is not ready\n");
				err = -EPROBE_DEFER;
			} else if (!try_module_get(kbdev->csf.pma_dev->owner)) {
				/* Pin the allocator module while we may call into it. */
				dev_err(kbdev->dev, "Failed to get Protected memory allocator module\n");
				err = -ENODEV;
			} else {
				dev_info(kbdev->dev, "Protected memory allocator successfully loaded\n");
			}
		}
		of_node_put(pma_node);
	}
#endif

	return err;
}
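
/*
 * The lookup above expects the referenced platform device's driver to have
 * published a struct protected_memory_allocator_device via
 * platform_set_drvdata() from its probe callback. A minimal sketch of that
 * provider side is shown below; it is illustrative only, and the my_pma_*
 * names are hypothetical:
 *
 *	static int my_pma_probe(struct platform_device *pdev)
 *	{
 *		struct protected_memory_allocator_device *pma_dev;
 *
 *		pma_dev = devm_kzalloc(&pdev->dev, sizeof(*pma_dev),
 *				       GFP_KERNEL);
 *		if (!pma_dev)
 *			return -ENOMEM;
 *
 *		pma_dev->owner = THIS_MODULE;
 *		pma_dev->ops.pma_alloc_page = my_pma_alloc_page;
 *		pma_dev->ops.pma_free_page = my_pma_free_page;
 *		pma_dev->ops.pma_get_phys_addr = my_pma_get_phys_addr;
 *
 *		platform_set_drvdata(pdev, pma_dev);
 *		return 0;
 *	}
 */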

/* Drop the module reference taken in kbase_csf_protected_memory_init(). */
void kbase_csf_protected_memory_term(struct kbase_device *const kbdev)
{
	if (kbdev->csf.pma_dev)
		module_put(kbdev->csf.pma_dev->owner);
}

/*
 * Allocate @num_pages protected memory pages and return their physical
 * addresses through @phys. Returns the array of allocation handles, or NULL
 * on failure, in which case any pages already allocated have been freed.
 */
struct protected_memory_allocation **
kbase_csf_protected_memory_alloc(
		struct kbase_device *const kbdev,
		struct tagged_addr *phys,
		size_t num_pages)
{
	size_t i;
	struct protected_memory_allocator_device *pma_dev =
		kbdev->csf.pma_dev;
	struct protected_memory_allocation **pma =
		kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);

	if (WARN_ON(!pma_dev) || WARN_ON(!phys) || !pma)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		/* Allocate one small (4KB) page at a time. */
		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev,
				KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);
		if (!pma[i])
			break;

		phys[i] = as_tagged(pma_dev->ops.pma_get_phys_addr(pma_dev,
				pma[i]));
	}

	if (i != num_pages) {
		/* Partial allocation: release what was obtained and fail. */
		kbase_csf_protected_memory_free(kbdev, pma, i);
		return NULL;
	}

	return pma;
}
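
/*
 * Illustrative call sequence (a sketch, not code from this driver; "num" and
 * the surrounding error-handling context are hypothetical):
 *
 *	struct tagged_addr *phys = kcalloc(num, sizeof(*phys), GFP_KERNEL);
 *	struct protected_memory_allocation **pma;
 *
 *	if (!phys)
 *		return -ENOMEM;
 *	pma = kbase_csf_protected_memory_alloc(kbdev, phys, num);
 *	if (!pma)
 *		return -ENOMEM;
 *
 *	... map the physical addresses in phys[] for the GPU ...
 *
 *	kbase_csf_protected_memory_free(kbdev, pma, num);
 *	kfree(phys);
 */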

/*
 * Free @num_pages pages obtained from kbase_csf_protected_memory_alloc()
 * along with the handle array itself.
 */
void kbase_csf_protected_memory_free(
		struct kbase_device *const kbdev,
		struct protected_memory_allocation **pma,
		size_t num_pages)
{
	size_t i;
	struct protected_memory_allocator_device *pma_dev =
		kbdev->csf.pma_dev;

	if (WARN_ON(!pma_dev) || WARN_ON(!pma))
		return;

	for (i = 0; i < num_pages; i++)
		pma_dev->ops.pma_free_page(pma_dev, pma[i]);

	kfree(pma);
}