// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Base kernel context APIs
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_ctx_sched.h>
#include <mali_kbase_mem_pool_group.h>
#include <tl/mali_kbase_timeline.h>
#include <mmu/mali_kbase_mmu.h>
#include <context/mali_kbase_context_internal.h>

/**
 * find_process_node - Traverse the process rb_tree to find whether the
 *                     process already exists in it.
 *
 * @node: Pointer to the root node from which to start the search.
 * @tgid: Thread group PID to search for.
 *
 * Return: Pointer to the matching kbase_process if it exists, otherwise NULL.
 */
static struct kbase_process *find_process_node(struct rb_node *node, pid_t tgid)
{
	struct kbase_process *kprcs = NULL;

	/* Check if the kctx creation request is from an existing process. */
	while (node) {
		struct kbase_process *prcs_node =
			rb_entry(node, struct kbase_process, kprcs_node);
		if (prcs_node->tgid == tgid) {
			kprcs = prcs_node;
			break;
		}

		if (tgid < prcs_node->tgid)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	return kprcs;
}

/**
 * kbase_insert_kctx_to_process - Initialise the kbase process context.
 *
 * @kctx: Pointer to kbase context.
 *
 * Here we initialise the per-process rb_tree managed by kbase_device.
 * We maintain an rb_tree with a node for each unique process that gets
 * created, and each process maintains a list of its kbase contexts.
 * This setup is currently used by the kernel trace functionality
 * to trace and visualise GPU memory consumption.
 *
 * Return: 0 on success, or an error number on failure.
 */
static int kbase_insert_kctx_to_process(struct kbase_context *kctx)
{
	struct rb_root *const prcs_root = &kctx->kbdev->process_root;
	const pid_t tgid = kctx->tgid;
	struct kbase_process *kprcs = NULL;

	lockdep_assert_held(&kctx->kbdev->kctx_list_lock);

	kprcs = find_process_node(prcs_root->rb_node, tgid);

	/* If the kctx is from a new process then create a new kbase_process
	 * and add it to the process rb_tree of the kbase_device.
	 */
	if (!kprcs) {
		struct rb_node **new = &prcs_root->rb_node, *parent = NULL;

		kprcs = kzalloc(sizeof(*kprcs), GFP_KERNEL);
		if (kprcs == NULL)
			return -ENOMEM;
		kprcs->tgid = tgid;
		INIT_LIST_HEAD(&kprcs->kctx_list);
		kprcs->dma_buf_root = RB_ROOT;
		kprcs->total_gpu_pages = 0;

		while (*new) {
			struct kbase_process *prcs_node;

			parent = *new;
			prcs_node = rb_entry(parent, struct kbase_process,
					     kprcs_node);
			if (tgid < prcs_node->tgid)
				new = &(*new)->rb_left;
			else
				new = &(*new)->rb_right;
		}
		rb_link_node(&kprcs->kprcs_node, parent, new);
		rb_insert_color(&kprcs->kprcs_node, prcs_root);
	}

	kctx->kprcs = kprcs;
	list_add(&kctx->kprcs_link, &kprcs->kctx_list);

	return 0;
}

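/**
 * kbase_context_common_init - Perform the initialisation common to all
 *                             kbase contexts.
 *
 * @kctx: Pointer to the kbase context being initialised.
 *
 * Flags the context creation as a disjoint event, initialises the context's
 * locks, counters, event queues and cookie bitmap, records the creating
 * thread's tgid/pid, assigns a unique context id and registers the context
 * with its owning process via kbase_insert_kctx_to_process().
 *
 * Return: 0 on success, or an error number on failure.
 */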
int kbase_context_common_init(struct kbase_context *kctx)
{
	const unsigned long cookies_mask = KBASE_COOKIE_MASK;
	int err = 0;

	/* creating a context is considered a disjoint event */
	kbase_disjoint_event(kctx->kbdev);

	kctx->as_nr = KBASEP_AS_NR_INVALID;

	atomic_set(&kctx->refcount, 0);

	spin_lock_init(&kctx->mm_update_lock);
	kctx->process_mm = NULL;
	atomic_set(&kctx->nonmapped_pages, 0);
	atomic_set(&kctx->permanent_mapped_pages, 0);
	kctx->tgid = current->tgid;
	kctx->pid = current->pid;

	atomic_set(&kctx->used_pages, 0);

	mutex_init(&kctx->reg_lock);

	spin_lock_init(&kctx->mem_partials_lock);
	INIT_LIST_HEAD(&kctx->mem_partials);

	spin_lock_init(&kctx->waiting_soft_jobs_lock);
	INIT_LIST_HEAD(&kctx->waiting_soft_jobs);

	init_waitqueue_head(&kctx->event_queue);
	atomic_set(&kctx->event_count, 0);
#if !MALI_USE_CSF
	atomic_set(&kctx->event_closed, false);
#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
	atomic_set(&kctx->jctx.work_id, 0);
#endif
#endif

	bitmap_copy(kctx->cookies, &cookies_mask, BITS_PER_LONG);

	kctx->id = atomic_add_return(1, &(kctx->kbdev->ctx_num)) - 1;

	mutex_lock(&kctx->kbdev->kctx_list_lock);

	err = kbase_insert_kctx_to_process(kctx);
	if (err)
		dev_err(kctx->kbdev->dev,
			"(err:%d) failed to insert kctx to kbase_process\n", err);

	mutex_unlock(&kctx->kbdev->kctx_list_lock);

	return err;
}

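/**
 * kbase_context_add_to_dev_list - Add a context to the device's context list.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Links the context into the device's kctx_list under kctx_list_lock and
 * notifies the timeline of the newly created context.
 *
 * Return: 0 on success, or -EINVAL if @kctx or its device pointer is NULL.
 */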
int kbase_context_add_to_dev_list(struct kbase_context *kctx)
{
	if (WARN_ON(!kctx))
		return -EINVAL;

	if (WARN_ON(!kctx->kbdev))
		return -EINVAL;

	mutex_lock(&kctx->kbdev->kctx_list_lock);
	list_add(&kctx->kctx_list_link, &kctx->kbdev->kctx_list);
	mutex_unlock(&kctx->kbdev->kctx_list_lock);

	kbase_timeline_post_kbase_context_create(kctx);

	return 0;
}

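/**
 * kbase_context_remove_from_dev_list - Remove a context from the device's
 *                                      context list.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Counterpart of kbase_context_add_to_dev_list(): informs the timeline that
 * the context is about to be destroyed and unlinks it from the device's
 * kctx_list under kctx_list_lock.
 */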
void kbase_context_remove_from_dev_list(struct kbase_context *kctx)
{
	if (WARN_ON(!kctx))
		return;

	if (WARN_ON(!kctx->kbdev))
		return;

	kbase_timeline_pre_kbase_context_destroy(kctx);

	mutex_lock(&kctx->kbdev->kctx_list_lock);
	list_del_init(&kctx->kctx_list_link);
	mutex_unlock(&kctx->kbdev->kctx_list_lock);
}

/**
 * kbase_remove_kctx_from_process - Remove a terminating context from
 *                                  the process list.
 *
 * @kctx: Pointer to kbase context.
 *
 * Remove the context from the list of contexts maintained under its kbase
 * process. If that list becomes empty, there are no outstanding contexts
 * left and the process node itself can be removed as well.
 */
static void kbase_remove_kctx_from_process(struct kbase_context *kctx)
{
	struct kbase_process *kprcs = kctx->kprcs;

	lockdep_assert_held(&kctx->kbdev->kctx_list_lock);
	list_del(&kctx->kprcs_link);

	/* If there are no outstanding contexts in the current process node,
	 * we can remove it from the process rb_tree.
	 */
	if (list_empty(&kprcs->kctx_list)) {
		rb_erase(&kprcs->kprcs_node, &kctx->kbdev->process_root);
		/* Sanity checks: the terminating process should not hold
		 * any GPU memory.
		 */
		WARN_ON(kprcs->total_gpu_pages);
		WARN_ON(!RB_EMPTY_ROOT(&kprcs->dma_buf_root));
		kfree(kprcs);
	}
}

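/**
 * kbase_context_common_term - Perform the termination common to all
 *                             kbase contexts.
 *
 * @kctx: Pointer to the kbase context being terminated.
 *
 * Removes the context from the scheduler, warns if it still has pages in
 * use, detaches it from its owning process and emits the context-destroy
 * trace event.
 */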
void kbase_context_common_term(struct kbase_context *kctx)
{
	unsigned long flags;
	int pages;

	mutex_lock(&kctx->kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	kbase_ctx_sched_remove_ctx(kctx);
	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
	mutex_unlock(&kctx->kbdev->mmu_hw_mutex);

	pages = atomic_read(&kctx->used_pages);
	if (pages != 0)
		dev_warn(kctx->kbdev->dev,
			"%s: %d pages in use!\n", __func__, pages);

	WARN_ON(atomic_read(&kctx->nonmapped_pages) != 0);

	mutex_lock(&kctx->kbdev->kctx_list_lock);
	kbase_remove_kctx_from_process(kctx);
	mutex_unlock(&kctx->kbdev->kctx_list_lock);

	KBASE_KTRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, 0u);
}

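/**
 * kbase_context_mem_pool_group_init - Initialise the context's memory pools.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Creates the per-context group of memory pools, using the device's pool
 * defaults and the device-level pools.
 *
 * Return: 0 on success, or the error returned by kbase_mem_pool_group_init().
 */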
int kbase_context_mem_pool_group_init(struct kbase_context *kctx)
{
	return kbase_mem_pool_group_init(&kctx->mem_pools,
		kctx->kbdev,
		&kctx->kbdev->mem_pool_defaults,
		&kctx->kbdev->mem_pools);
}

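/**
 * kbase_context_mem_pool_group_term - Terminate the context's memory pools.
 *
 * @kctx: Pointer to the kbase context.
 */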
void kbase_context_mem_pool_group_term(struct kbase_context *kctx)
{
	kbase_mem_pool_group_term(&kctx->mem_pools);
}

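/**
 * kbase_context_mmu_init - Initialise the MMU state of the context.
 *
 * @kctx: Pointer to the kbase context.
 *
 * The memory group used for the context's MMU tables is derived from the
 * context's creation flags.
 *
 * Return: the result of kbase_mmu_init(), 0 on success.
 */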
int kbase_context_mmu_init(struct kbase_context *kctx)
{
	return kbase_mmu_init(
		kctx->kbdev, &kctx->mmu, kctx,
		kbase_context_mmu_group_id_get(kctx->create_flags));
}

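/**
 * kbase_context_mmu_term - Terminate the MMU state of the context.
 *
 * @kctx: Pointer to the kbase context.
 */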
void kbase_context_mmu_term(struct kbase_context *kctx)
{
	kbase_mmu_term(kctx->kbdev, &kctx->mmu);
}

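/**
 * kbase_context_mem_alloc_page - Allocate the context's aliasing sink page.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Allocates a page from the KBASE_MEM_GROUP_SINK small-page pool and records
 * its tagged physical address as the context's aliasing sink page.
 *
 * Return: 0 on success, -ENOMEM if the page could not be allocated.
 */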
int kbase_context_mem_alloc_page(struct kbase_context *kctx)
{
	struct page *p;

	p = kbase_mem_alloc_page(&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK]);
	if (!p)
		return -ENOMEM;

	kctx->aliasing_sink_page = as_tagged(page_to_phys(p));

	return 0;
}

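/**
 * kbase_context_mem_pool_free - Release the context's aliasing sink page.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Returns the aliasing sink page to the KBASE_MEM_GROUP_SINK small-page pool
 * once it can no longer be mapped.
 */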
void kbase_context_mem_pool_free(struct kbase_context *kctx)
{
	/* drop the aliasing sink page now that it can't be mapped anymore */
	kbase_mem_pool_free(
		&kctx->mem_pools.small[KBASE_MEM_GROUP_SINK],
		as_page(kctx->aliasing_sink_page),
		false);
}

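/**
 * kbase_context_sticky_resource_term - Release sticky resources and any
 *                                      pending region setups.
 *
 * @kctx: Pointer to the kbase context.
 *
 * Terminates the context's sticky resources and then frees every region
 * setup that is still pending, i.e. every cookie no longer marked as free
 * in the context's cookie bitmap.
 */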
void kbase_context_sticky_resource_term(struct kbase_context *kctx)
{
	unsigned long pending_regions_to_clean;

	kbase_gpu_vm_lock(kctx);
	kbase_sticky_resource_term(kctx);

	/* free pending region setups */
	pending_regions_to_clean = KBASE_COOKIE_MASK;
	bitmap_andnot(&pending_regions_to_clean, &pending_regions_to_clean,
		      kctx->cookies, BITS_PER_LONG);
	while (pending_regions_to_clean) {
		unsigned int cookie = find_first_bit(&pending_regions_to_clean,
				BITS_PER_LONG);

		if (!WARN_ON(!kctx->pending_regions[cookie])) {
			dev_dbg(kctx->kbdev->dev, "Freeing pending unmapped region\n");
			kbase_mem_phy_alloc_put(
				kctx->pending_regions[cookie]->cpu_alloc);
			kbase_mem_phy_alloc_put(
				kctx->pending_regions[cookie]->gpu_alloc);
			kfree(kctx->pending_regions[cookie]);

			kctx->pending_regions[cookie] = NULL;
		}

		bitmap_clear(&pending_regions_to_clean, cookie, 1);
	}
	kbase_gpu_vm_unlock(kctx);
}