// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2017-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include "mali_kbase_ctx_sched.h"
#include "tl/mali_kbase_tracepoints.h"
#if !MALI_USE_CSF
#include <mali_kbase_hwaccess_jm.h>
#endif

/* Helper for ktrace */
#if KBASE_KTRACE_ENABLE
static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
{
	return atomic_read(&kctx->refcount);
}
#else /* KBASE_KTRACE_ENABLE */
static int kbase_ktrace_get_ctx_refcnt(struct kbase_context *kctx)
{
	CSTD_UNUSED(kctx);
	return 0;
}
#endif /* KBASE_KTRACE_ENABLE */

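/* kbase_ctx_sched_init - Initialise the context scheduler state of a device
 *
 * @kbdev: The device for which to initialise the state
 *
 * Return: 0
 *
 * Marks every hardware address space as free and clears the AS-to-context
 * mapping.
 */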
int kbase_ctx_sched_init(struct kbase_device *kbdev)
{
	int as_present = (1U << kbdev->nr_hw_address_spaces) - 1;

	/* These two must be recalculated if nr_hw_address_spaces changes
	 * (e.g. for HW workarounds)
	 */
	kbdev->nr_user_address_spaces = kbdev->nr_hw_address_spaces;
	kbdev->as_free = as_present; /* All ASs initially free */

	memset(kbdev->as_to_kctx, 0, sizeof(kbdev->as_to_kctx));

	return 0;
}

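/* kbase_ctx_sched_term - Terminate the context scheduler state of a device
 *
 * @kbdev: The device whose scheduler state is being torn down
 *
 * Sanity-checks that every address space is free and no longer mapped to a
 * context.
 */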
void kbase_ctx_sched_term(struct kbase_device *kbdev)
{
	s8 i;

	/* Sanity checks */
	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
		WARN_ON(kbdev->as_to_kctx[i] != NULL);
		WARN_ON(!(kbdev->as_free & (1u << i)));
	}
}

/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
 *
 * @kctx: The context for which to find a free address space
 *
 * Return: A valid AS if successful, otherwise KBASEP_AS_NR_INVALID
 *
 * This function returns an address space available for use. It prefers
 * returning an AS that has previously been assigned to the context, to
 * avoid having to reprogram the MMU.
 */
static int kbasep_ctx_sched_find_as_for_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	int free_as;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	/* First check if the previously assigned AS is available */
	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
			(kbdev->as_free & (1u << kctx->as_nr)))
		return kctx->as_nr;

	/* The previously assigned AS was taken, so return any free AS at
	 * this point.
	 */
	free_as = ffs(kbdev->as_free) - 1;
	if (free_as >= 0 && free_as < kbdev->nr_hw_address_spaces)
		return free_as;

	return KBASEP_AS_NR_INVALID;
}

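/* kbase_ctx_sched_retain_ctx - Retain a context and ensure it has an
 *                              address space assigned
 *
 * @kctx: The context to retain
 *
 * Return: The AS assigned to the context, or KBASEP_AS_NR_INVALID if none
 *         could be assigned
 *
 * On the first reference an address space is looked up for the context; if
 * it differs from the previously assigned one, any stale owner of that AS
 * is evicted and the MMU is programmed for this context. The caller must
 * hold kbdev->mmu_hw_mutex and kbdev->hwaccess_lock, and the GPU must be
 * powered.
 */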
int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(!kbdev->pm.backend.gpu_powered);

	if (atomic_inc_return(&kctx->refcount) == 1) {
		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);

		if (free_as != KBASEP_AS_NR_INVALID) {
			kbdev->as_free &= ~(1u << free_as);
			/* Only program the MMU if the context has not been
			 * assigned the same address space before.
			 */
			if (free_as != kctx->as_nr) {
				struct kbase_context *const prev_kctx =
					kbdev->as_to_kctx[free_as];

				if (prev_kctx) {
					WARN_ON(atomic_read(&prev_kctx->refcount) != 0);
					kbase_mmu_disable(prev_kctx);
					KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
						kbdev, prev_kctx->id);
					prev_kctx->as_nr = KBASEP_AS_NR_INVALID;
				}
				kctx->as_nr = free_as;
				kbdev->as_to_kctx[free_as] = kctx;
				KBASE_TLSTREAM_TL_KBASE_CTX_ASSIGN_AS(
					kbdev, kctx->id, free_as);
				kbase_mmu_update(kbdev, &kctx->mmu,
					kctx->as_nr);
			}
		} else {
			atomic_dec(&kctx->refcount);

			/* Failed to find an available address space, so an
			 * error will be returned at this point.
			 */
			WARN_ON(kctx->as_nr != KBASEP_AS_NR_INVALID);
		}
	}

	return kctx->as_nr;
}

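/* kbase_ctx_sched_retain_ctx_refcount - Take an extra reference on a context
 *                                       that is already scheduled in
 *
 * @kctx: The context to retain
 *
 * The context must already hold a reference and have a valid address space
 * assigned; the caller must hold kbdev->hwaccess_lock.
 */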
void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	WARN_ON(atomic_read(&kctx->refcount) == 0);
	WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
	WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);

	atomic_inc(&kctx->refcount);
}

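/* kbase_ctx_sched_release_ctx - Release a reference to a context
 *
 * @kctx: The context to release
 *
 * When the last reference is dropped the context's address space is returned
 * to the free pool; if the AS had been disabled on a fault, the AS-to-context
 * mapping is also cleared. The caller must hold kbdev->hwaccess_lock.
 */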
void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	int new_ref_count;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	new_ref_count = atomic_dec_return(&kctx->refcount);
	if (new_ref_count == 0) {
		kbdev->as_free |= (1u << kctx->as_nr);
		if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
			KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
				kbdev, kctx->id);
			kbdev->as_to_kctx[kctx->as_nr] = NULL;
			kctx->as_nr = KBASEP_AS_NR_INVALID;
			kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
#if !MALI_USE_CSF
			kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
#endif
		}
	}

	KBASE_KTRACE_ADD(kbdev, SCHED_RELEASE_CTX, kctx, new_ref_count);
}

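/* kbase_ctx_sched_remove_ctx - Remove a context from the scheduler
 *
 * @kctx: The context to remove
 *
 * The context must have no outstanding references. If it still owns an
 * address space, the MMU for that AS is disabled (when the GPU is powered)
 * and the assignment is cleared. The caller must hold kbdev->mmu_hw_mutex
 * and kbdev->hwaccess_lock.
 */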
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(atomic_read(&kctx->refcount) != 0);

	if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
		if (kbdev->pm.backend.gpu_powered)
			kbase_mmu_disable(kctx);

		KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(kbdev, kctx->id);
		kbdev->as_to_kctx[kctx->as_nr] = NULL;
		kctx->as_nr = KBASEP_AS_NR_INVALID;
	}
}

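/* kbase_ctx_sched_restore_all_as - Reprogram every hardware address space
 *
 * @kbdev: The device whose address spaces are to be reprogrammed
 *
 * For each address space: the MCU's MMU setup is re-applied (CSF builds),
 * retained contexts have their MMU configuration restored, stale assignments
 * of unreferenced contexts are cleared, and unassigned address spaces are
 * disabled. The GPU must be powered and the caller must hold
 * kbdev->mmu_hw_mutex and kbdev->hwaccess_lock.
 */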
void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
{
	s8 i;

	lockdep_assert_held(&kbdev->mmu_hw_mutex);
	lockdep_assert_held(&kbdev->hwaccess_lock);

	WARN_ON(!kbdev->pm.backend.gpu_powered);

	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
		struct kbase_context *kctx;

#if MALI_USE_CSF
		if ((i == MCU_AS_NR) && kbdev->csf.firmware_inited) {
			kbase_mmu_update(kbdev, &kbdev->csf.mcu_mmu,
					 MCU_AS_NR);
			continue;
		}
#endif
		kctx = kbdev->as_to_kctx[i];
		if (kctx) {
			if (atomic_read(&kctx->refcount)) {
				WARN_ON(kctx->as_nr != i);

				kbase_mmu_update(kbdev, &kctx->mmu,
					kctx->as_nr);
				kbase_ctx_flag_clear(kctx,
					KCTX_AS_DISABLED_ON_FAULT);
			} else {
				/* This context might have been assigned an
				 * AS before, clear it.
				 */
				if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
					KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
						kbdev, kctx->id);
					kbdev->as_to_kctx[kctx->as_nr] = NULL;
					kctx->as_nr = KBASEP_AS_NR_INVALID;
				}
			}
		} else {
			kbase_mmu_disable_as(kbdev, i);
		}
	}
}

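/* kbase_ctx_sched_as_to_ctx_refcount - Look up the context assigned to an
 *                                      address space and retain it
 *
 * @kbdev: The device to look up the context on
 * @as_nr: The address space number
 *
 * Return: The retained context, or NULL if the arguments are invalid or no
 *         context is assigned to the address space
 */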
struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(
		struct kbase_device *kbdev, size_t as_nr)
{
	unsigned long flags;
	struct kbase_context *found_kctx = NULL;

	if (WARN_ON(kbdev == NULL))
		return NULL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return NULL;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	found_kctx = kbdev->as_to_kctx[as_nr];

	if (!WARN_ON(found_kctx == NULL))
		kbase_ctx_sched_retain_ctx_refcount(found_kctx);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return found_kctx;
}

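/* kbase_ctx_sched_as_to_ctx - Look up the context assigned to an address
 *                             space
 *
 * @kbdev: The device to look up the context on
 * @as_nr: The address space number
 *
 * Return: The context assigned to the address space, or NULL if none is
 *         assigned or it has no outstanding references. No reference is
 *         taken on the returned context.
 */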
struct kbase_context *kbase_ctx_sched_as_to_ctx(struct kbase_device *kbdev,
		size_t as_nr)
{
	unsigned long flags;
	struct kbase_context *found_kctx;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	found_kctx = kbase_ctx_sched_as_to_ctx_nolock(kbdev, as_nr);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return found_kctx;
}

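/* kbase_ctx_sched_as_to_ctx_nolock - Variant of kbase_ctx_sched_as_to_ctx()
 *                                    for callers already holding
 *                                    kbdev->hwaccess_lock
 *
 * @kbdev: The device to look up the context on
 * @as_nr: The address space number
 *
 * Return: The context assigned to the address space, or NULL if none is
 *         assigned or it has no outstanding references
 */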
struct kbase_context *kbase_ctx_sched_as_to_ctx_nolock(
		struct kbase_device *kbdev, size_t as_nr)
{
	struct kbase_context *found_kctx;

	if (WARN_ON(kbdev == NULL))
		return NULL;

	if (WARN_ON(as_nr >= BASE_MAX_NR_AS))
		return NULL;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	found_kctx = kbdev->as_to_kctx[as_nr];

	if (found_kctx) {
		if (atomic_read(&found_kctx->refcount) <= 0)
			found_kctx = NULL;
	}

	return found_kctx;
}

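/* kbase_ctx_sched_inc_refcount_nolock - Retain a context only if it is
 *                                       already referenced
 *
 * @kctx: The context to retain
 *
 * Return: true if a reference was taken, false otherwise
 *
 * The caller must hold kbdev->hwaccess_lock.
 */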
bool kbase_ctx_sched_inc_refcount_nolock(struct kbase_context *kctx)
{
	bool result = false;
	int as_nr;

	if (WARN_ON(kctx == NULL))
		return result;

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);

	as_nr = kctx->as_nr;
	if (atomic_read(&kctx->refcount) > 0) {
		KBASE_DEBUG_ASSERT(as_nr >= 0);

		kbase_ctx_sched_retain_ctx_refcount(kctx);
		KBASE_KTRACE_ADD(kctx->kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,
				kbase_ktrace_get_ctx_refcnt(kctx));
		result = true;
	}

	return result;
}

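/* kbase_ctx_sched_inc_refcount - Locking wrapper around
 *                                kbase_ctx_sched_inc_refcount_nolock()
 *
 * @kctx: The context to retain
 *
 * Return: true if a reference was taken, false otherwise
 *
 * Takes kbdev->mmu_hw_mutex and kbdev->hwaccess_lock around the nolock
 * variant.
 */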
bool kbase_ctx_sched_inc_refcount(struct kbase_context *kctx)
{
	unsigned long flags;
	bool result = false;

	if (WARN_ON(kctx == NULL))
		return result;

	if (WARN_ON(kctx->kbdev == NULL))
		return result;

	mutex_lock(&kctx->kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	result = kbase_ctx_sched_inc_refcount_nolock(kctx);
	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
	mutex_unlock(&kctx->kbdev->mmu_hw_mutex);

	return result;
}

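/* kbase_ctx_sched_release_ctx_lock - Release a reference to a context,
 *                                    taking kbdev->hwaccess_lock internally
 *
 * @kctx: The context to release
 *
 * The context must have a valid address space and a non-zero reference
 * count; otherwise a warning is raised and no reference is released.
 */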
void kbase_ctx_sched_release_ctx_lock(struct kbase_context *kctx)
{
	unsigned long flags;

	if (WARN_ON(!kctx))
		return;

	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);

	if (!WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID) &&
			!WARN_ON(atomic_read(&kctx->refcount) <= 0))
		kbase_ctx_sched_release_ctx(kctx);

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
}

#if MALI_USE_CSF
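/* kbase_ctx_sched_inc_refcount_if_as_valid - Retain a context only if it
 *                                            still owns its address space
 *
 * @kctx: The context to retain
 *
 * Return: true if a reference was taken, false otherwise
 *
 * The reference is taken only if the context's address space is valid and
 * still mapped to this context; the AS is also removed from the free mask
 * if it had been marked free.
 */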
bool kbase_ctx_sched_inc_refcount_if_as_valid(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;
	bool added_ref = false;
	unsigned long flags;

	if (WARN_ON(kctx == NULL))
		return added_ref;

	kbdev = kctx->kbdev;

	if (WARN_ON(kbdev == NULL))
		return added_ref;

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	if ((kctx->as_nr != KBASEP_AS_NR_INVALID) &&
	    (kctx == kbdev->as_to_kctx[kctx->as_nr])) {
		atomic_inc(&kctx->refcount);

		if (kbdev->as_free & (1u << kctx->as_nr))
			kbdev->as_free &= ~(1u << kctx->as_nr);

		KBASE_KTRACE_ADD(kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,
				 kbase_ktrace_get_ctx_refcnt(kctx));
		added_ref = true;
	}

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	mutex_unlock(&kbdev->mmu_hw_mutex);

	return added_ref;
}
#endif /* MALI_USE_CSF */