// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Base kernel device APIs
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/types.h>
#include <linux/oom.h>

#include <mali_kbase.h>
#include <mali_kbase_defs.h>
#include <mali_kbase_hwaccess_instr.h>
#include <mali_kbase_hw.h>
#include <mali_kbase_config_defaults.h>
#include <linux/priority_control_manager.h>

#include <tl/mali_kbase_timeline.h>
#include "mali_kbase_kinstr_prfcnt.h"
#include "mali_kbase_vinstr.h"
#include "mali_kbase_hwcnt_context.h"
#include "mali_kbase_hwcnt_virtualizer.h"

#include "mali_kbase_device.h"
#include "mali_kbase_device_internal.h"
#include "backend/gpu/mali_kbase_pm_internal.h"
#include "backend/gpu/mali_kbase_irq_internal.h"
#include "mali_kbase_regs_history_debugfs.h"
#include "mali_kbase_pbha.h"

#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include "arbiter/mali_kbase_arbiter_pm.h"
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

/* NOTE: Magic - 0x45435254 (TRCE in ASCII).
 * Supports tracing feature provided in the base module.
 * Please keep it in sync with the value of base module.
 */
#define TRACE_BUFFER_HEADER_SPECIAL 0x45435254

/* Number of register accesses for the buffer that we allocate during
 * initialization time. The buffer size can be changed later via debugfs.
 */
#define KBASEP_DEFAULT_REGISTER_HISTORY_SIZE ((u16)512)

static DEFINE_MUTEX(kbase_dev_list_lock);
static LIST_HEAD(kbase_dev_list);
static int kbase_dev_nr;

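/**
 * kbase_device_alloc() - Allocate and zero a kbase device structure.
 *
 * Return: Pointer to the newly allocated device, or NULL on allocation
 * failure. The caller releases it with kbase_device_free().
 */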
struct kbase_device *kbase_device_alloc(void)
{
	return kzalloc(sizeof(struct kbase_device), GFP_KERNEL);
}

/**
 * kbase_device_all_as_init() - Initialise address space objects of the device.
 *
 * @kbdev: Pointer to kbase device.
 *
 * Return: 0 on success otherwise non-zero.
 */
static int kbase_device_all_as_init(struct kbase_device *kbdev)
{
	int i, err = 0;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		err = kbase_mmu_as_init(kbdev, i);
		if (err)
			break;
	}

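	/* On failure, unwind only the address spaces initialised so far. */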
	if (err) {
		while (i-- > 0)
			kbase_mmu_as_term(kbdev, i);
	}

	return err;
}

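/**
 * kbase_device_all_as_term() - Terminate every address space object set up by
 *                              kbase_device_all_as_init().
 *
 * @kbdev: Pointer to kbase device.
 */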
static void kbase_device_all_as_term(struct kbase_device *kbdev)
{
	int i;

	for (i = 0; i < kbdev->nr_hw_address_spaces; i++)
		kbase_mmu_as_term(kbdev, i);
}

int kbase_device_pcm_dev_init(struct kbase_device *const kbdev)
{
	int err = 0;

#if IS_ENABLED(CONFIG_OF)
	struct device_node *prio_ctrl_node;

	/* Check to see whether or not a platform specific priority control manager
	 * is available.
	 */
	prio_ctrl_node = of_parse_phandle(kbdev->dev->of_node,
			"priority-control-manager", 0);
	if (!prio_ctrl_node) {
		dev_info(kbdev->dev,
			"No priority control manager is configured");
	} else {
		struct platform_device *const pdev =
			of_find_device_by_node(prio_ctrl_node);

		if (!pdev) {
			dev_err(kbdev->dev,
				"The configured priority control manager was not found");
		} else {
			struct priority_control_manager_device *pcm_dev =
						platform_get_drvdata(pdev);
			if (!pcm_dev) {
				dev_info(kbdev->dev, "Priority control manager is not ready");
				err = -EPROBE_DEFER;
			} else if (!try_module_get(pcm_dev->owner)) {
				dev_err(kbdev->dev, "Failed to get priority control manager module");
				err = -ENODEV;
			} else {
				dev_info(kbdev->dev, "Priority control manager successfully loaded");
				kbdev->pcm_dev = pcm_dev;
			}
		}
		of_node_put(prio_ctrl_node);
	}
#endif /* CONFIG_OF */

	return err;
}

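/* Drop the module reference taken with try_module_get() in
 * kbase_device_pcm_dev_init().
 */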
void kbase_device_pcm_dev_term(struct kbase_device *const kbdev)
{
	if (kbdev->pcm_dev)
		module_put(kbdev->pcm_dev->owner);
}

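/* Convert a page count to KiB: shifting left by (PAGE_SHIFT - 10) multiplies
 * by PAGE_SIZE and divides by 1024 in a single step.
 */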
#define KBASE_PAGES_TO_KIB(pages) (((unsigned int)pages) << (PAGE_SHIFT - 10))

/**
 * mali_oom_notifier_handler - Mali driver out-of-memory handler
 *
 * @nb: notifier block - used to retrieve kbdev pointer
 * @action: action (unused)
 * @data: data pointer (unused)
 *
 * This function simply lists memory usage by the Mali driver, per GPU device,
 * for diagnostic purposes.
 */
static int mali_oom_notifier_handler(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct kbase_device *kbdev;
	struct kbase_context *kctx = NULL;
	unsigned long kbdev_alloc_total;

	if (WARN_ON(nb == NULL))
		return NOTIFY_BAD;

	kbdev = container_of(nb, struct kbase_device, oom_notifier_block);

	kbdev_alloc_total =
		KBASE_PAGES_TO_KIB(atomic_read(&(kbdev->memdev.used_pages)));

	dev_err(kbdev->dev, "OOM notifier: dev %s  %lu kB\n", kbdev->devname,
		kbdev_alloc_total);

	mutex_lock(&kbdev->kctx_list_lock);

	list_for_each_entry (kctx, &kbdev->kctx_list, kctx_list_link) {
		struct pid *pid_struct;
		struct task_struct *task;
		unsigned long task_alloc_total =
			KBASE_PAGES_TO_KIB(atomic_read(&(kctx->used_pages)));

		rcu_read_lock();
		pid_struct = find_get_pid(kctx->pid);
		task = pid_task(pid_struct, PIDTYPE_PID);

		dev_err(kbdev->dev,
			"OOM notifier: tsk %s  tgid (%u)  pid (%u) %lu kB\n",
			task ? task->comm : "[null task]", kctx->tgid,
			kctx->pid, task_alloc_total);

		put_pid(pid_struct);
		rcu_read_unlock();
	}

	mutex_unlock(&kbdev->kctx_list_lock);
	return NOTIFY_OK;
}

int kbase_device_misc_init(struct kbase_device * const kbdev)
{
	int err;
#if IS_ENABLED(CONFIG_ARM64)
	struct device_node *np = NULL;
#endif /* CONFIG_ARM64 */

	spin_lock_init(&kbdev->mmu_mask_change);
	mutex_init(&kbdev->mmu_hw_mutex);
#if IS_ENABLED(CONFIG_ARM64)
	kbdev->cci_snoop_enabled = false;
	np = kbdev->dev->of_node;
	if (np != NULL) {
		if (of_property_read_u32(np, "snoop_enable_smc",
					&kbdev->snoop_enable_smc))
			kbdev->snoop_enable_smc = 0;
		if (of_property_read_u32(np, "snoop_disable_smc",
					&kbdev->snoop_disable_smc))
			kbdev->snoop_disable_smc = 0;
		/* Either both or none of the calls should be provided. */
		if (!((kbdev->snoop_disable_smc == 0
			&& kbdev->snoop_enable_smc == 0)
			|| (kbdev->snoop_disable_smc != 0
			&& kbdev->snoop_enable_smc != 0))) {
			WARN_ON(1);
			err = -EINVAL;
			goto fail;
		}
	}
#endif /* CONFIG_ARM64 */

	/* Get the list of workarounds for issues on the current HW
	 * (identified by the GPU_ID register)
	 */
	err = kbase_hw_set_issues_mask(kbdev);
	if (err)
		goto fail;

	/* Set the list of features available on the current HW
	 * (identified by the GPU_ID register)
	 */
	kbase_hw_set_features_mask(kbdev);

	err = kbase_gpuprops_set_features(kbdev);
	if (err)
		goto fail;

	/* Workaround a pre-3.13 Linux issue, where dma_mask is NULL when our
	 * device structure was created by device-tree
	 */
	if (!kbdev->dev->dma_mask)
		kbdev->dev->dma_mask = &kbdev->dev->coherent_dma_mask;

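	/* Limit DMA addressing to the physical address width reported by the
	 * GPU's MMU properties.
	 */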
	err = dma_set_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;

	err = dma_set_coherent_mask(kbdev->dev,
			DMA_BIT_MASK(kbdev->gpu_props.mmu.pa_bits));
	if (err)
		goto dma_set_mask_failed;


	/* There is no limit for Mali, so set to max. We only do this if dma_parms
	 * is already allocated by the platform.
	 */
	if (kbdev->dev->dma_parms)
		err = dma_set_max_seg_size(kbdev->dev, UINT_MAX);
	if (err)
		goto dma_set_mask_failed;

	kbdev->nr_hw_address_spaces = kbdev->gpu_props.num_address_spaces;

	err = kbase_device_all_as_init(kbdev);
	if (err)
		goto dma_set_mask_failed;

	err = kbase_ktrace_init(kbdev);
	if (err)
		goto term_as;
	err = kbase_pbha_read_dtb(kbdev);
	if (err)
		goto term_ktrace;

	init_waitqueue_head(&kbdev->cache_clean_wait);

	kbase_debug_assert_register_hook(&kbase_ktrace_hook_wrapper, kbdev);

	atomic_set(&kbdev->ctx_num, 0);

	kbdev->pm.dvfs_period = DEFAULT_PM_DVFS_PERIOD;

	kbdev->reset_timeout_ms = DEFAULT_RESET_TIMEOUT_MS;

	kbdev->mmu_mode = kbase_mmu_mode_get_aarch64();

	mutex_init(&kbdev->kctx_list_lock);
	INIT_LIST_HEAD(&kbdev->kctx_list);

	dev_dbg(kbdev->dev, "Registering mali_oom_notifier_handler\n");
	kbdev->oom_notifier_block.notifier_call = mali_oom_notifier_handler;
	err = register_oom_notifier(&kbdev->oom_notifier_block);

	if (err) {
		dev_err(kbdev->dev,
			"Unable to register OOM notifier for Mali - but will continue\n");
		kbdev->oom_notifier_block.notifier_call = NULL;
	}
	return 0;

term_ktrace:
	kbase_ktrace_term(kbdev);
term_as:
	kbase_device_all_as_term(kbdev);
dma_set_mask_failed:
fail:
	return err;
}

void kbase_device_misc_term(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	WARN_ON(!list_empty(&kbdev->kctx_list));

#if KBASE_KTRACE_ENABLE
	kbase_debug_assert_register_hook(NULL, NULL);
#endif

	kbase_ktrace_term(kbdev);

	kbase_device_all_as_term(kbdev);


	if (kbdev->oom_notifier_block.notifier_call)
		unregister_oom_notifier(&kbdev->oom_notifier_block);
}

void kbase_device_free(struct kbase_device *kbdev)
{
	kfree(kbdev);
}

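/* Name the device after the driver (kbase_drv_name) plus the global device
 * counter, and record the counter as the device id.
 */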
void kbase_device_id_init(struct kbase_device *kbdev)
{
	scnprintf(kbdev->devname, DEVNAME_SIZE, "%s%d", kbase_drv_name,
			kbase_dev_nr);
	kbdev->id = kbase_dev_nr;
}

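/* Advance the global counter consumed by kbase_device_id_init(), so the next
 * device gets a distinct name and id.
 */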
void kbase_increment_device_id(void)
{
	kbase_dev_nr++;
}

int kbase_device_hwcnt_context_init(struct kbase_device *kbdev)
{
	return kbase_hwcnt_context_init(&kbdev->hwcnt_gpu_iface,
			&kbdev->hwcnt_gpu_ctx);
}

void kbase_device_hwcnt_context_term(struct kbase_device *kbdev)
{
	kbase_hwcnt_context_term(kbdev->hwcnt_gpu_ctx);
}

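/* The hardware counter layers stack on one another: the context created above
 * feeds the virtualizer, which in turn feeds the vinstr and kinstr_prfcnt
 * clients, so the corresponding init calls must run in that order.
 */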
int kbase_device_hwcnt_virtualizer_init(struct kbase_device *kbdev)
{
	return kbase_hwcnt_virtualizer_init(kbdev->hwcnt_gpu_ctx,
			KBASE_HWCNT_GPU_VIRTUALIZER_DUMP_THRESHOLD_NS,
			&kbdev->hwcnt_gpu_virt);
}

void kbase_device_hwcnt_virtualizer_term(struct kbase_device *kbdev)
{
	kbase_hwcnt_virtualizer_term(kbdev->hwcnt_gpu_virt);
}

int kbase_device_timeline_init(struct kbase_device *kbdev)
{
	atomic_set(&kbdev->timeline_flags, 0);
	return kbase_timeline_init(&kbdev->timeline, &kbdev->timeline_flags);
}

void kbase_device_timeline_term(struct kbase_device *kbdev)
{
	kbase_timeline_term(kbdev->timeline);
}

int kbase_device_vinstr_init(struct kbase_device *kbdev)
{
	return kbase_vinstr_init(kbdev->hwcnt_gpu_virt, &kbdev->vinstr_ctx);
}

void kbase_device_vinstr_term(struct kbase_device *kbdev)
{
	kbase_vinstr_term(kbdev->vinstr_ctx);
}

#if defined(CONFIG_DEBUG_FS) && !IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI)
int kbase_device_kinstr_prfcnt_init(struct kbase_device *kbdev)
{
	return kbase_kinstr_prfcnt_init(kbdev->hwcnt_gpu_virt,
					&kbdev->kinstr_prfcnt_ctx);
}

void kbase_device_kinstr_prfcnt_term(struct kbase_device *kbdev)
{
	kbase_kinstr_prfcnt_term(kbdev->kinstr_prfcnt_ctx);
}

int kbase_device_io_history_init(struct kbase_device *kbdev)
{
	return kbase_io_history_init(&kbdev->io_history,
			KBASEP_DEFAULT_REGISTER_HISTORY_SIZE);
}

void kbase_device_io_history_term(struct kbase_device *kbdev)
{
	kbase_io_history_term(&kbdev->io_history);
}
#endif

int kbase_device_misc_register(struct kbase_device *kbdev)
{
	return misc_register(&kbdev->mdev);
}

void kbase_device_misc_deregister(struct kbase_device *kbdev)
{
	misc_deregister(&kbdev->mdev);
}

int kbase_device_list_init(struct kbase_device *kbdev)
{
	const struct list_head *dev_list;

	dev_list = kbase_device_get_list();
	list_add(&kbdev->entry, &kbase_dev_list);
	kbase_device_put_list(dev_list);

	return 0;
}

void kbase_device_list_term(struct kbase_device *kbdev)
{
	const struct list_head *dev_list;

	dev_list = kbase_device_get_list();
	list_del(&kbdev->entry);
	kbase_device_put_list(dev_list);
}

const struct list_head *kbase_device_get_list(void)
{
	mutex_lock(&kbase_dev_list_lock);
	return &kbase_dev_list;
}
KBASE_EXPORT_TEST_API(kbase_device_get_list);

void kbase_device_put_list(const struct list_head *dev_list)
{
	mutex_unlock(&kbase_dev_list_lock);
}
KBASE_EXPORT_TEST_API(kbase_device_put_list);

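/* kbase_device_get_list()/kbase_device_put_list() bracket access to the global
 * device list under kbase_dev_list_lock. A minimal usage sketch (kbdev is a
 * caller-local cursor; devices are linked through their 'entry' member):
 *
 *	const struct list_head *dev_list = kbase_device_get_list();
 *	struct kbase_device *kbdev;
 *
 *	list_for_each_entry(kbdev, dev_list, entry)
 *		dev_dbg(kbdev->dev, "found %s\n", kbdev->devname);
 *	kbase_device_put_list(dev_list);
 */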
int kbase_device_early_init(struct kbase_device *kbdev)
{
	int err;

	err = kbasep_platform_device_init(kbdev);
	if (err)
		return err;

	err = kbase_pm_runtime_init(kbdev);
	if (err)
		goto fail_runtime_pm;

	/* This spinlock is initialized before doing the first access to GPU
	 * registers and installing interrupt handlers.
	 */
	spin_lock_init(&kbdev->hwaccess_lock);

	/* Ensure we can access the GPU registers */
	kbase_pm_register_access_enable(kbdev);

	/* Find out GPU properties based on the GPU feature registers */
	kbase_gpuprops_set(kbdev);

	/* We're done accessing the GPU registers for now. */
	kbase_pm_register_access_disable(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		err = kbase_arbiter_pm_install_interrupts(kbdev);
	else
		err = kbase_install_interrupts(kbdev);
#else
	err = kbase_install_interrupts(kbdev);
#endif
	if (err)
		goto fail_interrupts;

	return 0;

fail_interrupts:
	kbase_pm_runtime_term(kbdev);
fail_runtime_pm:
	kbasep_platform_device_term(kbdev);

	return err;
}

void kbase_device_early_term(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbdev->arb.arb_if)
		kbase_arbiter_pm_release_interrupts(kbdev);
	else
		kbase_release_interrupts(kbdev);
#else
	kbase_release_interrupts(kbdev);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
	kbase_pm_runtime_term(kbdev);
	kbasep_platform_device_term(kbdev);
}

int kbase_device_late_init(struct kbase_device *kbdev)
{
	int err;

	err = kbasep_platform_device_late_init(kbdev);

	return err;
}

void kbase_device_late_term(struct kbase_device *kbdev)
{
	kbasep_platform_device_late_term(kbdev);
}