// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Base kernel context APIs for CSF GPUs
 */

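/*
 * Illustrative usage sketch (not part of the driver proper): a minimal
 * outline of how the create/destroy pair in this file is expected to be
 * used, assuming the caller already holds a valid struct kbase_device and
 * the struct file of the device node, and that "flags" is a subset of
 * BASEP_CONTEXT_CREATE_KERNEL_FLAGS. The real call site lives in the core
 * driver code.
 *
 *	struct kbase_context *kctx;
 *
 *	kctx = kbase_create_context(kbdev, in_compat_syscall(), flags,
 *				    api_version, filp);
 *	if (!kctx)
 *		return -ENOMEM;
 *
 *	... the context services ioctls, memory mappings, etc. ...
 *
 *	kbase_destroy_context(kctx);
 */
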
#include <context/mali_kbase_context_internal.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase.h>
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_mem_linux.h>
#include <mali_kbase_mem_pool_group.h>
#include <mmu/mali_kbase_mmu.h>
#include <tl/mali_kbase_timeline.h>

#if IS_ENABLED(CONFIG_DEBUG_FS)
#include <csf/mali_kbase_csf_csg_debugfs.h>
#include <csf/mali_kbase_csf_kcpu_debugfs.h>
#include <csf/mali_kbase_csf_tiler_heap_debugfs.h>
#include <csf/mali_kbase_csf_cpu_queue_debugfs.h>
#include <mali_kbase_debug_mem_view.h>
#include <mali_kbase_mem_pool_debugfs.h>

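/*
 * Create the per-context debugfs entries: memory view, memory pools, JIT,
 * CSF queue groups, KCPU queues, tiler heaps and CPU queue dumps, under the
 * context's debugfs directory.
 */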
void kbase_context_debugfs_init(struct kbase_context *const kctx)
{
	kbase_debug_mem_view_init(kctx);
	kbase_mem_pool_debugfs_init(kctx->kctx_dentry, kctx);
	kbase_jit_debugfs_init(kctx);
	kbase_csf_queue_group_debugfs_init(kctx);
	kbase_csf_kcpu_debugfs_init(kctx);
	kbase_csf_tiler_heap_debugfs_init(kctx);
	kbase_csf_tiler_heap_total_debugfs_init(kctx);
	kbase_csf_cpu_queue_debugfs_init(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_context_debugfs_init);

void kbase_context_debugfs_term(struct kbase_context *const kctx)
{
	debugfs_remove_recursive(kctx->kctx_dentry);
}
KBASE_EXPORT_SYMBOL(kbase_context_debugfs_term);
#else
void kbase_context_debugfs_init(struct kbase_context *const kctx)
{
	CSTD_UNUSED(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_context_debugfs_init);

void kbase_context_debugfs_term(struct kbase_context *const kctx)
{
	CSTD_UNUSED(kctx);
}
KBASE_EXPORT_SYMBOL(kbase_context_debugfs_term);
#endif /* CONFIG_DEBUG_FS */

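/*
 * Final teardown step for a context: notify the timeline code that the
 * context is gone and release the vzalloc'd kbase_context itself.
 */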
static void kbase_context_free(struct kbase_context *kctx)
{
	kbase_timeline_post_kbase_context_destroy(kctx);

	vfree(kctx);
}

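/*
 * Ordered table of init/term pairs that make up context creation. Entries
 * are initialized in order by kbase_create_context() and terminated in
 * reverse order (via kbase_context_term_partial()) on failure or on
 * kbase_destroy_context(). The first entry has no init step: its term
 * callback, kbase_context_free(), runs last and frees the context itself.
 */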
static const struct kbase_context_init context_init[] = {
	{ NULL, kbase_context_free, NULL },
	{ kbase_context_common_init, kbase_context_common_term,
	  "Common context initialization failed" },
	{ kbase_context_mem_pool_group_init, kbase_context_mem_pool_group_term,
	  "Memory pool group initialization failed" },
	{ kbase_mem_evictable_init, kbase_mem_evictable_deinit,
	  "Memory evictable initialization failed" },
	{ kbase_context_mmu_init, kbase_context_mmu_term,
	  "MMU initialization failed" },
	{ kbase_context_mem_alloc_page, kbase_context_mem_pool_free,
	  "Memory alloc page failed" },
	{ kbase_region_tracker_init, kbase_region_tracker_term,
	  "Region tracker initialization failed" },
	{ kbase_sticky_resource_init, kbase_context_sticky_resource_term,
	  "Sticky resource initialization failed" },
	{ kbase_jit_init, kbase_jit_term, "JIT initialization failed" },
	{ kbase_csf_ctx_init, kbase_csf_ctx_term,
	  "CSF context initialization failed" },
	{ kbase_context_add_to_dev_list, kbase_context_remove_from_dev_list,
	  "Adding kctx to device failed" },
};

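/*
 * Run the term callbacks for the first @i entries of context_init, in
 * reverse order. Used both to unwind a partially completed creation and,
 * with i == ARRAY_SIZE(context_init), to tear down a fully created context.
 */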
static void kbase_context_term_partial(
	struct kbase_context *kctx,
	unsigned int i)
{
	while (i-- > 0) {
		if (context_init[i].term)
			context_init[i].term(kctx);
	}
}

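/*
 * Allocate a new kbase_context for the given device and run every step in
 * context_init. On any failure the steps already completed are unwound and
 * NULL is returned.
 */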
struct kbase_context *kbase_create_context(struct kbase_device *kbdev,
	bool is_compat,
	base_context_create_flags const flags,
	unsigned long const api_version,
	struct file *const filp)
{
	struct kbase_context *kctx;
	unsigned int i = 0;

	if (WARN_ON(!kbdev))
		return NULL;

	/* Validate flags */
	if (WARN_ON(flags != (flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS)))
		return NULL;

	/* Zero-initialized, as a lot of code assumes it is zeroed out on create */
	kctx = vzalloc(sizeof(*kctx));
	if (WARN_ON(!kctx))
		return NULL;

	kctx->kbdev = kbdev;
	kctx->api_version = api_version;
	kctx->filp = filp;
	kctx->create_flags = flags;

	if (is_compat)
		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
#if defined(CONFIG_64BIT)
	else
		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
#endif /* defined(CONFIG_64BIT) */

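	/*
	 * Walk the init/term table in order; if any step fails, report it
	 * and unwind the steps that have already succeeded.
	 */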
	for (i = 0; i < ARRAY_SIZE(context_init); i++) {
		int err = 0;

		if (context_init[i].init)
			err = context_init[i].init(kctx);

		if (err) {
			dev_err(kbdev->dev, "%s error = %d\n",
						context_init[i].err_mes, err);

			/* kctx is freed by kbase_context_free(), the term
			 * callback of entry 0 in context_init; a failure at
			 * index 0 would skip that step and leak the
			 * allocation.
			 */
			WARN_ON(i == 0);

			kbase_context_term_partial(kctx, i);
			return NULL;
		}
	}

	return kctx;
}
KBASE_EXPORT_SYMBOL(kbase_create_context);

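/*
 * Tear down a context created by kbase_create_context(). The GPU is kept
 * powered (and any system suspend is waited out) while the init/term table
 * is unwound in full.
 */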
void kbase_destroy_context(struct kbase_context *kctx)
{
	struct kbase_device *kbdev;

	if (WARN_ON(!kctx))
		return;

	kbdev = kctx->kbdev;
	if (WARN_ON(!kbdev))
		return;

	/* Context termination can happen while a system suspend of the GPU
	 * device is ongoing or has completed. Hangs have been observed in
	 * the field if context termination is not blocked until the GPU
	 * device has resumed.
	 */
	while (kbase_pm_context_active_handle_suspend(
		kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE)) {
		dev_info(kbdev->dev,
			 "Suspend in progress when destroying context");
		wait_event(kbdev->pm.resume_wait,
			   !kbase_pm_is_suspending(kbdev));
	}

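	/* Mark the context's memory pools as dying so that outstanding pool
	 * operations back out early rather than delaying termination.
	 */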
	kbase_mem_pool_group_mark_dying(&kctx->mem_pools);

	kbase_context_term_partial(kctx, ARRAY_SIZE(context_init));

	kbase_pm_context_idle(kbdev);
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);