// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010, 2012-2015, 2017-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Model interface
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <backend/gpu/mali_kbase_model_dummy.h>
#include "backend/gpu/mali_kbase_model_linux.h"
#include "device/mali_kbase_device.h"
#include "mali_kbase_irq_internal.h"

#include <linux/kthread.h>
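
/**
 * struct model_irq_data - Data needed to service an IRQ raised by the model.
 * @kbdev: Kbase device the interrupt was raised for.
 * @work: Work item queued to service the interrupt outside the raising
 *        context.
 *
 * One instance is allocated from kbdev->irq_slab per raised interrupt and is
 * freed by the worker that services it.
 */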
struct model_irq_data {
	struct kbase_device *kbdev;
	struct work_struct work;
};

/**
 * serve_job_irq - Service a Job IRQ raised by the dummy model.
 * @work: Work item embedded in the struct model_irq_data for this IRQ.
 *
 * Drains JOB_IRQ_STATUS, dispatching each pending interrupt under the
 * hwaccess lock, then frees the containing struct model_irq_data.
 */
static void serve_job_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data,
						   work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	while (atomic_cmpxchg(&kbdev->serving_job_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev,
				JOB_CONTROL_REG(JOB_IRQ_STATUS)))) {
			unsigned long flags;

			/* Handle the IRQ */
			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF
			kbase_csf_interrupt(kbdev, val);
#else
			kbase_job_done(kbdev, val);
#endif
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}
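
/**
 * serve_gpu_irq - Service a GPU IRQ raised by the dummy model.
 * @work: Work item embedded in the struct model_irq_data for this IRQ.
 *
 * Drains GPU_IRQ_STATUS, passing each pending interrupt to
 * kbase_gpu_interrupt(), then frees the containing struct model_irq_data.
 */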
static void serve_gpu_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data,
						   work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	while (atomic_cmpxchg(&kbdev->serving_gpu_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev,
				GPU_CONTROL_REG(GPU_IRQ_STATUS)))) {
			/* Handle the IRQ */
			kbase_gpu_interrupt(kbdev, val);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}
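
/**
 * serve_mmu_irq - Service an MMU IRQ raised by the dummy model.
 * @work: Work item embedded in the struct model_irq_data for this IRQ.
 *
 * Drains MMU_IRQ_STATUS, passing each pending interrupt to
 * kbase_mmu_interrupt(), then frees the containing struct model_irq_data.
 */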
static void serve_mmu_irq(struct work_struct *work)
{
	struct model_irq_data *data = container_of(work, struct model_irq_data,
						   work);
	struct kbase_device *kbdev = data->kbdev;

	/* Make sure no worker is already serving this IRQ */
	if (atomic_cmpxchg(&kbdev->serving_mmu_irq, 1, 0) == 1) {
		u32 val;

		while ((val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS)))) {
			/* Handle the IRQ */
			kbase_mmu_interrupt(kbdev, val);
		}
	}

	kmem_cache_free(kbdev->irq_slab, data);
}
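
/**
 * gpu_device_raise_irq - Queue work to service an IRQ raised by the model.
 * @model: Pointer to the dummy model.
 * @irq: The interrupt that the model has raised.
 *
 * May be called in atomic context (hence the GFP_ATOMIC allocation), so the
 * interrupt is serviced from a workqueue rather than handled inline.
 */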
void gpu_device_raise_irq(void *model, enum gpu_dummy_irq irq)
{
	struct model_irq_data *data;
	struct kbase_device *kbdev = gpu_device_get_data(model);

	KBASE_DEBUG_ASSERT(kbdev);

	data = kmem_cache_alloc(kbdev->irq_slab, GFP_ATOMIC);
	if (data == NULL)
		return;

	data->kbdev = kbdev;

	switch (irq) {
	case GPU_DUMMY_JOB_IRQ:
		INIT_WORK(&data->work, serve_job_irq);
		atomic_set(&kbdev->serving_job_irq, 1);
		break;
	case GPU_DUMMY_GPU_IRQ:
		INIT_WORK(&data->work, serve_gpu_irq);
		atomic_set(&kbdev->serving_gpu_irq, 1);
		break;
	case GPU_DUMMY_MMU_IRQ:
		INIT_WORK(&data->work, serve_mmu_irq);
		atomic_set(&kbdev->serving_mmu_irq, 1);
		break;
	default:
		dev_warn(kbdev->dev, "Unknown IRQ");
		kmem_cache_free(kbdev->irq_slab, data);
		/* No work item was initialised, so don't queue the freed data */
		return;
	}

	queue_work(kbdev->irq_workq, &data->work);
}
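
/**
 * kbase_reg_write - Write a GPU register of the dummy model.
 * @kbdev: Kbase device pointer
 * @offset: Offset of the register to write
 * @value: Value to write
 *
 * The access is forwarded to the model under the reg_op_lock spinlock.
 */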
void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->reg_op_lock, flags);
	midgard_model_write_reg(kbdev->model, offset, value);
	spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);
}

KBASE_EXPORT_TEST_API(kbase_reg_write);
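
/**
 * kbase_reg_read - Read a GPU register of the dummy model.
 * @kbdev: Kbase device pointer
 * @offset: Offset of the register to read
 *
 * The access is forwarded to the model under the reg_op_lock spinlock.
 *
 * Return: Value of the register.
 */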
u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&kbdev->reg_op_lock, flags);
	midgard_model_read_reg(kbdev->model, offset, &val);
	spin_unlock_irqrestore(&kbdev->reg_op_lock, flags);

	return val;
}

KBASE_EXPORT_TEST_API(kbase_reg_read);

/**
 * kbase_is_gpu_removed - Has the GPU been removed.
 * @kbdev: Kbase device pointer
 *
 * This function would return true if the GPU has been removed. It is stubbed
 * here and always returns false, as the dummy model cannot be removed.
 *
 * Return: Always false
 */
bool kbase_is_gpu_removed(struct kbase_device *kbdev)
{
	return false;
}
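
/**
 * kbase_install_interrupts - Set up IRQ handling for the dummy model.
 * @kbdev: Kbase device pointer
 *
 * There is no real interrupt line to request; instead, create the ordered
 * workqueue and the slab cache used to service interrupts raised by the
 * model.
 *
 * Return: 0 on success, or -ENOMEM if an allocation fails.
 */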
int kbase_install_interrupts(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);

	atomic_set(&kbdev->serving_job_irq, 0);
	atomic_set(&kbdev->serving_gpu_irq, 0);
	atomic_set(&kbdev->serving_mmu_irq, 0);

	kbdev->irq_workq = alloc_ordered_workqueue("dummy irq queue", 0);
	if (kbdev->irq_workq == NULL)
		return -ENOMEM;

	kbdev->irq_slab = kmem_cache_create("dummy_irq_slab",
			sizeof(struct model_irq_data), 0, 0, NULL);
	if (kbdev->irq_slab == NULL) {
		destroy_workqueue(kbdev->irq_workq);
		return -ENOMEM;
	}

	return 0;
}
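
/**
 * kbase_release_interrupts - Tear down the IRQ resources created by
 *                            kbase_install_interrupts().
 * @kbdev: Kbase device pointer
 */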
void kbase_release_interrupts(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);
	destroy_workqueue(kbdev->irq_workq);
	kmem_cache_destroy(kbdev->irq_slab);
}
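
/**
 * kbase_synchronize_irqs - Wait for any in-flight IRQ work to finish.
 * @kbdev: Kbase device pointer
 *
 * Flushing the IRQ workqueue stands in for synchronizing against a real
 * hardware interrupt handler.
 */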
void kbase_synchronize_irqs(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev);
	flush_workqueue(kbdev->irq_workq);
}

KBASE_EXPORT_TEST_API(kbase_synchronize_irqs);
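
/**
 * kbase_set_custom_irq_handler - Install a custom IRQ handler.
 * @kbdev: Kbase device pointer
 * @custom_handler: Handler to install
 * @irq_type: Interrupt type the handler is for
 *
 * Stubbed for the dummy model, which has no real interrupt lines.
 *
 * Return: Always 0.
 */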
int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
				 irq_handler_t custom_handler,
				 int irq_type)
{
	return 0;
}

KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
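
/**
 * kbase_gpu_irq_test_handler - Minimal GPU IRQ handler used by tests.
 * @irq: IRQ number
 * @data: Device data (unused)
 * @val: Pending interrupt status
 *
 * Return: IRQ_NONE if no interrupt was pending, IRQ_HANDLED otherwise.
 */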
irqreturn_t kbase_gpu_irq_test_handler(int irq, void *data, u32 val)
{
	if (!val)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

KBASE_EXPORT_TEST_API(kbase_gpu_irq_test_handler);
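
/**
 * kbase_gpu_device_create - Create the dummy model backing this device.
 * @kbdev: Kbase device pointer
 *
 * Return: 0 on success, or -ENOMEM if the model cannot be created.
 */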
int kbase_gpu_device_create(struct kbase_device *kbdev)
{
	kbdev->model = midgard_model_create(NULL);
	if (kbdev->model == NULL)
		return -ENOMEM;

	gpu_device_set_data(kbdev->model, kbdev);

	spin_lock_init(&kbdev->reg_op_lock);

	dev_warn(kbdev->dev, "Using Dummy Model");

	return 0;
}
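
/**
 * kbase_gpu_device_destroy - Destroy the dummy model backing this device.
 * @kbdev: Kbase device pointer
 */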
void kbase_gpu_device_destroy(struct kbase_device *kbdev)
{
	midgard_model_destroy(kbdev->model);
}