// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include "mali_kbase_csf_event.h"

/**
 * struct kbase_csf_event_cb - CSF event callback.
 *
 * @link:     Link to the rest of the list.
 * @kctx:     Pointer to the Kbase context this event belongs to.
 * @callback: Callback function to call when a CSF event is signalled.
 * @param:    Parameter to pass to the callback function.
 *
 * This structure belongs to the list of events which is part of a Kbase
 * context, and describes a callback function with a custom parameter to pass
 * to it when a CSF event is signalled.
 */
struct kbase_csf_event_cb {
	struct list_head link;
	struct kbase_context *kctx;
	kbase_csf_event_callback *callback;
	void *param;
};

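/**
 * kbase_csf_event_wait_add - Register a callback for CSF event signalling.
 *
 * @kctx:     The Kbase context to attach the callback to.
 * @callback: Function invoked, with @param, whenever a CSF event is
 *            signalled for this context.
 * @param:    Opaque parameter passed back to @callback.
 *
 * A minimal usage sketch (the handler and its data are hypothetical; the
 * callback signature follows the invocation in kbase_csf_event_signal()):
 *
 *   static enum kbase_csf_event_callback_action my_cb(void *param)
 *   {
 *           // One-shot handler: ask for removal after the first event.
 *           return KBASE_CSF_EVENT_CALLBACK_REMOVE;
 *   }
 *
 *   int err = kbase_csf_event_wait_add(kctx, my_cb, my_data);
 *
 * Return: 0 on success, or -ENOMEM if the callback entry could not be
 *         allocated.
 */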
int kbase_csf_event_wait_add(struct kbase_context *kctx,
			     kbase_csf_event_callback *callback, void *param)
{
	int err = -ENOMEM;
	struct kbase_csf_event_cb *event_cb =
		kzalloc(sizeof(struct kbase_csf_event_cb), GFP_KERNEL);

	if (event_cb) {
		unsigned long flags;

		event_cb->kctx = kctx;
		event_cb->callback = callback;
		event_cb->param = param;

		spin_lock_irqsave(&kctx->csf.event.lock, flags);
		list_add_tail(&event_cb->link, &kctx->csf.event.callback_list);
		dev_dbg(kctx->kbdev->dev,
			"Added event handler %pK with param %pK\n", event_cb,
			event_cb->param);
		spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

		err = 0;
	}

	return err;
}

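/**
 * kbase_csf_event_wait_remove - Unregister a previously added event callback.
 *
 * @kctx:     The Kbase context the callback was attached to.
 * @callback: The callback function that was registered.
 * @param:    The parameter the callback was registered with. Both @callback
 *            and @param must match for the entry to be removed.
 */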
void kbase_csf_event_wait_remove(struct kbase_context *kctx,
				 kbase_csf_event_callback *callback, void *param)
{
	struct kbase_csf_event_cb *event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) {
		if ((event_cb->callback == callback) && (event_cb->param == param)) {
			list_del(&event_cb->link);
			dev_dbg(kctx->kbdev->dev,
				"Removed event handler %pK with param %pK\n",
				event_cb, event_cb->param);
			kfree(event_cb);
			break;
		}
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

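/* Ring the kernel doorbell so that the firmware re-evaluates pending
 * synchronisation object waits. The doorbell register can only be
 * accessed while the GPU is powered (and, with runtime PM, not in sleep
 * mode), hence the checks under hwaccess_lock below.
 */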
static void sync_update_notify_gpu(struct kbase_context *kctx)
{
	bool can_notify_gpu;
	unsigned long flags;

	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	can_notify_gpu = kctx->kbdev->pm.backend.gpu_powered;
#ifdef KBASE_PM_RUNTIME
	if (kctx->kbdev->pm.backend.gpu_sleep_mode_active)
		can_notify_gpu = false;
#endif

	if (can_notify_gpu) {
		kbase_csf_ring_doorbell(kctx->kbdev, CSF_KERNEL_DOORBELL_NR);
		KBASE_KTRACE_ADD(kctx->kbdev, SYNC_UPDATE_EVENT_NOTIFY_GPU, kctx, 0u);
	}

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
}

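/**
 * kbase_csf_event_signal - Signal a CSF event for a context.
 *
 * @kctx:       The Kbase context for which the event is signalled.
 * @notify_gpu: True if the firmware should also be notified so that it
 *              re-evaluates pending synchronisation object waits.
 *
 * Wakes up the event thread and invokes every callback registered on the
 * context's callback list; a callback returning
 * KBASE_CSF_EVENT_CALLBACK_REMOVE is unregistered and freed on the spot.
 */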
void kbase_csf_event_signal(struct kbase_context *kctx, bool notify_gpu)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	dev_dbg(kctx->kbdev->dev,
		"Signal event (%s GPU notify) for context %pK\n",
		notify_gpu ? "with" : "without", (void *)kctx);

	/* First set the signal count and wake up the event thread.
	 */
	atomic_set(&kctx->event_count, 1);
	kbase_event_wakeup(kctx);

	/* Signal the CSF firmware to ensure that pending command stream
	 * synch object wait operations are re-evaluated.
	 * A write to GLB_DOORBELL suffices, as the spec says that all pending
	 * synch object wait operations are re-evaluated on a write to any
	 * CS_DOORBELL/GLB_DOORBELL register.
	 */
	if (notify_gpu)
		sync_update_notify_gpu(kctx);

	/* Now invoke the callbacks registered on the backend side.
	 * Allow item removal inside the loop, if requested by the callback.
	 */
	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		enum kbase_csf_event_callback_action action;

		dev_dbg(kctx->kbdev->dev,
			"Calling event handler %pK with param %pK\n",
			(void *)event_cb, event_cb->param);
		action = event_cb->callback(event_cb->param);
		if (action == KBASE_CSF_EVENT_CALLBACK_REMOVE) {
			list_del(&event_cb->link);
			kfree(event_cb);
		}
	}

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

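/**
 * kbase_csf_event_term - Remove all registered event callbacks.
 *
 * @kctx: The Kbase context being terminated.
 *
 * Any callback still registered at this point suggests a missing
 * kbase_csf_event_wait_remove() call, hence the warning for each entry
 * removed here. The error list is expected to be empty already.
 */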
void kbase_csf_event_term(struct kbase_context *kctx)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		list_del(&event_cb->link);
		dev_warn(kctx->kbdev->dev,
			 "Removed event handler %pK with param %pK\n",
			 (void *)event_cb, event_cb->param);
		kfree(event_cb);
	}

	WARN_ON(!list_empty(&kctx->csf.event.error_list));

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

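/**
 * kbase_csf_event_init - Initialise the event machinery of a context.
 *
 * @kctx: The Kbase context whose callback list, error list and event lock
 *        are initialised.
 */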
void kbase_csf_event_init(struct kbase_context *const kctx)
{
	INIT_LIST_HEAD(&kctx->csf.event.callback_list);
	INIT_LIST_HEAD(&kctx->csf.event.error_list);
	spin_lock_init(&kctx->csf.event.lock);
}

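/**
 * kbase_csf_event_remove_error - Remove an error notification from the
 *                                context's error list, if it is queued.
 *
 * @kctx:  The Kbase context owning the error list.
 * @error: The error notification to remove.
 */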
void kbase_csf_event_remove_error(struct kbase_context *kctx,
				  struct kbase_csf_notification *error)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	list_del_init(&error->link);
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

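/**
 * kbase_csf_event_read_error - Dequeue the oldest pending error notification.
 *
 * @kctx:       The Kbase context owning the error list.
 * @event_data: Output buffer that receives a copy of the error data.
 *
 * Return: true if an error was dequeued into @event_data, false if the
 *         error list was empty.
 */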
bool kbase_csf_event_read_error(struct kbase_context *kctx,
				struct base_csf_notification *event_data)
{
	struct kbase_csf_notification *error_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (likely(!list_empty(&kctx->csf.event.error_list))) {
		error_data = list_first_entry(&kctx->csf.event.error_list,
					      struct kbase_csf_notification, link);
		list_del_init(&error_data->link);
		*event_data = error_data->data;
		dev_dbg(kctx->kbdev->dev, "Dequeued error %pK in context %pK\n",
			(void *)error_data, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
	return !!error_data;
}

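/**
 * kbase_csf_event_add_error - Queue an error notification on a context.
 *
 * @kctx:  The Kbase context owning the error list.
 * @error: The notification object to queue. It must not already be on a
 *         list: a WARN is raised otherwise and the error is dropped.
 * @data:  The error payload copied into @error before queuing.
 */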
void kbase_csf_event_add_error(struct kbase_context *const kctx,
			       struct kbase_csf_notification *const error,
			       struct base_csf_notification const *const data)
{
	unsigned long flags;

	if (WARN_ON(!kctx))
		return;

	if (WARN_ON(!error))
		return;

	if (WARN_ON(!data))
		return;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (!WARN_ON(!list_empty(&error->link))) {
		error->data = *data;
		list_add_tail(&error->link, &kctx->csf.event.error_list);
		dev_dbg(kctx->kbdev->dev,
			"Added error %pK of type %d in context %pK\n",
			(void *)error, data->type, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

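/**
 * kbase_csf_event_error_pending - Check whether any error notification is
 *                                 queued on a context.
 *
 * @kctx: The Kbase context owning the error list.
 *
 * Return: true if the error list is non-empty.
 */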
bool kbase_csf_event_error_pending(struct kbase_context *kctx)
{
	bool error_pending = false;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	error_pending = !list_empty(&kctx->csf.event.error_list);

	dev_dbg(kctx->kbdev->dev, "%s error is pending in context %pK\n",
		error_pending ? "An" : "No", (void *)kctx);

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

	return error_pending;
}