/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Broadcom Dongle Host Driver (DHD), Generic work queue framework
 * Generic interface to handle dhd deferred work events
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux_wq.c 815919 2019-04-22 09:06:50Z $
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/kfifo.h>

#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_linux_wq.h>

typedef struct dhd_deferred_event {
	u8 event;		/* holds the event */
	void *event_data;	/* holds event specific data */
	event_handler_t event_handler;
	unsigned long pad;	/* for memory alignment to power of 2 */
} dhd_deferred_event_t;

#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))

/*
 * Work events may occur simultaneously.
 * The queues can hold up to 64 low-priority and 16 high-priority events.
 */
#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)
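
/*
 * Sizing note (assuming a typical 64-bit LP64 build where
 * sizeof(dhd_deferred_event_t) == 32): the fifos come to 512 and 2048
 * bytes respectively, both already powers of two, so the
 * roundup_pow_of_two() calls in dhd_deferred_work_init() are no-ops there.
 */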

#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

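/*
 * Deferred work context. The embedded work_struct must remain the first
 * member: the code below casts between struct dhd_deferred_wq * and
 * struct work_struct *.
 */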
struct dhd_deferred_wq {
	struct work_struct deferred_work;	/* must be the first member */

	struct kfifo *prio_fifo;
	struct kfifo *work_fifo;
	u8 *prio_fifo_buf;
	u8 *work_fifo_buf;
	spinlock_t work_lock;
	void *dhd_info;		/* backpointer passed to event handlers */
	u32 event_skip_mask;
};

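/*
 * Allocate a struct kfifo and bind it to the caller-provided buffer.
 * Note that @lock is currently unused: with the modern kfifo API,
 * locking is done per enqueue/dequeue via kfifo_in_spinlocked() and
 * kfifo_out_spinlocked() rather than at init time.
 */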
static inline struct kfifo *
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
	struct kfifo *fifo;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
	if (!fifo) {
		return NULL;
	}
	kfifo_init(fifo, buf, size);
	return fifo;
}

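/*
 * Release a fifo created by dhd_kfifo_init(). kfifo_free() kfrees the
 * data buffer (valid here because the buffer was kzalloc'ed), and the
 * kfifo container itself is then freed to avoid leaking it.
 */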
static inline void
dhd_kfifo_free(struct kfifo *fifo)
{
	kfifo_free(fifo);
	/* the struct kfifo itself was kzalloc'ed in dhd_kfifo_init() */
	kfree(fifo);
}

/* deferred work functions */
static void dhd_deferred_work_handler(struct work_struct *data);

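/*
 * dhd_deferred_work_init - set up the deferred work context
 *
 * Allocates the work context plus one fifo per priority level and hooks
 * up the work handler. Returns an opaque handle to be passed to the
 * other dhd_deferred_* calls, or NULL on failure.
 */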
void *
dhd_deferred_work_init(void *dhd_info)
{
	struct dhd_deferred_wq *work = NULL;
	u8 *buf;
	unsigned long fifo_size = 0;
	gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;

	if (!dhd_info) {
		DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
		goto return_null;
	}

	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
		flags);
	if (!work) {
		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
		goto return_null;
	}

	INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);

	/* initialize event fifo */
	spin_lock_init(&work->work_lock);

	/* allocate buffer to hold prio events */
	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: prio work fifo allocation failed\n",
			__FUNCTION__));
		goto return_null;
	}

	/* initialize prio event fifo */
	work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->prio_fifo) {
		kfree(buf);
		goto return_null;
	}

	/* allocate buffer to hold work events */
	fifo_size = DHD_WORK_FIFO_SIZE;
	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
		roundup_pow_of_two(fifo_size);
	buf = (u8 *)kzalloc(fifo_size, flags);
	if (!buf) {
		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
		goto return_null;
	}

	/* initialize event fifo */
	work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
	if (!work->work_fifo) {
		kfree(buf);
		goto return_null;
	}

	work->dhd_info = dhd_info;
	work->event_skip_mask = 0;
	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
	return work;

return_null:
	if (work) {
		dhd_deferred_work_deinit(work);
	}

	return NULL;
}

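/*
 * dhd_deferred_work_deinit - tear down the deferred work context
 *
 * Cancels any pending work and releases both fifos and the context.
 * Safe to call on a partially constructed context (used by the error
 * path of dhd_deferred_work_init()).
 */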
void
dhd_deferred_work_deinit(void *work)
{
	struct dhd_deferred_wq *deferred_work = work;

	if (!deferred_work) {
		DHD_ERROR(("%s: deferred work has been freed already\n",
			__FUNCTION__));
		return;
	}

	/* cancel the deferred work handling */
	cancel_work_sync((struct work_struct *)deferred_work);

	/*
	 * free the work event fifos.
	 * dhd_kfifo_free releases both the fifo buffer and the
	 * kfifo container
	 */
	if (deferred_work->prio_fifo) {
		dhd_kfifo_free(deferred_work->prio_fifo);
	}

	if (deferred_work->work_fifo) {
		dhd_kfifo_free(deferred_work->work_fifo);
	}

	kfree(deferred_work);
}

/* select kfifo according to priority */
static inline struct kfifo *
dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
	u8 priority)
{
	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
		return deferred_wq->prio_fifo;
	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
		return deferred_wq->work_fifo;
	} else {
		return NULL;
	}
}

/*
 * Prepares an event, queues it on the fifo matching the requested
 * priority, and schedules the work handler.
 */
int
dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
	event_handler_t event_handler, u8 priority)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
	struct kfifo *fifo;
	dhd_deferred_event_t deferred_event;
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		ASSERT(0);
		return DHD_WQ_STS_UNINITIALIZED;
	}

	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
			event));
		return DHD_WQ_STS_UNKNOWN_EVENT;
	}

	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
		DHD_ERROR(("%s: unknown priority, priority=%d\n",
			__FUNCTION__, priority));
		return DHD_WQ_STS_UNKNOWN_PRIORITY;
	}

	if ((deferred_wq->event_skip_mask & (1 << event))) {
		DHD_ERROR(("%s: skip event requested, mask=0x%x\n",
			__FUNCTION__, deferred_wq->event_skip_mask));
		return DHD_WQ_STS_EVENT_SKIPPED;
	}

	/*
	 * The kfifo element size (as reported by kfifo_esize()) is
	 * 1 byte by default. Older kernels (FC11) don't support a
	 * different element size, so for compatibility the default is
	 * kept and events are copied in and out as raw bytes.
	 */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	deferred_event.event = event;
	deferred_event.event_data = event_data;
	deferred_event.event_handler = event_handler;

	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}
	if (bytes_copied != DEFRD_EVT_SIZE) {
		DHD_ERROR(("%s: failed to schedule deferred work, "
			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
			priority, bytes_copied));
		return DHD_WQ_STS_SCHED_FAILED;
	}
	schedule_work((struct work_struct *)deferred_wq);
	return DHD_WQ_STS_OK;
}
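
/*
 * Example usage (hypothetical caller: the event id, handler, and the
 * deferred_wq field name below are illustrative, not defined in this file):
 *
 *	static void my_event_handler(void *dhd_info, void *event_data,
 *		u8 event)
 *	{
 *		// runs in process context via dhd_deferred_work_handler
 *	}
 *
 *	err = dhd_deferred_schedule_work(dhd->deferred_wq, event_data,
 *		DHD_WQ_WORK_SOME_EVENT, my_event_handler,
 *		DHD_WQ_WORK_PRIORITY_LOW);
 *	if (err != DHD_WQ_STS_OK)
 *		// the fifo was full or the event is currently being skipped
 */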
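/*
 * Pop the next scheduled event, draining the high priority fifo before
 * the low priority one. Returns true when an event was copied into
 * @event, false when both fifos are empty.
 */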
static bool
dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
	dhd_deferred_event_t *event)
{
	int bytes_copied = 0;

	if (!deferred_wq) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return false;
	}

	/* see the element size note in dhd_deferred_schedule_work() */
	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

	/* handle priority work */
	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	/* handle normal work if priority work doesn't have enough data */
	if ((bytes_copied != DEFRD_EVT_SIZE) &&
		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
	}

	return (bytes_copied == DEFRD_EVT_SIZE);
}

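/*
 * Log the contents of a work event; used when an event fails the sanity
 * checks in the work handler below.
 */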
static inline void
dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
{
	if (!work_event) {
		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
		return;
	}

	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
		work_event->event));
	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
		work_event->event_data));
	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
		work_event->event_handler));
}

/*
 * Work handler, invoked when the work is scheduled: drains both fifos
 * (high priority first) and calls each event's handler.
 */
static void
dhd_deferred_work_handler(struct work_struct *work)
{
	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
	dhd_deferred_event_t work_event;

	if (!deferred_work) {
		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
		return;
	}

	do {
		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
			break;
		}

		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
			continue;
		}

		if (work_event.event_handler) {
			work_event.event_handler(deferred_work->dhd_info,
				work_event.event_data, work_event.event);
		} else {
			DHD_ERROR(("%s: event handler is null\n",
				__FUNCTION__));
			dhd_deferred_dump_work_event(&work_event);
			ASSERT(work_event.event_handler != NULL);
		}
	} while (1);
}

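/*
 * dhd_deferred_work_set_skip - mark an event to be skipped (or clear it)
 *
 * While an event's bit is set in event_skip_mask, attempts to schedule
 * that event fail with DHD_WQ_STS_EVENT_SKIPPED.
 */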
void
dhd_deferred_work_set_skip(void *work, u8 event, bool set)
{
	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;

	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
		DHD_ERROR(("%s: invalid argument, event=%d\n",
			__FUNCTION__, event));
		return;
	}

	if (set) {
		/* set */
		deferred_wq->event_skip_mask |= (1 << event);
	} else {
		/* clear */
		deferred_wq->event_skip_mask &= ~(1 << event);
	}
}