#include <wl_android.h>
#ifdef WL_EVENT
#include <bcmendian.h>
#include <dhd_config.h>

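/*
 * Event logging macros, gated at runtime by bits in android_msg_level so
 * error, trace, and debug output can be enabled independently.
 */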
#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printk(KERN_ERR DHD_LOG_PREFIX "[%s] EVENT-ERROR) %s : " arg1, \
				name, __func__, ##args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printk(KERN_INFO DHD_LOG_PREFIX "[%s] EVENT-TRACE) %s : " arg1, \
				name, __func__, ##args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printk(KERN_INFO DHD_LOG_PREFIX "[%s] EVENT-DBG) %s : " arg1, \
				name, __func__, ##args); \
		} \
	} while (0)

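/*
 * list_first_entry()/container_of() can cast away const; when
 * STRICT_GCC_WARNINGS is set on GCC >= 4.6, these wrappers locally
 * suppress -Wcast-qual around the assignment.
 */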
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && \
	(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
	(entry) = list_first_entry((ptr), type, member); \
	_Pragma("GCC diagnostic pop")

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
	_Pragma("GCC diagnostic push") \
	_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
	entry = container_of((ptr), type, member); \
	_Pragma("GCC diagnostic pop")

#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
	(entry) = list_first_entry((ptr), type, member);

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
	entry = container_of((ptr), type, member);

#endif /* STRICT_GCC_WARNINGS */

/* event queue for cfg80211 main event */
struct wl_event_q {
	struct list_head eq_list;
	u32 etype;
	wl_event_msg_t emsg;
	s8 edata[1];
};

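/*
 * Registered handlers form a singly linked list sorted by descending
 * wl_event_prio_t, so higher-priority callbacks run first. A node whose
 * etype is WLC_E_LAST acts as a wildcard and receives every event type.
 */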
typedef void (*EXT_EVENT_HANDLER)(struct net_device *dev, void *cb_argu,
	const wl_event_msg_t *e, void *data);

typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;
	uint32 etype;
	EXT_EVENT_HANDLER cb_func;
	void *cb_argu;
	wl_event_prio_t prio;
} event_handler_list_t;

typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;

typedef struct wl_event_params {
	dhd_pub_t *pub;
	struct net_device *dev[DHD_MAX_IFS];
	struct event_handler_head evt_head;
	struct list_head eq_list;	/* used for event queue */
	spinlock_t eq_lock;	/* for event queue synchronization */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t thr_event_ctl;
#else
	struct workqueue_struct *event_workq;	/* workqueue for event */
	struct work_struct event_work;	/* work item for event */
#endif
	struct mutex event_sync;
} wl_event_params_t;

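/*
 * Locking: eq_lock (a spinlock, taken with irqsave) protects eq_list and
 * may be acquired from atomic context via wl_ext_event_send(); event_sync
 * (a mutex) protects the handler list and is only taken in process context.
 */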
static unsigned long wl_ext_event_lock_eq(struct wl_event_params *event_params)
{
	unsigned long flags;

	spin_lock_irqsave(&event_params->eq_lock, flags);
	return flags;
}

static void wl_ext_event_unlock_eq(struct wl_event_params *event_params,
	unsigned long flags)
{
	spin_unlock_irqrestore(&event_params->eq_lock, flags);
}

static void wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
{
	spin_lock_init(&event_params->eq_lock);
}

static void wl_ext_event_init_eq(struct wl_event_params *event_params)
{
	wl_ext_event_init_eq_lock(event_params);
	INIT_LIST_HEAD(&event_params->eq_list);
}

static void wl_ext_event_flush_eq(struct wl_event_params *event_params)
{
	struct wl_event_q *e;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	while (!list_empty_careful(&event_params->eq_list)) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list,
			struct wl_event_q, eq_list);
		list_del(&e->eq_list);
		kfree(e);
	}
	wl_ext_event_unlock_eq(event_params, flags);
}

/*
 * retrieve first queued event from head
 */

static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params *event_params)
{
	struct wl_event_q *e = NULL;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	if (likely(!list_empty(&event_params->eq_list))) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list,
			struct wl_event_q, eq_list);
		list_del(&e->eq_list);
	}
	wl_ext_event_unlock_eq(event_params, flags);

	return e;
}

/*
 * push event to tail of the queue
 */

static s32 wl_ext_event_enq_event(struct wl_event_params *event_params,
	u32 event, const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	data_len = 0;
	if (data) {
		data_len = ntoh32(msg->datalen);
	}
	evtq_size = sizeof(struct wl_event_q) + data_len;
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data) {
		memcpy(e->edata, data, data_len);
	}
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}

static void wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}

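/*
 * On kernels before 3.0 the handler runs as a dedicated DHD thread
 * (tsk_ctl_t) woken via a semaphore; on newer kernels it runs as a work
 * item on the "ext_eventd" workqueue. WL_EXT_EVENT_HANDLER() expands to
 * whichever signature matches, so a single body serves both builds.
 */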
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
static int wl_ext_event_handler(void *data);
#define WL_EXT_EVENT_HANDLER() static int wl_ext_event_handler(void *data)
#else
static void wl_ext_event_handler(struct work_struct *data);
#define WL_EXT_EVENT_HANDLER() \
	static void wl_ext_event_handler(struct work_struct *data)
#endif

WL_EXT_EVENT_HANDLER()
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	tsk_ctl_t *tsk = (tsk_ctl_t *)data;
	event_params = (struct wl_event_params *)tsk->parent;
#else
	BCM_SET_CONTAINER_OF(event_params, data, struct wl_event_params,
		event_work);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	while (1) {
	if (down_interruptible(&tsk->sema) == 0) {
		SMP_RD_BARRIER_DEPENDS();
		if (tsk->terminated) {
			break;
		}
#endif
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		if (e->emsg.ifidx >= DHD_MAX_IFS) {
			EVENT_ERROR("wlan", "ifidx=%d not in range\n",
				e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n",
				e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n",
				e->etype);
			goto fail;
		}
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		for (; evt_node; evt_node = evt_node->next) {
			if (evt_node->dev == dev &&
			    (evt_node->etype == e->etype ||
			     evt_node->etype == WLC_E_LAST)) {
				evt_node->cb_func(dev, evt_node->cb_argu,
					&e->emsg, e->edata);
			}
		}
		mutex_unlock(&event_params->event_sync);
fail:
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	} else {
		break;
	}
	}
	complete_and_exit(&tsk->completed, 0);
#endif
}

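/*
 * Entry point from the DHD event path: queue a copy of the firmware event
 * (plus any payload) and kick the worker. The enqueue side may run in
 * atomic context, hence the GFP_ATOMIC fallback in wl_ext_event_enq_event().
 */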
void wl_ext_event_send(void *params, const wl_event_msg_t *e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n", event_type,
			bcmevent_get_name(event_type));
		return;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}
#endif

	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
		if (event_params->thr_event_ctl.thr_pid >= 0) {
			up(&event_params->thr_event_ctl.sema);
		}
#else
		queue_work(event_params->event_workq, &event_params->event_work);
#endif
	}
}

static s32 wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;

	EVENT_TRACE("wlan", "Enter\n");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	PROC_START(wl_ext_event_handler, event_params,
		&event_params->thr_event_ctl, 0, "ext_eventd");
	if (event_params->thr_event_ctl.thr_pid < 0) {
		ret = -ENOMEM;
	}
#else
	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq =
			alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
#endif

	return ret;
}

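/* Walk the handler list, free every node, and reset the list head. */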
static void wl_ext_event_free(struct wl_event_params *event_params)
{
	struct event_handler_list *node, *cur, **evt_head;

	evt_head = &event_params->evt_head.evt_head;
	node = *evt_head;

	while (node) {
		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
		cur = node;
		node = cur->next;
		kfree(cur);
	}
	*evt_head = NULL;
}

static void wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0))
	if (event_params->thr_event_ctl.thr_pid >= 0) {
		PROC_STOP(&event_params->thr_event_ctl);
	}
#else
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
#endif
}

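/*
 * Register cb_func for (dev, event), keeping the list sorted by descending
 * priority. A duplicate (dev, event, cb_func) registration is skipped.
 * Registering with event == WLC_E_LAST subscribes the callback to every
 * event delivered on that net_device.
 */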
int wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
	void *cb_func, void *data, wl_event_prio_t prio)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
	int ret = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		while (node) {
			if (node->dev == dev && node->etype == event &&
					node->cb_func == cb_func) {
				EVENT_TRACE(dev->name, "skip event %d\n", event);
				mutex_unlock(&event_params->event_sync);
				return 0;
			}
			node = node->next;
		}
		leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
		if (!leaf) {
			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
				(int)sizeof(event_handler_list_t), event);
			mutex_unlock(&event_params->event_sync);
			return -ENOMEM;
		}
		leaf->next = NULL;
		leaf->dev = dev;
		leaf->etype = event;
		leaf->cb_func = cb_func;
		leaf->cb_argu = data;
		leaf->prio = prio;
		if (*evt_head == NULL) {
			*evt_head = leaf;
		} else {
			node = *evt_head;
			node_prev = NULL;
			while (node) {
				if (node->prio <= prio) {
					/* insert before the first node of equal
					 * or lower priority
					 */
					leaf->next = node;
					if (node_prev) {
						node_prev->next = leaf;
					} else {
						*evt_head = leaf;
					}
					break;
				} else if (node->next == NULL) {
					/* lowest priority so far: append at tail */
					node->next = leaf;
					break;
				}
				node_prev = node;
				node = node->next;
			}
		}
		EVENT_TRACE(dev->name, "event %d registered\n", event);
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
		ret = -ENODEV;
	}

	return ret;
}
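
/*
 * Typical usage, as a sketch only: "my_assoc_cb", "my_ctx", and "my_prio"
 * are hypothetical caller names, not defined in this file.
 *
 *	static void my_assoc_cb(struct net_device *dev, void *cb_argu,
 *		const wl_event_msg_t *e, void *data)
 *	{
 *		struct my_ctx *ctx = cb_argu;
 *		// handle the WLC_E_ASSOC event for this interface
 *	}
 *
 *	// during setup; my_prio is whatever wl_event_prio_t level fits
 *	wl_ext_event_register(dev, dhd, WLC_E_ASSOC, my_assoc_cb, my_ctx,
 *		my_prio);
 *
 *	// during teardown
 *	wl_ext_event_deregister(dev, dhd, WLC_E_ASSOC, my_assoc_cb);
 */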

void wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int head_removed = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		while (node) {
			if (node->dev == dev && node->etype == event &&
					node->cb_func == cb_func) {
				if (node == *evt_head) {
					head_removed = 1;
					*evt_head = node->next;
				} else {
					head_removed = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n",
					event);
				kfree(node);
				/* restart from the new head if we removed it,
				 * otherwise continue from the predecessor
				 */
				if (head_removed == 1) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}

static s32 wl_ext_event_init_priv(struct wl_event_params *event_params)
{
	s32 err = 0;

	mutex_init(&event_params->event_sync);
	wl_ext_event_init_eq(event_params);
	if (wl_ext_event_create_handler(event_params)) {
		return -ENOMEM;
	}

	return err;
}

static void wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}

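/* Bind or clear the per-ifidx net_device slot used to route events. */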
int wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
		event_params->dev[ifidx] = net;
	}

	return 0;
}

int wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	if (event_params && ifidx < DHD_MAX_IFS) {
		EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
		event_params->dev[ifidx] = NULL;
	}

	return 0;
}

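/*
 * Allocate and tear down the per-adapter event machinery: the event queue
 * and its spinlock, the handler-list mutex, and the worker thread or
 * workqueue, all hung off dhdp->event_params.
 */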
s32 wl_ext_event_attach(struct net_device *net)
{
	struct dhd_pub *dhdp = dhd_get_pub(net);
	struct wl_event_params *event_params = NULL;
	s32 err = 0;

	event_params = kmalloc(sizeof(wl_event_params_t), GFP_KERNEL);
	if (!event_params) {
		EVENT_ERROR(net->name, "Failed to allocate memory (%zu)\n",
			sizeof(wl_event_params_t));
		return -ENOMEM;
	}
	dhdp->event_params = event_params;
	memset(event_params, 0, sizeof(wl_event_params_t));
	event_params->pub = dhdp;

	err = wl_ext_event_init_priv(event_params);
	if (err) {
		EVENT_ERROR(net->name, "Failed to wl_ext_event_init_priv (%d)\n",
			err);
		goto ext_attach_out;
	}

	return err;
ext_attach_out:
	wl_ext_event_dettach(dhdp);
	return err;
}

void wl_ext_event_dettach(dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = dhdp->event_params;

	if (event_params) {
		wl_ext_event_deinit_priv(event_params);
		kfree(event_params);
		dhdp->event_params = NULL;
	}
}
#endif /* WL_EVENT */