/* SPDX-License-Identifier: GPL-2.0 */

#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
#include <bcmendian.h>
#include <wl_android.h>
#include <dhd_config.h>

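/*
 * Logging macros gated by the global android_msg_level mask: EVENT_ERROR
 * prints at KERN_ERR, EVENT_TRACE and EVENT_DBG at KERN_INFO, each message
 * tagged with the interface name and calling function.
 */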
#define EVENT_ERROR(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_ERROR_LEVEL) { \
			printk(KERN_ERR DHD_LOG_PREFIX "[%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_TRACE(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_TRACE_LEVEL) { \
			printk(KERN_INFO DHD_LOG_PREFIX "[%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)
#define EVENT_DBG(name, arg1, args...) \
	do { \
		if (android_msg_level & ANDROID_DBG_LEVEL) { \
			printk(KERN_INFO DHD_LOG_PREFIX "[%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
		} \
	} while (0)

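/*
 * On GCC >= 4.6 with strict warnings enabled, list_first_entry() and
 * container_of() trip -Wcast-qual; these wrappers suppress that warning
 * locally around the assignment.
 */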
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
	4 && __GNUC_MINOR__ >= 6))
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = list_first_entry((ptr), type, member); \
_Pragma("GCC diagnostic pop")

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
(entry) = container_of((ptr), type, member); \
_Pragma("GCC diagnostic pop")

#else
#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
(entry) = list_first_entry((ptr), type, member)

#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
(entry) = container_of((ptr), type, member)

#endif /* STRICT_GCC_WARNINGS */

#ifdef DHD_MAX_IFS
#define WL_MAX_IFS DHD_MAX_IFS
#else
#define WL_MAX_IFS 16
#endif

/* event queue entry for a firmware event awaiting dispatch */
struct wl_event_q {
	struct list_head eq_list;
	u32 etype;
	wl_event_msg_t emsg;
	s8 edata[1];	/* variable-length payload, allocated past the struct */
};

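/*
 * Per-(netdev, event-type) callback registrations, kept in a singly linked
 * list ordered by descending prio so higher-priority handlers run first.
 */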
typedef struct event_handler_list {
	struct event_handler_list *next;
	struct net_device *dev;
	uint32 etype;
	EXT_EVENT_HANDLER cb_func;
	void *cb_argu;
	wl_event_prio_t prio;
} event_handler_list_t;

typedef struct event_handler_head {
	event_handler_list_t *evt_head;
} event_handler_head_t;

typedef struct wl_event_params {
	dhd_pub_t *pub;
	struct net_device *dev[WL_MAX_IFS];
	struct event_handler_head evt_head;
	struct list_head eq_list;	/* pending event queue */
	spinlock_t eq_lock;	/* protects eq_list */
	struct workqueue_struct *event_workq;	/* workqueue for event dispatch */
	struct work_struct event_work;	/* work item for event dispatch */
	struct mutex event_sync;	/* protects the handler list */
} wl_event_params_t;

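/* Spinlock helpers for the pending-event list. */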
static unsigned long
wl_ext_event_lock_eq(struct wl_event_params *event_params)
{
	unsigned long flags;

	spin_lock_irqsave(&event_params->eq_lock, flags);
	return flags;
}

static void
wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
{
	spin_unlock_irqrestore(&event_params->eq_lock, flags);
}

static void
wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
{
	spin_lock_init(&event_params->eq_lock);
}

static void
wl_ext_event_init_eq(struct wl_event_params *event_params)
{
	wl_ext_event_init_eq_lock(event_params);
	INIT_LIST_HEAD(&event_params->eq_list);
}

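/* Drop and free any events still queued; used on teardown. */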
static void
wl_ext_event_flush_eq(struct wl_event_params *event_params)
{
	struct wl_event_q *e;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	while (!list_empty_careful(&event_params->eq_list)) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
		kfree(e);
	}
	wl_ext_event_unlock_eq(event_params, flags);
}

/*
 * retrieve first queued event from head
 */
static struct wl_event_q *
wl_ext_event_deq_event(struct wl_event_params *event_params)
{
	struct wl_event_q *e = NULL;
	unsigned long flags;

	flags = wl_ext_event_lock_eq(event_params);
	if (likely(!list_empty(&event_params->eq_list))) {
		BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
		list_del(&e->eq_list);
	}
	wl_ext_event_unlock_eq(event_params, flags);

	return e;
}

/*
 * push event to tail of the queue
 */
static s32
wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
	const wl_event_msg_t *msg, void *data)
{
	struct wl_event_q *e;
	s32 err = 0;
	uint32 evtq_size;
	uint32 data_len;
	unsigned long flags;
	gfp_t aflags;

	data_len = 0;
	if (data)
		data_len = ntoh32(msg->datalen);
	evtq_size = sizeof(struct wl_event_q) + data_len;
	aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
	e = kzalloc(evtq_size, aflags);
	if (unlikely(!e)) {
		EVENT_ERROR("wlan", "event alloc failed\n");
		return -ENOMEM;
	}
	e->etype = event;
	memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
	if (data)
		memcpy(e->edata, data, data_len);
	flags = wl_ext_event_lock_eq(event_params);
	list_add_tail(&e->eq_list, &event_params->eq_list);
	wl_ext_event_unlock_eq(event_params, flags);

	return err;
}

static void
wl_ext_event_put_event(struct wl_event_q *e)
{
	kfree(e);
}

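/*
 * Work item: drain the event queue and hand each event to every registered
 * callback whose netdev and event type match; a registration with etype
 * WLC_E_LAST acts as a wildcard and receives all events.
 */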
static void
wl_ext_event_handler(struct work_struct *work_data)
{
	struct wl_event_params *event_params = NULL;
	struct wl_event_q *e;
	struct net_device *dev = NULL;
	struct event_handler_list *evt_node;
	dhd_pub_t *dhd;
	unsigned long flags = 0;

	BCM_SET_CONTAINER_OF(event_params, work_data, struct wl_event_params, event_work);
	DHD_EVENT_WAKE_LOCK(event_params->pub);
	while ((e = wl_ext_event_deq_event(event_params))) {
		if (e->emsg.ifidx >= WL_MAX_IFS) {	/* dev[] is sized WL_MAX_IFS */
			EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
			goto fail;
		}
		dev = event_params->dev[e->emsg.ifidx];
		if (!dev) {
			EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
			goto fail;
		}
		dhd = dhd_get_pub(dev);
		if (e->etype > WLC_E_LAST) {
			EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
			goto fail;
		}
		DHD_GENERAL_LOCK(dhd, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			EVENT_ERROR(dev->name, "BUS is DOWN.\n");
			DHD_GENERAL_UNLOCK(dhd, flags);
			goto fail;
		}
		DHD_GENERAL_UNLOCK(dhd, flags);
		EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
		mutex_lock(&event_params->event_sync);
		evt_node = event_params->evt_head.evt_head;
		while (evt_node) {
			if (evt_node->dev == dev &&
					(evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
				evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
			evt_node = evt_node->next;
		}
		mutex_unlock(&event_params->event_sync);
fail:
		wl_ext_event_put_event(e);
	}
	DHD_EVENT_WAKE_UNLOCK(event_params->pub);
}

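/*
 * Entry point from the DHD event path: queue the firmware event and kick
 * the worker. Enqueueing falls back to GFP_ATOMIC when called from atomic
 * context, so this may be invoked from the receive path.
 */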
void
wl_ext_event_send(void *params, const wl_event_msg_t *e, void *data)
{
	struct wl_event_params *event_params = params;
	u32 event_type = ntoh32(e->event_type);

	if (event_params == NULL) {
		EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

	if (event_params->event_workq == NULL) {
		EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
			event_type, bcmevent_get_name(event_type));
		return;
	}

	if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
		queue_work(event_params->event_workq, &event_params->event_work);
	}
}

static s32
wl_ext_event_create_handler(struct wl_event_params *event_params)
{
	int ret = 0;
	EVENT_TRACE("wlan", "Enter\n");

	/* Allocate workqueue for event */
	if (!event_params->event_workq) {
		event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	}

	if (!event_params->event_workq) {
		EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
		ret = -ENOMEM;
	} else {
		INIT_WORK(&event_params->event_work, wl_ext_event_handler);
	}
	return ret;
}

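/* Release every registered handler node (teardown path). */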
static void
wl_ext_event_free(struct wl_event_params *event_params)
{
	struct event_handler_list *node, *cur, **evt_head;

	evt_head = &event_params->evt_head.evt_head;
	node = *evt_head;

	while (node) {
		EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
		cur = node;
		node = cur->next;
		kfree(cur);
	}
	*evt_head = NULL;
}

static void
wl_ext_event_destroy_handler(struct wl_event_params *event_params)
{
	if (event_params && event_params->event_workq) {
		cancel_work_sync(&event_params->event_work);
		destroy_workqueue(event_params->event_workq);
		event_params->event_workq = NULL;
	}
}

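/*
 * Register cb_func for an event type on dev. The list stays sorted by
 * descending prio so higher-priority handlers are called first; a duplicate
 * (dev, event, cb_func) registration is silently skipped. Registering with
 * event == WLC_E_LAST subscribes the callback to all event types.
 */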
int
wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
	EXT_EVENT_HANDLER cb_func, void *data, wl_event_prio_t prio)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *leaf, *node_prev, **evt_head;
	int ret = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				EVENT_TRACE(dev->name, "skip event %d\n", event);
				mutex_unlock(&event_params->event_sync);
				return 0;
			}
			node = node->next;
		}
		leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
		if (!leaf) {
			EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
				(int)sizeof(event_handler_list_t), event);
			mutex_unlock(&event_params->event_sync);
			return -ENOMEM;
		}
		leaf->next = NULL;
		leaf->dev = dev;
		leaf->etype = event;
		leaf->cb_func = cb_func;
		leaf->cb_argu = data;
		leaf->prio = prio;
		if (*evt_head == NULL) {
			*evt_head = leaf;
		} else {
			/* insert before the first node of equal or lower priority */
			node = *evt_head;
			node_prev = NULL;
			while (node) {
				if (node->prio <= prio) {
					leaf->next = node;
					if (node_prev)
						node_prev->next = leaf;
					else
						*evt_head = leaf;
					break;
				} else if (node->next == NULL) {
					node->next = leaf;
					break;
				}
				node_prev = node;
				node = node->next;
			}
		}
		EVENT_TRACE(dev->name, "event %d registered\n", event);
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
		ret = -ENODEV;
	}

	return ret;
}

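/*
 * Usage sketch: the event ID, priority, and callback names below are
 * illustrative (taken from the surrounding driver headers), not the only
 * valid values.
 *
 *	wl_ext_event_register(dev, dhd, WLC_E_DISASSOC_IND, my_cb, my_ctx,
 *		PRIO_EVENT_IAPSTA);
 *	...
 *	wl_ext_event_deregister(dev, dhd, WLC_E_DISASSOC_IND, my_cb);
 */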
void
wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
	uint32 event, void *cb_func)
{
	struct wl_event_params *event_params = dhd->event_params;
	struct event_handler_list *node, *prev, **evt_head;
	int head_removed = 0;

	if (event_params) {
		mutex_lock(&event_params->event_sync);
		evt_head = &event_params->evt_head.evt_head;
		node = *evt_head;
		prev = node;
		while (node) {
			if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
				if (node == *evt_head) {
					head_removed = 1;
					*evt_head = node->next;
				} else {
					head_removed = 0;
					prev->next = node->next;
				}
				EVENT_TRACE(dev->name, "event %d deregistered\n", event);
				kfree(node);
				/* resume from the (possibly new) head or from prev */
				if (head_removed) {
					node = *evt_head;
					prev = node;
				} else {
					node = prev->next;
				}
				continue;
			}
			prev = node;
			node = node->next;
		}
		mutex_unlock(&event_params->event_sync);
	} else {
		EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
	}
}

static s32
wl_ext_event_init_priv(struct wl_event_params *event_params)
{
	s32 err = 0;

	mutex_init(&event_params->event_sync);
	wl_ext_event_init_eq(event_params);
	if (wl_ext_event_create_handler(event_params))
		return -ENOMEM;

	return err;
}

static void
wl_ext_event_deinit_priv(struct wl_event_params *event_params)
{
	wl_ext_event_destroy_handler(event_params);
	wl_ext_event_flush_eq(event_params);
	wl_ext_event_free(event_params);
}

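/*
 * Record (or clear) the netdev for a firmware interface index so queued
 * events can be routed to the right device.
 */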
int
wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
	if (event_params && ifidx < WL_MAX_IFS) {
		event_params->dev[ifidx] = net;
	}

	return 0;
}

int
wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
{
	struct dhd_pub *dhd = dhd_get_pub(net);
	struct wl_event_params *event_params = dhd->event_params;

	EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
	if (event_params && ifidx < WL_MAX_IFS) {
		event_params->dev[ifidx] = NULL;
	}

	return 0;
}

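/*
 * Allocate and initialize the event machinery for this dhd instance;
 * paired with wl_ext_event_dettach() on teardown.
 */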
s32
wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = NULL;
	s32 err = 0;

	event_params = kzalloc(sizeof(wl_event_params_t), GFP_KERNEL);
	if (!event_params) {
		EVENT_ERROR(dev->name, "Failed to allocate memory (%zu)\n",
			sizeof(wl_event_params_t));
		return -ENOMEM;
	}
	dhdp->event_params = event_params;
	event_params->pub = dhdp;

	err = wl_ext_event_init_priv(event_params);
	if (err) {
		EVENT_ERROR(dev->name, "Failed to wl_ext_event_init_priv (%d)\n", err);
		goto ext_attach_out;
	}

	return err;
ext_attach_out:
	wl_ext_event_dettach(dhdp);
	return err;
}

void
wl_ext_event_dettach(dhd_pub_t *dhdp)
{
	struct wl_event_params *event_params = dhdp->event_params;

	if (event_params) {
		wl_ext_event_deinit_priv(event_params);
		kfree(event_params);
		dhdp->event_params = NULL;
	}
}
#endif /* defined(WL_EXT_IAPSTA) || defined(USE_IW) */