/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef __MT76_UTIL_H
#define __MT76_UTIL_H

#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <net/mac80211.h>

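/*
 * Simple kthread-backed worker: a dedicated task that invokes fn() whenever
 * the worker has been scheduled.  The state bits below track whether a run
 * has been requested (SCHEDULED) and whether fn() is currently executing
 * (RUNNING), so redundant wakeups can be avoided.
 */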
struct mt76_worker
{
	struct task_struct *task;
	void (*fn)(struct mt76_worker *);
	unsigned long state;
};

enum {
	MT76_WORKER_SCHEDULED,
	MT76_WORKER_RUNNING,
};

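/* Increment a ring/queue index and wrap it around at _size. */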
#define MT76_INCR(_var, _size) \
	(_var = (((_var) + 1) % (_size)))

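/*
 * WCID (wireless client ID) allocation bitmap helpers: one bit per station
 * index, packed 32 entries per u32 word.  mt76_wcid_alloc() claims a free
 * index; the inline helpers test/set/clear a single bit.
 */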
int mt76_wcid_alloc(u32 *mask, int size);

static inline bool
mt76_wcid_mask_test(u32 *mask, int idx)
{
	return mask[idx / 32] & BIT(idx % 32);
}

static inline void
mt76_wcid_mask_set(u32 *mask, int idx)
{
	mask[idx / 32] |= BIT(idx % 32);
}

static inline void
mt76_wcid_mask_clear(u32 *mask, int idx)
{
	mask[idx / 32] &= ~BIT(idx % 32);
}

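/*
 * Set or clear the "More Data" flag in the 802.11 frame control field,
 * e.g. to tell a powersave station whether further buffered frames follow.
 */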
static inline void
mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (enable)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	else
		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}

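/* Thread main loop for the worker, passed to kthread_create() below. */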
int __mt76_worker_fn(void *ptr);

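/*
 * Create and start the worker kthread, named "mt76-<name> <wiphy>".
 * Returns 0 on success or the kthread_create() error; on error w->task
 * is left NULL so the other helpers become no-ops.
 */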
static inline int
mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
		  void (*fn)(struct mt76_worker *),
		  const char *name)
{
	const char *dev_name = wiphy_name(hw->wiphy);
	int ret;

	if (fn)
		w->fn = fn;
	w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
				 name, dev_name);

	ret = PTR_ERR_OR_ZERO(w->task);
	if (ret) {
		w->task = NULL;
		return ret;
	}

	wake_up_process(w->task);

	return 0;
}

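/*
 * Request a run of w->fn.  The SCHEDULED bit is always set; the wakeup is
 * skipped if a run was already pending or the thread is currently executing
 * fn(), in which case it picks up the new request itself.
 */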
static inline void mt76_worker_schedule(struct mt76_worker *w)
{
	if (!w->task)
		return;

	if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
	    !test_bit(MT76_WORKER_RUNNING, &w->state))
		wake_up_process(w->task);
}

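/* Park the worker thread and drop any pending scheduling state. */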
static inline void mt76_worker_disable(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_park(w->task);
	WRITE_ONCE(w->state, 0);
}

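/*
 * Unpark the worker and schedule a run, so work requested while it was
 * disabled gets processed.
 */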
static inline void mt76_worker_enable(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_unpark(w->task);
	mt76_worker_schedule(w);
}

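/* Stop the worker thread; w->task is cleared so later calls become no-ops. */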
static inline void mt76_worker_teardown(struct mt76_worker *w)
{
	if (!w->task)
		return;

	kthread_stop(w->task);
	w->task = NULL;
}

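/*
 * Typical lifecycle (a sketch; the callback and variable names here are
 * illustrative only, not defined by this header):
 *
 *	static void example_work(struct mt76_worker *w) { ... }
 *
 *	err = mt76_worker_setup(hw, &worker, example_work, "example");
 *	...
 *	mt76_worker_schedule(&worker);
 *	...
 *	mt76_worker_teardown(&worker);
 */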
#endif