// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "peer.h"
#include "debug.h"

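/* The ath12k_peer_find*() lookup helpers below walk ab->peers and expect
 * the caller to hold ab->base_lock (enforced via lockdep_assert_held()).
 */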
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
				     const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
							 u8 pdev_idx, const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->pdev_idx != pdev_idx)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
					     const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
					   int peer_id)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list)
		if (peer_id == peer->peer_id)
			return peer;

	return NULL;
}

bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (vdev_id == peer->vdev_id) {
			spin_unlock_bh(&ab->base_lock);
			return true;
		}
	}
	spin_unlock_bh(&ab->base_lock);
	return false;
}

struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
					    int ast_hash)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list)
		if (ast_hash == peer->ast_hash)
			return peer;

	return NULL;
}

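/* Handle an HTT peer unmap event: remove the matching entry from
 * ab->peers, free it and wake up anyone waiting on peer_mapping_wq.
 */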
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	list_del(&peer->list);
	kfree(peer);
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&ab->base_lock);
}

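/* Handle an HTT peer map event: if no entry exists yet for this
 * vdev_id/MAC pair, allocate one (GFP_ATOMIC, under base_lock), record
 * the ids reported by firmware and wake up waiters on peer_mapping_wq.
 */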
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}

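/* Wait up to three seconds for the peer to appear in (expect_mapped ==
 * true) or disappear from (expect_mapped == false) ab->peers, bailing
 * out early if a crash flush is in progress.
 */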
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath12k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

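/* Remove and free any peer entries still associated with the given vdev;
 * anything left here is stale, hence the warning.
 */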
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_peer *peer, *tmp;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
}

static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}

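/* Wait for the peer entry to be unmapped from ab->peers and then for the
 * peer delete response (ar->peer_delete_done) before returning.
 */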
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		ath12k_warn(ar->ab, "failed wait for peer deleted");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}

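/* Send a WMI peer delete command and wait until the deletion has
 * completed before dropping the local peer count.
 */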
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->peer_delete_done);

	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}

static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}

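/* Create a peer in firmware via WMI and wait for the corresponding peer
 * map event. If the peer never shows up in ab->peers, roll back by
 * deleting it again; otherwise finish initializing the local entry.
 */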
int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
		       struct ieee80211_sta *sta,
		       struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath12k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    arg->vdev_id, ret);
		return ret;
	}

	ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
					   arg->peer_addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->ab->base_lock);

	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    arg->peer_addr, arg->vdev_id);

		reinit_completion(&ar->peer_delete_done);

		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
						      arg->vdev_id);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
				    arg->vdev_id, arg->peer_addr);
			return ret;
		}

		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
						       arg->peer_addr);
		if (ret)
			return ret;

		return -ENOENT;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (arvif->vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}