1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2019 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/bitops.h>
21 #include <net/mac80211.h>
22 #include <net/ieee80211_radiotap.h>
23 #include <asm/unaligned.h>
24
25 #include "ieee80211_i.h"
26 #include "driver-ops.h"
27 #include "led.h"
28 #include "mesh.h"
29 #include "wep.h"
30 #include "wpa.h"
31 #include "tkip.h"
32 #include "wme.h"
33 #include "rate.h"
34
35 #ifdef CONFIG_DRIVERS_HDF_XR829
36 extern void wal_netif_receive_skb(struct sk_buff *skb);
37 #endif
38
39 static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
40 {
41 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
42
43 u64_stats_update_begin(&tstats->syncp);
44 tstats->rx_packets++;
45 tstats->rx_bytes += len;
46 u64_stats_update_end(&tstats->syncp);
47 }
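/*
 * Note: this helper assumes dev->tstats points at per-CPU
 * struct pcpu_sw_netstats counters; in mac80211 these are normally
 * allocated with netdev_alloc_pcpu_stats() when the netdev is created.
 */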
48
49 static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
50 enum nl80211_iftype type)
51 {
52 __le16 fc = hdr->frame_control;
53
54 if (ieee80211_is_data(fc)) {
55 if (len < 24) /* drop incorrect hdr len (data) */
56 return NULL;
57
58 if (ieee80211_has_a4(fc))
59 return NULL;
60 if (ieee80211_has_tods(fc))
61 return hdr->addr1;
62 if (ieee80211_has_fromds(fc))
63 return hdr->addr2;
64
65 return hdr->addr3;
66 }
67
68 if (ieee80211_is_mgmt(fc)) {
69 if (len < 24) /* drop incorrect hdr len (mgmt) */
70 return NULL;
71 return hdr->addr3;
72 }
73
74 if (ieee80211_is_ctl(fc)) {
75 if (ieee80211_is_pspoll(fc))
76 return hdr->addr1;
77
78 if (ieee80211_is_back_req(fc)) {
79 switch (type) {
80 case NL80211_IFTYPE_STATION:
81 return hdr->addr2;
82 case NL80211_IFTYPE_AP:
83 case NL80211_IFTYPE_AP_VLAN:
84 return hdr->addr1;
85 default:
86 break; /* fall through to the return */
87 }
88 }
89 }
90
91 return NULL;
92 }
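/*
 * Worked example for ieee80211_get_bssid() above: for a ToDS data frame
 * sent by a client towards its AP, addr1 (the RA) is the BSSID, so the
 * data-frame branch returns hdr->addr1; for a FromDS frame from the AP,
 * addr2 (the TA) carries the BSSID instead.
 */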
93
94 /*
95 * monitor mode reception
96 *
97 * This function cleans up the SKB, i.e. it removes all the stuff
98 * only useful for monitoring.
99 */
100 static void remove_monitor_info(struct sk_buff *skb,
101 unsigned int present_fcs_len,
102 unsigned int rtap_space)
103 {
104 if (present_fcs_len)
105 __pskb_trim(skb, skb->len - present_fcs_len);
106 __pskb_pull(skb, rtap_space);
107 }
108
109 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
110 unsigned int rtap_space)
111 {
112 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
113 struct ieee80211_hdr *hdr;
114
115 hdr = (void *)(skb->data + rtap_space);
116
117 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
118 RX_FLAG_FAILED_PLCP_CRC |
119 RX_FLAG_ONLY_MONITOR |
120 RX_FLAG_NO_PSDU))
121 return true;
122
123 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
124 return true;
125
126 if (ieee80211_is_ctl(hdr->frame_control) &&
127 !ieee80211_is_pspoll(hdr->frame_control) &&
128 !ieee80211_is_back_req(hdr->frame_control))
129 return true;
130
131 return false;
132 }
133
134 static int
135 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
136 struct ieee80211_rx_status *status,
137 struct sk_buff *skb)
138 {
139 int len;
140
141 /* always present fields */
142 len = sizeof(struct ieee80211_radiotap_header) + 8;
143
144 /* allocate extra bitmaps */
145 if (status->chains)
146 len += 4 * hweight8(status->chains);
147 /* vendor presence bitmap */
148 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
149 len += 4;
150
151 if (ieee80211_have_rx_timestamp(status)) {
152 len = ALIGN(len, 8);
153 len += 8;
154 }
155 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
156 len += 1;
157
158 /* antenna field, if we don't have per-chain info */
159 if (!status->chains)
160 len += 1;
161
162 /* padding for RX_FLAGS if necessary */
163 len = ALIGN(len, 2);
164
165 if (status->encoding == RX_ENC_HT) /* HT info */
166 len += 3;
167
168 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
169 len = ALIGN(len, 4);
170 len += 8;
171 }
172
173 if (status->encoding == RX_ENC_VHT) {
174 len = ALIGN(len, 2);
175 len += 12;
176 }
177
178 if (local->hw.radiotap_timestamp.units_pos >= 0) {
179 len = ALIGN(len, 8);
180 len += 12;
181 }
182
183 if (status->encoding == RX_ENC_HE &&
184 status->flag & RX_FLAG_RADIOTAP_HE) {
185 len = ALIGN(len, 2);
186 len += 12;
187 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
188 }
189
190 if (status->encoding == RX_ENC_HE &&
191 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
192 len = ALIGN(len, 2);
193 len += 12;
194 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
195 }
196
197 if (status->flag & RX_FLAG_NO_PSDU)
198 len += 1;
199
200 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
201 len = ALIGN(len, 2);
202 len += 4;
203 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
204 }
205
206 if (status->chains) {
207 /* antenna and antenna signal fields */
208 len += 2 * hweight8(status->chains);
209 }
210
211 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
212 struct ieee80211_vendor_radiotap *rtap;
213 int vendor_data_offset = 0;
214
215 /*
216 * The position to look at depends on the existence (or non-
217 * existence) of other elements, so take that into account...
218 */
219 if (status->flag & RX_FLAG_RADIOTAP_HE)
220 vendor_data_offset +=
221 sizeof(struct ieee80211_radiotap_he);
222 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
223 vendor_data_offset +=
224 sizeof(struct ieee80211_radiotap_he_mu);
225 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
226 vendor_data_offset +=
227 sizeof(struct ieee80211_radiotap_lsig);
228
229 rtap = (void *)&skb->data[vendor_data_offset];
230
231 /* alignment for fixed 6-byte vendor data header */
232 len = ALIGN(len, 2);
233 /* vendor data header */
234 len += 6;
235 if (WARN_ON(rtap->align == 0))
236 rtap->align = 1;
237 len = ALIGN(len, rtap->align);
238 len += rtap->len + rtap->pad;
239 }
240
241 return len;
242 }
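/*
 * Rough worked example for the length computation above (a sketch, not an
 * exhaustive case list): a legacy-rate frame with an RX timestamp, a dBm
 * signal value and no per-chain info needs 8 (radiotap header) + 8
 * (flags/rate/channel/RX flags) + 8 (TSFT, already 8-byte aligned here)
 * + 1 (dBm antenna signal) + 1 (antenna) = 26 bytes.
 */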
243
244 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
245 struct sk_buff *skb,
246 int rtap_space)
247 {
248 struct {
249 struct ieee80211_hdr_3addr hdr;
250 u8 category;
251 u8 action_code;
252 } __packed __aligned(2) action;
253
254 if (!sdata)
255 return;
256
257 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
258
259 if (skb->len < rtap_space + sizeof(action) +
260 VHT_MUMIMO_GROUPS_DATA_LEN)
261 return;
262
263 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
264 return;
265
266 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
267
268 if (!ieee80211_is_action(action.hdr.frame_control))
269 return;
270
271 if (action.category != WLAN_CATEGORY_VHT)
272 return;
273
274 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
275 return;
276
277 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
278 return;
279
280 skb = skb_copy(skb, GFP_ATOMIC);
281 if (!skb)
282 return;
283
284 skb_queue_tail(&sdata->skb_queue, skb);
285 mac80211_queue_work(&sdata->local->hw, &sdata->work);
286 }
287
288 /*
289 * ieee80211_add_rx_radiotap_header - add radiotap header
290 *
291 * add a radiotap header containing all the fields which the hardware provided.
292 */
293 static void
294 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
295 struct sk_buff *skb,
296 struct ieee80211_rate *rate,
297 int rtap_len, bool has_fcs)
298 {
299 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
300 struct ieee80211_radiotap_header *rthdr;
301 unsigned char *pos;
302 __le32 *it_present;
303 u32 it_present_val;
304 u16 rx_flags = 0;
305 u16 channel_flags = 0;
306 int mpdulen, chain;
307 unsigned long chains = status->chains;
308 struct ieee80211_vendor_radiotap rtap = {};
309 struct ieee80211_radiotap_he he = {};
310 struct ieee80211_radiotap_he_mu he_mu = {};
311 struct ieee80211_radiotap_lsig lsig = {};
312
313 if (status->flag & RX_FLAG_RADIOTAP_HE) {
314 he = *(struct ieee80211_radiotap_he *)skb->data;
315 skb_pull(skb, sizeof(he));
316 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
317 }
318
319 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
320 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
321 skb_pull(skb, sizeof(he_mu));
322 }
323
324 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
325 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
326 skb_pull(skb, sizeof(lsig));
327 }
328
329 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
330 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
331 /* rtap.len and rtap.pad are undone immediately */
332 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
333 }
334
335 mpdulen = skb->len;
336 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
337 mpdulen += FCS_LEN;
338
339 rthdr = skb_push(skb, rtap_len);
340 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
341 it_present = &rthdr->it_present;
342
343 /* radiotap header, set always present flags */
344 rthdr->it_len = cpu_to_le16(rtap_len);
345 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
346 BIT(IEEE80211_RADIOTAP_CHANNEL) |
347 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
348
349 if (!status->chains)
350 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
351
352 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
353 it_present_val |=
354 BIT(IEEE80211_RADIOTAP_EXT) |
355 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
356 put_unaligned_le32(it_present_val, it_present);
357 it_present++;
358 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
359 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
360 }
361
362 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
363 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
364 BIT(IEEE80211_RADIOTAP_EXT);
365 put_unaligned_le32(it_present_val, it_present);
366 it_present++;
367 it_present_val = rtap.present;
368 }
369
370 put_unaligned_le32(it_present_val, it_present);
371
372 pos = (void *)(it_present + 1);
373
374 /* the order of the following fields is important */
375
376 /* IEEE80211_RADIOTAP_TSFT */
377 if (ieee80211_have_rx_timestamp(status)) {
378 /* padding */
379 while ((pos - (u8 *)rthdr) & 7)
380 *pos++ = 0;
381 put_unaligned_le64(
382 ieee80211_calculate_rx_timestamp(local, status,
383 mpdulen, 0),
384 pos);
385 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
386 pos += 8;
387 }
388
389 /* IEEE80211_RADIOTAP_FLAGS */
390 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
391 *pos |= IEEE80211_RADIOTAP_F_FCS;
392 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
393 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
394 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
395 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
396 pos++;
397
398 /* IEEE80211_RADIOTAP_RATE */
399 if (!rate || status->encoding != RX_ENC_LEGACY) {
400 /*
401 * Without rate information don't add it. If we have,
402 * MCS information is a separate field in radiotap,
403 * added below. The byte here is needed as padding
404 * for the channel though, so initialise it to 0.
405 */
406 *pos = 0;
407 } else {
408 int shift = 0;
409 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
410 if (status->bw == RATE_INFO_BW_10)
411 shift = 1;
412 else if (status->bw == RATE_INFO_BW_5)
413 shift = 2;
414 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
415 }
416 pos++;
417
418 /* IEEE80211_RADIOTAP_CHANNEL */
419 put_unaligned_le16(status->freq, pos);
420 pos += 2;
421 if (status->bw == RATE_INFO_BW_10)
422 channel_flags |= IEEE80211_CHAN_HALF;
423 else if (status->bw == RATE_INFO_BW_5)
424 channel_flags |= IEEE80211_CHAN_QUARTER;
425
426 if (status->band == NL80211_BAND_5GHZ)
427 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
428 else if (status->encoding != RX_ENC_LEGACY)
429 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
430 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
431 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
432 else if (rate)
433 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
434 else
435 channel_flags |= IEEE80211_CHAN_2GHZ;
436 put_unaligned_le16(channel_flags, pos);
437 pos += 2;
438
439 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
440 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
441 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
442 *pos = status->signal;
443 rthdr->it_present |=
444 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
445 pos++;
446 }
447
448 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
449
450 if (!status->chains) {
451 /* IEEE80211_RADIOTAP_ANTENNA */
452 *pos = status->antenna;
453 pos++;
454 }
455
456 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
457
458 /* IEEE80211_RADIOTAP_RX_FLAGS */
459 /* ensure 2 byte alignment for the 2 byte field as required */
460 if ((pos - (u8 *)rthdr) & 1)
461 *pos++ = 0;
462 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
463 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
464 put_unaligned_le16(rx_flags, pos);
465 pos += 2;
466
467 if (status->encoding == RX_ENC_HT) {
468 unsigned int stbc;
469
470 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
471 *pos++ = local->hw.radiotap_mcs_details;
472 *pos = 0;
473 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
474 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
475 if (status->bw == RATE_INFO_BW_40)
476 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
477 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
478 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
479 if (status->enc_flags & RX_ENC_FLAG_LDPC)
480 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
481 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
482 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
483 pos++;
484 *pos++ = status->rate_idx;
485 }
486
487 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
488 u16 flags = 0;
489
490 /* ensure 4 byte alignment */
491 while ((pos - (u8 *)rthdr) & 3)
492 pos++;
493 rthdr->it_present |=
494 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
495 put_unaligned_le32(status->ampdu_reference, pos);
496 pos += 4;
497 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
498 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
499 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
500 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
501 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
502 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
503 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
504 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
505 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
506 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
507 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
508 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
509 put_unaligned_le16(flags, pos);
510 pos += 2;
511 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
512 *pos++ = status->ampdu_delimiter_crc;
513 else
514 *pos++ = 0;
515 *pos++ = 0;
516 }
517
518 if (status->encoding == RX_ENC_VHT) {
519 u16 known = local->hw.radiotap_vht_details;
520
521 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
522 put_unaligned_le16(known, pos);
523 pos += 2;
524 /* flags */
525 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
526 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
527 /* in VHT, STBC is binary */
528 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
529 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
530 if (status->enc_flags & RX_ENC_FLAG_BF)
531 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
532 pos++;
533 /* bandwidth */
534 switch (status->bw) {
535 case RATE_INFO_BW_80:
536 *pos++ = 4;
537 break;
538 case RATE_INFO_BW_160:
539 *pos++ = 11;
540 break;
541 case RATE_INFO_BW_40:
542 *pos++ = 1;
543 break;
544 default:
545 *pos++ = 0;
546 }
547 /* MCS/NSS */
548 *pos = (status->rate_idx << 4) | status->nss;
549 pos += 4;
550 /* coding field */
551 if (status->enc_flags & RX_ENC_FLAG_LDPC)
552 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
553 pos++;
554 /* group ID */
555 pos++;
556 /* partial_aid */
557 pos += 2;
558 }
559
560 if (local->hw.radiotap_timestamp.units_pos >= 0) {
561 u16 accuracy = 0;
562 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
563
564 rthdr->it_present |=
565 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
566
567 /* ensure 8 byte alignment */
568 while ((pos - (u8 *)rthdr) & 7)
569 pos++;
570
571 put_unaligned_le64(status->device_timestamp, pos);
572 pos += sizeof(u64);
573
574 if (local->hw.radiotap_timestamp.accuracy >= 0) {
575 accuracy = local->hw.radiotap_timestamp.accuracy;
576 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
577 }
578 put_unaligned_le16(accuracy, pos);
579 pos += sizeof(u16);
580
581 *pos++ = local->hw.radiotap_timestamp.units_pos;
582 *pos++ = flags;
583 }
584
585 if (status->encoding == RX_ENC_HE &&
586 status->flag & RX_FLAG_RADIOTAP_HE) {
587 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
588
589 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
590 he.data6 |= HE_PREP(DATA6_NSTS,
591 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
592 status->enc_flags));
593 he.data3 |= HE_PREP(DATA3_STBC, 1);
594 } else {
595 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
596 }
597
598 #define CHECK_GI(s) \
599 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
600 (int)NL80211_RATE_INFO_HE_GI_##s)
601
602 CHECK_GI(0_8);
603 CHECK_GI(1_6);
604 CHECK_GI(3_2);
605
606 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
607 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
608 he.data3 |= HE_PREP(DATA3_CODING,
609 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
610
611 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
612
613 switch (status->bw) {
614 case RATE_INFO_BW_20:
615 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
616 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
617 break;
618 case RATE_INFO_BW_40:
619 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
620 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
621 break;
622 case RATE_INFO_BW_80:
623 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
624 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
625 break;
626 case RATE_INFO_BW_160:
627 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
628 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
629 break;
630 case RATE_INFO_BW_HE_RU:
631 #define CHECK_RU_ALLOC(s) \
632 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
633 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
634
635 CHECK_RU_ALLOC(26);
636 CHECK_RU_ALLOC(52);
637 CHECK_RU_ALLOC(106);
638 CHECK_RU_ALLOC(242);
639 CHECK_RU_ALLOC(484);
640 CHECK_RU_ALLOC(996);
641 CHECK_RU_ALLOC(2x996);
642
643 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
644 status->he_ru + 4);
645 break;
646 default:
647 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
648 }
649
650 /* ensure 2 byte alignment */
651 while ((pos - (u8 *)rthdr) & 1)
652 pos++;
653 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
654 memcpy(pos, &he, sizeof(he));
655 pos += sizeof(he);
656 }
657
658 if (status->encoding == RX_ENC_HE &&
659 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
660 /* ensure 2 byte alignment */
661 while ((pos - (u8 *)rthdr) & 1)
662 pos++;
663 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
664 memcpy(pos, &he_mu, sizeof(he_mu));
665 pos += sizeof(he_mu);
666 }
667
668 if (status->flag & RX_FLAG_NO_PSDU) {
669 rthdr->it_present |=
670 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
671 *pos++ = status->zero_length_psdu_type;
672 }
673
674 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
675 /* ensure 2 byte alignment */
676 while ((pos - (u8 *)rthdr) & 1)
677 pos++;
678 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
679 memcpy(pos, &lsig, sizeof(lsig));
680 pos += sizeof(lsig);
681 }
682
683 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
684 *pos++ = status->chain_signal[chain];
685 *pos++ = chain;
686 }
687
688 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
689 /* ensure 2 byte alignment for the vendor field as required */
690 if ((pos - (u8 *)rthdr) & 1)
691 *pos++ = 0;
692 *pos++ = rtap.oui[0];
693 *pos++ = rtap.oui[1];
694 *pos++ = rtap.oui[2];
695 *pos++ = rtap.subns;
696 put_unaligned_le16(rtap.len, pos);
697 pos += 2;
698 /* align the actual payload as requested */
699 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
700 *pos++ = 0;
701 /* data (and possible padding) already follows */
702 }
703 }
704
705 static struct sk_buff *
706 ieee80211_make_monitor_skb(struct ieee80211_local *local,
707 struct sk_buff **origskb,
708 struct ieee80211_rate *rate,
709 int rtap_space, bool use_origskb)
710 {
711 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
712 int rt_hdrlen, needed_headroom;
713 struct sk_buff *skb;
714
715 /* room for the radiotap header based on driver features */
716 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
717 needed_headroom = rt_hdrlen - rtap_space;
718
719 if (use_origskb) {
720 /* only need to expand headroom if necessary */
721 skb = *origskb;
722 *origskb = NULL;
723
724 /*
725 * This shouldn't trigger often because most devices have an
726 * RX header they pull before we get here, and that should
727 * be big enough for our radiotap information. We should
728 * probably export the length to drivers so that we can have
729 * them allocate enough headroom to start with.
730 */
731 if (skb_headroom(skb) < needed_headroom &&
732 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
733 dev_kfree_skb(skb);
734 return NULL;
735 }
736 } else {
737 /*
738 * Need to make a copy and possibly remove radiotap header
739 * and FCS from the original.
740 */
741 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
742
743 if (!skb)
744 return NULL;
745 }
746
747 /* prepend radiotap information */
748 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
749
750 skb_reset_mac_header(skb);
751 skb->ip_summed = CHECKSUM_UNNECESSARY;
752 skb->pkt_type = PACKET_OTHERHOST;
753 skb->protocol = htons(ETH_P_802_2);
754
755 return skb;
756 }
757
758 /*
759 * This function copies a received frame to all monitor interfaces and
760 * returns a cleaned-up SKB that no longer includes the FCS nor the
761 * radiotap header the driver might have added.
762 */
763 static struct sk_buff *
764 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
765 struct ieee80211_rate *rate)
766 {
767 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
768 struct ieee80211_sub_if_data *sdata;
769 struct sk_buff *monskb = NULL;
770 int present_fcs_len = 0;
771 unsigned int rtap_space = 0;
772 struct ieee80211_sub_if_data *monitor_sdata =
773 rcu_dereference(local->monitor_sdata);
774 bool only_monitor = false;
775 unsigned int min_head_len;
776
777 if (status->flag & RX_FLAG_RADIOTAP_HE)
778 rtap_space += sizeof(struct ieee80211_radiotap_he);
779
780 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
781 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
782
783 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
784 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
785
786 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
787 struct ieee80211_vendor_radiotap *rtap =
788 (void *)(origskb->data + rtap_space);
789
790 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
791 }
792
793 min_head_len = rtap_space;
794
795 /*
796 * First, we may need to make a copy of the skb because
797 * (1) we need to modify it for radiotap (if not present), and
798 * (2) the other RX handlers will modify the skb we got.
799 *
800 * We don't need to, of course, if we aren't going to return
801 * the SKB because it has a bad FCS/PLCP checksum.
802 */
803
804 if (!(status->flag & RX_FLAG_NO_PSDU)) {
805 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
806 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
807 /* driver bug */
808 WARN_ON(1);
809 dev_kfree_skb(origskb);
810 return NULL;
811 }
812 present_fcs_len = FCS_LEN;
813 }
814
815 /* also consider the hdr->frame_control */
816 min_head_len += 2;
817 }
818
819 /* ensure that the expected data elements are in skb head */
820 if (!pskb_may_pull(origskb, min_head_len)) {
821 dev_kfree_skb(origskb);
822 return NULL;
823 }
824
825 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
826
827 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
828 if (only_monitor) {
829 dev_kfree_skb(origskb);
830 return NULL;
831 }
832
833 remove_monitor_info(origskb, present_fcs_len, rtap_space);
834 return origskb;
835 }
836
837 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
838
839 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
840 bool last_monitor = list_is_last(&sdata->u.mntr.list,
841 &local->mon_list);
842
843 if (!monskb)
844 monskb = ieee80211_make_monitor_skb(local, &origskb,
845 rate, rtap_space,
846 only_monitor &&
847 last_monitor);
848
849 if (monskb) {
850 struct sk_buff *skb;
851
852 if (last_monitor) {
853 skb = monskb;
854 monskb = NULL;
855 } else {
856 skb = skb_clone(monskb, GFP_ATOMIC);
857 }
858
859 if (skb) {
860 skb->dev = sdata->dev;
861 ieee80211_rx_stats(skb->dev, skb->len);
862 #ifndef CONFIG_DRIVERS_HDF_XR829
863 netif_receive_skb(skb);
864 #else
865 wal_netif_receive_skb(skb);
866 #endif
867 }
868 }
869
870 if (last_monitor)
871 break;
872 }
873
874 /* this happens if last_monitor was erroneously false */
875 dev_kfree_skb(monskb);
876
877 /* ditto */
878 if (!origskb)
879 return NULL;
880
881 remove_monitor_info(origskb, present_fcs_len, rtap_space);
882 return origskb;
883 }
884
885 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
886 {
887 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
888 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
889 int tid, seqno_idx, security_idx;
890
891 /* does the frame have a qos control field? */
892 if (ieee80211_is_data_qos(hdr->frame_control)) {
893 u8 *qc = ieee80211_get_qos_ctl(hdr);
894 /* frame has qos control */
895 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
896 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
897 status->rx_flags |= IEEE80211_RX_AMSDU;
898
899 seqno_idx = tid;
900 security_idx = tid;
901 } else {
902 /*
903 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
904 *
905 * Sequence numbers for management frames, QoS data
906 * frames with a broadcast/multicast address in the
907 * Address 1 field, and all non-QoS data frames sent
908 * by QoS STAs are assigned using an additional single
909 * modulo-4096 counter, [...]
910 *
911 * We also use that counter for non-QoS STAs.
912 */
913 seqno_idx = IEEE80211_NUM_TIDS;
914 security_idx = 0;
915 if (ieee80211_is_mgmt(hdr->frame_control))
916 security_idx = IEEE80211_NUM_TIDS;
917 tid = 0;
918 }
919
920 rx->seqno_idx = seqno_idx;
921 rx->security_idx = security_idx;
922 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
923 * For now, set skb->priority to 0 for other cases. */
924 rx->skb->priority = (tid > 7) ? 0 : tid;
925 }
926
927 /**
928 * DOC: Packet alignment
929 *
930 * Drivers always need to pass packets that are aligned to two-byte boundaries
931 * to the stack.
932 *
933 * Additionally, drivers should, if possible, align the payload data in a way
934 * guarantees that the contained IP header is aligned to a four-byte
935 * boundary. In the case of regular frames, this simply means aligning the
936 * payload to a four-byte boundary (because either the IP header is directly
937 * contained, or IV/RFC1042 headers that have a length divisible by four are
938 * in front of it). If the payload data is not properly aligned and the
939 * architecture doesn't support efficient unaligned operations, mac80211
940 * will align the data.
941 *
942 * With A-MSDU frames, however, the payload data address modulo four must
943 * be two, because the 14-byte 802.3 headers within the A-MSDU frames
944 * push the IP header further back to a multiple of four again. Thankfully, the
945 * specs were sane enough this time around to require padding each A-MSDU
946 * subframe to a length that is a multiple of four.
947 *
948 * Padding such as Atheros hardware adds between the 802.11 header and
949 * the payload is not supported; in that case the driver is required to
950 * move the 802.11 header so that it sits directly in front of the payload.
951 */
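/*
 * Illustrative driver-side sketch (hypothetical, not part of mac80211):
 * one way a driver that copies frames out of a DMA buffer could control
 * the alignment described above is to reserve a little headroom before
 * copying; the exact amount depends on the header layout of the frame.
 */
static inline struct sk_buff *
example_driver_build_rx_skb(const void *rxbuf, unsigned int len)
{
	/* 2 bytes of slack so the payload can end up 4-byte aligned */
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);
	skb_put_data(skb, rxbuf, len);
	return skb;
}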
952 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
953 {
954 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
955 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
956 #endif
957 }
958
959
960 /* rx handlers */
961
962 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
963 {
964 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
965
966 if (is_multicast_ether_addr(hdr->addr1))
967 return 0;
968
969 return ieee80211_is_robust_mgmt_frame(skb);
970 }
971
972
973 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
974 {
975 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
976
977 if (!is_multicast_ether_addr(hdr->addr1))
978 return 0;
979
980 return ieee80211_is_robust_mgmt_frame(skb);
981 }
982
983
984 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
985 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
986 {
987 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
988 struct ieee80211_mmie *mmie;
989 struct ieee80211_mmie_16 *mmie16;
990
991 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
992 return -1;
993
994 if (!ieee80211_is_robust_mgmt_frame(skb))
995 return -1; /* not a robust management frame */
996
997 mmie = (struct ieee80211_mmie *)
998 (skb->data + skb->len - sizeof(*mmie));
999 if (mmie->element_id == WLAN_EID_MMIE &&
1000 mmie->length == sizeof(*mmie) - 2)
1001 return le16_to_cpu(mmie->key_id);
1002
1003 mmie16 = (struct ieee80211_mmie_16 *)
1004 (skb->data + skb->len - sizeof(*mmie16));
1005 if (skb->len >= 24 + sizeof(*mmie16) &&
1006 mmie16->element_id == WLAN_EID_MMIE &&
1007 mmie16->length == sizeof(*mmie16) - 2)
1008 return le16_to_cpu(mmie16->key_id);
1009
1010 return -1;
1011 }
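/*
 * For reference: the 18-byte MMIE above carries an 8-octet MIC
 * (BIP-CMAC-128), while the 26-byte ieee80211_mmie_16 variant carries a
 * 16-octet MIC (BIP-CMAC-256 / BIP-GMAC-128 / BIP-GMAC-256); both start
 * with the element ID, length and 2-byte key ID checked here.
 */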
1012
1013 static int ieee80211_get_keyid(struct sk_buff *skb,
1014 const struct ieee80211_cipher_scheme *cs)
1015 {
1016 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1017 __le16 fc;
1018 int hdrlen;
1019 int minlen;
1020 u8 key_idx_off;
1021 u8 key_idx_shift;
1022 u8 keyid;
1023
1024 fc = hdr->frame_control;
1025 hdrlen = ieee80211_hdrlen(fc);
1026
1027 if (cs) {
1028 minlen = hdrlen + cs->hdr_len;
1029 key_idx_off = hdrlen + cs->key_idx_off;
1030 key_idx_shift = cs->key_idx_shift;
1031 } else {
1032 /* WEP, TKIP, CCMP and GCMP */
1033 minlen = hdrlen + IEEE80211_WEP_IV_LEN;
1034 key_idx_off = hdrlen + 3;
1035 key_idx_shift = 6;
1036 }
1037
1038 if (unlikely(skb->len < minlen))
1039 return -EINVAL;
1040
1041 skb_copy_bits(skb, key_idx_off, &keyid, 1);
1042
1043 if (cs)
1044 keyid &= cs->key_idx_mask;
1045 keyid >>= key_idx_shift;
1046
1047 /* cs could use more than the usual two bits for the keyid */
1048 if (unlikely(keyid >= NUM_DEFAULT_KEYS))
1049 return -EINVAL;
1050
1051 return keyid;
1052 }
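/*
 * Worked example for the default (non-cipher-scheme) path above: for CCMP
 * the octet at hdrlen + 3 is the "key ID" byte, e.g. 0xa0 for key index 2
 * (ExtIV bit 0x20 | 2 << 6), and 0xa0 >> 6 yields 2.
 */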
1053
1054 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1055 {
1056 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1057 char *dev_addr = rx->sdata->vif.addr;
1058
1059 if (ieee80211_is_data(hdr->frame_control)) {
1060 if (is_multicast_ether_addr(hdr->addr1)) {
1061 if (ieee80211_has_tods(hdr->frame_control) ||
1062 !ieee80211_has_fromds(hdr->frame_control))
1063 return RX_DROP_MONITOR;
1064 if (ether_addr_equal(hdr->addr3, dev_addr))
1065 return RX_DROP_MONITOR;
1066 } else {
1067 if (!ieee80211_has_a4(hdr->frame_control))
1068 return RX_DROP_MONITOR;
1069 if (ether_addr_equal(hdr->addr4, dev_addr))
1070 return RX_DROP_MONITOR;
1071 }
1072 }
1073
1074 /* If there is not an established peer link and this is not a peer link
1075 * establishment frame, beacon or probe, drop the frame.
1076 */
1077
1078 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1079 struct ieee80211_mgmt *mgmt;
1080
1081 if (!ieee80211_is_mgmt(hdr->frame_control))
1082 return RX_DROP_MONITOR;
1083
1084 if (ieee80211_is_action(hdr->frame_control)) {
1085 u8 category;
1086
1087 /* make sure category field is present */
1088 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1089 return RX_DROP_MONITOR;
1090
1091 mgmt = (struct ieee80211_mgmt *)hdr;
1092 category = mgmt->u.action.category;
1093 if (category != WLAN_CATEGORY_MESH_ACTION &&
1094 category != WLAN_CATEGORY_SELF_PROTECTED)
1095 return RX_DROP_MONITOR;
1096 return RX_CONTINUE;
1097 }
1098
1099 if (ieee80211_is_probe_req(hdr->frame_control) ||
1100 ieee80211_is_probe_resp(hdr->frame_control) ||
1101 ieee80211_is_beacon(hdr->frame_control) ||
1102 ieee80211_is_auth(hdr->frame_control))
1103 return RX_CONTINUE;
1104
1105 return RX_DROP_MONITOR;
1106 }
1107
1108 return RX_CONTINUE;
1109 }
1110
1111 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1112 int index)
1113 {
1114 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1115 struct sk_buff *tail = skb_peek_tail(frames);
1116 struct ieee80211_rx_status *status;
1117
1118 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1119 return true;
1120
1121 if (!tail)
1122 return false;
1123
1124 status = IEEE80211_SKB_RXCB(tail);
1125 if (status->flag & RX_FLAG_AMSDU_MORE)
1126 return false;
1127
1128 return true;
1129 }
1130
1131 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1132 struct tid_ampdu_rx *tid_agg_rx,
1133 int index,
1134 struct sk_buff_head *frames)
1135 {
1136 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1137 struct sk_buff *skb;
1138 struct ieee80211_rx_status *status;
1139
1140 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1141
1142 if (skb_queue_empty(skb_list))
1143 goto no_frame;
1144
1145 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1146 __skb_queue_purge(skb_list);
1147 goto no_frame;
1148 }
1149
1150 /* release frames from the reorder ring buffer */
1151 tid_agg_rx->stored_mpdu_num--;
1152 while ((skb = __skb_dequeue(skb_list))) {
1153 status = IEEE80211_SKB_RXCB(skb);
1154 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1155 __skb_queue_tail(frames, skb);
1156 }
1157
1158 no_frame:
1159 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1160 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1161 }
1162
1163 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1164 struct tid_ampdu_rx *tid_agg_rx,
1165 u16 head_seq_num,
1166 struct sk_buff_head *frames)
1167 {
1168 int index;
1169
1170 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1171
1172 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1173 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1174 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1175 frames);
1176 }
1177 }
1178
1179 /*
1180 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1181 * the skb was added to the buffer longer than this time ago, the earlier
1182 * frames that have not yet been received are assumed to be lost and the skb
1183 * can be released for processing. This may also release other skb's from the
1184 * reorder buffer if there are no additional gaps between the frames.
1185 *
1186 * Callers must hold tid_agg_rx->reorder_lock.
1187 */
1188 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1189
1190 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1191 struct tid_ampdu_rx *tid_agg_rx,
1192 struct sk_buff_head *frames)
1193 {
1194 int index, i, j;
1195
1196 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1197
1198 /* release the buffer until next missing frame */
1199 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1200 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1201 tid_agg_rx->stored_mpdu_num) {
1202 /*
1203 * No buffers ready to be released, but check whether any
1204 * frames in the reorder buffer have timed out.
1205 */
1206 int skipped = 1;
1207 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1208 j = (j + 1) % tid_agg_rx->buf_size) {
1209 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1210 skipped++;
1211 continue;
1212 }
1213 if (skipped &&
1214 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1215 HT_RX_REORDER_BUF_TIMEOUT))
1216 goto set_release_timer;
1217
1218 /* don't leave incomplete A-MSDUs around */
1219 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1220 i = (i + 1) % tid_agg_rx->buf_size)
1221 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1222
1223 ht_dbg_ratelimited(sdata,
1224 "release an RX reorder frame due to timeout on earlier frames\n");
1225 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1226 frames);
1227
1228 /*
1229 * Increment the head seq# also for the skipped slots.
1230 */
1231 tid_agg_rx->head_seq_num =
1232 (tid_agg_rx->head_seq_num +
1233 skipped) & IEEE80211_SN_MASK;
1234 skipped = 0;
1235 }
1236 } else {
1237 while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1238 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1239 frames);
1240 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1241 }
1242 }
1243
1244 if (tid_agg_rx->stored_mpdu_num) {
1245 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1246
1247 for (; j != (index - 1) % tid_agg_rx->buf_size;
1248 j = (j + 1) % tid_agg_rx->buf_size) {
1249 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1250 break;
1251 }
1252
1253 set_release_timer:
1254
1255 if (!tid_agg_rx->removed)
1256 mod_timer(&tid_agg_rx->reorder_timer,
1257 tid_agg_rx->reorder_time[j] + 1 +
1258 HT_RX_REORDER_BUF_TIMEOUT);
1259 } else {
1260 del_timer(&tid_agg_rx->reorder_timer);
1261 }
1262 }
1263
1264 /*
1265 * As this function belongs to the RX path it must be under
1266 * rcu_read_lock protection. It returns false if the frame
1267 * can be processed immediately, true if it was consumed.
1268 */
1269 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1270 struct tid_ampdu_rx *tid_agg_rx,
1271 struct sk_buff *skb,
1272 struct sk_buff_head *frames)
1273 {
1274 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1275 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1276 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1277 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1278 u16 head_seq_num, buf_size;
1279 int index;
1280 bool ret = true;
1281
1282 spin_lock(&tid_agg_rx->reorder_lock);
1283
1284 /*
1285 * Offloaded BA sessions have no known starting sequence number so pick
1286 * one from the first Rxed frame for this tid after the BA was started.
1287 */
1288 if (unlikely(tid_agg_rx->auto_seq)) {
1289 tid_agg_rx->auto_seq = false;
1290 tid_agg_rx->ssn = mpdu_seq_num;
1291 tid_agg_rx->head_seq_num = mpdu_seq_num;
1292 }
1293
1294 buf_size = tid_agg_rx->buf_size;
1295 head_seq_num = tid_agg_rx->head_seq_num;
1296
1297 /*
1298 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1299 * be reordered.
1300 */
1301 if (unlikely(!tid_agg_rx->started)) {
1302 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1303 ret = false;
1304 goto out;
1305 }
1306 tid_agg_rx->started = true;
1307 }
1308
1309 /* frame with out of date sequence number */
1310 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1311 dev_kfree_skb(skb);
1312 goto out;
1313 }
1314
1315 /*
1316 * If the frame's sequence number exceeds our buffering window
1317 * size, release some previous frames to make room for this one.
1318 */
1319 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1320 head_seq_num = ieee80211_sn_inc(
1321 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1322 /* release stored frames up to new head to stack */
1323 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1324 head_seq_num, frames);
1325 }
1326
1327 /* Now the new frame is always in the range of the reordering buffer */
1328
1329 index = mpdu_seq_num % tid_agg_rx->buf_size;
1330
1331 /* check if we already stored this frame */
1332 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1333 dev_kfree_skb(skb);
1334 goto out;
1335 }
1336
1337 /*
1338 * If the current MPDU is in the right order and nothing else
1339 * is stored we can process it directly, no need to buffer it.
1340 * If it is first but there's something stored, we may be able
1341 * to release frames after this one.
1342 */
1343 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1344 tid_agg_rx->stored_mpdu_num == 0) {
1345 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1346 tid_agg_rx->head_seq_num =
1347 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1348 ret = false;
1349 goto out;
1350 }
1351
1352 /* put the frame in the reordering buffer */
1353 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1354 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1355 tid_agg_rx->reorder_time[index] = jiffies;
1356 tid_agg_rx->stored_mpdu_num++;
1357 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1358 }
1359
1360 out:
1361 spin_unlock(&tid_agg_rx->reorder_lock);
1362 return ret;
1363 }
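/*
 * Worked example for the window handling above (sketch only): with
 * buf_size 64 and head_seq_num 100, an MPDU with SN 200 is outside the
 * window (200 >= 100 + 64), so frames up to a new head of 200 - 64 + 1 =
 * 137 are released first and the MPDU is then stored at index 200 % 64 = 8.
 */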
1364
1365 /*
1366 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that can
1367 * be processed now, or that are released later, are put on the frames list.
1368 */
1369 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1370 struct sk_buff_head *frames)
1371 {
1372 struct sk_buff *skb = rx->skb;
1373 struct ieee80211_local *local = rx->local;
1374 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1375 struct sta_info *sta = rx->sta;
1376 struct tid_ampdu_rx *tid_agg_rx;
1377 u16 sc;
1378 u8 tid, ack_policy;
1379
1380 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1381 is_multicast_ether_addr(hdr->addr1))
1382 goto dont_reorder;
1383
1384 /*
1385 * filter the QoS data rx stream according to
1386 * STA/TID and check if this STA/TID is on aggregation
1387 */
1388
1389 if (!sta)
1390 goto dont_reorder;
1391
1392 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1393 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1394 tid = ieee80211_get_tid(hdr);
1395
1396 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1397 if (!tid_agg_rx) {
1398 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1399 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1400 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1401 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1402 WLAN_BACK_RECIPIENT,
1403 WLAN_REASON_QSTA_REQUIRE_SETUP);
1404 goto dont_reorder;
1405 }
1406
1407 /* qos null data frames are excluded */
1408 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1409 goto dont_reorder;
1410
1411 /* not part of a BA session */
1412 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1413 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1414 goto dont_reorder;
1415
1416 /* new, potentially un-ordered, ampdu frame - process it */
1417
1418 /* reset session timer */
1419 if (tid_agg_rx->timeout)
1420 tid_agg_rx->last_rx = jiffies;
1421
1422 /* if this mpdu is fragmented - terminate rx aggregation session */
1423 sc = le16_to_cpu(hdr->seq_ctrl);
1424 if (sc & IEEE80211_SCTL_FRAG) {
1425 skb_queue_tail(&rx->sdata->skb_queue, skb);
1426 mac80211_queue_work(&local->hw, &rx->sdata->work);
1427 return;
1428 }
1429
1430 /*
1431 * No locking needed -- we will only ever process one
1432 * RX packet at a time, and thus own tid_agg_rx. All
1433 * other code manipulating it needs to (and does) make
1434 * sure that we cannot get to it any more before doing
1435 * anything with it.
1436 */
1437 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1438 frames))
1439 return;
1440
1441 dont_reorder:
1442 __skb_queue_tail(frames, skb);
1443 }
1444
1445 static ieee80211_rx_result debug_noinline
1446 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1447 {
1448 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1450
1451 if (status->flag & RX_FLAG_DUP_VALIDATED)
1452 return RX_CONTINUE;
1453
1454 /*
1455 * Drop duplicate 802.11 retransmissions
1456 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1457 */
1458
1459 if (rx->skb->len < 24)
1460 return RX_CONTINUE;
1461
1462 if (ieee80211_is_ctl(hdr->frame_control) ||
1463 ieee80211_is_any_nullfunc(hdr->frame_control) ||
1464 is_multicast_ether_addr(hdr->addr1))
1465 return RX_CONTINUE;
1466
1467 if (!rx->sta)
1468 return RX_CONTINUE;
1469
1470 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1471 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1472 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1473 rx->sta->rx_stats.num_duplicates++;
1474 return RX_DROP_UNUSABLE;
1475 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1476 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1477 }
1478
1479 return RX_CONTINUE;
1480 }
1481
1482 static ieee80211_rx_result debug_noinline
1483 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1484 {
1485 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1486
1487 /* Drop disallowed frame classes based on STA auth/assoc state;
1488 * IEEE 802.11, Chap 5.5.
1489 *
1490 * mac80211 filters only based on association state, i.e. it drops
1491 * Class 3 frames from not associated stations. hostapd sends
1492 * deauth/disassoc frames when needed. In addition, hostapd is
1493 * responsible for filtering on both auth and assoc states.
1494 */
1495
1496 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1497 return ieee80211_rx_mesh_check(rx);
1498
1499 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1500 ieee80211_is_pspoll(hdr->frame_control)) &&
1501 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1502 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
1503 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1504 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1505 /*
1506 * accept port control frames from the AP even when it's not
1507 * yet marked ASSOC to prevent a race where we don't set the
1508 * assoc bit quickly enough before it sends the first frame
1509 */
1510 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1511 ieee80211_is_data_present(hdr->frame_control)) {
1512 unsigned int hdrlen;
1513 __be16 ethertype;
1514
1515 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1516
1517 if (rx->skb->len < hdrlen + 8)
1518 return RX_DROP_MONITOR;
1519
1520 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1521 if (ethertype == rx->sdata->control_port_protocol)
1522 return RX_CONTINUE;
1523 }
1524
1525 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1526 cfg80211_rx_spurious_frame(rx->sdata->dev,
1527 hdr->addr2,
1528 GFP_ATOMIC))
1529 return RX_DROP_UNUSABLE;
1530
1531 return RX_DROP_MONITOR;
1532 }
1533
1534 return RX_CONTINUE;
1535 }
1536
1537
1538 static ieee80211_rx_result debug_noinline
1539 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1540 {
1541 struct ieee80211_local *local;
1542 struct ieee80211_hdr *hdr;
1543 struct sk_buff *skb;
1544
1545 local = rx->local;
1546 skb = rx->skb;
1547 hdr = (struct ieee80211_hdr *) skb->data;
1548
1549 if (!local->pspolling)
1550 return RX_CONTINUE;
1551
1552 if (!ieee80211_has_fromds(hdr->frame_control))
1553 /* this is not from AP */
1554 return RX_CONTINUE;
1555
1556 if (!ieee80211_is_data(hdr->frame_control))
1557 return RX_CONTINUE;
1558
1559 if (!ieee80211_has_moredata(hdr->frame_control)) {
1560 /* AP has no more frames buffered for us */
1561 local->pspolling = false;
1562 return RX_CONTINUE;
1563 }
1564
1565 /* more data bit is set, let's request a new frame from the AP */
1566 ieee80211_send_pspoll(local, rx->sdata);
1567
1568 return RX_CONTINUE;
1569 }
1570
1571 static void sta_ps_start(struct sta_info *sta)
1572 {
1573 struct ieee80211_sub_if_data *sdata = sta->sdata;
1574 struct ieee80211_local *local = sdata->local;
1575 struct ps_data *ps;
1576 int tid;
1577
1578 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1579 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1580 ps = &sdata->bss->ps;
1581 else
1582 return;
1583
1584 atomic_inc(&ps->num_sta_ps);
1585 set_sta_flag(sta, WLAN_STA_PS_STA);
1586 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1587 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1588 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1589 sta->sta.addr, sta->sta.aid);
1590
1591 ieee80211_clear_fast_xmit(sta);
1592
1593 if (!sta->sta.txq[0])
1594 return;
1595
1596 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1597 struct ieee80211_txq *txq = sta->sta.txq[tid];
1598 struct txq_info *txqi = to_txq_info(txq);
1599
1600 spin_lock(&local->active_txq_lock[txq->ac]);
1601 if (!list_empty(&txqi->schedule_order))
1602 list_del_init(&txqi->schedule_order);
1603 spin_unlock(&local->active_txq_lock[txq->ac]);
1604
1605 if (txq_has_queue(txq))
1606 set_bit(tid, &sta->txq_buffered_tids);
1607 else
1608 clear_bit(tid, &sta->txq_buffered_tids);
1609 }
1610 }
1611
1612 static void sta_ps_end(struct sta_info *sta)
1613 {
1614 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1615 sta->sta.addr, sta->sta.aid);
1616
1617 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1618 /*
1619 * Clear the flag only if the other one is still set
1620 * so that the TX path won't start TX'ing new frames
1621 * directly ... In the case that the driver flag isn't
1622 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1623 */
1624 clear_sta_flag(sta, WLAN_STA_PS_STA);
1625 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1626 sta->sta.addr, sta->sta.aid);
1627 return;
1628 }
1629
1630 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1631 clear_sta_flag(sta, WLAN_STA_PS_STA);
1632 ieee80211_sta_ps_deliver_wakeup(sta);
1633 }
1634
1635 int mac80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1636 {
1637 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1638 bool in_ps;
1639
1640 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1641
1642 /* Don't let the same PS state be set twice */
1643 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1644 if ((start && in_ps) || (!start && !in_ps))
1645 return -EINVAL;
1646
1647 if (start)
1648 sta_ps_start(sta);
1649 else
1650 sta_ps_end(sta);
1651
1652 return 0;
1653 }
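/*
 * Illustrative driver-side usage of mac80211_sta_ps_transition() above
 * (hypothetical names, a sketch only): a driver that set AP_LINK_PS and
 * tracks the PM bit in hardware could report transitions like this; a
 * non-zero return just means the station was already in that state.
 */
static inline void example_drv_report_ps(struct ieee80211_sta *pubsta,
					 bool sleeping)
{
	if (mac80211_sta_ps_transition(pubsta, sleeping))
		pr_debug("PS state for %pM unchanged\n", pubsta->addr);
}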
1654
1655 void mac80211_sta_pspoll(struct ieee80211_sta *pubsta)
1656 {
1657 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1658
1659 if (test_sta_flag(sta, WLAN_STA_SP))
1660 return;
1661
1662 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1663 ieee80211_sta_ps_deliver_poll_response(sta);
1664 else
1665 set_sta_flag(sta, WLAN_STA_PSPOLL);
1666 }
1667
1668 void mac80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1669 {
1670 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1671 int ac = ieee80211_ac_from_tid(tid);
1672
1673 /*
1674 * If this AC is not trigger-enabled do nothing unless the
1675 * driver is calling us after it already checked.
1676 *
1677 * NB: This could/should check a separate bitmap of trigger-
1678 * enabled queues, but for now we only implement uAPSD w/o
1679 * TSPEC changes to the ACs, so they're always the same.
1680 */
1681 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1682 tid != IEEE80211_NUM_TIDS)
1683 return;
1684
1685 /* if we are in a service period, do nothing */
1686 if (test_sta_flag(sta, WLAN_STA_SP))
1687 return;
1688
1689 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1690 ieee80211_sta_ps_deliver_uapsd(sta);
1691 else
1692 set_sta_flag(sta, WLAN_STA_UAPSD);
1693 }
1694
1695 static ieee80211_rx_result debug_noinline
1696 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1697 {
1698 struct ieee80211_sub_if_data *sdata = rx->sdata;
1699 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1700 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1701
1702 if (!rx->sta)
1703 return RX_CONTINUE;
1704
1705 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1706 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1707 return RX_CONTINUE;
1708
1709 /*
1710 * The device handles station powersave, so don't do anything about
1711 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
1712 * the device to mac80211, since they're handled there.)
1713 */
1714 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1715 return RX_CONTINUE;
1716
1717 /*
1718 * Don't do anything if the station isn't already asleep. In
1719 * the uAPSD case, the station will probably be marked asleep,
1720 * in the PS-Poll case the station must be confused ...
1721 */
1722 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1723 return RX_CONTINUE;
1724
1725 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1726 mac80211_sta_pspoll(&rx->sta->sta);
1727
1728 /* Free PS Poll skb here instead of returning RX_DROP that would
1729 * count as a dropped frame. */
1730 dev_kfree_skb(rx->skb);
1731
1732 return RX_QUEUED;
1733 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1734 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1735 ieee80211_has_pm(hdr->frame_control) &&
1736 (ieee80211_is_data_qos(hdr->frame_control) ||
1737 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1738 u8 tid = ieee80211_get_tid(hdr);
1739
1740 mac80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1741 }
1742
1743 return RX_CONTINUE;
1744 }
1745
1746 static ieee80211_rx_result debug_noinline
1747 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1748 {
1749 struct sta_info *sta = rx->sta;
1750 struct sk_buff *skb = rx->skb;
1751 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1752 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1753 int i;
1754
1755 if (!sta)
1756 return RX_CONTINUE;
1757
1758 /*
1759 * Update last_rx only for IBSS packets which are for the current
1760 * BSSID and for stations already AUTHORIZED, to avoid keeping the
1761 * current IBSS network alive in cases where other STAs start
1762 * using different BSSID. This will also give the station another
1763 * chance to restart the authentication/authorization in case
1764 * something went wrong the first time.
1765 */
1766 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1767 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1768 NL80211_IFTYPE_ADHOC);
1769 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1770 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1771 sta->rx_stats.last_rx = jiffies;
1772 if (ieee80211_is_data(hdr->frame_control) &&
1773 !is_multicast_ether_addr(hdr->addr1))
1774 sta->rx_stats.last_rate =
1775 sta_stats_encode_rate(status);
1776 }
1777 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1778 sta->rx_stats.last_rx = jiffies;
1779 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1780 /*
1781 * Mesh beacons will update last_rx if they are found to
1782 * match the current local configuration when processed.
1783 */
1784 sta->rx_stats.last_rx = jiffies;
1785 if (ieee80211_is_data(hdr->frame_control))
1786 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1787 }
1788
1789 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1790 ieee80211_sta_rx_notify(rx->sdata, hdr);
1791
1792 sta->rx_stats.fragments++;
1793
1794 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1795 sta->rx_stats.bytes += rx->skb->len;
1796 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1797
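/*
 * Note: the EWMA helpers below average non-negative values while
 * status->signal is typically a negative dBm figure, so the running
 * averages store the negated signal and readers of rx_stats_avg are
 * expected to negate it again.
 */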
1798 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1799 sta->rx_stats.last_signal = status->signal;
1800 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1801 }
1802
1803 if (status->chains) {
1804 sta->rx_stats.chains = status->chains;
1805 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1806 int signal = status->chain_signal[i];
1807
1808 if (!(status->chains & BIT(i)))
1809 continue;
1810
1811 sta->rx_stats.chain_signal_last[i] = signal;
1812 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1813 -signal);
1814 }
1815 }
1816
1817 /*
1818 * Change STA power saving mode only at the end of a frame
1819 * exchange sequence, and only for a data or management
1820 * frame as specified in IEEE 802.11-2016 11.2.3.2
1821 */
1822 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1823 !ieee80211_has_morefrags(hdr->frame_control) &&
1824 !is_multicast_ether_addr(hdr->addr1) &&
1825 (ieee80211_is_mgmt(hdr->frame_control) ||
1826 ieee80211_is_data(hdr->frame_control)) &&
1827 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1828 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1829 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1830 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1831 if (!ieee80211_has_pm(hdr->frame_control))
1832 sta_ps_end(sta);
1833 } else {
1834 if (ieee80211_has_pm(hdr->frame_control))
1835 sta_ps_start(sta);
1836 }
1837 }
1838
1839 /* mesh power save support */
1840 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1841 ieee80211_mps_rx_h_sta_process(sta, hdr);
1842
1843 /*
1844 * Drop (qos-)data::nullfunc frames silently, since they
1845 * are used only to control station power saving mode.
1846 */
1847 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1848 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1849
1850 /*
1851 * If we receive a 4-addr nullfunc frame from a STA
1852 * that has not yet been moved to a 4-addr STA VLAN, send
1853 * the event to userspace and, for older hostapd, drop
1854 * the frame to the monitor interface.
1855 */
1856 if (ieee80211_has_a4(hdr->frame_control) &&
1857 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1858 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1859 !rx->sdata->u.vlan.sta))) {
1860 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1861 cfg80211_rx_unexpected_4addr_frame(
1862 rx->sdata->dev, sta->sta.addr,
1863 GFP_ATOMIC);
1864 return RX_DROP_MONITOR;
1865 }
1866 /*
1867 * Update counter and free packet here to avoid
1868 * counting this as a dropped packet.
1869 */
1870 sta->rx_stats.packets++;
1871 dev_kfree_skb(rx->skb);
1872 return RX_QUEUED;
1873 }
1874
1875 return RX_CONTINUE;
1876 } /* ieee80211_rx_h_sta_process */
1877
1878 static ieee80211_rx_result debug_noinline
1879 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1880 {
1881 struct sk_buff *skb = rx->skb;
1882 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1883 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1884 int keyidx;
1885 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1886 struct ieee80211_key *sta_ptk = NULL;
1887 struct ieee80211_key *ptk_idx = NULL;
1888 int mmie_keyidx = -1;
1889 __le16 fc;
1890 const struct ieee80211_cipher_scheme *cs = NULL;
1891
1892 /*
1893 * Key selection 101
1894 *
1895 * There are four types of keys:
1896 * - GTK (group keys)
1897 * - IGTK (group keys for management frames)
1898 * - PTK (pairwise keys)
1899 * - STK (station-to-station pairwise keys)
1900 *
1901 * When selecting a key, we have to distinguish between multicast
1902 * (including broadcast) and unicast frames; the latter can only
1903 * use PTKs and STKs while the former always use GTKs and IGTKs.
1904 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
1905 * unicast frames can also use key indices like GTKs. Hence, if we
1906 * don't have a PTK/STK we check the key index for a WEP key.
1907 *
1908 * Note that in a regular BSS, multicast frames are sent by the
1909 * AP only; associated stations unicast the frame to the AP first
1910 * which then multicasts it on their behalf.
1911 *
1912 * There is also a slight problem in IBSS mode: GTKs are negotiated
1913 * with each station, which is something we don't currently handle.
1914 * The spec seems to expect that one negotiates the same key with
1915 * every station but there's no such requirement; VLANs could be
1916 * possible.
1917 */
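/*
 * Roughly, the selection below proceeds in this order:
 *  1. unicast frame and a pairwise key is known -> use the PTK/STK
 *  2. MMIE present (BIP)                        -> use the IGTK by index
 *  3. frame not protected                       -> remember a plausible key
 *     so that ieee80211_drop_unencrypted() can still reject it later
 *  4. otherwise                                 -> look the key up by the
 *     key index carried in the frame
 */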
1918
1919 /* start without a key */
1920 rx->key = NULL;
1921 fc = hdr->frame_control;
1922
1923 if (rx->sta) {
1924 int keyid = rx->sta->ptk_idx;
1925 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1926
1927 if (ieee80211_has_protected(fc)) {
1928 cs = rx->sta->cipher_scheme;
1929 keyid = ieee80211_get_keyid(rx->skb, cs);
1930
1931 if (unlikely(keyid < 0))
1932 return RX_DROP_UNUSABLE;
1933
1934 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
1935 }
1936 }
1937
1938 if (!ieee80211_has_protected(fc))
1939 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1940
1941 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1942 rx->key = ptk_idx ? ptk_idx : sta_ptk;
1943 if ((status->flag & RX_FLAG_DECRYPTED) &&
1944 (status->flag & RX_FLAG_IV_STRIPPED))
1945 return RX_CONTINUE;
1946 /* Skip decryption if the frame is not protected. */
1947 if (!ieee80211_has_protected(fc))
1948 return RX_CONTINUE;
1949 } else if (mmie_keyidx >= 0) {
1950 /* Broadcast/multicast robust management frame / BIP */
1951 if ((status->flag & RX_FLAG_DECRYPTED) &&
1952 (status->flag & RX_FLAG_IV_STRIPPED))
1953 return RX_CONTINUE;
1954
1955 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1956 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1957 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1958 if (rx->sta) {
1959 if (ieee80211_is_group_privacy_action(skb) &&
1960 test_sta_flag(rx->sta, WLAN_STA_MFP))
1961 return RX_DROP_MONITOR;
1962
1963 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1964 }
1965 if (!rx->key)
1966 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1967 } else if (!ieee80211_has_protected(fc)) {
1968 /*
1969 * The frame was not protected, so skip decryption. However, we
1970 * need to set rx->key if there is a key that could have been
1971 * used so that the frame may be dropped if encryption would
1972 * have been expected.
1973 */
1974 struct ieee80211_key *key = NULL;
1975 struct ieee80211_sub_if_data *sdata = rx->sdata;
1976 int i;
1977
1978 if (ieee80211_is_mgmt(fc) &&
1979 is_multicast_ether_addr(hdr->addr1) &&
1980 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
1981 rx->key = key;
1982 else {
1983 if (rx->sta) {
1984 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1985 key = rcu_dereference(rx->sta->gtk[i]);
1986 if (key)
1987 break;
1988 }
1989 }
1990 if (!key) {
1991 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1992 key = rcu_dereference(sdata->keys[i]);
1993 if (key)
1994 break;
1995 }
1996 }
1997 if (key)
1998 rx->key = key;
1999 }
2000 return RX_CONTINUE;
2001 } else {
2002 /*
2003 * The device doesn't give us the IV so we won't be
2004 * able to look up the key. That's ok though, we
2005 * don't need to decrypt the frame, we just won't
2006 * be able to keep statistics accurate.
2007 * Except for key threshold notifications, should
2008 * we somehow allow the driver to tell us which key
2009 * the hardware used if this flag is set?
2010 */
2011 if ((status->flag & RX_FLAG_DECRYPTED) &&
2012 (status->flag & RX_FLAG_IV_STRIPPED))
2013 return RX_CONTINUE;
2014
2015 keyidx = ieee80211_get_keyid(rx->skb, cs);
2016
2017 if (unlikely(keyidx < 0))
2018 return RX_DROP_UNUSABLE;
2019
2020 /* check per-station GTK first, if multicast packet */
2021 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
2022 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
2023
2024 /* if not found, try default key */
2025 if (!rx->key) {
2026 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2027
2028 /*
2029 * RSNA-protected unicast frames should always be
2030 * sent with pairwise or station-to-station keys,
2031 * but for WEP we allow using a key index as well.
2032 */
2033 if (rx->key &&
2034 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2035 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2036 !is_multicast_ether_addr(hdr->addr1))
2037 rx->key = NULL;
2038 }
2039 }
2040
2041 if (rx->key) {
2042 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2043 return RX_DROP_MONITOR;
2044
2045 /* TODO: add threshold stuff again */
2046 } else {
2047 return RX_DROP_MONITOR;
2048 }
2049
2050 switch (rx->key->conf.cipher) {
2051 case WLAN_CIPHER_SUITE_WEP40:
2052 case WLAN_CIPHER_SUITE_WEP104:
2053 result = ieee80211_crypto_wep_decrypt(rx);
2054 break;
2055 case WLAN_CIPHER_SUITE_TKIP:
2056 result = ieee80211_crypto_tkip_decrypt(rx);
2057 break;
2058 case WLAN_CIPHER_SUITE_CCMP:
2059 result = ieee80211_crypto_ccmp_decrypt(
2060 rx, IEEE80211_CCMP_MIC_LEN);
2061 break;
2062 case WLAN_CIPHER_SUITE_CCMP_256:
2063 result = ieee80211_crypto_ccmp_decrypt(
2064 rx, IEEE80211_CCMP_256_MIC_LEN);
2065 break;
2066 case WLAN_CIPHER_SUITE_AES_CMAC:
2067 result = ieee80211_crypto_aes_cmac_decrypt(rx);
2068 break;
2069 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2070 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
2071 break;
2072 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2073 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2074 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2075 break;
2076 case WLAN_CIPHER_SUITE_GCMP:
2077 case WLAN_CIPHER_SUITE_GCMP_256:
2078 result = ieee80211_crypto_gcmp_decrypt(rx);
2079 break;
2080 default:
2081 result = ieee80211_crypto_hw_decrypt(rx);
2082 }
2083
2084 /* the hdr variable is invalid after the decrypt handlers */
2085
2086 /* either the frame has been decrypted or will be dropped */
2087 status->flag |= RX_FLAG_DECRYPTED;
2088
2089 return result;
2090 }
2091
2092 static inline struct ieee80211_fragment_entry *
2093 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
2094 unsigned int frag, unsigned int seq, int rx_queue,
2095 struct sk_buff **skb)
2096 {
2097 struct ieee80211_fragment_entry *entry;
2098
2099 entry = &sdata->fragments[sdata->fragment_next++];
2100 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
2101 sdata->fragment_next = 0;
2102
2103 if (!skb_queue_empty(&entry->skb_list))
2104 __skb_queue_purge(&entry->skb_list);
2105
2106 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2107 *skb = NULL;
2108 entry->first_frag_time = jiffies;
2109 entry->seq = seq;
2110 entry->rx_queue = rx_queue;
2111 entry->last_frag = frag;
2112 entry->check_sequential_pn = false;
2113 entry->extra_len = 0;
2114
2115 return entry;
2116 }
2117
2118 static inline struct ieee80211_fragment_entry *
2119 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
2120 unsigned int frag, unsigned int seq,
2121 int rx_queue, struct ieee80211_hdr *hdr)
2122 {
2123 struct ieee80211_fragment_entry *entry;
2124 int i, idx;
2125
2126 idx = sdata->fragment_next;
2127 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2128 struct ieee80211_hdr *f_hdr;
2129 struct sk_buff *f_skb;
2130
2131 idx--;
2132 if (idx < 0)
2133 idx = IEEE80211_FRAGMENT_MAX - 1;
2134
2135 entry = &sdata->fragments[idx];
2136 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2137 entry->rx_queue != rx_queue ||
2138 entry->last_frag + 1 != frag)
2139 continue;
2140
2141 f_skb = __skb_peek(&entry->skb_list);
2142 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2143
2144 /*
2145 * Check ftype and addresses are equal, else check next fragment
2146 */
2147 if (((hdr->frame_control ^ f_hdr->frame_control) &
2148 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2149 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2150 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2151 continue;
2152
2153 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2154 __skb_queue_purge(&entry->skb_list);
2155 continue;
2156 }
2157 return entry;
2158 }
2159
2160 return NULL;
2161 }
2162
2163 static ieee80211_rx_result debug_noinline
2164 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2165 {
2166 struct ieee80211_hdr *hdr;
2167 u16 sc;
2168 __le16 fc;
2169 unsigned int frag, seq;
2170 struct ieee80211_fragment_entry *entry;
2171 struct sk_buff *skb;
2172
2173 hdr = (struct ieee80211_hdr *)rx->skb->data;
2174 fc = hdr->frame_control;
2175
2176 if (ieee80211_is_ctl(fc))
2177 return RX_CONTINUE;
2178
2179 sc = le16_to_cpu(hdr->seq_ctrl);
2180 frag = sc & IEEE80211_SCTL_FRAG;
2181
2182 if (is_multicast_ether_addr(hdr->addr1)) {
2183 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2184 goto out_no_led;
2185 }
2186
2187 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2188 goto out;
2189
2190 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2191
2192 if (skb_linearize(rx->skb))
2193 return RX_DROP_UNUSABLE;
2194
2195 /*
2196 * skb_linearize() might change the skb->data and
2197 * previously cached variables (in this case, hdr) need to
2198 * be refreshed with the new data.
2199 */
2200 hdr = (struct ieee80211_hdr *)rx->skb->data;
2201 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2202
2203 if (frag == 0) {
2204 /* This is the first fragment of a new frame. */
2205 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
2206 rx->seqno_idx, &(rx->skb));
2207 if (rx->key &&
2208 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2209 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2210 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2211 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2212 ieee80211_has_protected(fc)) {
2213 int queue = rx->security_idx;
2214
2215 /* Store CCMP/GCMP PN so that we can verify that the
2216 * next fragment has a sequential PN value.
2217 */
2218 entry->check_sequential_pn = true;
2219 memcpy(entry->last_pn,
2220 rx->key->u.ccmp.rx_pn[queue],
2221 IEEE80211_CCMP_PN_LEN);
2222 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2223 u.ccmp.rx_pn) !=
2224 offsetof(struct ieee80211_key,
2225 u.gcmp.rx_pn));
2226 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2227 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2228 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2229 IEEE80211_GCMP_PN_LEN);
2230 }
2231 return RX_QUEUED;
2232 }
2233
2234 /* This is a fragment for a frame that should already be pending in the
2235 * fragment cache. Add this fragment to the end of the pending entry.
2236 */
2237 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
2238 rx->seqno_idx, hdr);
2239 if (!entry) {
2240 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2241 return RX_DROP_MONITOR;
2242 }
2243
2244 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2245 * MPDU PN values are not incrementing in steps of 1."
2246 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2247 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2248 */
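/*
 * Worked example: if the first fragment carried PN
 * 00 00 00 00 00 ff, the only acceptable PN for the next fragment is
 * 00 00 00 00 01 00 -- the increment below runs from the least
 * significant byte upwards and stops at the first byte that doesn't
 * wrap around to zero.
 */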
2249 if (entry->check_sequential_pn) {
2250 int i;
2251 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2252 int queue;
2253
2254 if (!rx->key ||
2255 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2256 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2257 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2258 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2259 return RX_DROP_UNUSABLE;
2260 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
2261 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2262 pn[i]++;
2263 if (pn[i])
2264 break;
2265 }
2266 queue = rx->security_idx;
2267 rpn = rx->key->u.ccmp.rx_pn[queue];
2268 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2269 return RX_DROP_UNUSABLE;
2270 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2271 }
2272
2273 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2274 __skb_queue_tail(&entry->skb_list, rx->skb);
2275 entry->last_frag = frag;
2276 entry->extra_len += rx->skb->len;
2277 if (ieee80211_has_morefrags(fc)) {
2278 rx->skb = NULL;
2279 return RX_QUEUED;
2280 }
2281
2282 rx->skb = __skb_dequeue(&entry->skb_list);
2283 if (skb_tailroom(rx->skb) < entry->extra_len) {
2284 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2285 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2286 GFP_ATOMIC))) {
2287 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2288 __skb_queue_purge(&entry->skb_list);
2289 return RX_DROP_UNUSABLE;
2290 }
2291 }
2292 while ((skb = __skb_dequeue(&entry->skb_list))) {
2293 skb_put_data(rx->skb, skb->data, skb->len);
2294 dev_kfree_skb(skb);
2295 }
2296
2297 out:
2298 ieee80211_led_rx(rx->local);
2299 out_no_led:
2300 if (rx->sta)
2301 rx->sta->rx_stats.packets++;
2302 return RX_CONTINUE;
2303 }
2304
2305 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2306 {
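/*
 * 802.1X port control: until userspace marks the station AUTHORIZED,
 * only control port (EAPOL) frames may pass; that exception is applied
 * by the caller, ieee80211_frame_allowed(), before this check runs.
 */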
2307 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2308 return -EACCES;
2309
2310 return 0;
2311 }
2312
2313 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2314 {
2315 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
2316 struct sk_buff *skb = rx->skb;
2317 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2318
2319 /*
2320 * Pass through unencrypted frames if the hardware has
2321 * decrypted them already.
2322 */
2323 if (status->flag & RX_FLAG_DECRYPTED)
2324 return 0;
2325
2326 /* check mesh EAPOL frames first */
2327 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2328 ieee80211_is_data(fc))) {
2329 struct ieee80211s_hdr *mesh_hdr;
2330 u16 hdr_len = ieee80211_hdrlen(fc);
2331 u16 ethertype_offset;
2332 __be16 ethertype;
2333
2334 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2335 goto drop_check;
2336
2337 /* make sure fixed part of mesh header is there, also checks skb len */
2338 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2339 goto drop_check;
2340
2341 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2342 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2343 sizeof(rfc1042_header);
2344
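/*
 * At this point the expected layout is
 *   [802.11 hdr][mesh hdr][RFC 1042 LLC/SNAP][ethertype][payload]
 * so the two bytes at ethertype_offset identify the upper-layer
 * protocol; mesh control port (EAPOL) frames are allowed through
 * unencrypted so that authentication can complete.
 */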
2345 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2346 ethertype == rx->sdata->control_port_protocol)
2347 return 0;
2348 }
2349
2350 drop_check:
2351 /* Drop unencrypted frames if key is set. */
2352 if (unlikely(!ieee80211_has_protected(fc) &&
2353 !ieee80211_is_any_nullfunc(fc) &&
2354 ieee80211_is_data(fc) && rx->key))
2355 return -EACCES;
2356
2357 return 0;
2358 }
2359
2360 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2361 {
2362 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2363 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2364 __le16 fc = hdr->frame_control;
2365
2366 /*
2367 * Pass through unencrypted frames if the hardware has
2368 * decrypted them already.
2369 */
2370 if (status->flag & RX_FLAG_DECRYPTED)
2371 return 0;
2372
2373 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2374 if (unlikely(!ieee80211_has_protected(fc) &&
2375 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2376 rx->key)) {
2377 if (ieee80211_is_deauth(fc) ||
2378 ieee80211_is_disassoc(fc))
2379 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2380 rx->skb->data,
2381 rx->skb->len);
2382 return -EACCES;
2383 }
2384 /* BIP does not use Protected field, so need to check MMIE */
2385 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2386 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2387 if (ieee80211_is_deauth(fc) ||
2388 ieee80211_is_disassoc(fc))
2389 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2390 rx->skb->data,
2391 rx->skb->len);
2392 return -EACCES;
2393 }
2394 /*
2395 * When using MFP, Action frames are not allowed prior to
2396 * having configured keys.
2397 */
2398 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2399 ieee80211_is_robust_mgmt_frame(rx->skb)))
2400 return -EACCES;
2401 }
2402
2403 return 0;
2404 }
2405
2406 static int
2407 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2408 {
2409 struct ieee80211_sub_if_data *sdata = rx->sdata;
2410 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2411 bool check_port_control = false;
2412 struct ethhdr *ehdr;
2413 int ret;
2414
2415 *port_control = false;
2416 if (ieee80211_has_a4(hdr->frame_control) &&
2417 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2418 return -1;
2419
2420 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2421 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2422
2423 if (!sdata->u.mgd.use_4addr)
2424 return -1;
2425 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2426 check_port_control = true;
2427 }
2428
2429 if (is_multicast_ether_addr(hdr->addr1) &&
2430 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2431 return -1;
2432
2433 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2434 if (ret < 0)
2435 return ret;
2436
2437 ehdr = (struct ethhdr *) rx->skb->data;
2438 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2439 *port_control = true;
2440 else if (check_port_control)
2441 return -1;
2442
2443 return 0;
2444 }
2445
2446 /*
2447 * requires that rx->skb is a frame with an ethernet header
2448 */
2449 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2450 {
2451 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2452 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2453 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2454
2455 /*
2456 * Allow EAPOL frames to us/the PAE group address regardless
2457 * of whether the frame was encrypted or not.
2458 */
2459 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2460 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2461 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2462 return true;
2463
2464 if (ieee80211_802_1x_port_control(rx) ||
2465 ieee80211_drop_unencrypted(rx, fc))
2466 return false;
2467
2468 return true;
2469 }
2470
2471 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2472 struct ieee80211_rx_data *rx)
2473 {
2474 struct ieee80211_sub_if_data *sdata = rx->sdata;
2475 struct net_device *dev = sdata->dev;
2476
2477 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2478 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
2479 sdata->control_port_over_nl80211)) {
2480 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2481 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2482
2483 cfg80211_rx_control_port(dev, skb, noencrypt);
2484 dev_kfree_skb(skb);
2485 } else {
2486 memset(skb->cb, 0, sizeof(skb->cb));
2487
2488 /* deliver to local stack */
2489 if (rx->napi)
2490 napi_gro_receive(rx->napi, skb);
2491 else
2492 #ifndef CONFIG_DRIVERS_HDF_XR829
2493 netif_receive_skb(skb);
2494 #else
2495 wal_netif_receive_skb(skb);
2496 #endif
2497 }
2498 }
2499
2500 /*
2501 * requires that rx->skb is a frame with an ethernet header
2502 */
2503 static void
2504 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2505 {
2506 struct ieee80211_sub_if_data *sdata = rx->sdata;
2507 struct net_device *dev = sdata->dev;
2508 struct sk_buff *skb, *xmit_skb;
2509 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2510 struct sta_info *dsta;
2511
2512 skb = rx->skb;
2513 xmit_skb = NULL;
2514
2515 ieee80211_rx_stats(dev, skb->len);
2516
2517 if (rx->sta) {
2518 /* The seqno index has the same property as needed
2519 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2520 * for non-QoS-data frames. Here we know it's a data
2521 * frame, so count MSDUs.
2522 */
2523 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2524 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2525 u64_stats_update_end(&rx->sta->rx_stats.syncp);
2526 }
2527
2528 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2529 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2530 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2531 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2532 if (is_multicast_ether_addr(ehdr->h_dest) &&
2533 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2534 /*
2535 * send multicast frames both to higher layers in
2536 * local net stack and back to the wireless medium
2537 */
2538 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2539 if (!xmit_skb)
2540 net_info_ratelimited("%s: failed to clone multicast frame\n",
2541 dev->name);
2542 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2543 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2544 dsta = sta_info_get(sdata, ehdr->h_dest);
2545 if (dsta) {
2546 /*
2547 * The destination station is associated to
2548 * this AP (in this VLAN), so send the frame
2549 * directly to it and do not pass it to local
2550 * net stack.
2551 */
2552 xmit_skb = skb;
2553 skb = NULL;
2554 }
2555 }
2556 }
2557
2558 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2559 if (skb) {
2560 /* 'align' will only take the values 0 or 2 here since all
2561 * frames are required to be aligned to 2-byte boundaries
2562 * when being passed to mac80211; the code here works just
2563 * as well if that isn't true, but mac80211 assumes it can
2564 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2565 */
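/*
 * Example (with the usual 14-byte struct ethhdr): if skb->data + 14
 * lands on an address whose low two bits are 2, pulling skb->data
 * back by those 2 bytes and moving the header down again makes the
 * payload that follows the ethernet header 4-byte aligned.
 */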
2566 int align;
2567
2568 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2569 if (align) {
2570 if (WARN_ON(skb_headroom(skb) < 3)) {
2571 dev_kfree_skb(skb);
2572 skb = NULL;
2573 } else {
2574 u8 *data = skb->data;
2575 size_t len = skb_headlen(skb);
2576 skb->data -= align;
2577 memmove(skb->data, data, len);
2578 skb_set_tail_pointer(skb, len);
2579 }
2580 }
2581 }
2582 #endif
2583
2584 if (skb) {
2585 #ifndef CONFIG_DRIVERS_HDF_XR829
2586 skb->protocol = eth_type_trans(skb, dev);
2587 #endif
2588 ieee80211_deliver_skb_to_local_stack(skb, rx);
2589 }
2590
2591 if (xmit_skb) {
2592 /*
2593 * Send to wireless media and increase priority by 256 to
2594 * keep the received priority instead of reclassifying
2595 * the frame (see cfg80211_classify8021d).
2596 */
2597 xmit_skb->priority += 256;
2598 xmit_skb->protocol = htons(ETH_P_802_3);
2599 skb_reset_network_header(xmit_skb);
2600 skb_reset_mac_header(xmit_skb);
2601 dev_queue_xmit(xmit_skb);
2602 }
2603 }
2604
2605 static ieee80211_rx_result debug_noinline
2606 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2607 {
2608 struct net_device *dev = rx->sdata->dev;
2609 struct sk_buff *skb = rx->skb;
2610 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2611 __le16 fc = hdr->frame_control;
2612 struct sk_buff_head frame_list;
2613 struct ethhdr ethhdr;
2614 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2615
2616 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2617 check_da = NULL;
2618 check_sa = NULL;
2619 } else {
2620 switch (rx->sdata->vif.type) {
2621 case NL80211_IFTYPE_AP:
2622 case NL80211_IFTYPE_AP_VLAN:
2623 check_da = NULL;
2624 break;
2625 case NL80211_IFTYPE_STATION:
2626 if (!rx->sta ||
2627 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2628 check_sa = NULL;
2629 break;
2630 case NL80211_IFTYPE_MESH_POINT:
2631 check_sa = NULL;
2632 break;
2633 default:
2634 break;
2635 }
2636 }
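/*
 * check_da/check_sa are handed to ieee80211_amsdu_to_8023s() so inner
 * A-MSDU subframe addresses can be validated against the outer 802.11
 * header; the check is skipped (NULL) wherever the inner address may
 * legitimately differ: both for 4-address frames, DA on an AP (any
 * associated or bridged station), SA on a non-TDLS client (traffic
 * relayed through the AP) and SA on a mesh interface.
 */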
2637
2638 skb->dev = dev;
2639 __skb_queue_head_init(&frame_list);
2640
2641 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2642 rx->sdata->vif.addr,
2643 rx->sdata->vif.type,
2644 data_offset, true))
2645 return RX_DROP_UNUSABLE;
2646
2647 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2648 rx->sdata->vif.type,
2649 rx->local->hw.extra_tx_headroom,
2650 check_da, check_sa);
2651
2652 while (!skb_queue_empty(&frame_list)) {
2653 rx->skb = __skb_dequeue(&frame_list);
2654
2655 if (!ieee80211_frame_allowed(rx, fc)) {
2656 dev_kfree_skb(rx->skb);
2657 continue;
2658 }
2659
2660 ieee80211_deliver_skb(rx);
2661 }
2662
2663 return RX_QUEUED;
2664 }
2665
2666 static ieee80211_rx_result debug_noinline
2667 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2668 {
2669 struct sk_buff *skb = rx->skb;
2670 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2672 __le16 fc = hdr->frame_control;
2673
2674 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2675 return RX_CONTINUE;
2676
2677 if (unlikely(!ieee80211_is_data(fc)))
2678 return RX_CONTINUE;
2679
2680 if (unlikely(!ieee80211_is_data_present(fc)))
2681 return RX_DROP_MONITOR;
2682
2683 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2684 switch (rx->sdata->vif.type) {
2685 case NL80211_IFTYPE_AP_VLAN:
2686 if (!rx->sdata->u.vlan.sta)
2687 return RX_DROP_UNUSABLE;
2688 break;
2689 case NL80211_IFTYPE_STATION:
2690 if (!rx->sdata->u.mgd.use_4addr)
2691 return RX_DROP_UNUSABLE;
2692 break;
2693 default:
2694 return RX_DROP_UNUSABLE;
2695 }
2696 }
2697
2698 if (is_multicast_ether_addr(hdr->addr1))
2699 return RX_DROP_UNUSABLE;
2700
2701 return __ieee80211_rx_h_amsdu(rx, 0);
2702 }
2703
2704 #ifdef CONFIG_MAC80211_MESH
2705 static ieee80211_rx_result
2706 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2707 {
2708 struct ieee80211_hdr *fwd_hdr, *hdr;
2709 struct ieee80211_tx_info *info;
2710 struct ieee80211s_hdr *mesh_hdr;
2711 struct sk_buff *skb = rx->skb, *fwd_skb;
2712 struct ieee80211_local *local = rx->local;
2713 struct ieee80211_sub_if_data *sdata = rx->sdata;
2714 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2715 u16 ac, q, hdrlen;
2716 int tailroom = 0;
2717
2718 hdr = (struct ieee80211_hdr *) skb->data;
2719 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2720
2721 /* make sure fixed part of mesh header is there, also checks skb len */
2722 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2723 return RX_DROP_MONITOR;
2724
2725 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2726
2727 /* make sure full mesh header is there, also checks skb len */
2728 if (!pskb_may_pull(rx->skb,
2729 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2730 return RX_DROP_MONITOR;
2731
2732 /* reload pointers */
2733 hdr = (struct ieee80211_hdr *) skb->data;
2734 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2735
2736 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2737 return RX_DROP_MONITOR;
2738
2739 /* frame is in RMC, don't forward */
2740 if (ieee80211_is_data(hdr->frame_control) &&
2741 is_multicast_ether_addr(hdr->addr1) &&
2742 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2743 return RX_DROP_MONITOR;
2744
2745 if (!ieee80211_is_data(hdr->frame_control))
2746 return RX_CONTINUE;
2747
2748 if (!mesh_hdr->ttl)
2749 return RX_DROP_MONITOR;
2750
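/*
 * Address Extension: the mesh header carries the address(es) of a node
 * outside the mesh that this frame is proxied for, so learn or refresh
 * the MPP (mesh proxy path) mapping the proxied address to the mesh
 * STA that introduced the frame into the mesh.
 */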
2751 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2752 struct mesh_path *mppath;
2753 char *proxied_addr;
2754 char *mpp_addr;
2755
2756 if (is_multicast_ether_addr(hdr->addr1)) {
2757 mpp_addr = hdr->addr3;
2758 proxied_addr = mesh_hdr->eaddr1;
2759 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2760 MESH_FLAGS_AE_A5_A6) {
2761 /* has_a4 already checked in ieee80211_rx_mesh_check */
2762 mpp_addr = hdr->addr4;
2763 proxied_addr = mesh_hdr->eaddr2;
2764 } else {
2765 return RX_DROP_MONITOR;
2766 }
2767
2768 rcu_read_lock();
2769 mppath = mpp_path_lookup(sdata, proxied_addr);
2770 if (!mppath) {
2771 mpp_path_add(sdata, proxied_addr, mpp_addr);
2772 } else {
2773 spin_lock_bh(&mppath->state_lock);
2774 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2775 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2776 mppath->exp_time = jiffies;
2777 spin_unlock_bh(&mppath->state_lock);
2778 }
2779 rcu_read_unlock();
2780 }
2781
2782 /* Frame has reached destination. Don't forward */
2783 if (!is_multicast_ether_addr(hdr->addr1) &&
2784 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2785 return RX_CONTINUE;
2786
2787 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2788 q = sdata->vif.hw_queue[ac];
2789 if (mac80211_queue_stopped(&local->hw, q)) {
2790 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2791 return RX_DROP_MONITOR;
2792 }
2793 skb_set_queue_mapping(skb, q);
2794
2795 if (!--mesh_hdr->ttl) {
2796 if (!is_multicast_ether_addr(hdr->addr1))
2797 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2798 dropped_frames_ttl);
2799 goto out;
2800 }
2801
2802 if (!ifmsh->mshcfg.dot11MeshForwarding)
2803 goto out;
2804
2805 if (sdata->crypto_tx_tailroom_needed_cnt)
2806 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2807
2808 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2809 sdata->encrypt_headroom,
2810 tailroom, GFP_ATOMIC);
2811 if (!fwd_skb)
2812 goto out;
2813
2814 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2815 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2816 info = IEEE80211_SKB_CB(fwd_skb);
2817 memset(info, 0, sizeof(*info));
2818 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2819 info->control.vif = &rx->sdata->vif;
2820 info->control.jiffies = jiffies;
2821 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2822 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2823 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2824 /* update power mode indication when forwarding */
2825 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2826 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2827 /* mesh power mode flags updated in mesh_nexthop_lookup */
2828 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2829 } else {
2830 /* unable to resolve next hop */
2831 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2832 fwd_hdr->addr3, 0,
2833 WLAN_REASON_MESH_PATH_NOFORWARD,
2834 fwd_hdr->addr2);
2835 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2836 kfree_skb(fwd_skb);
2837 return RX_DROP_MONITOR;
2838 }
2839
2840 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2841 ieee80211_add_pending_skb(local, fwd_skb);
2842 out:
2843 if (is_multicast_ether_addr(hdr->addr1))
2844 return RX_CONTINUE;
2845 return RX_DROP_MONITOR;
2846 }
2847 #endif
2848
2849 static ieee80211_rx_result debug_noinline
2850 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2851 {
2852 struct ieee80211_sub_if_data *sdata = rx->sdata;
2853 struct ieee80211_local *local = rx->local;
2854 struct net_device *dev = sdata->dev;
2855 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2856 __le16 fc = hdr->frame_control;
2857 bool port_control;
2858 int err;
2859
2860 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2861 return RX_CONTINUE;
2862
2863 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2864 return RX_DROP_MONITOR;
2865
2866 /*
2867 * Send unexpected-4addr-frame event to hostapd. For older versions,
2868 * also drop the frame to cooked monitor interfaces.
2869 */
2870 if (ieee80211_has_a4(hdr->frame_control) &&
2871 sdata->vif.type == NL80211_IFTYPE_AP) {
2872 if (rx->sta &&
2873 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2874 cfg80211_rx_unexpected_4addr_frame(
2875 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2876 return RX_DROP_MONITOR;
2877 }
2878
2879 err = __ieee80211_data_to_8023(rx, &port_control);
2880 if (unlikely(err))
2881 return RX_DROP_UNUSABLE;
2882
2883 if (!ieee80211_frame_allowed(rx, fc))
2884 return RX_DROP_MONITOR;
2885
2886 /* directly handle TDLS channel switch requests/responses */
2887 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2888 cpu_to_be16(ETH_P_TDLS))) {
2889 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2890
2891 if (pskb_may_pull(rx->skb,
2892 offsetof(struct ieee80211_tdls_data, u)) &&
2893 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2894 tf->category == WLAN_CATEGORY_TDLS &&
2895 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2896 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2897 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2898 schedule_work(&local->tdls_chsw_work);
2899 if (rx->sta)
2900 rx->sta->rx_stats.packets++;
2901
2902 return RX_QUEUED;
2903 }
2904 }
2905
2906 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2907 unlikely(port_control) && sdata->bss) {
2908 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2909 u.ap);
2910 dev = sdata->dev;
2911 rx->sdata = sdata;
2912 }
2913
2914 rx->skb->dev = dev;
2915
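/*
 * When mac80211 runs dynamic powersave itself (the driver doesn't
 * advertise SUPPORTS_DYNAMIC_PS), received unicast data while on the
 * operating channel re-arms the timer so the device stays awake a bit
 * longer in anticipation of more traffic.
 */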
2916 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2917 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2918 !is_multicast_ether_addr(
2919 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2920 (!local->scanning &&
2921 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2922 mod_timer(&local->dynamic_ps_timer, jiffies +
2923 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2924
2925 ieee80211_deliver_skb(rx);
2926
2927 return RX_QUEUED;
2928 }
2929
2930 static ieee80211_rx_result debug_noinline
2931 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2932 {
2933 struct sk_buff *skb = rx->skb;
2934 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2935 struct tid_ampdu_rx *tid_agg_rx;
2936 u16 start_seq_num;
2937 u16 tid;
2938
2939 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2940 return RX_CONTINUE;
2941
2942 if (ieee80211_is_back_req(bar->frame_control)) {
2943 struct {
2944 __le16 control, start_seq_num;
2945 } __packed bar_data;
2946 struct ieee80211_event event = {
2947 .type = BAR_RX_EVENT,
2948 };
2949
2950 if (!rx->sta)
2951 return RX_DROP_MONITOR;
2952
2953 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2954 &bar_data, sizeof(bar_data)))
2955 return RX_DROP_MONITOR;
2956
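/*
 * The BAR Control field carries the TID in its four most significant
 * bits and the Starting Sequence Control keeps the sequence number in
 * bits 4-15 (bits 0-3 are the fragment number), hence the shifts by
 * 12 and 4 below.
 */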
2957 tid = le16_to_cpu(bar_data.control) >> 12;
2958
2959 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2960 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2961 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2962 WLAN_BACK_RECIPIENT,
2963 WLAN_REASON_QSTA_REQUIRE_SETUP);
2964
2965 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2966 if (!tid_agg_rx)
2967 return RX_DROP_MONITOR;
2968
2969 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2970 event.u.ba.tid = tid;
2971 event.u.ba.ssn = start_seq_num;
2972 event.u.ba.sta = &rx->sta->sta;
2973
2974 /* reset session timer */
2975 if (tid_agg_rx->timeout)
2976 mod_timer(&tid_agg_rx->session_timer,
2977 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2978
2979 spin_lock(&tid_agg_rx->reorder_lock);
2980 /* release stored frames up to start of BAR */
2981 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2982 start_seq_num, frames);
2983 spin_unlock(&tid_agg_rx->reorder_lock);
2984
2985 drv_event_callback(rx->local, rx->sdata, &event);
2986
2987 kfree_skb(skb);
2988 return RX_QUEUED;
2989 }
2990
2991 /*
2992 * After this point, we only want management frames,
2993 * so we can drop all remaining control frames to
2994 * cooked monitor interfaces.
2995 */
2996 return RX_DROP_MONITOR;
2997 }
2998
2999 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3000 struct ieee80211_mgmt *mgmt,
3001 size_t len)
3002 {
3003 struct ieee80211_local *local = sdata->local;
3004 struct sk_buff *skb;
3005 struct ieee80211_mgmt *resp;
3006
3007 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3008 /* Not to own unicast address */
3009 return;
3010 }
3011
3012 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
3013 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
3014 /* Not from the current AP or not associated yet. */
3015 return;
3016 }
3017
3018 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3019 /* Too short SA Query request frame */
3020 return;
3021 }
3022
3023 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3024 if (skb == NULL)
3025 return;
3026
3027 skb_reserve(skb, local->hw.extra_tx_headroom);
3028 resp = skb_put_zero(skb, 24);
3029 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3030 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3031 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
3032 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3033 IEEE80211_STYPE_ACTION);
3034 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3035 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3036 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3037 memcpy(resp->u.action.u.sa_query.trans_id,
3038 mgmt->u.action.u.sa_query.trans_id,
3039 WLAN_SA_QUERY_TR_ID_LEN);
3040
3041 ieee80211_tx_skb(sdata, skb);
3042 }
3043
3044 static ieee80211_rx_result debug_noinline
3045 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3046 {
3047 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3048 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3049
3050 /*
3051 * From here on, look only at management frames.
3052 * Data and control frames are already handled,
3053 * and unknown (reserved) frames are useless.
3054 */
3055 if (rx->skb->len < 24)
3056 return RX_DROP_MONITOR;
3057
3058 if (!ieee80211_is_mgmt(mgmt->frame_control))
3059 return RX_DROP_MONITOR;
3060
3061 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3062 ieee80211_is_beacon(mgmt->frame_control) &&
3063 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3064 int sig = 0;
3065
3066 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3067 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3068 sig = status->signal;
3069
3070 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
3071 rx->skb->data, rx->skb->len,
3072 status->freq, sig);
3073 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3074 }
3075
3076 if (ieee80211_drop_unencrypted_mgmt(rx))
3077 return RX_DROP_UNUSABLE;
3078
3079 return RX_CONTINUE;
3080 }
3081
3082 static ieee80211_rx_result debug_noinline
3083 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3084 {
3085 struct ieee80211_local *local = rx->local;
3086 struct ieee80211_sub_if_data *sdata = rx->sdata;
3087 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3088 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3089 int len = rx->skb->len;
3090
3091 if (!ieee80211_is_action(mgmt->frame_control))
3092 return RX_CONTINUE;
3093
3094 /* drop too small frames */
3095 if (len < IEEE80211_MIN_ACTION_SIZE)
3096 return RX_DROP_UNUSABLE;
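/*
 * IEEE80211_MIN_ACTION_SIZE covers the fixed management header plus
 * the category octet; each category below then verifies that the
 * additional fixed fields it wants to look at are actually present
 * before dereferencing them.
 */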
3097
3098 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3099 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3100 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3101 return RX_DROP_UNUSABLE;
3102
3103 switch (mgmt->u.action.category) {
3104 case WLAN_CATEGORY_HT:
3105 /* reject HT action frames from stations not supporting HT */
3106 if (!rx->sta->sta.ht_cap.ht_supported)
3107 goto invalid;
3108
3109 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3110 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3111 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3112 sdata->vif.type != NL80211_IFTYPE_AP &&
3113 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3114 break;
3115
3116 /* verify action & smps_control/chanwidth are present */
3117 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3118 goto invalid;
3119
3120 switch (mgmt->u.action.u.ht_smps.action) {
3121 case WLAN_HT_ACTION_SMPS: {
3122 struct ieee80211_supported_band *sband;
3123 enum ieee80211_smps_mode smps_mode;
3124 struct sta_opmode_info sta_opmode = {};
3125
3126 /* convert to HT capability */
3127 switch (mgmt->u.action.u.ht_smps.smps_control) {
3128 case WLAN_HT_SMPS_CONTROL_DISABLED:
3129 smps_mode = IEEE80211_SMPS_OFF;
3130 break;
3131 case WLAN_HT_SMPS_CONTROL_STATIC:
3132 smps_mode = IEEE80211_SMPS_STATIC;
3133 break;
3134 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3135 smps_mode = IEEE80211_SMPS_DYNAMIC;
3136 break;
3137 default:
3138 goto invalid;
3139 }
3140
3141 /* if no change do nothing */
3142 if (rx->sta->sta.smps_mode == smps_mode)
3143 goto handled;
3144 rx->sta->sta.smps_mode = smps_mode;
3145 sta_opmode.smps_mode =
3146 ieee80211_smps_mode_to_smps_mode(smps_mode);
3147 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3148
3149 sband = rx->local->hw.wiphy->bands[status->band];
3150
3151 rate_control_rate_update(local, sband, rx->sta,
3152 IEEE80211_RC_SMPS_CHANGED);
3153 cfg80211_sta_opmode_change_notify(sdata->dev,
3154 rx->sta->addr,
3155 &sta_opmode,
3156 GFP_ATOMIC);
3157 goto handled;
3158 }
3159 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3160 struct ieee80211_supported_band *sband;
3161 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3162 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3163 struct sta_opmode_info sta_opmode = {};
3164
3165 /* If it doesn't support 40 MHz it can't change ... */
3166 if (!(rx->sta->sta.ht_cap.cap &
3167 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3168 goto handled;
3169
3170 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3171 max_bw = IEEE80211_STA_RX_BW_20;
3172 else
3173 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3174
3175 /* set cur_max_bandwidth and recalc sta bw */
3176 rx->sta->cur_max_bandwidth = max_bw;
3177 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3178
3179 if (rx->sta->sta.bandwidth == new_bw)
3180 goto handled;
3181
3182 rx->sta->sta.bandwidth = new_bw;
3183 sband = rx->local->hw.wiphy->bands[status->band];
3184 sta_opmode.bw =
3185 ieee80211_sta_rx_bw_to_chan_width(rx->sta);
3186 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3187
3188 rate_control_rate_update(local, sband, rx->sta,
3189 IEEE80211_RC_BW_CHANGED);
3190 cfg80211_sta_opmode_change_notify(sdata->dev,
3191 rx->sta->addr,
3192 &sta_opmode,
3193 GFP_ATOMIC);
3194 goto handled;
3195 }
3196 default:
3197 goto invalid;
3198 }
3199
3200 break;
3201 case WLAN_CATEGORY_PUBLIC:
3202 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3203 goto invalid;
3204 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3205 break;
3206 if (!rx->sta)
3207 break;
3208 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3209 break;
3210 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3211 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3212 break;
3213 if (len < offsetof(struct ieee80211_mgmt,
3214 u.action.u.ext_chan_switch.variable))
3215 goto invalid;
3216 goto queue;
3217 case WLAN_CATEGORY_VHT:
3218 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3219 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3220 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3221 sdata->vif.type != NL80211_IFTYPE_AP &&
3222 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3223 break;
3224
3225 /* verify action code is present */
3226 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3227 goto invalid;
3228
3229 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3230 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3231 /* verify opmode is present */
3232 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3233 goto invalid;
3234 goto queue;
3235 }
3236 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3237 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3238 goto invalid;
3239 goto queue;
3240 }
3241 default:
3242 break;
3243 }
3244 break;
3245 case WLAN_CATEGORY_BACK:
3246 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3247 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3248 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3249 sdata->vif.type != NL80211_IFTYPE_AP &&
3250 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3251 break;
3252
3253 /* verify action_code is present */
3254 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3255 break;
3256
3257 switch (mgmt->u.action.u.addba_req.action_code) {
3258 case WLAN_ACTION_ADDBA_REQ:
3259 if (len < (IEEE80211_MIN_ACTION_SIZE +
3260 sizeof(mgmt->u.action.u.addba_req)))
3261 goto invalid;
3262 break;
3263 case WLAN_ACTION_ADDBA_RESP:
3264 if (len < (IEEE80211_MIN_ACTION_SIZE +
3265 sizeof(mgmt->u.action.u.addba_resp)))
3266 goto invalid;
3267 break;
3268 case WLAN_ACTION_DELBA:
3269 if (len < (IEEE80211_MIN_ACTION_SIZE +
3270 sizeof(mgmt->u.action.u.delba)))
3271 goto invalid;
3272 break;
3273 default:
3274 goto invalid;
3275 }
3276
3277 goto queue;
3278 case WLAN_CATEGORY_SPECTRUM_MGMT:
3279 /* verify action_code is present */
3280 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3281 break;
3282
3283 switch (mgmt->u.action.u.measurement.action_code) {
3284 case WLAN_ACTION_SPCT_MSR_REQ:
3285 if (status->band != NL80211_BAND_5GHZ)
3286 break;
3287
3288 if (len < (IEEE80211_MIN_ACTION_SIZE +
3289 sizeof(mgmt->u.action.u.measurement)))
3290 break;
3291
3292 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3293 break;
3294
3295 ieee80211_process_measurement_req(sdata, mgmt, len);
3296 goto handled;
3297 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3298 u8 *bssid;
3299 if (len < (IEEE80211_MIN_ACTION_SIZE +
3300 sizeof(mgmt->u.action.u.chan_switch)))
3301 break;
3302
3303 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3304 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3305 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3306 break;
3307
3308 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3309 bssid = sdata->u.mgd.bssid;
3310 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3311 bssid = sdata->u.ibss.bssid;
3312 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3313 bssid = mgmt->sa;
3314 else
3315 break;
3316
3317 if (!ether_addr_equal(mgmt->bssid, bssid))
3318 break;
3319
3320 goto queue;
3321 }
3322 }
3323 break;
3324 case WLAN_CATEGORY_SA_QUERY:
3325 if (len < (IEEE80211_MIN_ACTION_SIZE +
3326 sizeof(mgmt->u.action.u.sa_query)))
3327 break;
3328
3329 switch (mgmt->u.action.u.sa_query.action) {
3330 case WLAN_ACTION_SA_QUERY_REQUEST:
3331 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3332 break;
3333 ieee80211_process_sa_query_req(sdata, mgmt, len);
3334 goto handled;
3335 }
3336 break;
3337 case WLAN_CATEGORY_SELF_PROTECTED:
3338 if (len < (IEEE80211_MIN_ACTION_SIZE +
3339 sizeof(mgmt->u.action.u.self_prot.action_code)))
3340 break;
3341
3342 switch (mgmt->u.action.u.self_prot.action_code) {
3343 case WLAN_SP_MESH_PEERING_OPEN:
3344 case WLAN_SP_MESH_PEERING_CLOSE:
3345 case WLAN_SP_MESH_PEERING_CONFIRM:
3346 if (!ieee80211_vif_is_mesh(&sdata->vif))
3347 goto invalid;
3348 if (sdata->u.mesh.user_mpm)
3349 /* userspace handles this frame */
3350 break;
3351 goto queue;
3352 case WLAN_SP_MGK_INFORM:
3353 case WLAN_SP_MGK_ACK:
3354 if (!ieee80211_vif_is_mesh(&sdata->vif))
3355 goto invalid;
3356 break;
3357 }
3358 break;
3359 case WLAN_CATEGORY_MESH_ACTION:
3360 if (len < (IEEE80211_MIN_ACTION_SIZE +
3361 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3362 break;
3363
3364 if (!ieee80211_vif_is_mesh(&sdata->vif))
3365 break;
3366 if (mesh_action_is_path_sel(mgmt) &&
3367 !mesh_path_sel_is_hwmp(sdata))
3368 break;
3369 goto queue;
3370 }
3371
3372 return RX_CONTINUE;
3373
3374 invalid:
3375 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3376 /* will return in the next handlers */
3377 return RX_CONTINUE;
3378
3379 handled:
3380 if (rx->sta)
3381 rx->sta->rx_stats.packets++;
3382 dev_kfree_skb(rx->skb);
3383 return RX_QUEUED;
3384
3385 queue:
3386 skb_queue_tail(&sdata->skb_queue, rx->skb);
3387 mac80211_queue_work(&local->hw, &sdata->work);
3388 if (rx->sta)
3389 rx->sta->rx_stats.packets++;
3390 return RX_QUEUED;
3391 }
3392
3393 static ieee80211_rx_result debug_noinline
3394 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3395 {
3396 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3397 int sig = 0;
3398
3399 /* skip known-bad action frames and return them in the next handler */
3400 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3401 return RX_CONTINUE;
3402
3403 /*
3404 * Getting here means the kernel doesn't know how to handle
3405 * it, but maybe userspace does ... include returned frames
3406 * so userspace can register for those to know whether ones
3407 * it transmitted were processed or returned.
3408 */
3409
3410 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3411 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3412 sig = status->signal;
3413
3414 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
3415 rx->skb->data, rx->skb->len, 0)) {
3416 if (rx->sta)
3417 rx->sta->rx_stats.packets++;
3418 dev_kfree_skb(rx->skb);
3419 return RX_QUEUED;
3420 }
3421
3422 return RX_CONTINUE;
3423 }
3424
3425 static ieee80211_rx_result debug_noinline
3426 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3427 {
3428 struct ieee80211_local *local = rx->local;
3429 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3430 struct sk_buff *nskb;
3431 struct ieee80211_sub_if_data *sdata = rx->sdata;
3432 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3433
3434 if (!ieee80211_is_action(mgmt->frame_control))
3435 return RX_CONTINUE;
3436
3437 /*
3438 * For AP mode, hostapd is responsible for handling any action
3439 * frames that we didn't handle, including returning unknown
3440 * ones. For all other modes we will return them to the sender,
3441 * setting the 0x80 bit in the action category, as required by
3442 * 802.11-2012 9.24.4.
3443 * Newer versions of hostapd shall also use the management frame
3444 * registration mechanisms, but older ones still use cooked
3445 * monitor interfaces so push all frames there.
3446 */
3447 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3448 (sdata->vif.type == NL80211_IFTYPE_AP ||
3449 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3450 return RX_DROP_MONITOR;
3451
3452 if (is_multicast_ether_addr(mgmt->da))
3453 return RX_DROP_MONITOR;
3454
3455 /* do not return rejected action frames */
3456 if (mgmt->u.action.category & 0x80)
3457 return RX_DROP_UNUSABLE;
3458
3459 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3460 GFP_ATOMIC);
3461 if (nskb) {
3462 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3463
3464 nmgmt->u.action.category |= 0x80;
3465 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3466 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3467
3468 memset(nskb->cb, 0, sizeof(nskb->cb));
3469
3470 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3471 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3472
3473 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3474 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3475 IEEE80211_TX_CTL_NO_CCK_RATE;
3476 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3477 info->hw_queue =
3478 local->hw.offchannel_tx_hw_queue;
3479 }
3480
3481 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3482 status->band, 0);
3483 }
3484 dev_kfree_skb(rx->skb);
3485 return RX_QUEUED;
3486 }
3487
3488 static ieee80211_rx_result debug_noinline
3489 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3490 {
3491 struct ieee80211_sub_if_data *sdata = rx->sdata;
3492 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3493 __le16 stype;
3494
3495 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3496
3497 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3498 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3499 sdata->vif.type != NL80211_IFTYPE_OCB &&
3500 sdata->vif.type != NL80211_IFTYPE_STATION)
3501 return RX_DROP_MONITOR;
3502
3503 switch (stype) {
3504 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3505 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3506 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3507 /* process for all: mesh, mlme, ibss */
3508 break;
3509 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3510 if (is_multicast_ether_addr(mgmt->da) &&
3511 !is_broadcast_ether_addr(mgmt->da))
3512 return RX_DROP_MONITOR;
3513
3514 /* process only for station/IBSS */
3515 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3516 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3517 return RX_DROP_MONITOR;
3518 break;
3519 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3520 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3521 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3522 if (is_multicast_ether_addr(mgmt->da) &&
3523 !is_broadcast_ether_addr(mgmt->da))
3524 return RX_DROP_MONITOR;
3525
3526 /* process only for station */
3527 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3528 return RX_DROP_MONITOR;
3529 break;
3530 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3531 /* process only for ibss and mesh */
3532 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3533 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3534 return RX_DROP_MONITOR;
3535 break;
3536 default:
3537 return RX_DROP_MONITOR;
3538 }
3539
3540 /* queue up frame and kick off work to process it */
3541 skb_queue_tail(&sdata->skb_queue, rx->skb);
3542 mac80211_queue_work(&rx->local->hw, &sdata->work);
3543 if (rx->sta)
3544 rx->sta->rx_stats.packets++;
3545
3546 return RX_QUEUED;
3547 }
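/*
 * Frames queued here are later picked up by the interface work
 * (sdata->work) and dispatched to the appropriate MLME code (station,
 * IBSS, OCB or mesh) outside of the RX path.
 */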
3548
3549 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3550 struct ieee80211_rate *rate)
3551 {
3552 struct ieee80211_sub_if_data *sdata;
3553 struct ieee80211_local *local = rx->local;
3554 struct sk_buff *skb = rx->skb, *skb2;
3555 struct net_device *prev_dev = NULL;
3556 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3557 int needed_headroom;
3558
3559 /*
3560 * If cooked monitor has been processed already, then
3561 * don't do it again. If not, set the flag.
3562 */
3563 if (rx->flags & IEEE80211_RX_CMNTR)
3564 goto out_free_skb;
3565 rx->flags |= IEEE80211_RX_CMNTR;
3566
3567 /* If there are no cooked monitor interfaces, just free the SKB */
3568 if (!local->cooked_mntrs)
3569 goto out_free_skb;
3570
3571 /* any radiotap vendor data has long been removed by this point */
3572 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3573 /* room for the radiotap header based on driver features */
3574 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3575
3576 if (skb_headroom(skb) < needed_headroom &&
3577 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3578 goto out_free_skb;
3579
3580 /* prepend radiotap information */
3581 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3582 false);
3583
3584 skb_reset_mac_header(skb);
3585 skb->ip_summed = CHECKSUM_UNNECESSARY;
3586 skb->pkt_type = PACKET_OTHERHOST;
3587 skb->protocol = htons(ETH_P_802_2);
3588
3589 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3590 if (!ieee80211_sdata_running(sdata))
3591 continue;
3592
3593 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3594 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3595 continue;
3596
3597 if (prev_dev) {
3598 skb2 = skb_clone(skb, GFP_ATOMIC);
3599 if (skb2) {
3600 skb2->dev = prev_dev;
3601 #ifndef CONFIG_DRIVERS_HDF_XR829
3602 netif_receive_skb(skb2);
3603 #else
3604 wal_netif_receive_skb(skb2);
3605 #endif
3606 }
3607 }
3608
3609 prev_dev = sdata->dev;
3610 ieee80211_rx_stats(sdata->dev, skb->len);
3611 }
3612
3613 if (prev_dev) {
3614 skb->dev = prev_dev;
3615 #ifndef CONFIG_DRIVERS_HDF_XR829
3616 netif_receive_skb(skb);
3617 #else
3618 wal_netif_receive_skb(skb);
3619 #endif
3620 return;
3621 }
3622
3623 out_free_skb:
3624 dev_kfree_skb(skb);
3625 }
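/*
 * Both RX_DROP_MONITOR and a final RX_CONTINUE end up here via
 * ieee80211_rx_handlers_result(), so frames dropped by the handlers are
 * still visible on cooked monitor interfaces.
 */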
3626
3627 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3628 ieee80211_rx_result res)
3629 {
3630 switch (res) {
3631 case RX_DROP_MONITOR:
3632 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3633 if (rx->sta)
3634 rx->sta->rx_stats.dropped++;
3635 /* fall through */
3636 case RX_CONTINUE: {
3637 struct ieee80211_rate *rate = NULL;
3638 struct ieee80211_supported_band *sband;
3639 struct ieee80211_rx_status *status;
3640
3641 status = IEEE80211_SKB_RXCB((rx->skb));
3642
3643 sband = rx->local->hw.wiphy->bands[status->band];
3644 if (status->encoding == RX_ENC_LEGACY)
3645 rate = &sband->bitrates[status->rate_idx];
3646
3647 ieee80211_rx_cooked_monitor(rx, rate);
3648 break;
3649 }
3650 case RX_DROP_UNUSABLE:
3651 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3652 if (rx->sta)
3653 rx->sta->rx_stats.dropped++;
3654 dev_kfree_skb(rx->skb);
3655 break;
3656 case RX_QUEUED:
3657 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3658 break;
3659 }
3660 }
3661
3662 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3663 struct sk_buff_head *frames)
3664 {
3665 ieee80211_rx_result res = RX_DROP_MONITOR;
3666 struct sk_buff *skb;
3667
3668 #define CALL_RXH(rxh) \
3669 do { \
3670 res = rxh(rx); \
3671 if (res != RX_CONTINUE) \
3672 goto rxh_next; \
3673 } while (0)
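/*
 * Each handler either returns RX_CONTINUE, letting the frame move on to
 * the next handler, or a terminal result that is passed to
 * ieee80211_rx_handlers_result() via the rxh_next label.
 */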
3674
3675 /* Lock here to avoid hitting all of the data used in the RX
3676 * path (e.g. key data, station data, ...) concurrently when
3677 * a frame is released from the reorder buffer due to timeout
3678 * from the timer, potentially concurrently with RX from the
3679 * driver.
3680 */
3681 spin_lock_bh(&rx->local->rx_path_lock);
3682
3683 while ((skb = __skb_dequeue(frames))) {
3684 /*
3685 * all the other fields are valid across frames
3686 * that belong to an A-MPDU since they are on the
3687 * same TID from the same station
3688 */
3689 rx->skb = skb;
3690
3691 CALL_RXH(ieee80211_rx_h_check_more_data);
3692 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3693 CALL_RXH(ieee80211_rx_h_sta_process);
3694 CALL_RXH(ieee80211_rx_h_decrypt);
3695 CALL_RXH(ieee80211_rx_h_defragment);
3696 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3697 /* must be after MMIC verify so header is counted in MPDU mic */
3698 #ifdef CONFIG_MAC80211_MESH
3699 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3700 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3701 #endif
3702 CALL_RXH(ieee80211_rx_h_amsdu);
3703 CALL_RXH(ieee80211_rx_h_data);
3704
3705 /* special treatment -- needs the queue */
3706 res = ieee80211_rx_h_ctrl(rx, frames);
3707 if (res != RX_CONTINUE)
3708 goto rxh_next;
3709
3710 CALL_RXH(ieee80211_rx_h_mgmt_check);
3711 CALL_RXH(ieee80211_rx_h_action);
3712 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3713 CALL_RXH(ieee80211_rx_h_action_return);
3714 CALL_RXH(ieee80211_rx_h_mgmt);
3715
3716 rxh_next:
3717 ieee80211_rx_handlers_result(rx, res);
3718
3719 #undef CALL_RXH
3720 }
3721
3722 spin_unlock_bh(&rx->local->rx_path_lock);
3723 }
3724
3725 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3726 {
3727 struct sk_buff_head reorder_release;
3728 ieee80211_rx_result res = RX_DROP_MONITOR;
3729
3730 __skb_queue_head_init(&reorder_release);
3731
3732 #define CALL_RXH(rxh) \
3733 do { \
3734 res = rxh(rx); \
3735 if (res != RX_CONTINUE) \
3736 goto rxh_next; \
3737 } while (0)
3738
3739 CALL_RXH(ieee80211_rx_h_check_dup);
3740 CALL_RXH(ieee80211_rx_h_check);
3741
3742 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3743
3744 ieee80211_rx_handlers(rx, &reorder_release);
3745 return;
3746
3747 rxh_next:
3748 ieee80211_rx_handlers_result(rx, res);
3749
3750 #undef CALL_RXH
3751 }
3752
3753 /*
3754 * This function makes calls into the RX path, therefore
3755 * it has to be invoked under RCU read lock.
3756 */
3757 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3758 {
3759 struct sk_buff_head frames;
3760 struct ieee80211_rx_data rx = {
3761 .sta = sta,
3762 .sdata = sta->sdata,
3763 .local = sta->local,
3764 /* This is OK -- must be QoS data frame */
3765 .security_idx = tid,
3766 .seqno_idx = tid,
3767 .napi = NULL, /* must be NULL to not have races */
3768 };
3769 struct tid_ampdu_rx *tid_agg_rx;
3770
3771 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3772 if (!tid_agg_rx)
3773 return;
3774
3775 __skb_queue_head_init(&frames);
3776
3777 spin_lock(&tid_agg_rx->reorder_lock);
3778 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3779 spin_unlock(&tid_agg_rx->reorder_lock);
3780
3781 if (!skb_queue_empty(&frames)) {
3782 struct ieee80211_event event = {
3783 .type = BA_FRAME_TIMEOUT,
3784 .u.ba.tid = tid,
3785 .u.ba.sta = &sta->sta,
3786 };
3787 drv_event_callback(rx.local, rx.sdata, &event);
3788 }
3789
3790 ieee80211_rx_handlers(&rx, &frames);
3791 }
3792
3793 void mac80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3794 u16 ssn, u64 filtered,
3795 u16 received_mpdus)
3796 {
3797 struct sta_info *sta;
3798 struct tid_ampdu_rx *tid_agg_rx;
3799 struct sk_buff_head frames;
3800 struct ieee80211_rx_data rx = {
3801 /* This is OK -- must be QoS data frame */
3802 .security_idx = tid,
3803 .seqno_idx = tid,
3804 };
3805 int i, diff;
3806
3807 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3808 return;
3809
3810 __skb_queue_head_init(&frames);
3811
3812 sta = container_of(pubsta, struct sta_info, sta);
3813
3814 rx.sta = sta;
3815 rx.sdata = sta->sdata;
3816 rx.local = sta->local;
3817
3818 rcu_read_lock();
3819 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3820 if (!tid_agg_rx)
3821 goto out;
3822
3823 spin_lock_bh(&tid_agg_rx->reorder_lock);
3824
3825 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3826 int release;
3827
3828 /* release all frames in the reorder buffer */
3829 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3830 IEEE80211_SN_MODULO;
3831 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3832 release, &frames);
3833 /* update ssn to match received ssn */
3834 tid_agg_rx->head_seq_num = ssn;
3835 } else {
3836 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3837 &frames);
3838 }
3839
3840 /* Handle the case where the received SSN is behind the MAC's SSN;
3841 * it can be up to tid_agg_rx->buf_size behind and still be valid. */
3842 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3843 if (diff >= tid_agg_rx->buf_size) {
3844 tid_agg_rx->reorder_buf_filtered = 0;
3845 goto release;
3846 }
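/* re-align the filtered bitmap so bit 0 corresponds to head_seq_num */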
3847 filtered = filtered >> diff;
3848 ssn += diff;
3849
3850 /* update bitmap */
3851 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3852 int index = (ssn + i) % tid_agg_rx->buf_size;
3853
3854 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3855 if (filtered & BIT_ULL(i))
3856 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3857 }
3858
3859 /* now process also frames that the filter marking released */
3860 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3861
3862 release:
3863 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3864
3865 ieee80211_rx_handlers(&rx, &frames);
3866
3867 out:
3868 rcu_read_unlock();
3869 }
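/*
 * Usage sketch (hypothetical driver code; the fw_* names are purely
 * illustrative): a driver whose firmware filters MPDUs of a BA session
 * would report the filter state back so the reorder buffer stays in
 * sync, e.g.
 *
 *	mac80211_mark_rx_ba_filtered_frames(pubsta, tid, fw_ssn,
 *					    fw_filtered_bitmap,
 *					    fw_num_received_mpdus);
 */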
3870
3871 /* main receive path */
3872
3873 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3874 {
3875 struct ieee80211_sub_if_data *sdata = rx->sdata;
3876 struct sk_buff *skb = rx->skb;
3877 struct ieee80211_hdr *hdr = (void *)skb->data;
3878 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3879 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3880 bool multicast = is_multicast_ether_addr(hdr->addr1);
3881
3882 switch (sdata->vif.type) {
3883 case NL80211_IFTYPE_STATION:
3884 if (!bssid && !sdata->u.mgd.use_4addr)
3885 return false;
3886 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
3887 return false;
3888 if (multicast)
3889 return true;
3890 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3891 case NL80211_IFTYPE_ADHOC:
3892 if (!bssid)
3893 return false;
3894 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3895 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3896 return false;
3897 if (ieee80211_is_beacon(hdr->frame_control))
3898 return true;
3899 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3900 return false;
3901 if (!multicast &&
3902 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3903 return false;
3904 if (!rx->sta) {
3905 int rate_idx;
3906 if (status->encoding != RX_ENC_LEGACY)
3907 rate_idx = 0; /* TODO: HT/VHT rates */
3908 else
3909 rate_idx = status->rate_idx;
3910 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3911 BIT(rate_idx));
3912 }
3913 return true;
3914 case NL80211_IFTYPE_OCB:
3915 if (!bssid)
3916 return false;
3917 if (!ieee80211_is_data_present(hdr->frame_control))
3918 return false;
3919 if (!is_broadcast_ether_addr(bssid))
3920 return false;
3921 if (!multicast &&
3922 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
3923 return false;
3924 if (!rx->sta) {
3925 int rate_idx;
3926 if (status->encoding != RX_ENC_LEGACY)
3927 rate_idx = 0; /* TODO: HT rates */
3928 else
3929 rate_idx = status->rate_idx;
3930 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
3931 BIT(rate_idx));
3932 }
3933 return true;
3934 case NL80211_IFTYPE_MESH_POINT:
3935 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3936 return false;
3937 if (multicast)
3938 return true;
3939 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3940 case NL80211_IFTYPE_AP_VLAN:
3941 case NL80211_IFTYPE_AP:
3942 if (!bssid)
3943 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3944
3945 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
3946 /*
3947 * Accept public action frames even when the
3948 * BSSID doesn't match, this is used for P2P
3949 * and location updates. Note that mac80211
3950 * itself never looks at these frames.
3951 */
3952 if (!multicast &&
3953 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3954 return false;
3955 if (ieee80211_is_public_action(hdr, skb->len))
3956 return true;
3957 return ieee80211_is_beacon(hdr->frame_control);
3958 }
3959
3960 if (!ieee80211_has_tods(hdr->frame_control)) {
3961 /* ignore data frames to TDLS-peers */
3962 if (ieee80211_is_data(hdr->frame_control))
3963 return false;
3964 /* ignore action frames to TDLS-peers */
3965 if (ieee80211_is_action(hdr->frame_control) &&
3966 !is_broadcast_ether_addr(bssid) &&
3967 !ether_addr_equal(bssid, hdr->addr1))
3968 return false;
3969 }
3970
3971 /*
3972 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3973 * the BSSID - we've checked that already but may have accepted
3974 * the wildcard (ff:ff:ff:ff:ff:ff).
3975 *
3976 * It also says:
3977 * The BSSID of the Data frame is determined as follows:
3978 * a) If the STA is contained within an AP or is associated
3979 * with an AP, the BSSID is the address currently in use
3980 * by the STA contained in the AP.
3981 *
3982 * So we should not accept data frames with an address that's
3983 * multicast.
3984 *
3985 * Accepting it also opens a security problem because stations
3986 * could encrypt it with the GTK and inject traffic that way.
3987 */
3988 if (ieee80211_is_data(hdr->frame_control) && multicast)
3989 return false;
3990
3991 return true;
3992 case NL80211_IFTYPE_WDS:
3993 if (bssid || !ieee80211_is_data(hdr->frame_control))
3994 return false;
3995 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
3996 case NL80211_IFTYPE_P2P_DEVICE:
3997 return ieee80211_is_public_action(hdr, skb->len) ||
3998 ieee80211_is_probe_req(hdr->frame_control) ||
3999 ieee80211_is_probe_resp(hdr->frame_control) ||
4000 ieee80211_is_beacon(hdr->frame_control);
4001 case NL80211_IFTYPE_NAN:
4002 /* Currently no frames on NAN interface are allowed */
4003 return false;
4004 default:
4005 break;
4006 }
4007
4008 WARN_ON_ONCE(1);
4009 return false;
4010 }
4011
4012 void ieee80211_check_fast_rx(struct sta_info *sta)
4013 {
4014 struct ieee80211_sub_if_data *sdata = sta->sdata;
4015 struct ieee80211_local *local = sdata->local;
4016 struct ieee80211_key *key;
4017 struct ieee80211_fast_rx fastrx = {
4018 .dev = sdata->dev,
4019 .vif_type = sdata->vif.type,
4020 .control_port_protocol = sdata->control_port_protocol,
4021 }, *old, *new = NULL;
4022 bool assign = false;
4023
4024 /* use sparse to check that we don't return without updating */
4025 __acquire(check_fast_rx);
4026
4027 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4028 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4029 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4030 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4031
4032 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4033
4034 /* fast-rx doesn't do reordering */
4035 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4036 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4037 goto clear;
4038
4039 switch (sdata->vif.type) {
4040 case NL80211_IFTYPE_STATION:
4041 if (sta->sta.tdls) {
4042 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4043 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4044 fastrx.expected_ds_bits = 0;
4045 } else {
4046 fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
4047 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4048 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4049 fastrx.expected_ds_bits =
4050 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4051 }
4052
4053 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4054 fastrx.expected_ds_bits |=
4055 cpu_to_le16(IEEE80211_FCTL_TODS);
4056 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4057 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4058 }
4059
4060 if (!sdata->u.mgd.powersave)
4061 break;
4062
4063 /* software powersave is a huge mess, avoid all of it */
4064 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4065 goto clear;
4066 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4067 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4068 goto clear;
4069 break;
4070 case NL80211_IFTYPE_AP_VLAN:
4071 case NL80211_IFTYPE_AP:
4072 /* parallel-rx requires this, at least with calls to
4073 * ieee80211_sta_ps_transition()
4074 */
4075 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4076 goto clear;
4077 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4078 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4079 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4080
4081 fastrx.internal_forward =
4082 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4083 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4084 !sdata->u.vlan.sta);
4085
4086 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4087 sdata->u.vlan.sta) {
4088 fastrx.expected_ds_bits |=
4089 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4090 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4091 fastrx.internal_forward = 0;
4092 }
4093
4094 break;
4095 default:
4096 goto clear;
4097 }
4098
4099 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4100 goto clear;
4101
4102 rcu_read_lock();
4103 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4104 if (key) {
4105 switch (key->conf.cipher) {
4106 case WLAN_CIPHER_SUITE_TKIP:
4107 /* we don't want to deal with MMIC in fast-rx */
4108 goto clear_rcu;
4109 case WLAN_CIPHER_SUITE_CCMP:
4110 case WLAN_CIPHER_SUITE_CCMP_256:
4111 case WLAN_CIPHER_SUITE_GCMP:
4112 case WLAN_CIPHER_SUITE_GCMP_256:
4113 break;
4114 default:
4115 /* We also don't want to deal with
4116 * WEP or cipher scheme.
4117 */
4118 goto clear_rcu;
4119 }
4120
4121 fastrx.key = true;
4122 fastrx.icv_len = key->conf.icv_len;
4123 }
4124
4125 assign = true;
4126 clear_rcu:
4127 rcu_read_unlock();
4128 clear:
4129 __release(check_fast_rx);
4130
4131 if (assign)
4132 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4133
4134 spin_lock_bh(&sta->lock);
4135 old = rcu_dereference_protected(sta->fast_rx, true);
4136 rcu_assign_pointer(sta->fast_rx, new);
4137 spin_unlock_bh(&sta->lock);
4138
4139 if (old)
4140 kfree_rcu(old, rcu_head);
4141 }
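/*
 * The resulting ieee80211_fast_rx entry (if any) is dereferenced under
 * RCU per MPDU in ieee80211_prepare_and_rx_handle(); when no entry is
 * assigned, frames simply take the regular handler path.
 */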
4142
4143 void ieee80211_clear_fast_rx(struct sta_info *sta)
4144 {
4145 struct ieee80211_fast_rx *old;
4146
4147 spin_lock_bh(&sta->lock);
4148 old = rcu_dereference_protected(sta->fast_rx, true);
4149 RCU_INIT_POINTER(sta->fast_rx, NULL);
4150 spin_unlock_bh(&sta->lock);
4151
4152 if (old)
4153 kfree_rcu(old, rcu_head);
4154 }
4155
4156 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4157 {
4158 struct ieee80211_local *local = sdata->local;
4159 struct sta_info *sta;
4160
4161 lockdep_assert_held(&local->sta_mtx);
4162
4163 list_for_each_entry(sta, &local->sta_list, list) {
4164 if (sdata != sta->sdata &&
4165 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4166 continue;
4167 ieee80211_check_fast_rx(sta);
4168 }
4169 }
4170
4171 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4172 {
4173 struct ieee80211_local *local = sdata->local;
4174
4175 mutex_lock(&local->sta_mtx);
4176 __ieee80211_check_fast_rx_iface(sdata);
4177 mutex_unlock(&local->sta_mtx);
4178 }
4179
4180 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4181 struct ieee80211_fast_rx *fast_rx)
4182 {
4183 struct sk_buff *skb = rx->skb;
4184 struct ieee80211_hdr *hdr = (void *)skb->data;
4185 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4186 struct sta_info *sta = rx->sta;
4187 int orig_len = skb->len;
4188 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4189 int snap_offs = hdrlen;
4190 struct {
4191 u8 snap[sizeof(rfc1042_header)];
4192 __be16 proto;
4193 } *payload __aligned(2);
4194 struct {
4195 u8 da[ETH_ALEN];
4196 u8 sa[ETH_ALEN];
4197 } addrs __aligned(2);
4198 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4199
4200 if (fast_rx->uses_rss)
4201 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4202
4203 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4204 * to a common data structure; drivers can implement that per queue
4205 * but we don't have that information in mac80211
4206 */
4207 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4208 return false;
4209
4210 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4211
4212 /* If using encryption, we also need to have:
4213 * - PN_VALIDATED: similar, but the implementation is tricky
4214 * - DECRYPTED: necessary for PN_VALIDATED
4215 */
4216 if (fast_rx->key &&
4217 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4218 return false;
4219
4220 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4221 return false;
4222
4223 if (unlikely(ieee80211_is_frag(hdr)))
4224 return false;
4225
4226 /* Since our interface address cannot be multicast, this
4227 * implicitly also rejects multicast frames without the
4228 * explicit check.
4229 *
4230 * We shouldn't get any *data* frames not addressed to us
4231 * (AP mode will accept multicast *management* frames), but
4232 * punting here will make it go through the full checks in
4233 * ieee80211_accept_frame().
4234 */
4235 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4236 return false;
4237
4238 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4239 IEEE80211_FCTL_TODS)) !=
4240 fast_rx->expected_ds_bits)
4241 return false;
4242
4243 /* assign the key to drop unencrypted frames (later)
4244 * and strip the IV/MIC if necessary
4245 */
4246 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4247 /* GCMP header length is the same */
4248 snap_offs += IEEE80211_CCMP_HDR_LEN;
4249 }
4250
4251 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4252 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4253 goto drop;
4254
4255 payload = (void *)(skb->data + snap_offs);
4256
4257 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4258 return false;
4259
4260 /* Don't handle these here since they require special code.
4261 * Accept AARP and IPX even though they should come with a
4262 * bridge-tunnel header - but if we get them this way then
4263 * there's little point in discarding them.
4264 */
4265 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4266 payload->proto == fast_rx->control_port_protocol))
4267 return false;
4268 }
4269
4270 /* after this point, don't punt to the slowpath! */
4271
4272 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4273 pskb_trim(skb, skb->len - fast_rx->icv_len))
4274 goto drop;
4275
4276 if (unlikely(fast_rx->sta_notify)) {
4277 ieee80211_sta_rx_notify(rx->sdata, hdr);
4278 fast_rx->sta_notify = false;
4279 }
4280
4281 /* statistics part of ieee80211_rx_h_sta_process() */
4282 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4283 stats->last_signal = status->signal;
4284 if (!fast_rx->uses_rss)
4285 ewma_signal_add(&sta->rx_stats_avg.signal,
4286 -status->signal);
4287 }
4288
4289 if (status->chains) {
4290 int i;
4291
4292 stats->chains = status->chains;
4293 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4294 int signal = status->chain_signal[i];
4295
4296 if (!(status->chains & BIT(i)))
4297 continue;
4298
4299 stats->chain_signal_last[i] = signal;
4300 if (!fast_rx->uses_rss)
4301 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4302 -signal);
4303 }
4304 }
4305 /* end of statistics */
4306
4307 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4308 goto drop;
4309
4310 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4311 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4312 RX_QUEUED)
4313 goto drop;
4314
4315 return true;
4316 }
4317
4318 stats->last_rx = jiffies;
4319 stats->last_rate = sta_stats_encode_rate(status);
4320
4321 stats->fragments++;
4322 stats->packets++;
4323
4324 /* do the header conversion - first grab the addresses */
4325 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4326 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4327 /* remove the SNAP but leave the ethertype */
4328 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4329 /* push the addresses in front */
4330 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4331
4332 skb->dev = fast_rx->dev;
4333
4334 ieee80211_rx_stats(fast_rx->dev, skb->len);
4335
4336 /* The seqno index has the same property as needed
4337 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4338 * for non-QoS-data frames. Here we know it's a data
4339 * frame, so count MSDUs.
4340 */
4341 u64_stats_update_begin(&stats->syncp);
4342 stats->msdu[rx->seqno_idx]++;
4343 stats->bytes += orig_len;
4344 u64_stats_update_end(&stats->syncp);
4345
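/*
 * AP/AP_VLAN bridging: multicast frames are copied back onto the
 * wireless medium (and still delivered locally), while unicast frames
 * destined for another associated station are forwarded directly
 * instead of being handed to the local stack.
 */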
4346 if (fast_rx->internal_forward) {
4347 struct sk_buff *xmit_skb = NULL;
4348 if (is_multicast_ether_addr(addrs.da)) {
4349 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4350 } else if (!ether_addr_equal(addrs.da, addrs.sa) &&
4351 sta_info_get(rx->sdata, addrs.da)) {
4352 xmit_skb = skb;
4353 skb = NULL;
4354 }
4355
4356 if (xmit_skb) {
4357 /*
4358 * Send to wireless media and increase priority by 256
4359 * to keep the received priority instead of
4360 * reclassifying the frame (see cfg80211_classify8021d).
4361 */
4362 xmit_skb->priority += 256;
4363 xmit_skb->protocol = htons(ETH_P_802_3);
4364 skb_reset_network_header(xmit_skb);
4365 skb_reset_mac_header(xmit_skb);
4366 dev_queue_xmit(xmit_skb);
4367 }
4368
4369 if (!skb)
4370 return true;
4371 }
4372
4373 /* deliver to local stack */
4374 #ifndef CONFIG_DRIVERS_HDF_XR829
4375 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4376 #endif
4377 memset(skb->cb, 0, sizeof(skb->cb));
4378 if (rx->napi)
4379 napi_gro_receive(rx->napi, skb);
4380 else
4381 #ifndef CONFIG_DRIVERS_HDF_XR829
4382 netif_receive_skb(skb);
4383 #else
4384 wal_netif_receive_skb(skb);
4385 #endif
4386
4387 return true;
4388 drop:
4389 dev_kfree_skb(skb);
4390 stats->dropped++;
4391 return true;
4392 }
4393
4394 /*
4395 * This function returns whether or not the SKB
4396 * was destined for RX processing or not, which,
4397 * if consume is true, is equivalent to whether
4398 * or not the skb was consumed.
4399 */
4400 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4401 struct sk_buff *skb, bool consume)
4402 {
4403 struct ieee80211_local *local = rx->local;
4404 struct ieee80211_sub_if_data *sdata = rx->sdata;
4405
4406 rx->skb = skb;
4407
4408 /* See if we can do fast-rx; if we have to copy we already lost,
4409 * so punt in that case. We should never have to deliver a data
4410 * frame to multiple interfaces anyway.
4411 *
4412 * We skip the ieee80211_accept_frame() call and do the necessary
4413 * checking inside ieee80211_invoke_fast_rx().
4414 */
4415 if (consume && rx->sta) {
4416 struct ieee80211_fast_rx *fast_rx;
4417
4418 fast_rx = rcu_dereference(rx->sta->fast_rx);
4419 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4420 return true;
4421 }
4422
4423 if (!ieee80211_accept_frame(rx))
4424 return false;
4425
4426 if (!consume) {
4427 skb = skb_copy(skb, GFP_ATOMIC);
4428 if (!skb) {
4429 if (net_ratelimit())
4430 wiphy_debug(local->hw.wiphy,
4431 "failed to copy skb for %s\n",
4432 sdata->name);
4433 return true;
4434 }
4435
4436 rx->skb = skb;
4437 }
4438
4439 ieee80211_invoke_rx_handlers(rx);
4440 return true;
4441 }
4442
4443 /*
4444 * This is the actual Rx frames handler. As it belongs to the Rx path it
4445 * must be called with rcu_read_lock protection.
4446 */
4447 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4448 struct ieee80211_sta *pubsta,
4449 struct sk_buff *skb,
4450 struct napi_struct *napi)
4451 {
4452 struct ieee80211_local *local = hw_to_local(hw);
4453 struct ieee80211_sub_if_data *sdata;
4454 struct ieee80211_hdr *hdr;
4455 __le16 fc;
4456 struct ieee80211_rx_data rx;
4457 struct ieee80211_sub_if_data *prev;
4458 struct rhlist_head *tmp;
4459 int err = 0;
4460
4461 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4462 memset(&rx, 0, sizeof(rx));
4463 rx.skb = skb;
4464 rx.local = local;
4465 rx.napi = napi;
4466
4467 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4468 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4469
4470 if (ieee80211_is_mgmt(fc)) {
4471 /* drop frame if too short for header */
4472 if (skb->len < ieee80211_hdrlen(fc))
4473 err = -ENOBUFS;
4474 else
4475 err = skb_linearize(skb);
4476 } else {
4477 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4478 }
4479
4480 if (err) {
4481 dev_kfree_skb(skb);
4482 return;
4483 }
4484
4485 hdr = (struct ieee80211_hdr *)skb->data;
4486 ieee80211_parse_qos(&rx);
4487 ieee80211_verify_alignment(&rx);
4488
4489 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4490 ieee80211_is_beacon(hdr->frame_control)))
4491 ieee80211_scan_rx(local, skb);
4492
4493 if (ieee80211_is_data(fc)) {
4494 struct sta_info *sta, *prev_sta;
4495
4496 if (pubsta) {
4497 rx.sta = container_of(pubsta, struct sta_info, sta);
4498 rx.sdata = rx.sta->sdata;
4499 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4500 return;
4501 goto out;
4502 }
4503
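/*
 * Otherwise deliver to every station entry matching addr2; all but the
 * last receive a copy (consume == false), the last one may consume the
 * original skb.
 */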
4504 prev_sta = NULL;
4505
4506 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4507 if (!prev_sta) {
4508 prev_sta = sta;
4509 continue;
4510 }
4511
4512 rx.sta = prev_sta;
4513 rx.sdata = prev_sta->sdata;
4514 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4515
4516 prev_sta = sta;
4517 }
4518
4519 if (prev_sta) {
4520 rx.sta = prev_sta;
4521 rx.sdata = prev_sta->sdata;
4522
4523 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4524 return;
4525 goto out;
4526 }
4527 }
4528
4529 prev = NULL;
4530
4531 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4532 if (!ieee80211_sdata_running(sdata))
4533 continue;
4534
4535 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4536 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4537 continue;
4538
4539 /*
4540 * frame is destined for this interface, but if it's
4541 * not also for the previous one we handle that after
4542 * the loop to avoid copying the SKB once too much
4543 */
4544
4545 if (!prev) {
4546 prev = sdata;
4547 continue;
4548 }
4549
4550 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4551 rx.sdata = prev;
4552 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4553
4554 prev = sdata;
4555 }
4556
4557 if (prev) {
4558 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4559 rx.sdata = prev;
4560
4561 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4562 return;
4563 }
4564
4565 out:
4566 dev_kfree_skb(skb);
4567 }
4568
4569 /*
4570 * This is the receive path handler. It is called by a low level driver when an
4571 * 802.11 MPDU is received from the hardware.
4572 */
4573 void mac80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4574 struct sk_buff *skb, struct napi_struct *napi)
4575 {
4576 struct ieee80211_local *local = hw_to_local(hw);
4577 struct ieee80211_rate *rate = NULL;
4578 struct ieee80211_supported_band *sband;
4579 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4580
4581 WARN_ON_ONCE(softirq_count() == 0);
4582
4583 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4584 goto drop;
4585
4586 sband = local->hw.wiphy->bands[status->band];
4587 if (WARN_ON(!sband))
4588 goto drop;
4589
4590 /*
4591 * If we're suspending, it is possible although not too likely
4592 * that we'd be receiving frames after having already partially
4593 * quiesced the stack. We can't process such frames then since
4594 * that might, for example, cause stations to be added or other
4595 * driver callbacks be invoked.
4596 */
4597 if (unlikely(local->quiescing || local->suspended))
4598 goto drop;
4599
4600 /* We might be during a HW reconfig, prevent Rx for the same reason */
4601 if (unlikely(local->in_reconfig))
4602 goto drop;
4603
4604 /*
4605 * The same happens when we're not even started,
4606 * but that's worth a warning.
4607 */
4608 if (WARN_ON(!local->started))
4609 goto drop;
4610
4611 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4612 /*
4613 * Validate the rate, unless a PLCP error means that
4614 * we probably can't have a valid rate here anyway.
4615 */
4616
4617 switch (status->encoding) {
4618 case RX_ENC_HT:
4619 /*
4620 * rate_idx is MCS index, which can be [0-76]
4621 * as documented on:
4622 *
4623 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
4624 *
4625 * Anything else would be some sort of driver or
4626 * hardware error. The driver should catch hardware
4627 * errors.
4628 */
4629 if (WARN(status->rate_idx > 76,
4630 "Rate marked as an HT rate but passed "
4631 "status->rate_idx is not "
4632 "an MCS index [0-76]: %d (0x%02x)\n",
4633 status->rate_idx,
4634 status->rate_idx))
4635 goto drop;
4636 break;
4637 case RX_ENC_VHT:
4638 if (WARN_ONCE(status->rate_idx > 9 ||
4639 !status->nss ||
4640 status->nss > 8,
4641 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4642 status->rate_idx, status->nss))
4643 goto drop;
4644 break;
4645 case RX_ENC_HE:
4646 if (WARN_ONCE(status->rate_idx > 11 ||
4647 !status->nss ||
4648 status->nss > 8,
4649 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
4650 status->rate_idx, status->nss))
4651 goto drop;
4652 break;
4653 default:
4654 WARN_ON_ONCE(1);
4655 /* fall through */
4656 case RX_ENC_LEGACY:
4657 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4658 goto drop;
4659 rate = &sband->bitrates[status->rate_idx];
4660 }
4661 }
4662
4663 status->rx_flags = 0;
4664
4665 /*
4666 * key references and virtual interfaces are protected using RCU
4667 * and this requires that we are in a read-side RCU section during
4668 * receive processing
4669 */
4670 rcu_read_lock();
4671
4672 /*
4673 * Frames with failed FCS/PLCP checksum are not returned,
4674 * all other frames are returned without radiotap header
4675 * if it was previously present.
4676 * Also, frames with less than 16 bytes are dropped.
4677 */
4678 skb = ieee80211_rx_monitor(local, skb, rate);
4679 if (!skb) {
4680 rcu_read_unlock();
4681 return;
4682 }
4683
4684 ieee80211_tpt_led_trig_rx(local,
4685 ((struct ieee80211_hdr *)skb->data)->frame_control,
4686 skb->len);
4687
4688 __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
4689
4690 rcu_read_unlock();
4691
4692 return;
4693 drop:
4694 kfree_skb(skb);
4695 }
4696
4697 /* This is a version of the rx handler that can be called from hard IRQ
4698 * context. Post the skb on the queue and schedule the tasklet. */
4699 void mac80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4700 {
4701 struct ieee80211_local *local = hw_to_local(hw);
4702
4703 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4704
4705 skb->pkt_type = IEEE80211_RX_MSG;
4706 skb_queue_tail(&local->skb_queue, skb);
4707 tasklet_schedule(&local->tasklet);
4708 }
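/*
 * Usage sketch (hypothetical driver code, names are illustrative only):
 * a driver's RX completion path fills in the ieee80211_rx_status stored
 * in skb->cb and then hands the MPDU to mac80211, e.g.
 *
 *	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
 *	mac80211_rx_napi(hw, pubsta, skb, napi);
 *
 * from softirq/NAPI context, or mac80211_rx_irqsafe(hw, skb) when the
 * frame completion runs in hard-IRQ context.
 */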
4709