1/*
2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
8 * Copyright (C) 2018 Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/jiffies.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/skbuff.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/rcupdate.h>
22#include <linux/export.h>
23#include <linux/bitops.h>
24#include <net/mac80211.h>
25#include <net/ieee80211_radiotap.h>
26#include <asm/unaligned.h>
27
28#include "ieee80211_i.h"
29#include "driver-ops.h"
30#include "led.h"
31#include "mesh.h"
32#include "wep.h"
33#include "wpa.h"
34#include "tkip.h"
35#include "wme.h"
36#include "rate.h"
37
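/*
 * Bump the per-CPU software RX packet/byte counters of the netdev a
 * received frame is being delivered to.
 */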
38static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
39{
40 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
41
42 u64_stats_update_begin(&tstats->syncp);
43 tstats->rx_packets++;
44 tstats->rx_bytes += len;
45 u64_stats_update_end(&tstats->syncp);
46}
47
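/*
 * Return a pointer to the BSSID within the 802.11 header, depending on the
 * frame type and (for BlockAck requests) the interface type, or NULL if no
 * BSSID can be determined or the header is too short.
 */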
48static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
49 enum nl80211_iftype type)
50{
51 __le16 fc = hdr->frame_control;
52
53 if (ieee80211_is_data(fc)) {
54 if (len < 24) /* drop incorrect hdr len (data) */
55 return NULL;
56
57 if (ieee80211_has_a4(fc))
58 return NULL;
59 if (ieee80211_has_tods(fc))
60 return hdr->addr1;
61 if (ieee80211_has_fromds(fc))
62 return hdr->addr2;
63
64 return hdr->addr3;
65 }
66
67 if (ieee80211_is_mgmt(fc)) {
68 if (len < 24) /* drop incorrect hdr len (mgmt) */
69 return NULL;
70 return hdr->addr3;
71 }
72
73 if (ieee80211_is_ctl(fc)) {
74 if (ieee80211_is_pspoll(fc))
75 return hdr->addr1;
76
77 if (ieee80211_is_back_req(fc)) {
78 switch (type) {
79 case NL80211_IFTYPE_STATION:
80 return hdr->addr2;
81 case NL80211_IFTYPE_AP:
82 case NL80211_IFTYPE_AP_VLAN:
83 return hdr->addr1;
84 default:
85 break; /* fall through to the return */
86 }
87 }
88 }
89
90 return NULL;
91}
92
93/*
94 * monitor mode reception
95 *
96 * This function cleans up the SKB, i.e. it removes all the stuff
97 * only useful for monitoring.
98 */
99static void remove_monitor_info(struct sk_buff *skb,
100 unsigned int present_fcs_len,
101 unsigned int rtap_space)
102{
103 if (present_fcs_len)
104 __pskb_trim(skb, skb->len - present_fcs_len);
105 __pskb_pull(skb, rtap_space);
106}
107
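/*
 * Decide whether this frame should only be shown on monitor interfaces
 * (and otherwise be dropped): frames with FCS/PLCP errors or the
 * monitor-only flag, frames that are too short, and control frames other
 * than PS-Poll and BlockAck requests.
 */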
108static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
109 unsigned int rtap_space)
110{
111 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
112 struct ieee80211_hdr *hdr;
113
114 hdr = (void *)(skb->data + rtap_space);
115
116 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
117 RX_FLAG_FAILED_PLCP_CRC |
118 RX_FLAG_ONLY_MONITOR))
119 return true;
120
121 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
122 return true;
123
124 if (ieee80211_is_ctl(hdr->frame_control) &&
125 !ieee80211_is_pspoll(hdr->frame_control) &&
126 !ieee80211_is_back_req(hdr->frame_control))
127 return true;
128
129 return false;
130}
131
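/*
 * Calculate the radiotap header length needed for this frame, based on the
 * RX status fields the driver filled in and the hardware capabilities.
 */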
132static int
133ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
134 struct ieee80211_rx_status *status,
135 struct sk_buff *skb)
136{
137 int len;
138
139 /* always present fields */
140 len = sizeof(struct ieee80211_radiotap_header) + 8;
141
142 /* allocate extra bitmaps */
143 if (status->chains)
144 len += 4 * hweight8(status->chains);
145
146 if (ieee80211_have_rx_timestamp(status)) {
147 len = ALIGN(len, 8);
148 len += 8;
149 }
150 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
151 len += 1;
152
153 /* antenna field, if we don't have per-chain info */
154 if (!status->chains)
155 len += 1;
156
157 /* padding for RX_FLAGS if necessary */
158 len = ALIGN(len, 2);
159
160 if (status->encoding == RX_ENC_HT) /* HT info */
161 len += 3;
162
163 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
164 len = ALIGN(len, 4);
165 len += 8;
166 }
167
168 if (status->encoding == RX_ENC_VHT) {
169 len = ALIGN(len, 2);
170 len += 12;
171 }
172
173 if (local->hw.radiotap_timestamp.units_pos >= 0) {
174 len = ALIGN(len, 8);
175 len += 12;
176 }
177
178 if (status->encoding == RX_ENC_HE &&
179 status->flag & RX_FLAG_RADIOTAP_HE) {
180 len = ALIGN(len, 2);
181 len += 12;
182 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
183 }
184
185 if (status->encoding == RX_ENC_HE &&
186 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
187 len = ALIGN(len, 2);
188 len += 12;
189 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
190 }
191
192 if (status->chains) {
193 /* antenna and antenna signal fields */
194 len += 2 * hweight8(status->chains);
195 }
196
197 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
198 struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
199
200 /* vendor presence bitmap */
201 len += 4;
202 /* alignment for fixed 6-byte vendor data header */
203 len = ALIGN(len, 2);
204 /* vendor data header */
205 len += 6;
206 if (WARN_ON(rtap->align == 0))
207 rtap->align = 1;
208 len = ALIGN(len, rtap->align);
209 len += rtap->len + rtap->pad;
210 }
211
212 return len;
213}
214
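/*
 * If the monitor interface is configured to follow a MU-MIMO group address,
 * queue VHT group ID management action frames addressed to it for
 * processing in the interface work.
 */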
215static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
216 struct sk_buff *skb,
217 int rtap_space)
218{
219 struct {
220 struct ieee80211_hdr_3addr hdr;
221 u8 category;
222 u8 action_code;
223 } __packed action;
224
225 if (!sdata)
226 return;
227
228 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
229
230 if (skb->len < rtap_space + sizeof(action) +
231 VHT_MUMIMO_GROUPS_DATA_LEN)
232 return;
233
234 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
235 return;
236
237 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
238
239 if (!ieee80211_is_action(action.hdr.frame_control))
240 return;
241
242 if (action.category != WLAN_CATEGORY_VHT)
243 return;
244
245 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
246 return;
247
248 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
249 return;
250
251 skb = skb_copy(skb, GFP_ATOMIC);
252 if (!skb)
253 return;
254
255 skb_queue_tail(&sdata->skb_queue, skb);
256 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
257}
258
259/*
260 * ieee80211_add_rx_radiotap_header - add radiotap header
261 *
262 * add a radiotap header containing all the fields which the hardware provided.
263 */
264static void
265ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
266 struct sk_buff *skb,
267 struct ieee80211_rate *rate,
268 int rtap_len, bool has_fcs)
269{
270 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
271 struct ieee80211_radiotap_header *rthdr;
272 unsigned char *pos;
273 __le32 *it_present;
274 u32 it_present_val;
275 u16 rx_flags = 0;
276 u16 channel_flags = 0;
277 int mpdulen, chain;
278 unsigned long chains = status->chains;
279 struct ieee80211_vendor_radiotap rtap = {};
280 struct ieee80211_radiotap_he he = {};
281 struct ieee80211_radiotap_he_mu he_mu = {};
282
283 if (status->flag & RX_FLAG_RADIOTAP_HE) {
284 he = *(struct ieee80211_radiotap_he *)skb->data;
285 skb_pull(skb, sizeof(he));
286 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
287 }
288
289 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
290 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
291 skb_pull(skb, sizeof(he_mu));
292 }
293
294 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
295 rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
296 /* rtap.len and rtap.pad are undone immediately */
297 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
298 }
299
300 mpdulen = skb->len;
301 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
302 mpdulen += FCS_LEN;
303
304 rthdr = skb_push(skb, rtap_len);
305 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
306 it_present = &rthdr->it_present;
307
308 /* radiotap header, set always present flags */
309 rthdr->it_len = cpu_to_le16(rtap_len);
310 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
311 BIT(IEEE80211_RADIOTAP_CHANNEL) |
312 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
313
314 if (!status->chains)
315 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
316
317 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
318 it_present_val |=
319 BIT(IEEE80211_RADIOTAP_EXT) |
320 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
321 put_unaligned_le32(it_present_val, it_present);
322 it_present++;
323 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
324 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
325 }
326
327 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
328 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
329 BIT(IEEE80211_RADIOTAP_EXT);
330 put_unaligned_le32(it_present_val, it_present);
331 it_present++;
332 it_present_val = rtap.present;
333 }
334
335 put_unaligned_le32(it_present_val, it_present);
336
337 pos = (void *)(it_present + 1);
338
339 /* the order of the following fields is important */
340
341 /* IEEE80211_RADIOTAP_TSFT */
342 if (ieee80211_have_rx_timestamp(status)) {
343 /* padding */
344 while ((pos - (u8 *)rthdr) & 7)
345 *pos++ = 0;
346 put_unaligned_le64(
347 ieee80211_calculate_rx_timestamp(local, status,
348 mpdulen, 0),
349 pos);
350 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
351 pos += 8;
352 }
353
354 /* IEEE80211_RADIOTAP_FLAGS */
355 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
356 *pos |= IEEE80211_RADIOTAP_F_FCS;
357 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
358 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
359 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
360 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
361 pos++;
362
363 /* IEEE80211_RADIOTAP_RATE */
364 if (!rate || status->encoding != RX_ENC_LEGACY) {
365 /*
 366 * Without legacy rate information don't add it; HT/VHT/HE (MCS)
 367 * rate information is a separate field in radiotap, added below.
 368 * The byte here is still needed as padding
369 * for the channel though, so initialise it to 0.
370 */
371 *pos = 0;
372 } else {
373 int shift = 0;
374 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
375 if (status->bw == RATE_INFO_BW_10)
376 shift = 1;
377 else if (status->bw == RATE_INFO_BW_5)
378 shift = 2;
379 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
380 }
381 pos++;
382
383 /* IEEE80211_RADIOTAP_CHANNEL */
384 put_unaligned_le16(status->freq, pos);
385 pos += 2;
386 if (status->bw == RATE_INFO_BW_10)
387 channel_flags |= IEEE80211_CHAN_HALF;
388 else if (status->bw == RATE_INFO_BW_5)
389 channel_flags |= IEEE80211_CHAN_QUARTER;
390
391 if (status->band == NL80211_BAND_5GHZ)
392 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
393 else if (status->encoding != RX_ENC_LEGACY)
394 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
395 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
396 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
397 else if (rate)
398 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
399 else
400 channel_flags |= IEEE80211_CHAN_2GHZ;
401 put_unaligned_le16(channel_flags, pos);
402 pos += 2;
403
404 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
405 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
406 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
407 *pos = status->signal;
408 rthdr->it_present |=
409 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
410 pos++;
411 }
412
413 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
414
415 if (!status->chains) {
416 /* IEEE80211_RADIOTAP_ANTENNA */
417 *pos = status->antenna;
418 pos++;
419 }
420
421 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
422
423 /* IEEE80211_RADIOTAP_RX_FLAGS */
424 /* ensure 2 byte alignment for the 2 byte field as required */
425 if ((pos - (u8 *)rthdr) & 1)
426 *pos++ = 0;
427 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
428 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
429 put_unaligned_le16(rx_flags, pos);
430 pos += 2;
431
432 if (status->encoding == RX_ENC_HT) {
433 unsigned int stbc;
434
435 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
436 *pos++ = local->hw.radiotap_mcs_details;
437 *pos = 0;
438 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
439 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
440 if (status->bw == RATE_INFO_BW_40)
441 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
442 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
443 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
444 if (status->enc_flags & RX_ENC_FLAG_LDPC)
445 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
446 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
447 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
448 pos++;
449 *pos++ = status->rate_idx;
450 }
451
452 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
453 u16 flags = 0;
454
455 /* ensure 4 byte alignment */
456 while ((pos - (u8 *)rthdr) & 3)
457 pos++;
458 rthdr->it_present |=
459 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
460 put_unaligned_le32(status->ampdu_reference, pos);
461 pos += 4;
462 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
463 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
464 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
465 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
466 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
467 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
468 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
469 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
470 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
471 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
472 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
473 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
474 put_unaligned_le16(flags, pos);
475 pos += 2;
476 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
477 *pos++ = status->ampdu_delimiter_crc;
478 else
479 *pos++ = 0;
480 *pos++ = 0;
481 }
482
483 if (status->encoding == RX_ENC_VHT) {
484 u16 known = local->hw.radiotap_vht_details;
485
486 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
487 put_unaligned_le16(known, pos);
488 pos += 2;
489 /* flags */
490 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
491 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
492 /* in VHT, STBC is binary */
493 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
494 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
495 if (status->enc_flags & RX_ENC_FLAG_BF)
496 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
497 pos++;
498 /* bandwidth */
499 switch (status->bw) {
500 case RATE_INFO_BW_80:
501 *pos++ = 4;
502 break;
503 case RATE_INFO_BW_160:
504 *pos++ = 11;
505 break;
506 case RATE_INFO_BW_40:
507 *pos++ = 1;
508 break;
509 default:
510 *pos++ = 0;
511 }
512 /* MCS/NSS */
513 *pos = (status->rate_idx << 4) | status->nss;
514 pos += 4;
515 /* coding field */
516 if (status->enc_flags & RX_ENC_FLAG_LDPC)
517 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
518 pos++;
519 /* group ID */
520 pos++;
521 /* partial_aid */
522 pos += 2;
523 }
524
525 if (local->hw.radiotap_timestamp.units_pos >= 0) {
526 u16 accuracy = 0;
527 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
528
529 rthdr->it_present |=
530 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
531
532 /* ensure 8 byte alignment */
533 while ((pos - (u8 *)rthdr) & 7)
534 pos++;
535
536 put_unaligned_le64(status->device_timestamp, pos);
537 pos += sizeof(u64);
538
539 if (local->hw.radiotap_timestamp.accuracy >= 0) {
540 accuracy = local->hw.radiotap_timestamp.accuracy;
541 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
542 }
543 put_unaligned_le16(accuracy, pos);
544 pos += sizeof(u16);
545
546 *pos++ = local->hw.radiotap_timestamp.units_pos;
547 *pos++ = flags;
548 }
549
550 if (status->encoding == RX_ENC_HE &&
551 status->flag & RX_FLAG_RADIOTAP_HE) {
552#define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
553
554 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
555 he.data6 |= HE_PREP(DATA6_NSTS,
556 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
557 status->enc_flags));
558 he.data3 |= HE_PREP(DATA3_STBC, 1);
559 } else {
560 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
561 }
562
563#define CHECK_GI(s) \
564 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
565 (int)NL80211_RATE_INFO_HE_GI_##s)
566
567 CHECK_GI(0_8);
568 CHECK_GI(1_6);
569 CHECK_GI(3_2);
570
571 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
572 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
573 he.data3 |= HE_PREP(DATA3_CODING,
574 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
575
576 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
577
578 switch (status->bw) {
579 case RATE_INFO_BW_20:
580 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
581 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
582 break;
583 case RATE_INFO_BW_40:
584 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
585 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
586 break;
587 case RATE_INFO_BW_80:
588 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
589 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
590 break;
591 case RATE_INFO_BW_160:
592 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
593 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
594 break;
595 case RATE_INFO_BW_HE_RU:
596#define CHECK_RU_ALLOC(s) \
597 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
598 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
599
600 CHECK_RU_ALLOC(26);
601 CHECK_RU_ALLOC(52);
602 CHECK_RU_ALLOC(106);
603 CHECK_RU_ALLOC(242);
604 CHECK_RU_ALLOC(484);
605 CHECK_RU_ALLOC(996);
606 CHECK_RU_ALLOC(2x996);
607
608 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
609 status->he_ru + 4);
610 break;
611 default:
612 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
613 }
614
615 /* ensure 2 byte alignment */
616 while ((pos - (u8 *)rthdr) & 1)
617 pos++;
618 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
619 memcpy(pos, &he, sizeof(he));
620 pos += sizeof(he);
621 }
622
623 if (status->encoding == RX_ENC_HE &&
624 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
625 /* ensure 2 byte alignment */
626 while ((pos - (u8 *)rthdr) & 1)
627 pos++;
628 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
629 memcpy(pos, &he_mu, sizeof(he_mu));
630 pos += sizeof(he_mu);
631 }
632
633 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
634 *pos++ = status->chain_signal[chain];
635 *pos++ = chain;
636 }
637
638 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
639 /* ensure 2 byte alignment for the vendor field as required */
640 if ((pos - (u8 *)rthdr) & 1)
641 *pos++ = 0;
642 *pos++ = rtap.oui[0];
643 *pos++ = rtap.oui[1];
644 *pos++ = rtap.oui[2];
645 *pos++ = rtap.subns;
646 put_unaligned_le16(rtap.len, pos);
647 pos += 2;
648 /* align the actual payload as requested */
649 while ((pos - (u8 *)rthdr) & (rtap.align - 1))
650 *pos++ = 0;
651 /* data (and possible padding) already follows */
652 }
653}
654
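/*
 * Prepare an skb for delivery to a monitor interface: prepend a radiotap
 * header, reusing the original skb when permitted or copying it with
 * enough headroom otherwise.
 */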
655static struct sk_buff *
656ieee80211_make_monitor_skb(struct ieee80211_local *local,
657 struct sk_buff **origskb,
658 struct ieee80211_rate *rate,
659 int rtap_space, bool use_origskb)
660{
661 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
662 int rt_hdrlen, needed_headroom;
663 struct sk_buff *skb;
664
665 /* room for the radiotap header based on driver features */
666 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
667 needed_headroom = rt_hdrlen - rtap_space;
668
669 if (use_origskb) {
670 /* only need to expand headroom if necessary */
671 skb = *origskb;
672 *origskb = NULL;
673
674 /*
675 * This shouldn't trigger often because most devices have an
676 * RX header they pull before we get here, and that should
677 * be big enough for our radiotap information. We should
678 * probably export the length to drivers so that we can have
679 * them allocate enough headroom to start with.
680 */
681 if (skb_headroom(skb) < needed_headroom &&
682 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
683 dev_kfree_skb(skb);
684 return NULL;
685 }
686 } else {
687 /*
688 * Need to make a copy and possibly remove radiotap header
689 * and FCS from the original.
690 */
691 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);
692
693 if (!skb)
694 return NULL;
695 }
696
697 /* prepend radiotap information */
698 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
699
700 skb_reset_mac_header(skb);
701 skb->ip_summed = CHECKSUM_UNNECESSARY;
702 skb->pkt_type = PACKET_OTHERHOST;
703 skb->protocol = htons(ETH_P_802_2);
704
705 return skb;
706}
707
708/*
709 * This function copies a received frame to all monitor interfaces and
710 * returns a cleaned-up SKB that no longer includes the FCS nor the
711 * radiotap header the driver might have added.
712 */
713static struct sk_buff *
714ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
715 struct ieee80211_rate *rate)
716{
717 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
718 struct ieee80211_sub_if_data *sdata;
719 struct sk_buff *monskb = NULL;
720 int present_fcs_len = 0;
721 unsigned int rtap_space = 0;
722 struct ieee80211_sub_if_data *monitor_sdata =
723 rcu_dereference(local->monitor_sdata);
724 bool only_monitor = false;
725
726 if (status->flag & RX_FLAG_RADIOTAP_HE)
727 rtap_space += sizeof(struct ieee80211_radiotap_he);
728
729 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
730 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
731
732 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
733 struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
734
735 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
736 }
737
738 /*
739 * First, we may need to make a copy of the skb because
740 * (1) we need to modify it for radiotap (if not present), and
741 * (2) the other RX handlers will modify the skb we got.
742 *
743 * We don't need to, of course, if we aren't going to return
744 * the SKB because it has a bad FCS/PLCP checksum.
745 */
746
747 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
748 if (unlikely(origskb->len <= FCS_LEN)) {
749 /* driver bug */
750 WARN_ON(1);
751 dev_kfree_skb(origskb);
752 return NULL;
753 }
754 present_fcs_len = FCS_LEN;
755 }
756
757 /* ensure hdr->frame_control and vendor radiotap data are in skb head */
758 if (!pskb_may_pull(origskb, 2 + rtap_space)) {
759 dev_kfree_skb(origskb);
760 return NULL;
761 }
762
763 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
764
765 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
766 if (only_monitor) {
767 dev_kfree_skb(origskb);
768 return NULL;
769 }
770
771 remove_monitor_info(origskb, present_fcs_len, rtap_space);
772 return origskb;
773 }
774
775 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
776
777 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
778 bool last_monitor = list_is_last(&sdata->u.mntr.list,
779 &local->mon_list);
780
781 if (!monskb)
782 monskb = ieee80211_make_monitor_skb(local, &origskb,
783 rate, rtap_space,
784 only_monitor &&
785 last_monitor);
786
787 if (monskb) {
788 struct sk_buff *skb;
789
790 if (last_monitor) {
791 skb = monskb;
792 monskb = NULL;
793 } else {
794 skb = skb_clone(monskb, GFP_ATOMIC);
795 }
796
797 if (skb) {
798 skb->dev = sdata->dev;
799 ieee80211_rx_stats(skb->dev, skb->len);
800 netif_receive_skb(skb);
801 }
802 }
803
804 if (last_monitor)
805 break;
806 }
807
808 /* this happens if last_monitor was erroneously false */
809 dev_kfree_skb(monskb);
810
811 /* ditto */
812 if (!origskb)
813 return NULL;
814
815 remove_monitor_info(origskb, present_fcs_len, rtap_space);
816 return origskb;
817}
818
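/*
 * Derive the TID, sequence number index and security index for this frame
 * from its QoS control field (or the non-QoS defaults) and set the skb
 * priority accordingly.
 */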
819static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
820{
821 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
822 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
823 int tid, seqno_idx, security_idx;
824
825 /* does the frame have a qos control field? */
826 if (ieee80211_is_data_qos(hdr->frame_control)) {
827 u8 *qc = ieee80211_get_qos_ctl(hdr);
828 /* frame has qos control */
829 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
830 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
831 status->rx_flags |= IEEE80211_RX_AMSDU;
832
833 seqno_idx = tid;
834 security_idx = tid;
835 } else {
836 /*
837 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
838 *
839 * Sequence numbers for management frames, QoS data
840 * frames with a broadcast/multicast address in the
841 * Address 1 field, and all non-QoS data frames sent
842 * by QoS STAs are assigned using an additional single
843 * modulo-4096 counter, [...]
844 *
845 * We also use that counter for non-QoS STAs.
846 */
847 seqno_idx = IEEE80211_NUM_TIDS;
848 security_idx = 0;
849 if (ieee80211_is_mgmt(hdr->frame_control))
850 security_idx = IEEE80211_NUM_TIDS;
851 tid = 0;
852 }
853
854 rx->seqno_idx = seqno_idx;
855 rx->security_idx = security_idx;
 856 * Set skb->priority to the 802.1D tag if the highest-order bit of the TID
 857 * is not set; for now, set skb->priority to 0 in the other cases. */
858 rx->skb->priority = (tid > 7) ? 0 : tid;
859}
860
861/**
862 * DOC: Packet alignment
863 *
864 * Drivers always need to pass packets that are aligned to two-byte boundaries
865 * to the stack.
866 *
 867 * Additionally, drivers should, if possible, align the payload data in a way that
868 * guarantees that the contained IP header is aligned to a four-byte
869 * boundary. In the case of regular frames, this simply means aligning the
870 * payload to a four-byte boundary (because either the IP header is directly
871 * contained, or IV/RFC1042 headers that have a length divisible by four are
872 * in front of it). If the payload data is not properly aligned and the
873 * architecture doesn't support efficient unaligned operations, mac80211
874 * will align the data.
875 *
876 * With A-MSDU frames, however, the payload data address must yield two modulo
877 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
878 * push the IP header further back to a multiple of four again. Thankfully, the
879 * specs were sane enough this time around to require padding each A-MSDU
880 * subframe to a length that is a multiple of four.
881 *
 882 * Padding such as that added by Atheros hardware between the 802.11 header
 883 * and the payload is not supported; the driver is required to move the
 884 * 802.11 header so that it is directly in front of the payload in that case.
885 */
886static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
887{
888#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
889 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
890#endif
891}
892
893
894/* rx handlers */
895
896static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
897{
898 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
899
900 if (is_multicast_ether_addr(hdr->addr1))
901 return 0;
902
903 return ieee80211_is_robust_mgmt_frame(skb);
904}
905
906
907static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
908{
909 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
910
911 if (!is_multicast_ether_addr(hdr->addr1))
912 return 0;
913
914 return ieee80211_is_robust_mgmt_frame(skb);
915}
916
917
918/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
919static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
920{
921 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
922 struct ieee80211_mmie *mmie;
923 struct ieee80211_mmie_16 *mmie16;
924
925 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
926 return -1;
927
928 if (!ieee80211_is_robust_mgmt_frame(skb))
929 return -1; /* not a robust management frame */
930
931 mmie = (struct ieee80211_mmie *)
932 (skb->data + skb->len - sizeof(*mmie));
933 if (mmie->element_id == WLAN_EID_MMIE &&
934 mmie->length == sizeof(*mmie) - 2)
935 return le16_to_cpu(mmie->key_id);
936
937 mmie16 = (struct ieee80211_mmie_16 *)
938 (skb->data + skb->len - sizeof(*mmie16));
939 if (skb->len >= 24 + sizeof(*mmie16) &&
940 mmie16->element_id == WLAN_EID_MMIE &&
941 mmie16->length == sizeof(*mmie16) - 2)
942 return le16_to_cpu(mmie16->key_id);
943
944 return -1;
945}
946
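/*
 * Extract the key index from a frame protected with a vendor cipher
 * scheme; returns a negative error if the frame is too short.
 */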
947static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
948 struct sk_buff *skb)
949{
950 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
951 __le16 fc;
952 int hdrlen;
953 u8 keyid;
954
955 fc = hdr->frame_control;
956 hdrlen = ieee80211_hdrlen(fc);
957
958 if (skb->len < hdrlen + cs->hdr_len)
959 return -EINVAL;
960
961 skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
962 keyid &= cs->key_idx_mask;
963 keyid >>= cs->key_idx_shift;
964
965 return keyid;
966}
967
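/*
 * Basic mesh-specific checks: validate the address fields of data frames
 * and, while no peer link is established, only accept the frames needed
 * for peer link establishment and discovery.
 */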
968static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
969{
970 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
971 char *dev_addr = rx->sdata->vif.addr;
972
973 if (ieee80211_is_data(hdr->frame_control)) {
974 if (is_multicast_ether_addr(hdr->addr1)) {
975 if (ieee80211_has_tods(hdr->frame_control) ||
976 !ieee80211_has_fromds(hdr->frame_control))
977 return RX_DROP_MONITOR;
978 if (ether_addr_equal(hdr->addr3, dev_addr))
979 return RX_DROP_MONITOR;
980 } else {
981 if (!ieee80211_has_a4(hdr->frame_control))
982 return RX_DROP_MONITOR;
983 if (ether_addr_equal(hdr->addr4, dev_addr))
984 return RX_DROP_MONITOR;
985 }
986 }
987
988 /* If there is not an established peer link and this is not a peer link
 989 * establishment frame, beacon or probe, drop the frame.
990 */
991
992 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
993 struct ieee80211_mgmt *mgmt;
994
995 if (!ieee80211_is_mgmt(hdr->frame_control))
996 return RX_DROP_MONITOR;
997
998 if (ieee80211_is_action(hdr->frame_control)) {
999 u8 category;
1000
1001 /* make sure category field is present */
1002 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1003 return RX_DROP_MONITOR;
1004
1005 mgmt = (struct ieee80211_mgmt *)hdr;
1006 category = mgmt->u.action.category;
1007 if (category != WLAN_CATEGORY_MESH_ACTION &&
1008 category != WLAN_CATEGORY_SELF_PROTECTED)
1009 return RX_DROP_MONITOR;
1010 return RX_CONTINUE;
1011 }
1012
1013 if (ieee80211_is_probe_req(hdr->frame_control) ||
1014 ieee80211_is_probe_resp(hdr->frame_control) ||
1015 ieee80211_is_beacon(hdr->frame_control) ||
1016 ieee80211_is_auth(hdr->frame_control))
1017 return RX_CONTINUE;
1018
1019 return RX_DROP_MONITOR;
1020 }
1021
1022 return RX_CONTINUE;
1023}
1024
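/*
 * A reorder buffer slot is ready for release when it holds a complete
 * (A-)MSDU, or when the driver has marked it as filtered.
 */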
1025static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1026 int index)
1027{
1028 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1029 struct sk_buff *tail = skb_peek_tail(frames);
1030 struct ieee80211_rx_status *status;
1031
1032 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1033 return true;
1034
1035 if (!tail)
1036 return false;
1037
1038 status = IEEE80211_SKB_RXCB(tail);
1039 if (status->flag & RX_FLAG_AMSDU_MORE)
1040 return false;
1041
1042 return true;
1043}
1044
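/*
 * Move the frames stored at the given reorder buffer index onto the frames
 * queue (purging the slot if it only holds an incomplete A-MSDU) and
 * advance the head sequence number.
 */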
1045static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1046 struct tid_ampdu_rx *tid_agg_rx,
1047 int index,
1048 struct sk_buff_head *frames)
1049{
1050 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1051 struct sk_buff *skb;
1052 struct ieee80211_rx_status *status;
1053
1054 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1055
1056 if (skb_queue_empty(skb_list))
1057 goto no_frame;
1058
1059 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1060 __skb_queue_purge(skb_list);
1061 goto no_frame;
1062 }
1063
1064 /* release frames from the reorder ring buffer */
1065 tid_agg_rx->stored_mpdu_num--;
1066 while ((skb = __skb_dequeue(skb_list))) {
1067 status = IEEE80211_SKB_RXCB(skb);
1068 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1069 __skb_queue_tail(frames, skb);
1070 }
1071
1072no_frame:
1073 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1074 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1075}
1076
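/*
 * Release all buffered frames with a sequence number lower than
 * @head_seq_num onto the @frames queue.
 */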
1077static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1078 struct tid_ampdu_rx *tid_agg_rx,
1079 u16 head_seq_num,
1080 struct sk_buff_head *frames)
1081{
1082 int index;
1083
1084 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1085
1086 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1087 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1088 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1089 frames);
1090 }
1091}
1092
1093/*
1094 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1095 * the skb was added to the buffer longer than this time ago, the earlier
1096 * frames that have not yet been received are assumed to be lost and the skb
1097 * can be released for processing. This may also release other skb's from the
1098 * reorder buffer if there are no additional gaps between the frames.
1099 *
1100 * Callers must hold tid_agg_rx->reorder_lock.
1101 */
1102#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1103
1104static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1105 struct tid_ampdu_rx *tid_agg_rx,
1106 struct sk_buff_head *frames)
1107{
1108 int index, i, j;
1109
1110 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1111
1112 /* release the buffer until next missing frame */
1113 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1114 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1115 tid_agg_rx->stored_mpdu_num) {
1116 /*
1117 * No buffers ready to be released, but check whether any
1118 * frames in the reorder buffer have timed out.
1119 */
1120 int skipped = 1;
1121 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1122 j = (j + 1) % tid_agg_rx->buf_size) {
1123 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1124 skipped++;
1125 continue;
1126 }
1127 if (skipped &&
1128 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1129 HT_RX_REORDER_BUF_TIMEOUT))
1130 goto set_release_timer;
1131
1132 /* don't leave incomplete A-MSDUs around */
1133 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1134 i = (i + 1) % tid_agg_rx->buf_size)
1135 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1136
1137 ht_dbg_ratelimited(sdata,
1138 "release an RX reorder frame due to timeout on earlier frames\n");
1139 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1140 frames);
1141
1142 /*
1143 * Increment the head seq# also for the skipped slots.
1144 */
1145 tid_agg_rx->head_seq_num =
1146 (tid_agg_rx->head_seq_num +
1147 skipped) & IEEE80211_SN_MASK;
1148 skipped = 0;
1149 }
1150 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1151 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1152 frames);
1153 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1154 }
1155
1156 if (tid_agg_rx->stored_mpdu_num) {
1157 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1158
1159 for (; j != (index - 1) % tid_agg_rx->buf_size;
1160 j = (j + 1) % tid_agg_rx->buf_size) {
1161 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1162 break;
1163 }
1164
1165 set_release_timer:
1166
1167 if (!tid_agg_rx->removed)
1168 mod_timer(&tid_agg_rx->reorder_timer,
1169 tid_agg_rx->reorder_time[j] + 1 +
1170 HT_RX_REORDER_BUF_TIMEOUT);
1171 } else {
1172 del_timer(&tid_agg_rx->reorder_timer);
1173 }
1174}
1175
1176/*
1177 * As this function belongs to the RX path it must be under
1178 * rcu_read_lock protection. It returns false if the frame
1179 * can be processed immediately, true if it was consumed.
1180 */
1181static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1182 struct tid_ampdu_rx *tid_agg_rx,
1183 struct sk_buff *skb,
1184 struct sk_buff_head *frames)
1185{
1186 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1187 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1188 u16 sc = le16_to_cpu(hdr->seq_ctrl);
1189 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
1190 u16 head_seq_num, buf_size;
1191 int index;
1192 bool ret = true;
1193
1194 spin_lock(&tid_agg_rx->reorder_lock);
1195
1196 /*
1197 * Offloaded BA sessions have no known starting sequence number so pick
 1198 * one from the first frame received for this TID after the BA session started.
1199 */
1200 if (unlikely(tid_agg_rx->auto_seq)) {
1201 tid_agg_rx->auto_seq = false;
1202 tid_agg_rx->ssn = mpdu_seq_num;
1203 tid_agg_rx->head_seq_num = mpdu_seq_num;
1204 }
1205
1206 buf_size = tid_agg_rx->buf_size;
1207 head_seq_num = tid_agg_rx->head_seq_num;
1208
1209 /*
1210 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1211 * be reordered.
1212 */
1213 if (unlikely(!tid_agg_rx->started)) {
1214 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1215 ret = false;
1216 goto out;
1217 }
1218 tid_agg_rx->started = true;
1219 }
1220
1221 /* frame with out of date sequence number */
1222 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1223 dev_kfree_skb(skb);
1224 goto out;
1225 }
1226
1227 /*
 1228 * If the frame's sequence number exceeds our buffering window
 1229 * size, release some previous frames to make room for this one.
1230 */
1231 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1232 head_seq_num = ieee80211_sn_inc(
1233 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1234 /* release stored frames up to new head to stack */
1235 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1236 head_seq_num, frames);
1237 }
1238
1239 /* Now the new frame is always in the range of the reordering buffer */
1240
1241 index = mpdu_seq_num % tid_agg_rx->buf_size;
1242
1243 /* check if we already stored this frame */
1244 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1245 dev_kfree_skb(skb);
1246 goto out;
1247 }
1248
1249 /*
1250 * If the current MPDU is in the right order and nothing else
1251 * is stored we can process it directly, no need to buffer it.
1252 * If it is first but there's something stored, we may be able
1253 * to release frames after this one.
1254 */
1255 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1256 tid_agg_rx->stored_mpdu_num == 0) {
1257 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1258 tid_agg_rx->head_seq_num =
1259 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1260 ret = false;
1261 goto out;
1262 }
1263
1264 /* put the frame in the reordering buffer */
1265 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1266 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1267 tid_agg_rx->reorder_time[index] = jiffies;
1268 tid_agg_rx->stored_mpdu_num++;
1269 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1270 }
1271
1272 out:
1273 spin_unlock(&tid_agg_rx->reorder_lock);
1274 return ret;
1275}
1276
1277/*
 1278 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that do not
 1279 * need reordering, or that can be released, are put on the frames queue.
1280 */
1281static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1282 struct sk_buff_head *frames)
1283{
1284 struct sk_buff *skb = rx->skb;
1285 struct ieee80211_local *local = rx->local;
1286 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1287 struct sta_info *sta = rx->sta;
1288 struct tid_ampdu_rx *tid_agg_rx;
1289 u16 sc;
1290 u8 tid, ack_policy;
1291
1292 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1293 is_multicast_ether_addr(hdr->addr1))
1294 goto dont_reorder;
1295
1296 /*
 1297 * filter the QoS data RX stream according to
 1298 * STA/TID and check if this STA/TID has an aggregation session
1299 */
1300
1301 if (!sta)
1302 goto dont_reorder;
1303
1304 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1305 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1306 tid = ieee80211_get_tid(hdr);
1307
1308 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1309 if (!tid_agg_rx) {
1310 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1311 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1312 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1313 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1314 WLAN_BACK_RECIPIENT,
1315 WLAN_REASON_QSTA_REQUIRE_SETUP);
1316 goto dont_reorder;
1317 }
1318
1319 /* qos null data frames are excluded */
1320 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1321 goto dont_reorder;
1322
1323 /* not part of a BA session */
1324 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1325 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
1326 goto dont_reorder;
1327
1328 /* new, potentially un-ordered, ampdu frame - process it */
1329
1330 /* reset session timer */
1331 if (tid_agg_rx->timeout)
1332 tid_agg_rx->last_rx = jiffies;
1333
1334 /* if this mpdu is fragmented - terminate rx aggregation session */
1335 sc = le16_to_cpu(hdr->seq_ctrl);
1336 if (sc & IEEE80211_SCTL_FRAG) {
1337 skb_queue_tail(&rx->sdata->skb_queue, skb);
1338 ieee80211_queue_work(&local->hw, &rx->sdata->work);
1339 return;
1340 }
1341
1342 /*
1343 * No locking needed -- we will only ever process one
1344 * RX packet at a time, and thus own tid_agg_rx. All
1345 * other code manipulating it needs to (and does) make
1346 * sure that we cannot get to it any more before doing
1347 * anything with it.
1348 */
1349 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1350 frames))
1351 return;
1352
1353 dont_reorder:
1354 __skb_queue_tail(frames, skb);
1355}
1356
1357static ieee80211_rx_result debug_noinline
1358ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1359{
1360 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1361 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1362
1363 if (status->flag & RX_FLAG_DUP_VALIDATED)
1364 return RX_CONTINUE;
1365
1366 /*
1367 * Drop duplicate 802.11 retransmissions
1368 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1369 */
1370
1371 if (rx->skb->len < 24)
1372 return RX_CONTINUE;
1373
1374 if (ieee80211_is_ctl(hdr->frame_control) ||
1375 ieee80211_is_nullfunc(hdr->frame_control) ||
1376 ieee80211_is_qos_nullfunc(hdr->frame_control) ||
1377 is_multicast_ether_addr(hdr->addr1))
1378 return RX_CONTINUE;
1379
1380 if (!rx->sta)
1381 return RX_CONTINUE;
1382
1383 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1384 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1385 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1386 rx->sta->rx_stats.num_duplicates++;
1387 return RX_DROP_UNUSABLE;
1388 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1389 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1390 }
1391
1392 return RX_CONTINUE;
1393}
1394
1395static ieee80211_rx_result debug_noinline
1396ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1397{
1398 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1399
1400 /* Drop disallowed frame classes based on STA auth/assoc state;
1401 * IEEE 802.11, Chap 5.5.
1402 *
1403 * mac80211 filters only based on association state, i.e. it drops
 1404 * Class 3 frames from stations that are not associated. hostapd sends
1405 * deauth/disassoc frames when needed. In addition, hostapd is
1406 * responsible for filtering on both auth and assoc states.
1407 */
1408
1409 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1410 return ieee80211_rx_mesh_check(rx);
1411
1412 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1413 ieee80211_is_pspoll(hdr->frame_control)) &&
1414 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1415 rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
1416 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1417 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1418 /*
1419 * accept port control frames from the AP even when it's not
1420 * yet marked ASSOC to prevent a race where we don't set the
1421 * assoc bit quickly enough before it sends the first frame
1422 */
1423 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1424 ieee80211_is_data_present(hdr->frame_control)) {
1425 unsigned int hdrlen;
1426 __be16 ethertype;
1427
1428 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1429
1430 if (rx->skb->len < hdrlen + 8)
1431 return RX_DROP_MONITOR;
1432
1433 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1434 if (ethertype == rx->sdata->control_port_protocol)
1435 return RX_CONTINUE;
1436 }
1437
1438 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1439 cfg80211_rx_spurious_frame(rx->sdata->dev,
1440 hdr->addr2,
1441 GFP_ATOMIC))
1442 return RX_DROP_UNUSABLE;
1443
1444 return RX_DROP_MONITOR;
1445 }
1446
1447 return RX_CONTINUE;
1448}
1449
1450
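/*
 * While PS-polling, keep requesting buffered frames from the AP for as
 * long as data frames from it have the More Data bit set.
 */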
1451static ieee80211_rx_result debug_noinline
1452ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1453{
1454 struct ieee80211_local *local;
1455 struct ieee80211_hdr *hdr;
1456 struct sk_buff *skb;
1457
1458 local = rx->local;
1459 skb = rx->skb;
1460 hdr = (struct ieee80211_hdr *) skb->data;
1461
1462 if (!local->pspolling)
1463 return RX_CONTINUE;
1464
1465 if (!ieee80211_has_fromds(hdr->frame_control))
1466 /* this is not from AP */
1467 return RX_CONTINUE;
1468
1469 if (!ieee80211_is_data(hdr->frame_control))
1470 return RX_CONTINUE;
1471
1472 if (!ieee80211_has_moredata(hdr->frame_control)) {
1473 /* AP has no more frames buffered for us */
1474 local->pspolling = false;
1475 return RX_CONTINUE;
1476 }
1477
1478 /* more data bit is set, let's request a new frame from the AP */
1479 ieee80211_send_pspoll(local, rx->sdata);
1480
1481 return RX_CONTINUE;
1482}
1483
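/*
 * Mark an AP/AP_VLAN client as being in powersave: account it in the BSS
 * powersave data, notify the driver where needed, stop fast-xmit and
 * record which of its TXQs currently hold buffered frames.
 */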
1484static void sta_ps_start(struct sta_info *sta)
1485{
1486 struct ieee80211_sub_if_data *sdata = sta->sdata;
1487 struct ieee80211_local *local = sdata->local;
1488 struct ps_data *ps;
1489 int tid;
1490
1491 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1492 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1493 ps = &sdata->bss->ps;
1494 else
1495 return;
1496
1497 atomic_inc(&ps->num_sta_ps);
1498 set_sta_flag(sta, WLAN_STA_PS_STA);
1499 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1500 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1501 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1502 sta->sta.addr, sta->sta.aid);
1503
1504 ieee80211_clear_fast_xmit(sta);
1505
1506 if (!sta->sta.txq[0])
1507 return;
1508
1509 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1510 if (txq_has_queue(sta->sta.txq[tid]))
1511 set_bit(tid, &sta->txq_buffered_tids);
1512 else
1513 clear_bit(tid, &sta->txq_buffered_tids);
1514 }
1515}
1516
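/*
 * Mark the station as awake and deliver the frames buffered for it, unless
 * the driver still has delivery blocked (WLAN_STA_PS_DRIVER), in which case
 * the wakeup completes when the driver unblocks it.
 */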
1517static void sta_ps_end(struct sta_info *sta)
1518{
1519 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1520 sta->sta.addr, sta->sta.aid);
1521
1522 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1523 /*
1524 * Clear the flag only if the other one is still set
1525 * so that the TX path won't start TX'ing new frames
1526 * directly ... In the case that the driver flag isn't
1527 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1528 */
1529 clear_sta_flag(sta, WLAN_STA_PS_STA);
1530 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1531 sta->sta.addr, sta->sta.aid);
1532 return;
1533 }
1534
1535 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1536 clear_sta_flag(sta, WLAN_STA_PS_STA);
1537 ieee80211_sta_ps_deliver_wakeup(sta);
1538}
1539
1540int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1541{
1542 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1543 bool in_ps;
1544
1545 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1546
1547 /* Don't let the same PS state be set twice */
1548 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1549 if ((start && in_ps) || (!start && !in_ps))
1550 return -EINVAL;
1551
1552 if (start)
1553 sta_ps_start(sta);
1554 else
1555 sta_ps_end(sta);
1556
1557 return 0;
1558}
1559EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1560
1561void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1562{
1563 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1564
1565 if (test_sta_flag(sta, WLAN_STA_SP))
1566 return;
1567
1568 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1569 ieee80211_sta_ps_deliver_poll_response(sta);
1570 else
1571 set_sta_flag(sta, WLAN_STA_PSPOLL);
1572}
1573EXPORT_SYMBOL(ieee80211_sta_pspoll);
1574
1575void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1576{
1577 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1578 int ac = ieee80211_ac_from_tid(tid);
1579
1580 /*
1581 * If this AC is not trigger-enabled do nothing unless the
1582 * driver is calling us after it already checked.
1583 *
1584 * NB: This could/should check a separate bitmap of trigger-
1585 * enabled queues, but for now we only implement uAPSD w/o
1586 * TSPEC changes to the ACs, so they're always the same.
1587 */
1588 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1589 tid != IEEE80211_NUM_TIDS)
1590 return;
1591
1592 /* if we are in a service period, do nothing */
1593 if (test_sta_flag(sta, WLAN_STA_SP))
1594 return;
1595
1596 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1597 ieee80211_sta_ps_deliver_uapsd(sta);
1598 else
1599 set_sta_flag(sta, WLAN_STA_UAPSD);
1600}
1601EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1602
1603static ieee80211_rx_result debug_noinline
1604ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1605{
1606 struct ieee80211_sub_if_data *sdata = rx->sdata;
1607 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1608 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1609
1610 if (!rx->sta)
1611 return RX_CONTINUE;
1612
1613 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1614 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1615 return RX_CONTINUE;
1616
1617 /*
1618 * The device handles station powersave, so don't do anything about
 1619 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
 1620 * to mac80211 since they're handled by the device.)
1621 */
1622 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1623 return RX_CONTINUE;
1624
1625 /*
1626 * Don't do anything if the station isn't already asleep. In
1627 * the uAPSD case, the station will probably be marked asleep,
1628 * in the PS-Poll case the station must be confused ...
1629 */
1630 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1631 return RX_CONTINUE;
1632
1633 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1634 ieee80211_sta_pspoll(&rx->sta->sta);
1635
1636 /* Free PS Poll skb here instead of returning RX_DROP that would
 1637 * count as a dropped frame. */
1638 dev_kfree_skb(rx->skb);
1639
1640 return RX_QUEUED;
1641 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1642 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1643 ieee80211_has_pm(hdr->frame_control) &&
1644 (ieee80211_is_data_qos(hdr->frame_control) ||
1645 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1646 u8 tid = ieee80211_get_tid(hdr);
1647
1648 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1649 }
1650
1651 return RX_CONTINUE;
1652}
1653
1654static ieee80211_rx_result debug_noinline
1655ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1656{
1657 struct sta_info *sta = rx->sta;
1658 struct sk_buff *skb = rx->skb;
1659 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1660 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1661 int i;
1662
1663 if (!sta)
1664 return RX_CONTINUE;
1665
1666 /*
1667 * Update last_rx only for IBSS packets which are for the current
 1668 * BSSID and for stations already AUTHORIZED to avoid keeping the
1669 * current IBSS network alive in cases where other STAs start
1670 * using different BSSID. This will also give the station another
1671 * chance to restart the authentication/authorization in case
1672 * something went wrong the first time.
1673 */
1674 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1675 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1676 NL80211_IFTYPE_ADHOC);
1677 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1678 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1679 sta->rx_stats.last_rx = jiffies;
1680 if (ieee80211_is_data(hdr->frame_control) &&
1681 !is_multicast_ether_addr(hdr->addr1))
1682 sta->rx_stats.last_rate =
1683 sta_stats_encode_rate(status);
1684 }
1685 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1686 sta->rx_stats.last_rx = jiffies;
1687 } else if (!is_multicast_ether_addr(hdr->addr1)) {
1688 /*
 1689 * Mesh beacons will update last_rx if they are found to
1690 * match the current local configuration when processed.
1691 */
1692 sta->rx_stats.last_rx = jiffies;
1693 if (ieee80211_is_data(hdr->frame_control))
1694 sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1695 }
1696
1697 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
1698 ieee80211_sta_rx_notify(rx->sdata, hdr);
1699
1700 sta->rx_stats.fragments++;
1701
1702 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
1703 sta->rx_stats.bytes += rx->skb->len;
1704 u64_stats_update_end(&rx->sta->rx_stats.syncp);
1705
1706 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1707 sta->rx_stats.last_signal = status->signal;
1708 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
1709 }
1710
1711 if (status->chains) {
1712 sta->rx_stats.chains = status->chains;
1713 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1714 int signal = status->chain_signal[i];
1715
1716 if (!(status->chains & BIT(i)))
1717 continue;
1718
1719 sta->rx_stats.chain_signal_last[i] = signal;
1720 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
1721 -signal);
1722 }
1723 }
1724
1725 /*
1726 * Change STA power saving mode only at the end of a frame
1727 * exchange sequence, and only for a data or management
1728 * frame as specified in IEEE 802.11-2016 11.2.3.2
1729 */
1730 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1731 !ieee80211_has_morefrags(hdr->frame_control) &&
1732 !is_multicast_ether_addr(hdr->addr1) &&
1733 (ieee80211_is_mgmt(hdr->frame_control) ||
1734 ieee80211_is_data(hdr->frame_control)) &&
1735 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1736 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1737 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1738 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1739 if (!ieee80211_has_pm(hdr->frame_control))
1740 sta_ps_end(sta);
1741 } else {
1742 if (ieee80211_has_pm(hdr->frame_control))
1743 sta_ps_start(sta);
1744 }
1745 }
1746
1747 /* mesh power save support */
1748 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1749 ieee80211_mps_rx_h_sta_process(sta, hdr);
1750
1751 /*
1752 * Drop (qos-)data::nullfunc frames silently, since they
1753 * are used only to control station power saving mode.
1754 */
1755 if (ieee80211_is_nullfunc(hdr->frame_control) ||
1756 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
1757 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1758
1759 /*
1760 * If we receive a 4-addr nullfunc frame from a STA
 1761 * that has not yet been moved to a 4-addr STA VLAN, send
 1762 * the event to userspace and, for older hostapd, drop
1763 * the frame to the monitor interface.
1764 */
1765 if (ieee80211_has_a4(hdr->frame_control) &&
1766 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1767 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1768 !rx->sdata->u.vlan.sta))) {
1769 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1770 cfg80211_rx_unexpected_4addr_frame(
1771 rx->sdata->dev, sta->sta.addr,
1772 GFP_ATOMIC);
1773 return RX_DROP_MONITOR;
1774 }
1775 /*
1776 * Update counter and free packet here to avoid
 1777 * counting this as a dropped packet.
1778 */
1779 sta->rx_stats.packets++;
1780 dev_kfree_skb(rx->skb);
1781 return RX_QUEUED;
1782 }
1783
1784 return RX_CONTINUE;
1785} /* ieee80211_rx_h_sta_process */
1786
1787static ieee80211_rx_result debug_noinline
1788ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1789{
1790 struct sk_buff *skb = rx->skb;
1791 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1792 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1793 int keyidx;
1794 int hdrlen;
1795 ieee80211_rx_result result = RX_DROP_UNUSABLE;
1796 struct ieee80211_key *sta_ptk = NULL;
1797 int mmie_keyidx = -1;
1798 __le16 fc;
1799 const struct ieee80211_cipher_scheme *cs = NULL;
1800
1801 /*
1802 * Key selection 101
1803 *
1804 * There are four types of keys:
1805 * - GTK (group keys)
1806 * - IGTK (group keys for management frames)
1807 * - PTK (pairwise keys)
1808 * - STK (station-to-station pairwise keys)
1809 *
1810 * When selecting a key, we have to distinguish between multicast
1811 * (including broadcast) and unicast frames, the latter can only
1812 * use PTKs and STKs while the former always use GTKs and IGTKs.
1813 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
1814 * unicast frames can also use key indices like GTKs. Hence, if we
1815 * don't have a PTK/STK we check the key index for a WEP key.
1816 *
1817 * Note that in a regular BSS, multicast frames are sent by the
1818 * AP only, associated stations unicast the frame to the AP first
1819 * which then multicasts it on their behalf.
1820 *
1821 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, which is something we don't currently handle.
1823 * The spec seems to expect that one negotiates the same key with
1824 * every station but there's no such requirement; VLANs could be
1825 * possible.
1826 */
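	/*
	 * Editorial sketch of the selection below (a rough summary; the
	 * code that follows is authoritative):
	 *
	 *  - unicast frame and the STA has a PTK -> use the PTK
	 *  - frame carries an MMIE               -> use the IGTK (BIP)
	 *  - frame is not protected at all       -> only pick a plausible
	 *    key so the "should this have been encrypted?" checks can run
	 *  - otherwise                           -> GTK/WEP key selected by
	 *    the key index found in the IV
	 */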
1827
1828 /* start without a key */
1829 rx->key = NULL;
1830 fc = hdr->frame_control;
1831
1832 if (rx->sta) {
1833 int keyid = rx->sta->ptk_idx;
1834
1835 if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
1836 cs = rx->sta->cipher_scheme;
1837 keyid = ieee80211_get_cs_keyid(cs, rx->skb);
1838 if (unlikely(keyid < 0))
1839 return RX_DROP_UNUSABLE;
1840 }
1841 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
1842 }
1843
1844 if (!ieee80211_has_protected(fc))
1845 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
1846
1847 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
1848 rx->key = sta_ptk;
1849 if ((status->flag & RX_FLAG_DECRYPTED) &&
1850 (status->flag & RX_FLAG_IV_STRIPPED))
1851 return RX_CONTINUE;
1852 /* Skip decryption if the frame is not protected. */
1853 if (!ieee80211_has_protected(fc))
1854 return RX_CONTINUE;
1855 } else if (mmie_keyidx >= 0) {
1856 /* Broadcast/multicast robust management frame / BIP */
1857 if ((status->flag & RX_FLAG_DECRYPTED) &&
1858 (status->flag & RX_FLAG_IV_STRIPPED))
1859 return RX_CONTINUE;
1860
1861 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
1862 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1863 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
1864 if (rx->sta) {
1865 if (ieee80211_is_group_privacy_action(skb) &&
1866 test_sta_flag(rx->sta, WLAN_STA_MFP))
1867 return RX_DROP_MONITOR;
1868
1869 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
1870 }
1871 if (!rx->key)
1872 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
1873 } else if (!ieee80211_has_protected(fc)) {
1874 /*
1875 * The frame was not protected, so skip decryption. However, we
1876 * need to set rx->key if there is a key that could have been
1877 * used so that the frame may be dropped if encryption would
1878 * have been expected.
1879 */
1880 struct ieee80211_key *key = NULL;
1881 struct ieee80211_sub_if_data *sdata = rx->sdata;
1882 int i;
1883
1884 if (ieee80211_is_mgmt(fc) &&
1885 is_multicast_ether_addr(hdr->addr1) &&
1886 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
1887 rx->key = key;
1888 else {
1889 if (rx->sta) {
1890 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1891 key = rcu_dereference(rx->sta->gtk[i]);
1892 if (key)
1893 break;
1894 }
1895 }
1896 if (!key) {
1897 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
1898 key = rcu_dereference(sdata->keys[i]);
1899 if (key)
1900 break;
1901 }
1902 }
1903 if (key)
1904 rx->key = key;
1905 }
1906 return RX_CONTINUE;
1907 } else {
1908 u8 keyid;
1909
1910 /*
1911 * The device doesn't give us the IV so we won't be
1912 * able to look up the key. That's ok though, we
1913 * don't need to decrypt the frame, we just won't
1914 * be able to keep statistics accurate.
1915 * Except for key threshold notifications, should
1916 * we somehow allow the driver to tell us which key
1917 * the hardware used if this flag is set?
1918 */
1919 if ((status->flag & RX_FLAG_DECRYPTED) &&
1920 (status->flag & RX_FLAG_IV_STRIPPED))
1921 return RX_CONTINUE;
1922
1923 hdrlen = ieee80211_hdrlen(fc);
1924
1925 if (cs) {
1926 keyidx = ieee80211_get_cs_keyid(cs, rx->skb);
1927
1928 if (unlikely(keyidx < 0))
1929 return RX_DROP_UNUSABLE;
1930 } else {
1931 if (rx->skb->len < 8 + hdrlen)
1932 return RX_DROP_UNUSABLE; /* TODO: count this? */
1933 /*
1934 * no need to call ieee80211_wep_get_keyidx,
1935 * it verifies a bunch of things we've done already
1936 */
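			/*
			 * The key index lives in the two most significant
			 * bits of the 4th IV octet (offset hdrlen + 3); the
			 * layout is the same for WEP, TKIP and CCMP/GCMP.
			 */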
1937 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
1938 keyidx = keyid >> 6;
1939 }
1940
1941 /* check per-station GTK first, if multicast packet */
1942 if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
1943 rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
1944
1945 /* if not found, try default key */
1946 if (!rx->key) {
1947 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
1948
1949 /*
1950 * RSNA-protected unicast frames should always be
1951 * sent with pairwise or station-to-station keys,
1952 * but for WEP we allow using a key index as well.
1953 */
1954 if (rx->key &&
1955 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
1956 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
1957 !is_multicast_ether_addr(hdr->addr1))
1958 rx->key = NULL;
1959 }
1960 }
1961
1962 if (rx->key) {
1963 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1964 return RX_DROP_MONITOR;
1965
1966 /* TODO: add threshold stuff again */
1967 } else {
1968 return RX_DROP_MONITOR;
1969 }
1970
1971 switch (rx->key->conf.cipher) {
1972 case WLAN_CIPHER_SUITE_WEP40:
1973 case WLAN_CIPHER_SUITE_WEP104:
1974 result = ieee80211_crypto_wep_decrypt(rx);
1975 break;
1976 case WLAN_CIPHER_SUITE_TKIP:
1977 result = ieee80211_crypto_tkip_decrypt(rx);
1978 break;
1979 case WLAN_CIPHER_SUITE_CCMP:
1980 result = ieee80211_crypto_ccmp_decrypt(
1981 rx, IEEE80211_CCMP_MIC_LEN);
1982 break;
1983 case WLAN_CIPHER_SUITE_CCMP_256:
1984 result = ieee80211_crypto_ccmp_decrypt(
1985 rx, IEEE80211_CCMP_256_MIC_LEN);
1986 break;
1987 case WLAN_CIPHER_SUITE_AES_CMAC:
1988 result = ieee80211_crypto_aes_cmac_decrypt(rx);
1989 break;
1990 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
1991 result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
1992 break;
1993 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
1994 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
1995 result = ieee80211_crypto_aes_gmac_decrypt(rx);
1996 break;
1997 case WLAN_CIPHER_SUITE_GCMP:
1998 case WLAN_CIPHER_SUITE_GCMP_256:
1999 result = ieee80211_crypto_gcmp_decrypt(rx);
2000 break;
2001 default:
2002 result = ieee80211_crypto_hw_decrypt(rx);
2003 }
2004
2005 /* the hdr variable is invalid after the decrypt handlers */
2006
2007 /* either the frame has been decrypted or will be dropped */
2008 status->flag |= RX_FLAG_DECRYPTED;
2009
2010 return result;
2011}
2012
2013static inline struct ieee80211_fragment_entry *
2014ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
2015 unsigned int frag, unsigned int seq, int rx_queue,
2016 struct sk_buff **skb)
2017{
2018 struct ieee80211_fragment_entry *entry;
2019
2020 entry = &sdata->fragments[sdata->fragment_next++];
2021 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
2022 sdata->fragment_next = 0;
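	/*
	 * The fragment cache is a small ring: when it wraps around, the
	 * oldest entry (complete or not) is simply reused below.
	 */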
2023
2024 if (!skb_queue_empty(&entry->skb_list))
2025 __skb_queue_purge(&entry->skb_list);
2026
2027 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2028 *skb = NULL;
2029 entry->first_frag_time = jiffies;
2030 entry->seq = seq;
2031 entry->rx_queue = rx_queue;
2032 entry->last_frag = frag;
2033 entry->check_sequential_pn = false;
2034 entry->extra_len = 0;
2035
2036 return entry;
2037}
2038
2039static inline struct ieee80211_fragment_entry *
2040ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
2041 unsigned int frag, unsigned int seq,
2042 int rx_queue, struct ieee80211_hdr *hdr)
2043{
2044 struct ieee80211_fragment_entry *entry;
2045 int i, idx;
2046
2047 idx = sdata->fragment_next;
2048 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2049 struct ieee80211_hdr *f_hdr;
2050
2051 idx--;
2052 if (idx < 0)
2053 idx = IEEE80211_FRAGMENT_MAX - 1;
2054
2055 entry = &sdata->fragments[idx];
2056 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2057 entry->rx_queue != rx_queue ||
2058 entry->last_frag + 1 != frag)
2059 continue;
2060
2061 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
2062
		/*
		 * Check that the frame type and addresses match; otherwise
		 * try the next fragment entry.
		 */
2066 if (((hdr->frame_control ^ f_hdr->frame_control) &
2067 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2068 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2069 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2070 continue;
2071
2072 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2073 __skb_queue_purge(&entry->skb_list);
2074 continue;
2075 }
2076 return entry;
2077 }
2078
2079 return NULL;
2080}
2081
2082static ieee80211_rx_result debug_noinline
2083ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2084{
2085 struct ieee80211_hdr *hdr;
2086 u16 sc;
2087 __le16 fc;
2088 unsigned int frag, seq;
2089 struct ieee80211_fragment_entry *entry;
2090 struct sk_buff *skb;
2091
2092 hdr = (struct ieee80211_hdr *)rx->skb->data;
2093 fc = hdr->frame_control;
2094
2095 if (ieee80211_is_ctl(fc))
2096 return RX_CONTINUE;
2097
2098 sc = le16_to_cpu(hdr->seq_ctrl);
2099 frag = sc & IEEE80211_SCTL_FRAG;
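	/*
	 * Sequence control layout: the low 4 bits are the fragment number,
	 * the upper 12 bits the sequence number (extracted further below).
	 */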
2100
2101 if (is_multicast_ether_addr(hdr->addr1)) {
2102 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2103 goto out_no_led;
2104 }
2105
2106 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2107 goto out;
2108
2109 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2110
2111 if (skb_linearize(rx->skb))
2112 return RX_DROP_UNUSABLE;
2113
2114 /*
2115 * skb_linearize() might change the skb->data and
2116 * previously cached variables (in this case, hdr) need to
2117 * be refreshed with the new data.
2118 */
2119 hdr = (struct ieee80211_hdr *)rx->skb->data;
2120 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2121
2122 if (frag == 0) {
2123 /* This is the first fragment of a new frame. */
2124 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
2125 rx->seqno_idx, &(rx->skb));
2126 if (rx->key &&
2127 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2128 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2129 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2130 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2131 ieee80211_has_protected(fc)) {
2132 int queue = rx->security_idx;
2133
2134 /* Store CCMP/GCMP PN so that we can verify that the
2135 * next fragment has a sequential PN value.
2136 */
2137 entry->check_sequential_pn = true;
2138 memcpy(entry->last_pn,
2139 rx->key->u.ccmp.rx_pn[queue],
2140 IEEE80211_CCMP_PN_LEN);
2141 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2142 u.ccmp.rx_pn) !=
2143 offsetof(struct ieee80211_key,
2144 u.gcmp.rx_pn));
2145 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2146 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2147 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2148 IEEE80211_GCMP_PN_LEN);
2149 }
2150 return RX_QUEUED;
2151 }
2152
	/* This is a fragment for a frame that should already be pending in
	 * the fragment cache. Add this fragment to the end of the pending
	 * entry.
	 */
2156 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
2157 rx->seqno_idx, hdr);
2158 if (!entry) {
2159 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2160 return RX_DROP_MONITOR;
2161 }
2162
2163 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2164 * MPDU PN values are not incrementing in steps of 1."
2165 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2166 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2167 */
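	/*
	 * Worked example (editorial): with a 6-octet PN, a first fragment
	 * carrying PN 00:00:00:00:01:ff may only be followed by a fragment
	 * carrying exactly 00:00:00:00:02:00; any other value makes the
	 * whole MSDU unusable and it is dropped below.
	 */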
2168 if (entry->check_sequential_pn) {
2169 int i;
2170 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2171 int queue;
2172
2173 if (!rx->key ||
2174 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
2175 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
2176 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
2177 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
2178 return RX_DROP_UNUSABLE;
2179 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
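		/* increment the stored PN by one: bump the last octet and
		 * let the carry ripple toward the first
		 */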
2180 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2181 pn[i]++;
2182 if (pn[i])
2183 break;
2184 }
2185 queue = rx->security_idx;
2186 rpn = rx->key->u.ccmp.rx_pn[queue];
2187 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2188 return RX_DROP_UNUSABLE;
2189 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2190 }
2191
2192 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2193 __skb_queue_tail(&entry->skb_list, rx->skb);
2194 entry->last_frag = frag;
2195 entry->extra_len += rx->skb->len;
2196 if (ieee80211_has_morefrags(fc)) {
2197 rx->skb = NULL;
2198 return RX_QUEUED;
2199 }
2200
2201 rx->skb = __skb_dequeue(&entry->skb_list);
2202 if (skb_tailroom(rx->skb) < entry->extra_len) {
2203 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2204 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2205 GFP_ATOMIC))) {
2206 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2207 __skb_queue_purge(&entry->skb_list);
2208 return RX_DROP_UNUSABLE;
2209 }
2210 }
2211 while ((skb = __skb_dequeue(&entry->skb_list))) {
2212 skb_put_data(rx->skb, skb->data, skb->len);
2213 dev_kfree_skb(skb);
2214 }
2215
2216 out:
2217 ieee80211_led_rx(rx->local);
2218 out_no_led:
2219 if (rx->sta)
2220 rx->sta->rx_stats.packets++;
2221 return RX_CONTINUE;
2222}
2223
2224static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2225{
2226 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2227 return -EACCES;
2228
2229 return 0;
2230}
2231
2232static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2233{
2234 struct sk_buff *skb = rx->skb;
2235 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2236
2237 /*
2238 * Pass through unencrypted frames if the hardware has
2239 * decrypted them already.
2240 */
2241 if (status->flag & RX_FLAG_DECRYPTED)
2242 return 0;
2243
2244 /* Drop unencrypted frames if key is set. */
2245 if (unlikely(!ieee80211_has_protected(fc) &&
2246 !ieee80211_is_nullfunc(fc) &&
2247 ieee80211_is_data(fc) && rx->key))
2248 return -EACCES;
2249
2250 return 0;
2251}
2252
2253static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2254{
2255 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2256 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2257 __le16 fc = hdr->frame_control;
2258
2259 /*
2260 * Pass through unencrypted frames if the hardware has
2261 * decrypted them already.
2262 */
2263 if (status->flag & RX_FLAG_DECRYPTED)
2264 return 0;
2265
2266 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2267 if (unlikely(!ieee80211_has_protected(fc) &&
2268 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2269 rx->key)) {
2270 if (ieee80211_is_deauth(fc) ||
2271 ieee80211_is_disassoc(fc))
2272 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2273 rx->skb->data,
2274 rx->skb->len);
2275 return -EACCES;
2276 }
2277 /* BIP does not use Protected field, so need to check MMIE */
2278 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2279 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2280 if (ieee80211_is_deauth(fc) ||
2281 ieee80211_is_disassoc(fc))
2282 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2283 rx->skb->data,
2284 rx->skb->len);
2285 return -EACCES;
2286 }
2287 /*
2288 * When using MFP, Action frames are not allowed prior to
2289 * having configured keys.
2290 */
2291 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2292 ieee80211_is_robust_mgmt_frame(rx->skb)))
2293 return -EACCES;
2294 }
2295
2296 return 0;
2297}
2298
2299static int
2300__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2301{
2302 struct ieee80211_sub_if_data *sdata = rx->sdata;
2303 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2304 bool check_port_control = false;
2305 struct ethhdr *ehdr;
2306 int ret;
2307
2308 *port_control = false;
2309 if (ieee80211_has_a4(hdr->frame_control) &&
2310 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2311 return -1;
2312
2313 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2314 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2315
2316 if (!sdata->u.mgd.use_4addr)
2317 return -1;
2318 else
2319 check_port_control = true;
2320 }
2321
2322 if (is_multicast_ether_addr(hdr->addr1) &&
2323 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2324 return -1;
2325
2326 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2327 if (ret < 0)
2328 return ret;
2329
2330 ehdr = (struct ethhdr *) rx->skb->data;
2331 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2332 *port_control = true;
2333 else if (check_port_control)
2334 return -1;
2335
2336 return 0;
2337}
2338
2339/*
2340 * requires that rx->skb is a frame with ethernet header
2341 */
2342static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2343{
2344 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2345 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
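	/* 01-80-C2-00-00-03 is the IEEE 802.1X PAE group address */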
2346 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2347
2348 /*
2349 * Allow EAPOL frames to us/the PAE group address regardless
2350 * of whether the frame was encrypted or not.
2351 */
2352 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
2353 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
2354 ether_addr_equal(ehdr->h_dest, pae_group_addr)))
2355 return true;
2356
2357 if (ieee80211_802_1x_port_control(rx) ||
2358 ieee80211_drop_unencrypted(rx, fc))
2359 return false;
2360
2361 return true;
2362}
2363
2364static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2365 struct ieee80211_rx_data *rx)
2366{
2367 struct ieee80211_sub_if_data *sdata = rx->sdata;
2368 struct net_device *dev = sdata->dev;
2369
2370 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2371 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
2372 sdata->control_port_over_nl80211)) {
2373 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
		/* tell cfg80211 whether the frame arrived unprotected */
		bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2375
2376 cfg80211_rx_control_port(dev, skb, noencrypt);
2377 dev_kfree_skb(skb);
2378 } else {
2379 /* deliver to local stack */
2380 if (rx->napi)
2381 napi_gro_receive(rx->napi, skb);
2382 else
2383 netif_receive_skb(skb);
2384 }
2385}
2386
2387/*
2388 * requires that rx->skb is a frame with ethernet header
2389 */
2390static void
2391ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2392{
2393 struct ieee80211_sub_if_data *sdata = rx->sdata;
2394 struct net_device *dev = sdata->dev;
2395 struct sk_buff *skb, *xmit_skb;
2396 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2397 struct sta_info *dsta;
2398
2399 skb = rx->skb;
2400 xmit_skb = NULL;
2401
2402 ieee80211_rx_stats(dev, skb->len);
2403
2404 if (rx->sta) {
2405 /* The seqno index has the same property as needed
2406 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2407 * for non-QoS-data frames. Here we know it's a data
2408 * frame, so count MSDUs.
2409 */
2410 u64_stats_update_begin(&rx->sta->rx_stats.syncp);
2411 rx->sta->rx_stats.msdu[rx->seqno_idx]++;
2412 u64_stats_update_end(&rx->sta->rx_stats.syncp);
2413 }
2414
2415 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2416 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2417 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2418 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2419 if (is_multicast_ether_addr(ehdr->h_dest) &&
2420 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2421 /*
2422 * send multicast frames both to higher layers in
2423 * local net stack and back to the wireless medium
2424 */
2425 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2426 if (!xmit_skb)
2427 net_info_ratelimited("%s: failed to clone multicast frame\n",
2428 dev->name);
2429 } else if (!is_multicast_ether_addr(ehdr->h_dest)) {
2430 dsta = sta_info_get(sdata, skb->data);
2431 if (dsta) {
2432 /*
2433 * The destination station is associated to
2434 * this AP (in this VLAN), so send the frame
2435 * directly to it and do not pass it to local
2436 * net stack.
2437 */
2438 xmit_skb = skb;
2439 skb = NULL;
2440 }
2441 }
2442 }
2443
2444#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2445 if (skb) {
2446 /* 'align' will only take the values 0 or 2 here since all
2447 * frames are required to be aligned to 2-byte boundaries
2448 * when being passed to mac80211; the code here works just
2449 * as well if that isn't true, but mac80211 assumes it can
2450 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2451 */
2452 int align;
2453
2454 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
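		/*
		 * align is how far the network header (right after the
		 * 14-byte ethernet header) sits past a 4-byte boundary;
		 * shifting the frame toward the head by that amount below
		 * makes the IP header word-aligned again.
		 */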
2455 if (align) {
2456 if (WARN_ON(skb_headroom(skb) < 3)) {
2457 dev_kfree_skb(skb);
2458 skb = NULL;
2459 } else {
2460 u8 *data = skb->data;
2461 size_t len = skb_headlen(skb);
2462 skb->data -= align;
2463 memmove(skb->data, data, len);
2464 skb_set_tail_pointer(skb, len);
2465 }
2466 }
2467 }
2468#endif
2469
2470 if (skb) {
2471 skb->protocol = eth_type_trans(skb, dev);
2472 memset(skb->cb, 0, sizeof(skb->cb));
2473
2474 ieee80211_deliver_skb_to_local_stack(skb, rx);
2475 }
2476
2477 if (xmit_skb) {
2478 /*
2479 * Send to wireless media and increase priority by 256 to
2480 * keep the received priority instead of reclassifying
2481 * the frame (see cfg80211_classify8021d).
2482 */
2483 xmit_skb->priority += 256;
2484 xmit_skb->protocol = htons(ETH_P_802_3);
2485 skb_reset_network_header(xmit_skb);
2486 skb_reset_mac_header(xmit_skb);
2487 dev_queue_xmit(xmit_skb);
2488 }
2489}
2490
2491static ieee80211_rx_result debug_noinline
2492__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2493{
2494 struct net_device *dev = rx->sdata->dev;
2495 struct sk_buff *skb = rx->skb;
2496 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2497 __le16 fc = hdr->frame_control;
2498 struct sk_buff_head frame_list;
2499 struct ethhdr ethhdr;
2500 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
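	/*
	 * check_da/check_sa point into the 802.3 header that
	 * ieee80211_data_to_8023_exthdr() fills in below; a NULL pointer
	 * disables the corresponding A-MSDU subframe address check.
	 */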
2501
2502 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2503 check_da = NULL;
2504 check_sa = NULL;
2505 } else switch (rx->sdata->vif.type) {
2506 case NL80211_IFTYPE_AP:
2507 case NL80211_IFTYPE_AP_VLAN:
2508 check_da = NULL;
2509 break;
2510 case NL80211_IFTYPE_STATION:
2511 if (!rx->sta ||
2512 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2513 check_sa = NULL;
2514 break;
2515 case NL80211_IFTYPE_MESH_POINT:
2516 check_sa = NULL;
2517 break;
2518 default:
2519 break;
2520 }
2521
2522 skb->dev = dev;
2523 __skb_queue_head_init(&frame_list);
2524
2525 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2526 rx->sdata->vif.addr,
2527 rx->sdata->vif.type,
2528 data_offset))
2529 return RX_DROP_UNUSABLE;
2530
2531 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2532 rx->sdata->vif.type,
2533 rx->local->hw.extra_tx_headroom,
2534 check_da, check_sa);
2535
2536 while (!skb_queue_empty(&frame_list)) {
2537 rx->skb = __skb_dequeue(&frame_list);
2538
2539 if (!ieee80211_frame_allowed(rx, fc)) {
2540 dev_kfree_skb(rx->skb);
2541 continue;
2542 }
2543
2544 ieee80211_deliver_skb(rx);
2545 }
2546
2547 return RX_QUEUED;
2548}
2549
2550static ieee80211_rx_result debug_noinline
2551ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2552{
2553 struct sk_buff *skb = rx->skb;
2554 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2555 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2556 __le16 fc = hdr->frame_control;
2557
2558 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2559 return RX_CONTINUE;
2560
2561 if (unlikely(!ieee80211_is_data(fc)))
2562 return RX_CONTINUE;
2563
2564 if (unlikely(!ieee80211_is_data_present(fc)))
2565 return RX_DROP_MONITOR;
2566
2567 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2568 switch (rx->sdata->vif.type) {
2569 case NL80211_IFTYPE_AP_VLAN:
2570 if (!rx->sdata->u.vlan.sta)
2571 return RX_DROP_UNUSABLE;
2572 break;
2573 case NL80211_IFTYPE_STATION:
2574 if (!rx->sdata->u.mgd.use_4addr)
2575 return RX_DROP_UNUSABLE;
2576 break;
2577 default:
2578 return RX_DROP_UNUSABLE;
2579 }
2580 }
2581
2582 if (is_multicast_ether_addr(hdr->addr1))
2583 return RX_DROP_UNUSABLE;
2584
2585 return __ieee80211_rx_h_amsdu(rx, 0);
2586}
2587
2588#ifdef CONFIG_MAC80211_MESH
2589static ieee80211_rx_result
2590ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2591{
2592 struct ieee80211_hdr *fwd_hdr, *hdr;
2593 struct ieee80211_tx_info *info;
2594 struct ieee80211s_hdr *mesh_hdr;
2595 struct sk_buff *skb = rx->skb, *fwd_skb;
2596 struct ieee80211_local *local = rx->local;
2597 struct ieee80211_sub_if_data *sdata = rx->sdata;
2598 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2599 u16 ac, q, hdrlen;
2600
2601 hdr = (struct ieee80211_hdr *) skb->data;
2602 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2603
2604 /* make sure fixed part of mesh header is there, also checks skb len */
2605 if (!pskb_may_pull(rx->skb, hdrlen + 6))
2606 return RX_DROP_MONITOR;
2607
2608 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2609
2610 /* make sure full mesh header is there, also checks skb len */
2611 if (!pskb_may_pull(rx->skb,
2612 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
2613 return RX_DROP_MONITOR;
2614
2615 /* reload pointers */
2616 hdr = (struct ieee80211_hdr *) skb->data;
2617 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
2618
2619 if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
2620 return RX_DROP_MONITOR;
2621
2622 /* frame is in RMC, don't forward */
2623 if (ieee80211_is_data(hdr->frame_control) &&
2624 is_multicast_ether_addr(hdr->addr1) &&
2625 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
2626 return RX_DROP_MONITOR;
2627
2628 if (!ieee80211_is_data(hdr->frame_control))
2629 return RX_CONTINUE;
2630
2631 if (!mesh_hdr->ttl)
2632 return RX_DROP_MONITOR;
2633
2634 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2635 struct mesh_path *mppath;
2636 char *proxied_addr;
2637 char *mpp_addr;
2638
2639 if (is_multicast_ether_addr(hdr->addr1)) {
2640 mpp_addr = hdr->addr3;
2641 proxied_addr = mesh_hdr->eaddr1;
2642 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2643 MESH_FLAGS_AE_A5_A6) {
2644 /* has_a4 already checked in ieee80211_rx_mesh_check */
2645 mpp_addr = hdr->addr4;
2646 proxied_addr = mesh_hdr->eaddr2;
2647 } else {
2648 return RX_DROP_MONITOR;
2649 }
2650
2651 rcu_read_lock();
2652 mppath = mpp_path_lookup(sdata, proxied_addr);
2653 if (!mppath) {
2654 mpp_path_add(sdata, proxied_addr, mpp_addr);
2655 } else {
2656 spin_lock_bh(&mppath->state_lock);
2657 if (!ether_addr_equal(mppath->mpp, mpp_addr))
2658 memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
2659 mppath->exp_time = jiffies;
2660 spin_unlock_bh(&mppath->state_lock);
2661 }
2662 rcu_read_unlock();
2663 }
2664
2665 /* Frame has reached destination. Don't forward */
2666 if (!is_multicast_ether_addr(hdr->addr1) &&
2667 ether_addr_equal(sdata->vif.addr, hdr->addr3))
2668 return RX_CONTINUE;
2669
2670 ac = ieee80211_select_queue_80211(sdata, skb, hdr);
2671 q = sdata->vif.hw_queue[ac];
2672 if (ieee80211_queue_stopped(&local->hw, q)) {
2673 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
2674 return RX_DROP_MONITOR;
2675 }
2676 skb_set_queue_mapping(skb, q);
2677
2678 if (!--mesh_hdr->ttl) {
2679 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
2680 goto out;
2681 }
2682
2683 if (!ifmsh->mshcfg.dot11MeshForwarding)
2684 goto out;
2685
2686 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2687 sdata->encrypt_headroom, 0, GFP_ATOMIC);
2688 if (!fwd_skb)
2689 goto out;
2690
2691 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
2692 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
2693 info = IEEE80211_SKB_CB(fwd_skb);
2694 memset(info, 0, sizeof(*info));
2695 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
2696 info->control.vif = &rx->sdata->vif;
2697 info->control.jiffies = jiffies;
2698 if (is_multicast_ether_addr(fwd_hdr->addr1)) {
2699 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
2700 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
2701 /* update power mode indication when forwarding */
2702 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
2703 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
2704 /* mesh power mode flags updated in mesh_nexthop_lookup */
2705 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2706 } else {
2707 /* unable to resolve next hop */
2708 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
2709 fwd_hdr->addr3, 0,
2710 WLAN_REASON_MESH_PATH_NOFORWARD,
2711 fwd_hdr->addr2);
2712 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
2713 kfree_skb(fwd_skb);
2714 return RX_DROP_MONITOR;
2715 }
2716
2717 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2718 ieee80211_add_pending_skb(local, fwd_skb);
2719 out:
2720 if (is_multicast_ether_addr(hdr->addr1))
2721 return RX_CONTINUE;
2722 return RX_DROP_MONITOR;
2723}
2724#endif
2725
2726static ieee80211_rx_result debug_noinline
2727ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
2728{
2729 struct ieee80211_sub_if_data *sdata = rx->sdata;
2730 struct ieee80211_local *local = rx->local;
2731 struct net_device *dev = sdata->dev;
2732 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2733 __le16 fc = hdr->frame_control;
2734 bool port_control;
2735 int err;
2736
2737 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
2738 return RX_CONTINUE;
2739
2740 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
2741 return RX_DROP_MONITOR;
2742
2743 /*
2744 * Send unexpected-4addr-frame event to hostapd. For older versions,
2745 * also drop the frame to cooked monitor interfaces.
2746 */
2747 if (ieee80211_has_a4(hdr->frame_control) &&
2748 sdata->vif.type == NL80211_IFTYPE_AP) {
2749 if (rx->sta &&
2750 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
2751 cfg80211_rx_unexpected_4addr_frame(
2752 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
2753 return RX_DROP_MONITOR;
2754 }
2755
2756 err = __ieee80211_data_to_8023(rx, &port_control);
2757 if (unlikely(err))
2758 return RX_DROP_UNUSABLE;
2759
2760 if (!ieee80211_frame_allowed(rx, fc))
2761 return RX_DROP_MONITOR;
2762
2763 /* directly handle TDLS channel switch requests/responses */
2764 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
2765 cpu_to_be16(ETH_P_TDLS))) {
2766 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
2767
2768 if (pskb_may_pull(rx->skb,
2769 offsetof(struct ieee80211_tdls_data, u)) &&
2770 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
2771 tf->category == WLAN_CATEGORY_TDLS &&
2772 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
2773 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
2774 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
2775 schedule_work(&local->tdls_chsw_work);
2776 if (rx->sta)
2777 rx->sta->rx_stats.packets++;
2778
2779 return RX_QUEUED;
2780 }
2781 }
2782
2783 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2784 unlikely(port_control) && sdata->bss) {
2785 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
2786 u.ap);
2787 dev = sdata->dev;
2788 rx->sdata = sdata;
2789 }
2790
2791 rx->skb->dev = dev;
2792
2793 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
2794 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
2795 !is_multicast_ether_addr(
2796 ((struct ethhdr *)rx->skb->data)->h_dest) &&
2797 (!local->scanning &&
2798 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
2799 mod_timer(&local->dynamic_ps_timer, jiffies +
2800 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
2801
2802 ieee80211_deliver_skb(rx);
2803
2804 return RX_QUEUED;
2805}
2806
2807static ieee80211_rx_result debug_noinline
2808ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
2809{
2810 struct sk_buff *skb = rx->skb;
2811 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
2812 struct tid_ampdu_rx *tid_agg_rx;
2813 u16 start_seq_num;
2814 u16 tid;
2815
2816 if (likely(!ieee80211_is_ctl(bar->frame_control)))
2817 return RX_CONTINUE;
2818
2819 if (ieee80211_is_back_req(bar->frame_control)) {
2820 struct {
2821 __le16 control, start_seq_num;
2822 } __packed bar_data;
2823 struct ieee80211_event event = {
2824 .type = BAR_RX_EVENT,
2825 };
2826
2827 if (!rx->sta)
2828 return RX_DROP_MONITOR;
2829
2830 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
2831 &bar_data, sizeof(bar_data)))
2832 return RX_DROP_MONITOR;
2833
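		/* the TID occupies the four most significant bits of the
		 * BAR control field
		 */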
2834 tid = le16_to_cpu(bar_data.control) >> 12;
2835
2836 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
2837 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
2838 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
2839 WLAN_BACK_RECIPIENT,
2840 WLAN_REASON_QSTA_REQUIRE_SETUP);
2841
2842 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
2843 if (!tid_agg_rx)
2844 return RX_DROP_MONITOR;
2845
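		/* drop the fragment number; bits 4-15 carry the starting
		 * sequence number
		 */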
2846 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
2847 event.u.ba.tid = tid;
2848 event.u.ba.ssn = start_seq_num;
2849 event.u.ba.sta = &rx->sta->sta;
2850
2851 /* reset session timer */
2852 if (tid_agg_rx->timeout)
2853 mod_timer(&tid_agg_rx->session_timer,
2854 TU_TO_EXP_TIME(tid_agg_rx->timeout));
2855
2856 spin_lock(&tid_agg_rx->reorder_lock);
2857 /* release stored frames up to start of BAR */
2858 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
2859 start_seq_num, frames);
2860 spin_unlock(&tid_agg_rx->reorder_lock);
2861
2862 drv_event_callback(rx->local, rx->sdata, &event);
2863
2864 kfree_skb(skb);
2865 return RX_QUEUED;
2866 }
2867
2868 /*
2869 * After this point, we only want management frames,
2870 * so we can drop all remaining control frames to
2871 * cooked monitor interfaces.
2872 */
2873 return RX_DROP_MONITOR;
2874}
2875
2876static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
2877 struct ieee80211_mgmt *mgmt,
2878 size_t len)
2879{
2880 struct ieee80211_local *local = sdata->local;
2881 struct sk_buff *skb;
2882 struct ieee80211_mgmt *resp;
2883
2884 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
2885 /* Not to own unicast address */
2886 return;
2887 }
2888
2889 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
2890 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
2891 /* Not from the current AP or not associated yet. */
2892 return;
2893 }
2894
2895 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
2896 /* Too short SA Query request frame */
2897 return;
2898 }
2899
2900 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
2901 if (skb == NULL)
2902 return;
2903
2904 skb_reserve(skb, local->hw.extra_tx_headroom);
2905 resp = skb_put_zero(skb, 24);
2906 memcpy(resp->da, mgmt->sa, ETH_ALEN);
2907 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
2908 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
2909 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2910 IEEE80211_STYPE_ACTION);
2911 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
2912 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
2913 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
2914 memcpy(resp->u.action.u.sa_query.trans_id,
2915 mgmt->u.action.u.sa_query.trans_id,
2916 WLAN_SA_QUERY_TR_ID_LEN);
2917
2918 ieee80211_tx_skb(sdata, skb);
2919}
2920
2921static ieee80211_rx_result debug_noinline
2922ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
2923{
2924 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2925 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2926
2927 /*
2928 * From here on, look only at management frames.
2929 * Data and control frames are already handled,
2930 * and unknown (reserved) frames are useless.
2931 */
2932 if (rx->skb->len < 24)
2933 return RX_DROP_MONITOR;
2934
2935 if (!ieee80211_is_mgmt(mgmt->frame_control))
2936 return RX_DROP_MONITOR;
2937
2938 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
2939 ieee80211_is_beacon(mgmt->frame_control) &&
2940 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
2941 int sig = 0;
2942
2943 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
2944 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
2945 sig = status->signal;
2946
2947 cfg80211_report_obss_beacon(rx->local->hw.wiphy,
2948 rx->skb->data, rx->skb->len,
2949 status->freq, sig);
2950 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
2951 }
2952
2953 if (ieee80211_drop_unencrypted_mgmt(rx))
2954 return RX_DROP_UNUSABLE;
2955
2956 return RX_CONTINUE;
2957}
2958
2959static ieee80211_rx_result debug_noinline
2960ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2961{
2962 struct ieee80211_local *local = rx->local;
2963 struct ieee80211_sub_if_data *sdata = rx->sdata;
2964 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2965 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2966 int len = rx->skb->len;
2967
2968 if (!ieee80211_is_action(mgmt->frame_control))
2969 return RX_CONTINUE;
2970
2971 /* drop too small frames */
2972 if (len < IEEE80211_MIN_ACTION_SIZE)
2973 return RX_DROP_UNUSABLE;
2974
2975 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
2976 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
2977 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
2978 return RX_DROP_UNUSABLE;
2979
2980 switch (mgmt->u.action.category) {
2981 case WLAN_CATEGORY_HT:
2982 /* reject HT action frames from stations not supporting HT */
2983 if (!rx->sta->sta.ht_cap.ht_supported)
2984 goto invalid;
2985
2986 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
2987 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
2988 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
2989 sdata->vif.type != NL80211_IFTYPE_AP &&
2990 sdata->vif.type != NL80211_IFTYPE_ADHOC)
2991 break;
2992
2993 /* verify action & smps_control/chanwidth are present */
2994 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2995 goto invalid;
2996
2997 switch (mgmt->u.action.u.ht_smps.action) {
2998 case WLAN_HT_ACTION_SMPS: {
2999 struct ieee80211_supported_band *sband;
3000 enum ieee80211_smps_mode smps_mode;
3001 struct sta_opmode_info sta_opmode = {};
3002
3003 /* convert to HT capability */
3004 switch (mgmt->u.action.u.ht_smps.smps_control) {
3005 case WLAN_HT_SMPS_CONTROL_DISABLED:
3006 smps_mode = IEEE80211_SMPS_OFF;
3007 break;
3008 case WLAN_HT_SMPS_CONTROL_STATIC:
3009 smps_mode = IEEE80211_SMPS_STATIC;
3010 break;
3011 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3012 smps_mode = IEEE80211_SMPS_DYNAMIC;
3013 break;
3014 default:
3015 goto invalid;
3016 }
3017
3018 /* if no change do nothing */
3019 if (rx->sta->sta.smps_mode == smps_mode)
3020 goto handled;
3021 rx->sta->sta.smps_mode = smps_mode;
3022 sta_opmode.smps_mode =
3023 ieee80211_smps_mode_to_smps_mode(smps_mode);
3024 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3025
3026 sband = rx->local->hw.wiphy->bands[status->band];
3027
3028 rate_control_rate_update(local, sband, rx->sta,
3029 IEEE80211_RC_SMPS_CHANGED);
3030 cfg80211_sta_opmode_change_notify(sdata->dev,
3031 rx->sta->addr,
3032 &sta_opmode,
3033 GFP_ATOMIC);
3034 goto handled;
3035 }
3036 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3037 struct ieee80211_supported_band *sband;
3038 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3039 enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
3040 struct sta_opmode_info sta_opmode = {};
3041
3042 /* If it doesn't support 40 MHz it can't change ... */
3043 if (!(rx->sta->sta.ht_cap.cap &
3044 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3045 goto handled;
3046
3047 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
3048 max_bw = IEEE80211_STA_RX_BW_20;
3049 else
3050 max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
3051
3052 /* set cur_max_bandwidth and recalc sta bw */
3053 rx->sta->cur_max_bandwidth = max_bw;
3054 new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
3055
3056 if (rx->sta->sta.bandwidth == new_bw)
3057 goto handled;
3058
3059 rx->sta->sta.bandwidth = new_bw;
3060 sband = rx->local->hw.wiphy->bands[status->band];
3061 sta_opmode.bw =
3062 ieee80211_sta_rx_bw_to_chan_width(rx->sta);
3063 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
3064
3065 rate_control_rate_update(local, sband, rx->sta,
3066 IEEE80211_RC_BW_CHANGED);
3067 cfg80211_sta_opmode_change_notify(sdata->dev,
3068 rx->sta->addr,
3069 &sta_opmode,
3070 GFP_ATOMIC);
3071 goto handled;
3072 }
3073 default:
3074 goto invalid;
3075 }
3076
3077 break;
3078 case WLAN_CATEGORY_PUBLIC:
3079 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3080 goto invalid;
3081 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3082 break;
3083 if (!rx->sta)
3084 break;
3085 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
3086 break;
3087 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3088 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3089 break;
3090 if (len < offsetof(struct ieee80211_mgmt,
3091 u.action.u.ext_chan_switch.variable))
3092 goto invalid;
3093 goto queue;
3094 case WLAN_CATEGORY_VHT:
3095 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3096 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3097 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3098 sdata->vif.type != NL80211_IFTYPE_AP &&
3099 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3100 break;
3101
3102 /* verify action code is present */
3103 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3104 goto invalid;
3105
3106 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3107 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3108 /* verify opmode is present */
3109 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3110 goto invalid;
3111 goto queue;
3112 }
3113 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3114 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3115 goto invalid;
3116 goto queue;
3117 }
3118 default:
3119 break;
3120 }
3121 break;
3122 case WLAN_CATEGORY_BACK:
3123 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3124 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3125 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3126 sdata->vif.type != NL80211_IFTYPE_AP &&
3127 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3128 break;
3129
3130 /* verify action_code is present */
3131 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3132 break;
3133
3134 switch (mgmt->u.action.u.addba_req.action_code) {
3135 case WLAN_ACTION_ADDBA_REQ:
3136 if (len < (IEEE80211_MIN_ACTION_SIZE +
3137 sizeof(mgmt->u.action.u.addba_req)))
3138 goto invalid;
3139 break;
3140 case WLAN_ACTION_ADDBA_RESP:
3141 if (len < (IEEE80211_MIN_ACTION_SIZE +
3142 sizeof(mgmt->u.action.u.addba_resp)))
3143 goto invalid;
3144 break;
3145 case WLAN_ACTION_DELBA:
3146 if (len < (IEEE80211_MIN_ACTION_SIZE +
3147 sizeof(mgmt->u.action.u.delba)))
3148 goto invalid;
3149 break;
3150 default:
3151 goto invalid;
3152 }
3153
3154 goto queue;
3155 case WLAN_CATEGORY_SPECTRUM_MGMT:
3156 /* verify action_code is present */
3157 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3158 break;
3159
3160 switch (mgmt->u.action.u.measurement.action_code) {
3161 case WLAN_ACTION_SPCT_MSR_REQ:
3162 if (status->band != NL80211_BAND_5GHZ)
3163 break;
3164
3165 if (len < (IEEE80211_MIN_ACTION_SIZE +
3166 sizeof(mgmt->u.action.u.measurement)))
3167 break;
3168
3169 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3170 break;
3171
3172 ieee80211_process_measurement_req(sdata, mgmt, len);
3173 goto handled;
3174 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3175 u8 *bssid;
3176 if (len < (IEEE80211_MIN_ACTION_SIZE +
3177 sizeof(mgmt->u.action.u.chan_switch)))
3178 break;
3179
3180 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3181 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3182 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3183 break;
3184
3185 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3186 bssid = sdata->u.mgd.bssid;
3187 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3188 bssid = sdata->u.ibss.bssid;
3189 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3190 bssid = mgmt->sa;
3191 else
3192 break;
3193
3194 if (!ether_addr_equal(mgmt->bssid, bssid))
3195 break;
3196
3197 goto queue;
3198 }
3199 }
3200 break;
3201 case WLAN_CATEGORY_SA_QUERY:
3202 if (len < (IEEE80211_MIN_ACTION_SIZE +
3203 sizeof(mgmt->u.action.u.sa_query)))
3204 break;
3205
3206 switch (mgmt->u.action.u.sa_query.action) {
3207 case WLAN_ACTION_SA_QUERY_REQUEST:
3208 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3209 break;
3210 ieee80211_process_sa_query_req(sdata, mgmt, len);
3211 goto handled;
3212 }
3213 break;
3214 case WLAN_CATEGORY_SELF_PROTECTED:
3215 if (len < (IEEE80211_MIN_ACTION_SIZE +
3216 sizeof(mgmt->u.action.u.self_prot.action_code)))
3217 break;
3218
3219 switch (mgmt->u.action.u.self_prot.action_code) {
3220 case WLAN_SP_MESH_PEERING_OPEN:
3221 case WLAN_SP_MESH_PEERING_CLOSE:
3222 case WLAN_SP_MESH_PEERING_CONFIRM:
3223 if (!ieee80211_vif_is_mesh(&sdata->vif))
3224 goto invalid;
3225 if (sdata->u.mesh.user_mpm)
3226 /* userspace handles this frame */
3227 break;
3228 goto queue;
3229 case WLAN_SP_MGK_INFORM:
3230 case WLAN_SP_MGK_ACK:
3231 if (!ieee80211_vif_is_mesh(&sdata->vif))
3232 goto invalid;
3233 break;
3234 }
3235 break;
3236 case WLAN_CATEGORY_MESH_ACTION:
3237 if (len < (IEEE80211_MIN_ACTION_SIZE +
3238 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3239 break;
3240
3241 if (!ieee80211_vif_is_mesh(&sdata->vif))
3242 break;
3243 if (mesh_action_is_path_sel(mgmt) &&
3244 !mesh_path_sel_is_hwmp(sdata))
3245 break;
3246 goto queue;
3247 }
3248
3249 return RX_CONTINUE;
3250
3251 invalid:
3252 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3253 /* will return in the next handlers */
3254 return RX_CONTINUE;
3255
3256 handled:
3257 if (rx->sta)
3258 rx->sta->rx_stats.packets++;
3259 dev_kfree_skb(rx->skb);
3260 return RX_QUEUED;
3261
3262 queue:
3263 skb_queue_tail(&sdata->skb_queue, rx->skb);
3264 ieee80211_queue_work(&local->hw, &sdata->work);
3265 if (rx->sta)
3266 rx->sta->rx_stats.packets++;
3267 return RX_QUEUED;
3268}
3269
3270static ieee80211_rx_result debug_noinline
3271ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3272{
3273 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3274 int sig = 0;
3275
3276 /* skip known-bad action frames and return them in the next handler */
3277 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3278 return RX_CONTINUE;
3279
3280 /*
3281 * Getting here means the kernel doesn't know how to handle
3282 * it, but maybe userspace does ... include returned frames
	 * so userspace can register for those and learn whether the frames
	 * it transmitted were processed or returned.
3285 */
3286
3287 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3288 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3289 sig = status->signal;
3290
3291 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
3292 rx->skb->data, rx->skb->len, 0)) {
3293 if (rx->sta)
3294 rx->sta->rx_stats.packets++;
3295 dev_kfree_skb(rx->skb);
3296 return RX_QUEUED;
3297 }
3298
3299 return RX_CONTINUE;
3300}
3301
3302static ieee80211_rx_result debug_noinline
3303ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
3304{
3305 struct ieee80211_local *local = rx->local;
3306 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3307 struct sk_buff *nskb;
3308 struct ieee80211_sub_if_data *sdata = rx->sdata;
3309 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3310
3311 if (!ieee80211_is_action(mgmt->frame_control))
3312 return RX_CONTINUE;
3313
3314 /*
3315 * For AP mode, hostapd is responsible for handling any action
3316 * frames that we didn't handle, including returning unknown
3317 * ones. For all other modes we will return them to the sender,
3318 * setting the 0x80 bit in the action category, as required by
3319 * 802.11-2012 9.24.4.
	 * Newer versions of hostapd should also use the management frame
	 * registration mechanisms, but older ones still use cooked monitor
	 * interfaces, so push all frames there.
3323 */
3324 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
3325 (sdata->vif.type == NL80211_IFTYPE_AP ||
3326 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
3327 return RX_DROP_MONITOR;
3328
3329 if (is_multicast_ether_addr(mgmt->da))
3330 return RX_DROP_MONITOR;
3331
3332 /* do not return rejected action frames */
3333 if (mgmt->u.action.category & 0x80)
3334 return RX_DROP_UNUSABLE;
3335
3336 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
3337 GFP_ATOMIC);
3338 if (nskb) {
3339 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
3340
3341 nmgmt->u.action.category |= 0x80;
3342 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
3343 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
3344
3345 memset(nskb->cb, 0, sizeof(nskb->cb));
3346
3347 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
3348 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
3349
3350 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
3351 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
3352 IEEE80211_TX_CTL_NO_CCK_RATE;
3353 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
3354 info->hw_queue =
3355 local->hw.offchannel_tx_hw_queue;
3356 }
3357
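		/* bounce the frame back on TID 7, which maps to the
		 * voice access category
		 */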
3358 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
3359 status->band, 0);
3360 }
3361 dev_kfree_skb(rx->skb);
3362 return RX_QUEUED;
3363}
3364
3365static ieee80211_rx_result debug_noinline
3366ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
3367{
3368 struct ieee80211_sub_if_data *sdata = rx->sdata;
3369 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3370 __le16 stype;
3371
3372 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
3373
3374 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
3375 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3376 sdata->vif.type != NL80211_IFTYPE_OCB &&
3377 sdata->vif.type != NL80211_IFTYPE_STATION)
3378 return RX_DROP_MONITOR;
3379
3380 switch (stype) {
3381 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3382 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3383 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3384 /* process for all: mesh, mlme, ibss */
3385 break;
3386 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3387 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3388 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3389 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3390 if (is_multicast_ether_addr(mgmt->da) &&
3391 !is_broadcast_ether_addr(mgmt->da))
3392 return RX_DROP_MONITOR;
3393
3394 /* process only for station */
3395 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3396 return RX_DROP_MONITOR;
3397 break;
3398 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3399 /* process only for ibss and mesh */
3400 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3401 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3402 return RX_DROP_MONITOR;
3403 break;
3404 default:
3405 return RX_DROP_MONITOR;
3406 }
3407
3408 /* queue up frame and kick off work to process it */
3409 skb_queue_tail(&sdata->skb_queue, rx->skb);
3410 ieee80211_queue_work(&rx->local->hw, &sdata->work);
3411 if (rx->sta)
3412 rx->sta->rx_stats.packets++;
3413
3414 return RX_QUEUED;
3415}
3416
3417static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
3418 struct ieee80211_rate *rate)
3419{
3420 struct ieee80211_sub_if_data *sdata;
3421 struct ieee80211_local *local = rx->local;
3422 struct sk_buff *skb = rx->skb, *skb2;
3423 struct net_device *prev_dev = NULL;
3424 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3425 int needed_headroom;
3426
3427 /*
3428 * If cooked monitor has been processed already, then
3429 * don't do it again. If not, set the flag.
3430 */
3431 if (rx->flags & IEEE80211_RX_CMNTR)
3432 goto out_free_skb;
3433 rx->flags |= IEEE80211_RX_CMNTR;
3434
3435 /* If there are no cooked monitor interfaces, just free the SKB */
3436 if (!local->cooked_mntrs)
3437 goto out_free_skb;
3438
3439 /* vendor data is long removed here */
3440 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA;
3441 /* room for the radiotap header based on driver features */
3442 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb);
3443
3444 if (skb_headroom(skb) < needed_headroom &&
3445 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
3446 goto out_free_skb;
3447
3448 /* prepend radiotap information */
3449 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
3450 false);
3451
3452 skb_reset_mac_header(skb);
3453 skb->ip_summed = CHECKSUM_UNNECESSARY;
3454 skb->pkt_type = PACKET_OTHERHOST;
3455 skb->protocol = htons(ETH_P_802_2);
3456
3457 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
3458 if (!ieee80211_sdata_running(sdata))
3459 continue;
3460
3461 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
3462 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
3463 continue;
3464
3465 if (prev_dev) {
3466 skb2 = skb_clone(skb, GFP_ATOMIC);
3467 if (skb2) {
3468 skb2->dev = prev_dev;
3469 netif_receive_skb(skb2);
3470 }
3471 }
3472
3473 prev_dev = sdata->dev;
3474 ieee80211_rx_stats(sdata->dev, skb->len);
3475 }
3476
3477 if (prev_dev) {
3478 skb->dev = prev_dev;
3479 netif_receive_skb(skb);
3480 return;
3481 }
3482
3483 out_free_skb:
3484 dev_kfree_skb(skb);
3485}
3486
3487static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
3488 ieee80211_rx_result res)
3489{
3490 switch (res) {
3491 case RX_DROP_MONITOR:
3492 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3493 if (rx->sta)
3494 rx->sta->rx_stats.dropped++;
3495 /* fall through */
3496 case RX_CONTINUE: {
3497 struct ieee80211_rate *rate = NULL;
3498 struct ieee80211_supported_band *sband;
3499 struct ieee80211_rx_status *status;
3500
3501 status = IEEE80211_SKB_RXCB((rx->skb));
3502
3503 sband = rx->local->hw.wiphy->bands[status->band];
3504 if (status->encoding == RX_ENC_LEGACY)
3505 rate = &sband->bitrates[status->rate_idx];
3506
3507 ieee80211_rx_cooked_monitor(rx, rate);
3508 break;
3509 }
3510 case RX_DROP_UNUSABLE:
3511 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
3512 if (rx->sta)
3513 rx->sta->rx_stats.dropped++;
3514 dev_kfree_skb(rx->skb);
3515 break;
3516 case RX_QUEUED:
3517 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
3518 break;
3519 }
3520}
3521
3522static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
3523 struct sk_buff_head *frames)
3524{
3525 ieee80211_rx_result res = RX_DROP_MONITOR;
3526 struct sk_buff *skb;
3527
3528#define CALL_RXH(rxh) \
3529 do { \
3530 res = rxh(rx); \
3531 if (res != RX_CONTINUE) \
3532 goto rxh_next; \
3533 } while (0)
3534
3535 /* Lock here to avoid hitting all of the data used in the RX
3536 * path (e.g. key data, station data, ...) concurrently when
3537 * a frame is released from the reorder buffer due to timeout
3538 * from the timer, potentially concurrently with RX from the
3539 * driver.
3540 */
3541 spin_lock_bh(&rx->local->rx_path_lock);
3542
3543 while ((skb = __skb_dequeue(frames))) {
3544 /*
3545 * all the other fields are valid across frames
3546 * that belong to an aMPDU since they are on the
3547 * same TID from the same station
3548 */
3549 rx->skb = skb;
3550
3551 CALL_RXH(ieee80211_rx_h_check_more_data);
3552 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
3553 CALL_RXH(ieee80211_rx_h_sta_process);
3554 CALL_RXH(ieee80211_rx_h_decrypt);
3555 CALL_RXH(ieee80211_rx_h_defragment);
3556 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
3557 /* must be after MMIC verify so header is counted in MPDU mic */
3558#ifdef CONFIG_MAC80211_MESH
3559 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
3560 CALL_RXH(ieee80211_rx_h_mesh_fwding);
3561#endif
3562 CALL_RXH(ieee80211_rx_h_amsdu);
3563 CALL_RXH(ieee80211_rx_h_data);
3564
3565 /* special treatment -- needs the queue */
3566 res = ieee80211_rx_h_ctrl(rx, frames);
3567 if (res != RX_CONTINUE)
3568 goto rxh_next;
3569
3570 CALL_RXH(ieee80211_rx_h_mgmt_check);
3571 CALL_RXH(ieee80211_rx_h_action);
3572 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
3573 CALL_RXH(ieee80211_rx_h_action_return);
3574 CALL_RXH(ieee80211_rx_h_mgmt);
3575
3576 rxh_next:
3577 ieee80211_rx_handlers_result(rx, res);
3578
3579#undef CALL_RXH
3580 }
3581
3582 spin_unlock_bh(&rx->local->rx_path_lock);
3583}
3584
3585static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
3586{
3587 struct sk_buff_head reorder_release;
3588 ieee80211_rx_result res = RX_DROP_MONITOR;
3589
3590 __skb_queue_head_init(&reorder_release);
3591
3592#define CALL_RXH(rxh) \
3593 do { \
3594 res = rxh(rx); \
3595 if (res != RX_CONTINUE) \
3596 goto rxh_next; \
3597 } while (0)
3598
3599 CALL_RXH(ieee80211_rx_h_check_dup);
3600 CALL_RXH(ieee80211_rx_h_check);
3601
3602 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
3603
3604 ieee80211_rx_handlers(rx, &reorder_release);
3605 return;
3606
3607 rxh_next:
3608 ieee80211_rx_handlers_result(rx, res);
3609
3610#undef CALL_RXH
3611}
3612
3613/*
3614 * This function makes calls into the RX path, therefore
3615 * it has to be invoked under RCU read lock.
3616 */
3617void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
3618{
3619 struct sk_buff_head frames;
3620 struct ieee80211_rx_data rx = {
3621 .sta = sta,
3622 .sdata = sta->sdata,
3623 .local = sta->local,
3624 /* This is OK -- must be QoS data frame */
3625 .security_idx = tid,
3626 .seqno_idx = tid,
3627 .napi = NULL, /* must be NULL to not have races */
3628 };
3629 struct tid_ampdu_rx *tid_agg_rx;
3630
3631 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3632 if (!tid_agg_rx)
3633 return;
3634
3635 __skb_queue_head_init(&frames);
3636
3637 spin_lock(&tid_agg_rx->reorder_lock);
3638 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3639 spin_unlock(&tid_agg_rx->reorder_lock);
3640
3641 if (!skb_queue_empty(&frames)) {
3642 struct ieee80211_event event = {
3643 .type = BA_FRAME_TIMEOUT,
3644 .u.ba.tid = tid,
3645 .u.ba.sta = &sta->sta,
3646 };
3647 drv_event_callback(rx.local, rx.sdata, &event);
3648 }
3649
3650 ieee80211_rx_handlers(&rx, &frames);
3651}
3652
3653void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
3654 u16 ssn, u64 filtered,
3655 u16 received_mpdus)
3656{
3657 struct sta_info *sta;
3658 struct tid_ampdu_rx *tid_agg_rx;
3659 struct sk_buff_head frames;
3660 struct ieee80211_rx_data rx = {
3661 /* This is OK -- must be QoS data frame */
3662 .security_idx = tid,
3663 .seqno_idx = tid,
3664 };
3665 int i, diff;
3666
3667 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
3668 return;
3669
3670 __skb_queue_head_init(&frames);
3671
3672 sta = container_of(pubsta, struct sta_info, sta);
3673
3674 rx.sta = sta;
3675 rx.sdata = sta->sdata;
3676 rx.local = sta->local;
3677
3678 rcu_read_lock();
3679 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
3680 if (!tid_agg_rx)
3681 goto out;
3682
3683 spin_lock_bh(&tid_agg_rx->reorder_lock);
3684
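	/* With at least half the sequence-number space worth of MPDUs
	 * reported, the reorder window has presumably moved too far to be
	 * advanced incrementally: flush everything that is buffered and
	 * jump head_seq_num straight to the reported ssn.
	 */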
3685 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
3686 int release;
3687
3688 /* release all frames in the reorder buffer */
3689 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
3690 IEEE80211_SN_MODULO;
3691 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
3692 release, &frames);
3693 /* update ssn to match received ssn */
3694 tid_agg_rx->head_seq_num = ssn;
3695 } else {
3696 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
3697 &frames);
3698 }
3699
3700	/* Handle the case where the received ssn is behind mac80211's
3701	 * head_seq_num; it can be up to buf_size - 1 behind and still be valid. */
3702 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
3703 if (diff >= tid_agg_rx->buf_size) {
3704 tid_agg_rx->reorder_buf_filtered = 0;
3705 goto release;
3706 }
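	/* Align the reported bitmap with head_seq_num: bit 0 of @filtered
	 * corresponds to @ssn, so shift both forward by the distance the
	 * reorder head is already ahead of the reported ssn.
	 */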
3707 filtered = filtered >> diff;
3708 ssn += diff;
3709
3710 /* update bitmap */
3711 for (i = 0; i < tid_agg_rx->buf_size; i++) {
3712 int index = (ssn + i) % tid_agg_rx->buf_size;
3713
3714 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
3715 if (filtered & BIT_ULL(i))
3716 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
3717 }
3718
3719	/* now also process any frames that the filter marking released */
3720 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
3721
3722release:
3723 spin_unlock_bh(&tid_agg_rx->reorder_lock);
3724
3725 ieee80211_rx_handlers(&rx, &frames);
3726
3727 out:
3728 rcu_read_unlock();
3729}
3730EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
3731
3732/* main receive path */
3733
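/*
 * Decide, based on the interface type and the addresses in the frame,
 * whether the frame should be processed on this interface at all;
 * returning false means it is ignored here (it may still be delivered
 * to another interface).
 */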
3734static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3735{
3736 struct ieee80211_sub_if_data *sdata = rx->sdata;
3737 struct sk_buff *skb = rx->skb;
3738 struct ieee80211_hdr *hdr = (void *)skb->data;
3739 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3740 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
3741 bool multicast = is_multicast_ether_addr(hdr->addr1);
3742
3743 switch (sdata->vif.type) {
3744 case NL80211_IFTYPE_STATION:
3745 if (!bssid && !sdata->u.mgd.use_4addr)
3746 return false;
3747 if (multicast)
3748 return true;
3749 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3750 case NL80211_IFTYPE_ADHOC:
3751 if (!bssid)
3752 return false;
3753 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3754 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3755 return false;
3756 if (ieee80211_is_beacon(hdr->frame_control))
3757 return true;
3758 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
3759 return false;
3760 if (!multicast &&
3761 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3762 return false;
3763 if (!rx->sta) {
3764 int rate_idx;
3765 if (status->encoding != RX_ENC_LEGACY)
3766 rate_idx = 0; /* TODO: HT/VHT rates */
3767 else
3768 rate_idx = status->rate_idx;
3769 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
3770 BIT(rate_idx));
3771 }
3772 return true;
3773 case NL80211_IFTYPE_OCB:
3774 if (!bssid)
3775 return false;
3776 if (!ieee80211_is_data_present(hdr->frame_control))
3777 return false;
3778 if (!is_broadcast_ether_addr(bssid))
3779 return false;
3780 if (!multicast &&
3781 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
3782 return false;
3783 if (!rx->sta) {
3784 int rate_idx;
3785 if (status->encoding != RX_ENC_LEGACY)
3786 rate_idx = 0; /* TODO: HT rates */
3787 else
3788 rate_idx = status->rate_idx;
3789 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
3790 BIT(rate_idx));
3791 }
3792 return true;
3793 case NL80211_IFTYPE_MESH_POINT:
3794 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
3795 return false;
3796 if (multicast)
3797 return true;
3798 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3799 case NL80211_IFTYPE_AP_VLAN:
3800 case NL80211_IFTYPE_AP:
3801 if (!bssid)
3802 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
3803
3804 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
3805 /*
3806 * Accept public action frames even when the
3807 * BSSID doesn't match, this is used for P2P
3808 * and location updates. Note that mac80211
3809 * itself never looks at these frames.
3810 */
3811 if (!multicast &&
3812 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3813 return false;
3814 if (ieee80211_is_public_action(hdr, skb->len))
3815 return true;
3816 return ieee80211_is_beacon(hdr->frame_control);
3817 }
3818
3819 if (!ieee80211_has_tods(hdr->frame_control)) {
3820 /* ignore data frames to TDLS-peers */
3821 if (ieee80211_is_data(hdr->frame_control))
3822 return false;
3823 /* ignore action frames to TDLS-peers */
3824 if (ieee80211_is_action(hdr->frame_control) &&
3825 !is_broadcast_ether_addr(bssid) &&
3826 !ether_addr_equal(bssid, hdr->addr1))
3827 return false;
3828 }
3829
3830 /*
3831 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3832 * the BSSID - we've checked that already but may have accepted
3833 * the wildcard (ff:ff:ff:ff:ff:ff).
3834 *
3835 * It also says:
3836 * The BSSID of the Data frame is determined as follows:
3837 * a) If the STA is contained within an AP or is associated
3838 * with an AP, the BSSID is the address currently in use
3839 * by the STA contained in the AP.
3840 *
3841 * So we should not accept data frames with an address that's
3842 * multicast.
3843 *
3844 * Accepting it also opens a security problem because stations
3845 * could encrypt it with the GTK and inject traffic that way.
3846 */
3847 if (ieee80211_is_data(hdr->frame_control) && multicast)
3848 return false;
3849
3850 return true;
3851 case NL80211_IFTYPE_WDS:
3852 if (bssid || !ieee80211_is_data(hdr->frame_control))
3853 return false;
3854 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
3855 case NL80211_IFTYPE_P2P_DEVICE:
3856 return ieee80211_is_public_action(hdr, skb->len) ||
3857 ieee80211_is_probe_req(hdr->frame_control) ||
3858 ieee80211_is_probe_resp(hdr->frame_control) ||
3859 ieee80211_is_beacon(hdr->frame_control);
3860 case NL80211_IFTYPE_NAN:
3861		/* Currently no frames on the NAN interface are allowed */
3862 return false;
3863 default:
3864 break;
3865 }
3866
3867 WARN_ON_ONCE(1);
3868 return false;
3869}
3870
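/*
 * (Re)compute the fast-rx state for a station: when the interface type,
 * cipher and powersave configuration allow it, a struct ieee80211_fast_rx
 * is published via RCU in sta->fast_rx so that the data fast path below
 * can bypass the full RX handler chain; otherwise any previously
 * published state is removed.
 */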
3871void ieee80211_check_fast_rx(struct sta_info *sta)
3872{
3873 struct ieee80211_sub_if_data *sdata = sta->sdata;
3874 struct ieee80211_local *local = sdata->local;
3875 struct ieee80211_key *key;
3876 struct ieee80211_fast_rx fastrx = {
3877 .dev = sdata->dev,
3878 .vif_type = sdata->vif.type,
3879 .control_port_protocol = sdata->control_port_protocol,
3880 }, *old, *new = NULL;
3881 bool assign = false;
3882
3883 /* use sparse to check that we don't return without updating */
3884 __acquire(check_fast_rx);
3885
3886 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
3887 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
3888 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
3889 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
3890
3891 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
3892
3893 /* fast-rx doesn't do reordering */
3894 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
3895 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
3896 goto clear;
3897
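	/* Record, per interface type, where DA/SA live in the 802.11 header
	 * and which To/From-DS bits a frame must carry; the fast path only
	 * accepts frames whose DS bits match expected_ds_bits exactly.
	 */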
3898 switch (sdata->vif.type) {
3899 case NL80211_IFTYPE_STATION:
3900 if (sta->sta.tdls) {
3901 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3902 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3903 fastrx.expected_ds_bits = 0;
3904 } else {
3905 fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
3906 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
3907 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
3908 fastrx.expected_ds_bits =
3909 cpu_to_le16(IEEE80211_FCTL_FROMDS);
3910 }
3911
3912 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
3913 fastrx.expected_ds_bits |=
3914 cpu_to_le16(IEEE80211_FCTL_TODS);
3915 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3916 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
3917 }
3918
3919 if (!sdata->u.mgd.powersave)
3920 break;
3921
3922 /* software powersave is a huge mess, avoid all of it */
3923 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
3924 goto clear;
3925 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
3926 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
3927 goto clear;
3928 break;
3929 case NL80211_IFTYPE_AP_VLAN:
3930 case NL80211_IFTYPE_AP:
3931 /* parallel-rx requires this, at least with calls to
3932 * ieee80211_sta_ps_transition()
3933 */
3934 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
3935 goto clear;
3936 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
3937 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
3938 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
3939
3940 fastrx.internal_forward =
3941 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
3942 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
3943 !sdata->u.vlan.sta);
3944
3945 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3946 sdata->u.vlan.sta) {
3947 fastrx.expected_ds_bits |=
3948 cpu_to_le16(IEEE80211_FCTL_FROMDS);
3949 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
3950 fastrx.internal_forward = 0;
3951 }
3952
3953 break;
3954 default:
3955 goto clear;
3956 }
3957
3958 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
3959 goto clear;
3960
3961 rcu_read_lock();
3962 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
3963 if (key) {
3964 switch (key->conf.cipher) {
3965 case WLAN_CIPHER_SUITE_TKIP:
3966 /* we don't want to deal with MMIC in fast-rx */
3967 goto clear_rcu;
3968 case WLAN_CIPHER_SUITE_CCMP:
3969 case WLAN_CIPHER_SUITE_CCMP_256:
3970 case WLAN_CIPHER_SUITE_GCMP:
3971 case WLAN_CIPHER_SUITE_GCMP_256:
3972 break;
3973 default:
3974 /* we also don't want to deal with WEP or cipher scheme
3975 * since those require looking up the key idx in the
3976 * frame, rather than assuming the PTK is used
3977 * (we need to revisit this once we implement the real
3978 * PTK index, which is now valid in the spec, but we
3979 * haven't implemented that part yet)
3980 */
3981 goto clear_rcu;
3982 }
3983
3984 fastrx.key = true;
3985 fastrx.icv_len = key->conf.icv_len;
3986 }
3987
3988 assign = true;
3989 clear_rcu:
3990 rcu_read_unlock();
3991 clear:
3992 __release(check_fast_rx);
3993
3994 if (assign)
3995 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
3996
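	/* Publish the new state (or NULL) under the STA lock and free the
	 * old one only after an RCU grace period, so a concurrent RX path
	 * that still holds a reference never sees freed memory.
	 */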
3997 spin_lock_bh(&sta->lock);
3998 old = rcu_dereference_protected(sta->fast_rx, true);
3999 rcu_assign_pointer(sta->fast_rx, new);
4000 spin_unlock_bh(&sta->lock);
4001
4002 if (old)
4003 kfree_rcu(old, rcu_head);
4004}
4005
4006void ieee80211_clear_fast_rx(struct sta_info *sta)
4007{
4008 struct ieee80211_fast_rx *old;
4009
4010 spin_lock_bh(&sta->lock);
4011 old = rcu_dereference_protected(sta->fast_rx, true);
4012 RCU_INIT_POINTER(sta->fast_rx, NULL);
4013 spin_unlock_bh(&sta->lock);
4014
4015 if (old)
4016 kfree_rcu(old, rcu_head);
4017}
4018
4019void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4020{
4021 struct ieee80211_local *local = sdata->local;
4022 struct sta_info *sta;
4023
4024 lockdep_assert_held(&local->sta_mtx);
4025
4026 list_for_each_entry_rcu(sta, &local->sta_list, list) {
4027 if (sdata != sta->sdata &&
4028 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4029 continue;
4030 ieee80211_check_fast_rx(sta);
4031 }
4032}
4033
4034void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4035{
4036 struct ieee80211_local *local = sdata->local;
4037
4038 mutex_lock(&local->sta_mtx);
4039 __ieee80211_check_fast_rx_iface(sdata);
4040 mutex_unlock(&local->sta_mtx);
4041}
4042
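/*
 * Data-frame fast path: for frames matching the precomputed fast-rx state
 * (duplicate detection done by the driver, PN validated when a key is in
 * use, no fragmentation, expected DS bits and addresses), the 802.11 ->
 * ethernet conversion and statistics are done inline and the frame is
 * delivered directly, bypassing the regular RX handler chain.  Returning
 * false punts the frame back to the slow path.
 */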
4043static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4044 struct ieee80211_fast_rx *fast_rx)
4045{
4046 struct sk_buff *skb = rx->skb;
4047 struct ieee80211_hdr *hdr = (void *)skb->data;
4048 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4049 struct sta_info *sta = rx->sta;
4050 int orig_len = skb->len;
4051 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4052 int snap_offs = hdrlen;
4053 struct {
4054 u8 snap[sizeof(rfc1042_header)];
4055 __be16 proto;
4056 } *payload __aligned(2);
4057 struct {
4058 u8 da[ETH_ALEN];
4059 u8 sa[ETH_ALEN];
4060 } addrs __aligned(2);
4061 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
4062
4063 if (fast_rx->uses_rss)
4064 stats = this_cpu_ptr(sta->pcpu_rx_stats);
4065
4066 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4067 * to a common data structure; drivers can implement that per queue
4068 * but we don't have that information in mac80211
4069 */
4070 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4071 return false;
4072
4073#define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4074
4075 /* If using encryption, we also need to have:
4076 * - PN_VALIDATED: similar, but the implementation is tricky
4077 * - DECRYPTED: necessary for PN_VALIDATED
4078 */
4079 if (fast_rx->key &&
4080 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4081 return false;
4082
4083 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4084 return false;
4085
4086 if (unlikely(ieee80211_is_frag(hdr)))
4087 return false;
4088
4089 /* Since our interface address cannot be multicast, this
4090 * implicitly also rejects multicast frames without the
4091 * explicit check.
4092 *
4093 * We shouldn't get any *data* frames not addressed to us
4094 * (AP mode will accept multicast *management* frames), but
4095 * punting here will make it go through the full checks in
4096 * ieee80211_accept_frame().
4097 */
4098 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4099 return false;
4100
4101 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4102 IEEE80211_FCTL_TODS)) !=
4103 fast_rx->expected_ds_bits)
4104 return false;
4105
4106	/* if a key is in use and the IV was not stripped by the driver,
4107	 * account for the IV when locating the SNAP header; unencrypted
4108	 * frames are dropped and the ICV is trimmed further below
4109	 */
4109 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4110 /* GCMP header length is the same */
4111 snap_offs += IEEE80211_CCMP_HDR_LEN;
4112 }
4113
4114 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
4115 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4116 goto drop;
4117
4118 payload = (void *)(skb->data + snap_offs);
4119
4120 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4121 return false;
4122
4123 /* Don't handle these here since they require special code.
4124 * Accept AARP and IPX even though they should come with a
4125 * bridge-tunnel header - but if we get them this way then
4126 * there's little point in discarding them.
4127 */
4128 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4129 payload->proto == fast_rx->control_port_protocol))
4130 return false;
4131 }
4132
4133 /* after this point, don't punt to the slowpath! */
4134
4135 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4136 pskb_trim(skb, skb->len - fast_rx->icv_len))
4137 goto drop;
4138
4139 if (unlikely(fast_rx->sta_notify)) {
4140 ieee80211_sta_rx_notify(rx->sdata, hdr);
4141 fast_rx->sta_notify = false;
4142 }
4143
4144 /* statistics part of ieee80211_rx_h_sta_process() */
4145 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4146 stats->last_signal = status->signal;
4147 if (!fast_rx->uses_rss)
4148 ewma_signal_add(&sta->rx_stats_avg.signal,
4149 -status->signal);
4150 }
4151
4152 if (status->chains) {
4153 int i;
4154
4155 stats->chains = status->chains;
4156 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4157 int signal = status->chain_signal[i];
4158
4159 if (!(status->chains & BIT(i)))
4160 continue;
4161
4162 stats->chain_signal_last[i] = signal;
4163 if (!fast_rx->uses_rss)
4164 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
4165 -signal);
4166 }
4167 }
4168 /* end of statistics */
4169
4170 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
4171 goto drop;
4172
4173 if (status->rx_flags & IEEE80211_RX_AMSDU) {
4174 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
4175 RX_QUEUED)
4176 goto drop;
4177
4178 return true;
4179 }
4180
4181 stats->last_rx = jiffies;
4182 stats->last_rate = sta_stats_encode_rate(status);
4183
4184 stats->fragments++;
4185 stats->packets++;
4186
4187 /* do the header conversion - first grab the addresses */
4188 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
4189 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
4190 /* remove the SNAP but leave the ethertype */
4191 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
4192 /* push the addresses in front */
4193 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
4194
4195 skb->dev = fast_rx->dev;
4196
4197 ieee80211_rx_stats(fast_rx->dev, skb->len);
4198
4199 /* The seqno index has the same property as needed
4200 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4201 * for non-QoS-data frames. Here we know it's a data
4202 * frame, so count MSDUs.
4203 */
4204 u64_stats_update_begin(&stats->syncp);
4205 stats->msdu[rx->seqno_idx]++;
4206 stats->bytes += orig_len;
4207 u64_stats_update_end(&stats->syncp);
4208
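	/* In AP mode, bridge frames destined for another associated station
	 * (and copies of multicast frames) back onto the wireless medium,
	 * instead of or in addition to delivering them locally.
	 */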
4209 if (fast_rx->internal_forward) {
4210 struct sk_buff *xmit_skb = NULL;
4211 bool multicast = is_multicast_ether_addr(skb->data);
4212
4213 if (multicast) {
4214 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4215 } else if (sta_info_get(rx->sdata, skb->data)) {
4216 xmit_skb = skb;
4217 skb = NULL;
4218 }
4219
4220 if (xmit_skb) {
4221 /*
4222 * Send to wireless media and increase priority by 256
4223 * to keep the received priority instead of
4224 * reclassifying the frame (see cfg80211_classify8021d).
4225 */
4226 xmit_skb->priority += 256;
4227 xmit_skb->protocol = htons(ETH_P_802_3);
4228 skb_reset_network_header(xmit_skb);
4229 skb_reset_mac_header(xmit_skb);
4230 dev_queue_xmit(xmit_skb);
4231 }
4232
4233 if (!skb)
4234 return true;
4235 }
4236
4237 /* deliver to local stack */
4238 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4239 memset(skb->cb, 0, sizeof(skb->cb));
4240 if (rx->napi)
4241 napi_gro_receive(rx->napi, skb);
4242 else
4243 netif_receive_skb(skb);
4244
4245 return true;
4246 drop:
4247 dev_kfree_skb(skb);
4248 stats->dropped++;
4249 return true;
4250}
4251
4252/*
4253 * This function returns whether the SKB was destined for
4254 * RX processing, which, if consume is true, is equivalent
4255 * to whether or not the skb was consumed.
4257 */
4258static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
4259 struct sk_buff *skb, bool consume)
4260{
4261 struct ieee80211_local *local = rx->local;
4262 struct ieee80211_sub_if_data *sdata = rx->sdata;
4263
4264 rx->skb = skb;
4265
4266 /* See if we can do fast-rx; if we have to copy we already lost,
4267 * so punt in that case. We should never have to deliver a data
4268 * frame to multiple interfaces anyway.
4269 *
4270 * We skip the ieee80211_accept_frame() call and do the necessary
4271 * checking inside ieee80211_invoke_fast_rx().
4272 */
4273 if (consume && rx->sta) {
4274 struct ieee80211_fast_rx *fast_rx;
4275
4276 fast_rx = rcu_dereference(rx->sta->fast_rx);
4277 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
4278 return true;
4279 }
4280
4281 if (!ieee80211_accept_frame(rx))
4282 return false;
4283
4284 if (!consume) {
4285 skb = skb_copy(skb, GFP_ATOMIC);
4286 if (!skb) {
4287 if (net_ratelimit())
4288 wiphy_debug(local->hw.wiphy,
4289 "failed to copy skb for %s\n",
4290 sdata->name);
4291 return true;
4292 }
4293
4294 rx->skb = skb;
4295 }
4296
4297 ieee80211_invoke_rx_handlers(rx);
4298 return true;
4299}
4300
4301/*
4302 * This is the actual Rx frames handler. As it belongs to the Rx path,
4303 * it must be called with rcu_read_lock protection.
4304 */
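/*
 * A data frame for a known station is handled on that station's interface
 * directly; otherwise the frame is offered to every running interface that
 * might want it.  Every candidate but the last gets a copy of the SKB, and
 * the last one consumes the original.
 */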
4305static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
4306 struct ieee80211_sta *pubsta,
4307 struct sk_buff *skb,
4308 struct napi_struct *napi)
4309{
4310 struct ieee80211_local *local = hw_to_local(hw);
4311 struct ieee80211_sub_if_data *sdata;
4312 struct ieee80211_hdr *hdr;
4313 __le16 fc;
4314 struct ieee80211_rx_data rx;
4315 struct ieee80211_sub_if_data *prev;
4316 struct rhlist_head *tmp;
4317 int err = 0;
4318
4319 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
4320 memset(&rx, 0, sizeof(rx));
4321 rx.skb = skb;
4322 rx.local = local;
4323 rx.napi = napi;
4324
4325 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
4326 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
4327
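	/* Management frames are linearized in full here; for other frames it
	 * is enough to have the 802.11 header in the linear part of the skb.
	 */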
4328 if (ieee80211_is_mgmt(fc)) {
4329 /* drop frame if too short for header */
4330 if (skb->len < ieee80211_hdrlen(fc))
4331 err = -ENOBUFS;
4332 else
4333 err = skb_linearize(skb);
4334 } else {
4335 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
4336 }
4337
4338 if (err) {
4339 dev_kfree_skb(skb);
4340 return;
4341 }
4342
4343 hdr = (struct ieee80211_hdr *)skb->data;
4344 ieee80211_parse_qos(&rx);
4345 ieee80211_verify_alignment(&rx);
4346
4347 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
4348 ieee80211_is_beacon(hdr->frame_control)))
4349 ieee80211_scan_rx(local, skb);
4350
4351 if (ieee80211_is_data(fc)) {
4352 struct sta_info *sta, *prev_sta;
4353
4354 if (pubsta) {
4355 rx.sta = container_of(pubsta, struct sta_info, sta);
4356 rx.sdata = rx.sta->sdata;
4357 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4358 return;
4359 goto out;
4360 }
4361
4362 prev_sta = NULL;
4363
4364 for_each_sta_info(local, hdr->addr2, sta, tmp) {
4365 if (!prev_sta) {
4366 prev_sta = sta;
4367 continue;
4368 }
4369
4370 rx.sta = prev_sta;
4371 rx.sdata = prev_sta->sdata;
4372 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4373
4374 prev_sta = sta;
4375 }
4376
4377 if (prev_sta) {
4378 rx.sta = prev_sta;
4379 rx.sdata = prev_sta->sdata;
4380
4381 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4382 return;
4383 goto out;
4384 }
4385 }
4386
4387 prev = NULL;
4388
4389 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4390 if (!ieee80211_sdata_running(sdata))
4391 continue;
4392
4393 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
4394 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
4395 continue;
4396
4397 /*
4398		 * the frame is destined for this interface, but it is only
4399		 * handed to the previous match here (as a copy); the last
4400		 * match is handled after the loop and consumes the SKB
4401 */
4402
4403 if (!prev) {
4404 prev = sdata;
4405 continue;
4406 }
4407
4408 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4409 rx.sdata = prev;
4410 ieee80211_prepare_and_rx_handle(&rx, skb, false);
4411
4412 prev = sdata;
4413 }
4414
4415 if (prev) {
4416 rx.sta = sta_info_get_bss(prev, hdr->addr2);
4417 rx.sdata = prev;
4418
4419 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
4420 return;
4421 }
4422
4423 out:
4424 dev_kfree_skb(skb);
4425}
4426
4427/*
4428 * This is the receive path handler. It is called by a low-level driver when an
4429 * 802.11 MPDU is received from the hardware.
4430 */
4431void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
4432 struct sk_buff *skb, struct napi_struct *napi)
4433{
4434 struct ieee80211_local *local = hw_to_local(hw);
4435 struct ieee80211_rate *rate = NULL;
4436 struct ieee80211_supported_band *sband;
4437 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4438
4439 WARN_ON_ONCE(softirq_count() == 0);
4440
4441 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
4442 goto drop;
4443
4444 sband = local->hw.wiphy->bands[status->band];
4445 if (WARN_ON(!sband))
4446 goto drop;
4447
4448 /*
4449 * If we're suspending, it is possible although not too likely
4450 * that we'd be receiving frames after having already partially
4451 * quiesced the stack. We can't process such frames then since
4452	 * that might, for example, cause stations to be added or other
4453	 * driver callbacks to be invoked.
4454 */
4455 if (unlikely(local->quiescing || local->suspended))
4456 goto drop;
4457
4458 /* We might be during a HW reconfig, prevent Rx for the same reason */
4459 if (unlikely(local->in_reconfig))
4460 goto drop;
4461
4462 /*
4463 * The same happens when we're not even started,
4464 * but that's worth a warning.
4465 */
4466 if (WARN_ON(!local->started))
4467 goto drop;
4468
4469 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
4470 /*
4471 * Validate the rate, unless a PLCP error means that
4472 * we probably can't have a valid rate here anyway.
4473 */
4474
4475 switch (status->encoding) {
4476 case RX_ENC_HT:
4477 /*
4478 * rate_idx is MCS index, which can be [0-76]
4479 * as documented on:
4480 *
4481 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
4482 *
4483 * Anything else would be some sort of driver or
4484 * hardware error. The driver should catch hardware
4485 * errors.
4486 */
4487 if (WARN(status->rate_idx > 76,
4488 "Rate marked as an HT rate but passed "
4489 "status->rate_idx is not "
4490 "an MCS index [0-76]: %d (0x%02x)\n",
4491 status->rate_idx,
4492 status->rate_idx))
4493 goto drop;
4494 break;
4495 case RX_ENC_VHT:
4496 if (WARN_ONCE(status->rate_idx > 9 ||
4497 !status->nss ||
4498 status->nss > 8,
4499 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
4500 status->rate_idx, status->nss))
4501 goto drop;
4502 break;
4503 case RX_ENC_HE:
4504 if (WARN_ONCE(status->rate_idx > 11 ||
4505 !status->nss ||
4506 status->nss > 8,
4507 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
4508 status->rate_idx, status->nss))
4509 goto drop;
4510 break;
4511 default:
4512 WARN_ON_ONCE(1);
4513 /* fall through */
4514 case RX_ENC_LEGACY:
4515 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
4516 goto drop;
4517 rate = &sband->bitrates[status->rate_idx];
4518 }
4519 }
4520
4521 status->rx_flags = 0;
4522
4523 /*
4524 * key references and virtual interfaces are protected using RCU
4525 * and this requires that we are in a read-side RCU section during
4526 * receive processing
4527 */
4528 rcu_read_lock();
4529
4530 /*
4531	 * Frames with a failed FCS/PLCP checksum are not returned;
4532	 * all other frames are returned without the radiotap header
4533	 * if one was previously present.
4534	 * Also, frames shorter than 16 bytes are dropped.
4535 */
4536 skb = ieee80211_rx_monitor(local, skb, rate);
4537 if (!skb) {
4538 rcu_read_unlock();
4539 return;
4540 }
4541
4542 ieee80211_tpt_led_trig_rx(local,
4543 ((struct ieee80211_hdr *)skb->data)->frame_control,
4544 skb->len);
4545
4546 __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
4547
4548 rcu_read_unlock();
4549
4550 return;
4551 drop:
4552 kfree_skb(skb);
4553}
4554EXPORT_SYMBOL(ieee80211_rx_napi);
4555
4556/* This is a version of the rx handler that can be called from hard IRQ
4557 * context. It posts the skb on the queue and schedules the tasklet. */
4558void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
4559{
4560 struct ieee80211_local *local = hw_to_local(hw);
4561
4562 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
4563
4564 skb->pkt_type = IEEE80211_RX_MSG;
4565 skb_queue_tail(&local->skb_queue, skb);
4566 tasklet_schedule(&local->tasklet);
4567}
4568EXPORT_SYMBOL(ieee80211_rx_irqsafe);