1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2013-2014 Intel Mobile Communications GmbH
6 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
7 * Copyright (C) 2018-2021 Intel Corporation
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/etherdevice.h>
13#include <linux/netdevice.h>
14#include <linux/types.h>
15#include <linux/slab.h>
16#include <linux/skbuff.h>
17#include <linux/if_arp.h>
18#include <linux/timer.h>
19#include <linux/rtnetlink.h>
20
21#include <net/codel.h>
22#include <net/mac80211.h>
23#include "ieee80211_i.h"
24#include "driver-ops.h"
25#include "rate.h"
26#include "sta_info.h"
27#include "debugfs_sta.h"
28#include "mesh.h"
29#include "wme.h"
30
31/**
32 * DOC: STA information lifetime rules
33 *
34 * STA info structures (&struct sta_info) are managed in a hash table
35 * for faster lookup and a list for iteration. They are managed using
36 * RCU, i.e. access to the list and hash table is protected by RCU.
37 *
38 * Upon allocating a STA info structure with sta_info_alloc(), the caller
39 * owns that structure. It must then insert it into the hash table using
40 * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
41 * case (which acquires an rcu read section but must not be called from
42 * within one) will the pointer still be valid after the call. Note that
43 * the caller may not do much with the STA info before inserting it; in
44 * particular, it may not start any mesh peer link management or add
45 * encryption keys.
46 *
47 * When the insertion fails (sta_info_insert() returns non-zero), the
48 * structure will have been freed by sta_info_insert()!
49 *
50 * Station entries are added by mac80211 when you establish a link with a
51 * peer. This means different things for the different types of interfaces
52 * we support. For a regular station this means we add the AP sta when we
53 * receive an association response from the AP. For IBSS this occurs when
54 * we get to know about a peer on the same IBSS. For WDS we add the sta for
55 * the peer immediately upon device open. When using AP mode we add stations
56 * for each peer upon request from userspace through nl80211.
57 *
58 * In order to remove a STA info structure, various sta_info_destroy_*()
59 * calls are available.
60 *
61 * There is no concept of ownership on a STA entry, each structure is
62 * owned by the global hash table/list until it is removed. All users of
63 * the structure need to be RCU protected so that the structure won't be
64 * freed before they are done using it.
65 */
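
/*
 * Illustrative sketch only (not part of this file): how a caller might
 * follow the lifetime rules described above.  ieee80211_example_add_peer()
 * is a hypothetical helper; the sta_info_*() calls and sta_dbg() are the
 * ones defined in mac80211.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int ieee80211_example_add_peer(struct ieee80211_sub_if_data *sdata,
				      const u8 *addr)
{
	struct sta_info *sta;
	int err;

	/* after allocation the caller owns the structure */
	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
	if (!sta)
		return -ENOMEM;

	/* on error the structure has already been freed for us */
	err = sta_info_insert(sta);
	if (err)
		return err;

	/* once inserted, look-ups must stay inside an RCU read section */
	rcu_read_lock();
	sta = sta_info_get(sdata, addr);
	if (sta)
		sta_dbg(sdata, "peer %pM is present\n", sta->sta.addr);
	rcu_read_unlock();

	return 0;
}
#endif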
66
67static const struct rhashtable_params sta_rht_params = {
68 .nelem_hint = 3, /* start small */
69 .automatic_shrinking = true,
70 .head_offset = offsetof(struct sta_info, hash_node),
71 .key_offset = offsetof(struct sta_info, addr),
72 .key_len = ETH_ALEN,
73 .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
74};
75
76/* Caller must hold local->sta_mtx */
77static int sta_info_hash_del(struct ieee80211_local *local,
78 struct sta_info *sta)
79{
80 return rhltable_remove(&local->sta_hash, &sta->hash_node,
81 sta_rht_params);
82}
83
84static void __cleanup_single_sta(struct sta_info *sta)
85{
86 int ac, i;
87 struct tid_ampdu_tx *tid_tx;
88 struct ieee80211_sub_if_data *sdata = sta->sdata;
89 struct ieee80211_local *local = sdata->local;
90 struct ps_data *ps;
91
92 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
93 test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
94 test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
95 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
96 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
97 ps = &sdata->bss->ps;
98 else if (ieee80211_vif_is_mesh(&sdata->vif))
99 ps = &sdata->u.mesh.ps;
100 else
101 return;
102
103 clear_sta_flag(sta, WLAN_STA_PS_STA);
104 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
105 clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
106
107 atomic_dec(&ps->num_sta_ps);
108 }
109
110 if (sta->sta.txq[0]) {
111 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
112 struct txq_info *txqi;
113
114 if (!sta->sta.txq[i])
115 continue;
116
117 txqi = to_txq_info(sta->sta.txq[i]);
118
119 ieee80211_txq_purge(local, txqi);
120 }
121 }
122
123 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
124 local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
125 ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
126 ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
127 }
128
129 if (ieee80211_vif_is_mesh(&sdata->vif))
130 mesh_sta_cleanup(sta);
131
132 cancel_work_sync(&sta->drv_deliver_wk);
133
134 /*
135 * Destroy aggregation state here. It would be nice to wait for the
136 * driver to finish aggregation stop and then clean up, but for now
137 * drivers have to handle aggregation stop being requested, followed
138 * directly by station destruction.
139 */
140 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
141 kfree(sta->ampdu_mlme.tid_start_tx[i]);
142 tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
143 if (!tid_tx)
144 continue;
145 ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
146 kfree(tid_tx);
147 }
148}
149
150static void cleanup_single_sta(struct sta_info *sta)
151{
152 struct ieee80211_sub_if_data *sdata = sta->sdata;
153 struct ieee80211_local *local = sdata->local;
154
155 __cleanup_single_sta(sta);
156 sta_info_free(local, sta);
157}
158
159struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
160 const u8 *addr)
161{
162 return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
163}
164
165/* protected by RCU */
166struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
167 const u8 *addr)
168{
169 struct ieee80211_local *local = sdata->local;
170 struct rhlist_head *tmp;
171 struct sta_info *sta;
172
173 rcu_read_lock();
174 for_each_sta_info(local, addr, sta, tmp) {
175 if (sta->sdata == sdata) {
176 rcu_read_unlock();
177 /* this is safe as the caller must already hold
178 * another rcu read section or the mutex
179 */
180 return sta;
181 }
182 }
183 rcu_read_unlock();
184 return NULL;
185}
186
187/*
188 * Get sta info either from the specified interface
189 * or from one of its vlans
190 */
191struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
192 const u8 *addr)
193{
194 struct ieee80211_local *local = sdata->local;
195 struct rhlist_head *tmp;
196 struct sta_info *sta;
197
198 rcu_read_lock();
199 for_each_sta_info(local, addr, sta, tmp) {
200 if (sta->sdata == sdata ||
201 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
202 rcu_read_unlock();
203 /* this is safe as the caller must already hold
204 * another rcu read section or the mutex
205 */
206 return sta;
207 }
208 }
209 rcu_read_unlock();
210 return NULL;
211}
212
213struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
214 int idx)
215{
216 struct ieee80211_local *local = sdata->local;
217 struct sta_info *sta;
218 int i = 0;
219
220 list_for_each_entry_rcu(sta, &local->sta_list, list,
221 lockdep_is_held(&local->sta_mtx)) {
222 if (sdata != sta->sdata)
223 continue;
224 if (i < idx) {
225 ++i;
226 continue;
227 }
228 return sta;
229 }
230
231 return NULL;
232}
233
234/**
235 * sta_info_free - free STA
236 *
237 * @local: pointer to the global information
238 * @sta: STA info to free
239 *
240 * This function must undo everything done by sta_info_alloc()
241 * that may happen before sta_info_insert(). It may only be
242 * called when sta_info_insert() has not been attempted (and
243 * if that fails, the station is freed anyway.)
244 */
245void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
246{
247 /*
248 * If we had used sta_info_pre_move_state() then we might not
249 * have gone through the state transitions down again, so do
250 * it here now (and warn if it's inserted).
251 *
252 * This will clear state such as fast TX/RX that may have been
253 * allocated during state transitions.
254 */
255 while (sta->sta_state > IEEE80211_STA_NONE) {
256 int ret;
257
258 WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
259
260 ret = sta_info_move_state(sta, sta->sta_state - 1);
261 if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
262 break;
263 }
264
265 if (sta->rate_ctrl)
266 rate_control_free_sta(sta);
267
268 sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
269
270 if (sta->sta.txq[0])
271 kfree(to_txq_info(sta->sta.txq[0]));
272 kfree(rcu_dereference_raw(sta->sta.rates));
273#ifdef CONFIG_MAC80211_MESH
274 kfree(sta->mesh);
275#endif
276 free_percpu(sta->pcpu_rx_stats);
277 kfree(sta);
278}
279
280/* Caller must hold local->sta_mtx */
281static int sta_info_hash_add(struct ieee80211_local *local,
282 struct sta_info *sta)
283{
284 return rhltable_insert(&local->sta_hash, &sta->hash_node,
285 sta_rht_params);
286}
287
288static void sta_deliver_ps_frames(struct work_struct *wk)
289{
290 struct sta_info *sta;
291
292 sta = container_of(wk, struct sta_info, drv_deliver_wk);
293
294 if (sta->dead)
295 return;
296
297 local_bh_disable();
298 if (!test_sta_flag(sta, WLAN_STA_PS_STA))
299 ieee80211_sta_ps_deliver_wakeup(sta);
300 else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
301 ieee80211_sta_ps_deliver_poll_response(sta);
302 else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
303 ieee80211_sta_ps_deliver_uapsd(sta);
304 local_bh_enable();
305}
306
307static int sta_prepare_rate_control(struct ieee80211_local *local,
308 struct sta_info *sta, gfp_t gfp)
309{
310 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
311 return 0;
312
313 sta->rate_ctrl = local->rate_ctrl;
314 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
315 sta, gfp);
316 if (!sta->rate_ctrl_priv)
317 return -ENOMEM;
318
319 return 0;
320}
321
322struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
323 const u8 *addr, gfp_t gfp)
324{
325 struct ieee80211_local *local = sdata->local;
326 struct ieee80211_hw *hw = &local->hw;
327 struct sta_info *sta;
328 int i;
329
330 sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
331 if (!sta)
332 return NULL;
333
334 if (ieee80211_hw_check(hw, USES_RSS)) {
335 sta->pcpu_rx_stats =
336 alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
337 if (!sta->pcpu_rx_stats)
338 goto free;
339 }
340
341 spin_lock_init(&sta->lock);
342 spin_lock_init(&sta->ps_lock);
343 INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
344 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
345 mutex_init(&sta->ampdu_mlme.mtx);
346#ifdef CONFIG_MAC80211_MESH
347 if (ieee80211_vif_is_mesh(&sdata->vif)) {
348 sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
349 if (!sta->mesh)
350 goto free;
351 sta->mesh->plink_sta = sta;
352 spin_lock_init(&sta->mesh->plink_lock);
353 if (ieee80211_vif_is_mesh(&sdata->vif) &&
354 !sdata->u.mesh.user_mpm)
355 timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
356 0);
357 sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
358 }
359#endif
360
361 memcpy(sta->addr, addr, ETH_ALEN);
362 memcpy(sta->sta.addr, addr, ETH_ALEN);
363 sta->sta.max_rx_aggregation_subframes =
364 local->hw.max_rx_aggregation_subframes;
365
366 /* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
367 * The Tx path starts to use a key as soon as the key slot that ptk_idx
368 * references is not NULL. To avoid using the initial Rx-only key
369 * prematurely for Tx, initialize ptk_idx to an impossible PTK keyid
370 * which will always refer to a NULL key.
371 */
372 BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
373 sta->ptk_idx = INVALID_PTK_KEYIDX;
374
375 sta->local = local;
376 sta->sdata = sdata;
377 sta->rx_stats.last_rx = jiffies;
378
379 u64_stats_init(&sta->rx_stats.syncp);
380
381 ieee80211_init_frag_cache(&sta->frags);
382
383 sta->sta_state = IEEE80211_STA_NONE;
384
385 /* Mark TID as unreserved */
386 sta->reserved_tid = IEEE80211_TID_UNRESERVED;
387
388 sta->last_connected = ktime_get_seconds();
389 ewma_signal_init(&sta->rx_stats_avg.signal);
390 ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
391 for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
392 ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);
393
394 if (local->ops->wake_tx_queue) {
395 void *txq_data;
396 int size = sizeof(struct txq_info) +
397 ALIGN(hw->txq_data_size, sizeof(void *));
398
399 txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
400 if (!txq_data)
401 goto free;
402
403 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
404 struct txq_info *txq = txq_data + i * size;
405
406 /* might not do anything for the bufferable MMPDU TXQ */
407 ieee80211_txq_init(sdata, sta, txq, i);
408 }
409 }
410
411 if (sta_prepare_rate_control(local, sta, gfp))
412 goto free_txq;
413
414 sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
415
416 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
417 skb_queue_head_init(&sta->ps_tx_buf[i]);
418 skb_queue_head_init(&sta->tx_filtered[i]);
419 sta->airtime[i].deficit = sta->airtime_weight;
420 }
421
422 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
423 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
424
425 for (i = 0; i < NUM_NL80211_BANDS; i++) {
426 u32 mandatory = 0;
427 int r;
428
429 if (!hw->wiphy->bands[i])
430 continue;
431
432 switch (i) {
433 case NL80211_BAND_2GHZ:
434 /*
435 * We use both here, even if we cannot really know for
436 * sure the station will support both, but the only use
437 * for this is when we don't know anything yet and send
438 * management frames, and then we'll pick the lowest
439 * possible rate anyway.
440 * If we don't include _G here, we cannot find a rate
441 * in P2P, and thus trigger the WARN_ONCE() in rate.c
442 */
443 mandatory = IEEE80211_RATE_MANDATORY_B |
444 IEEE80211_RATE_MANDATORY_G;
445 break;
446 case NL80211_BAND_5GHZ:
447 mandatory = IEEE80211_RATE_MANDATORY_A;
448 break;
449 case NL80211_BAND_60GHZ:
450 WARN_ON(1);
451 mandatory = 0;
452 break;
453 }
454
455 for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
456 struct ieee80211_rate *rate;
457
458 rate = &hw->wiphy->bands[i]->bitrates[r];
459
460 if (!(rate->flags & mandatory))
461 continue;
462 sta->sta.supp_rates[i] |= BIT(r);
463 }
464 }
465
466 sta->sta.smps_mode = IEEE80211_SMPS_OFF;
467 if (sdata->vif.type == NL80211_IFTYPE_AP ||
468 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
469 struct ieee80211_supported_band *sband;
470 u8 smps;
471
472 sband = ieee80211_get_sband(sdata);
473 if (!sband)
474 goto free_txq;
475
476 smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
477 IEEE80211_HT_CAP_SM_PS_SHIFT;
478 /*
479 * Assume that hostapd advertises our caps in the beacon and
480 * this is the known_smps_mode for a station that just associated
481 */
482 switch (smps) {
483 case WLAN_HT_SMPS_CONTROL_DISABLED:
484 sta->known_smps_mode = IEEE80211_SMPS_OFF;
485 break;
486 case WLAN_HT_SMPS_CONTROL_STATIC:
487 sta->known_smps_mode = IEEE80211_SMPS_STATIC;
488 break;
489 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
490 sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
491 break;
492 default:
493 WARN_ON(1);
494 }
495 }
496
497 sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
498
499 sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
500 sta->cparams.target = MS2TIME(20);
501 sta->cparams.interval = MS2TIME(100);
502 sta->cparams.ecn = true;
503
504 sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
505
506 return sta;
507
508free_txq:
509 if (sta->sta.txq[0])
510 kfree(to_txq_info(sta->sta.txq[0]));
511free:
512 free_percpu(sta->pcpu_rx_stats);
513#ifdef CONFIG_MAC80211_MESH
514 kfree(sta->mesh);
515#endif
516 kfree(sta);
517 return NULL;
518}
519
520static int sta_info_insert_check(struct sta_info *sta)
521{
522 struct ieee80211_sub_if_data *sdata = sta->sdata;
523
524 /*
525 * Can't be a WARN_ON because it can be triggered through a race:
526 * something inserts a STA (on one CPU) without holding the RTNL
527 * and another CPU turns off the net device.
528 */
529 if (unlikely(!ieee80211_sdata_running(sdata)))
530 return -ENETDOWN;
531
532 if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
533 is_multicast_ether_addr(sta->sta.addr)))
534 return -EINVAL;
535
536 /* The RCU read lock is required by rhashtable due to
537 * asynchronous resize/rehash. We also require the mutex
538 * for correctness.
539 */
540 rcu_read_lock();
541 lockdep_assert_held(&sdata->local->sta_mtx);
542 if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
543 ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
544 rcu_read_unlock();
545 return -ENOTUNIQ;
546 }
547 rcu_read_unlock();
548
549 return 0;
550}
551
552static int sta_info_insert_drv_state(struct ieee80211_local *local,
553 struct ieee80211_sub_if_data *sdata,
554 struct sta_info *sta)
555{
556 enum ieee80211_sta_state state;
557 int err = 0;
558
559 for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
560 err = drv_sta_state(local, sdata, sta, state, state + 1);
561 if (err)
562 break;
563 }
564
565 if (!err) {
566 /*
567 * Drivers using legacy sta_add/sta_remove callbacks only
568 * get uploaded set to true after sta_add is called.
569 */
570 if (!local->ops->sta_add)
571 sta->uploaded = true;
572 return 0;
573 }
574
575 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
576 sdata_info(sdata,
577 "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
578 sta->sta.addr, state + 1, err);
579 err = 0;
580 }
581
582 /* unwind on error */
583 for (; state > IEEE80211_STA_NOTEXIST; state--)
584 WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));
585
586 return err;
587}
588
589static void
590ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
591{
592 struct ieee80211_local *local = sdata->local;
593 bool allow_p2p_go_ps = sdata->vif.p2p;
594 struct sta_info *sta;
595
596 rcu_read_lock();
597 list_for_each_entry_rcu(sta, &local->sta_list, list) {
598 if (sdata != sta->sdata ||
599 !test_sta_flag(sta, WLAN_STA_ASSOC))
600 continue;
601 if (!sta->sta.support_p2p_ps) {
602 allow_p2p_go_ps = false;
603 break;
604 }
605 }
606 rcu_read_unlock();
607
608 if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
609 sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
610 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
611 }
612}
613
614/*
615 * should be called with sta_mtx locked
616 * this function replaces the mutex lock
617 * with an RCU lock
618 */
619static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
620{
621 struct ieee80211_local *local = sta->local;
622 struct ieee80211_sub_if_data *sdata = sta->sdata;
623 struct station_info *sinfo = NULL;
624 int err = 0;
625
626 lockdep_assert_held(&local->sta_mtx);
627
628 /* check if STA exists already */
629 if (sta_info_get_bss(sdata, sta->sta.addr)) {
630 err = -EEXIST;
631 goto out_err;
632 }
633
634 sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
635 if (!sinfo) {
636 err = -ENOMEM;
637 goto out_err;
638 }
639
640 local->num_sta++;
641 local->sta_generation++;
642 smp_mb();
643
644 /* simplify things and don't accept BA sessions yet */
645 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
646
647 /* make the station visible */
648 err = sta_info_hash_add(local, sta);
649 if (err)
650 goto out_drop_sta;
651
652 list_add_tail_rcu(&sta->list, &local->sta_list);
653
654 /* notify driver */
655 err = sta_info_insert_drv_state(local, sdata, sta);
656 if (err)
657 goto out_remove;
658
659 set_sta_flag(sta, WLAN_STA_INSERTED);
660
661 if (sta->sta_state >= IEEE80211_STA_ASSOC) {
662 ieee80211_recalc_min_chandef(sta->sdata);
663 if (!sta->sta.support_p2p_ps)
664 ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
665 }
666
667 /* accept BA sessions now */
668 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
669
670 ieee80211_sta_debugfs_add(sta);
671 rate_control_add_sta_debugfs(sta);
672
673 sinfo->generation = local->sta_generation;
674 cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
675 kfree(sinfo);
676
677 sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
678
679 /* move reference to rcu-protected */
680 rcu_read_lock();
681 mutex_unlock(&local->sta_mtx);
682
683 if (ieee80211_vif_is_mesh(&sdata->vif))
684 mesh_accept_plinks_update(sdata);
685
686 return 0;
687 out_remove:
688 sta_info_hash_del(local, sta);
689 list_del_rcu(&sta->list);
690 out_drop_sta:
691 local->num_sta--;
692 synchronize_net();
693 cleanup_single_sta(sta);
694 out_err:
695 mutex_unlock(&local->sta_mtx);
696 kfree(sinfo);
697 rcu_read_lock();
698 return err;
699}
700
701int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
702{
703 struct ieee80211_local *local = sta->local;
704 int err;
705
706 might_sleep();
707
708 mutex_lock(&local->sta_mtx);
709
710 err = sta_info_insert_check(sta);
711 if (err) {
712 sta_info_free(local, sta);
713 mutex_unlock(&local->sta_mtx);
714 rcu_read_lock();
715 return err;
716 }
717
718 return sta_info_insert_finish(sta);
719}
720
721int sta_info_insert(struct sta_info *sta)
722{
723 int err = sta_info_insert_rcu(sta);
724
725 rcu_read_unlock();
726
727 return err;
728}
729
730static inline void __bss_tim_set(u8 *tim, u16 id)
731{
732 /*
733 * This format has been mandated by the IEEE specifications,
734 * so this line may not be changed to use the __set_bit() format.
735 */
736 tim[id / 8] |= (1 << (id % 8));
737}
738
739static inline void __bss_tim_clear(u8 *tim, u16 id)
740{
741 /*
742 * This format has been mandated by the IEEE specifications,
743 * so this line may not be changed to use the __clear_bit() format.
744 */
745 tim[id / 8] &= ~(1 << (id % 8));
746}
747
748static inline bool __bss_tim_get(u8 *tim, u16 id)
749{
750 /*
751 * This format has been mandated by the IEEE specifications,
752 * so this line may not be changed to use the test_bit() format.
753 */
754 return tim[id / 8] & (1 << (id % 8));
755}
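
/*
 * Worked example for the helpers above: for association ID 13 they touch
 * tim[13 / 8] == tim[1], bit 13 % 8 == 5, i.e. tim[1] |= 0x20.  This is
 * exactly the octet/bit layout of the TIM element's partial virtual
 * bitmap, which is why the generic __set_bit()/test_bit() helpers cannot
 * be substituted here.
 */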
756
757static unsigned long ieee80211_tids_for_ac(int ac)
758{
759 /* If we ever support TIDs > 7, this obviously needs to be adjusted */
760 switch (ac) {
761 case IEEE80211_AC_VO:
762 return BIT(6) | BIT(7);
763 case IEEE80211_AC_VI:
764 return BIT(4) | BIT(5);
765 case IEEE80211_AC_BE:
766 return BIT(0) | BIT(3);
767 case IEEE80211_AC_BK:
768 return BIT(1) | BIT(2);
769 default:
770 WARN_ON(1);
771 return 0;
772 }
773}
774
775static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
776{
777 struct ieee80211_local *local = sta->local;
778 struct ps_data *ps;
779 bool indicate_tim = false;
780 u8 ignore_for_tim = sta->sta.uapsd_queues;
781 int ac;
782 u16 id = sta->sta.aid;
783
784 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
785 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
786 if (WARN_ON_ONCE(!sta->sdata->bss))
787 return;
788
789 ps = &sta->sdata->bss->ps;
790#ifdef CONFIG_MAC80211_MESH
791 } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
792 ps = &sta->sdata->u.mesh.ps;
793#endif
794 } else {
795 return;
796 }
797
798 /* No need to do anything if the driver does all */
799 if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
800 return;
801
802 if (sta->dead)
803 goto done;
804
805 /*
806 * If all ACs are delivery-enabled then we should build
807 * the TIM bit for all ACs anyway; if only some are then
808 * we ignore those and build the TIM bit using only the
809 * non-enabled ones.
810 */
811 if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
812 ignore_for_tim = 0;
813
814 if (ignore_pending)
815 ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;
816
817 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
818 unsigned long tids;
819
820 if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
821 continue;
822
823 indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
824 !skb_queue_empty(&sta->ps_tx_buf[ac]);
825 if (indicate_tim)
826 break;
827
828 tids = ieee80211_tids_for_ac(ac);
829
830 indicate_tim |=
831 sta->driver_buffered_tids & tids;
832 indicate_tim |=
833 sta->txq_buffered_tids & tids;
834 }
835
836 done:
837 spin_lock_bh(&local->tim_lock);
838
839 if (indicate_tim == __bss_tim_get(ps->tim, id))
840 goto out_unlock;
841
842 if (indicate_tim)
843 __bss_tim_set(ps->tim, id);
844 else
845 __bss_tim_clear(ps->tim, id);
846
847 if (local->ops->set_tim && !WARN_ON(sta->dead)) {
848 local->tim_in_locked_section = true;
849 drv_set_tim(local, &sta->sta, indicate_tim);
850 local->tim_in_locked_section = false;
851 }
852
853out_unlock:
854 spin_unlock_bh(&local->tim_lock);
855}
856
857void sta_info_recalc_tim(struct sta_info *sta)
858{
859 __sta_info_recalc_tim(sta, false);
860}
861
862static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
863{
864 struct ieee80211_tx_info *info;
865 int timeout;
866
867 if (!skb)
868 return false;
869
870 info = IEEE80211_SKB_CB(skb);
871
872 /* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
873 timeout = (sta->listen_interval *
874 sta->sdata->vif.bss_conf.beacon_int *
875 32 / 15625) * HZ;
876 if (timeout < STA_TX_BUFFER_EXPIRE)
877 timeout = STA_TX_BUFFER_EXPIRE;
878 return time_after(jiffies, info->control.jiffies + timeout);
879}
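
/*
 * The 32 / 15625 factor above is simply 2 * 1024 / 1000000 reduced: one TU
 * is 1024 usec, so twice listen_interval * beacon_int TUs expressed in
 * seconds is li * bi * 2048 / 1000000 == li * bi * 32 / 15625.  For
 * example, listen_interval 10 and beacon_int 100 TU gives
 * 10 * 100 * 32 / 15625 == 2 seconds (which is then scaled by HZ into
 * jiffies) before a buffered frame is considered expired.
 */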
880
881
882static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
883 struct sta_info *sta, int ac)
884{
885 unsigned long flags;
886 struct sk_buff *skb;
887
888 /*
889 * First check for frames that should expire on the filtered
890 * queue. Frames here were rejected by the driver and are on
891 * a separate queue to avoid reordering with normal PS-buffered
892 * frames. They also aren't accounted for right now in the
893 * total_ps_buffered counter.
894 */
895 for (;;) {
896 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
897 skb = skb_peek(&sta->tx_filtered[ac]);
898 if (sta_info_buffer_expired(sta, skb))
899 skb = __skb_dequeue(&sta->tx_filtered[ac]);
900 else
901 skb = NULL;
902 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
903
904 /*
905 * Frames are queued in order, so if this one
906 * hasn't expired yet we can stop testing. If
907 * we actually reached the end of the queue we
908 * also need to stop, of course.
909 */
910 if (!skb)
911 break;
912 ieee80211_free_txskb(&local->hw, skb);
913 }
914
915 /*
916 * Now also check the normal PS-buffered queue, this will
917 * only find something if the filtered queue was emptied
918 * since the filtered frames are all before the normal PS
919 * buffered frames.
920 */
921 for (;;) {
922 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
923 skb = skb_peek(&sta->ps_tx_buf[ac]);
924 if (sta_info_buffer_expired(sta, skb))
925 skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
926 else
927 skb = NULL;
928 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
929
930 /*
931 * frames are queued in order, so if this one
932 * hasn't expired yet (or we reached the end of
933 * the queue) we can stop testing
934 */
935 if (!skb)
936 break;
937
938 local->total_ps_buffered--;
939 ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
940 sta->sta.addr);
941 ieee80211_free_txskb(&local->hw, skb);
942 }
943
944 /*
945 * Finally, recalculate the TIM bit for this station -- it might
946 * now be clear because the station was too slow to retrieve its
947 * frames.
948 */
949 sta_info_recalc_tim(sta);
950
951 /*
952 * Return whether there are any frames still buffered, this is
953 * used to check whether the cleanup timer still needs to run,
954 * if there are no frames we don't need to rearm the timer.
955 */
956 return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
957 skb_queue_empty(&sta->tx_filtered[ac]));
958}
959
960static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
961 struct sta_info *sta)
962{
963 bool have_buffered = false;
964 int ac;
965
966 /* This is only necessary for stations on BSS/MBSS interfaces */
967 if (!sta->sdata->bss &&
968 !ieee80211_vif_is_mesh(&sta->sdata->vif))
969 return false;
970
971 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
972 have_buffered |=
973 sta_info_cleanup_expire_buffered_ac(local, sta, ac);
974
975 return have_buffered;
976}
977
978static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
979{
980 struct ieee80211_local *local;
981 struct ieee80211_sub_if_data *sdata;
982 int ret;
983
984 might_sleep();
985
986 if (!sta)
987 return -ENOENT;
988
989 local = sta->local;
990 sdata = sta->sdata;
991
992 lockdep_assert_held(&local->sta_mtx);
993
994 /*
995 * Before removing the station from the driver and
996 * rate control, it might still start new aggregation
997 * sessions -- block that to make sure the tear-down
998 * will be sufficient.
999 */
1000 set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1001 ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
1002
1003 /*
1004 * Before removing the station from the driver there might be pending
1005 * rx frames on RSS queues sent prior to the disassociation - wait for
1006 * all such frames to be processed.
1007 */
1008 drv_sync_rx_queues(local, sta);
1009
1010 ret = sta_info_hash_del(local, sta);
1011 if (WARN_ON(ret))
1012 return ret;
1013
1014 /*
1015 * for TDLS peers, make sure to return to the base channel before
1016 * removal.
1017 */
1018 if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
1019 drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
1020 clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
1021 }
1022
1023 list_del_rcu(&sta->list);
1024 sta->removed = true;
1025
1026 drv_sta_pre_rcu_remove(local, sta->sdata, sta);
1027
1028 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1029 rcu_access_pointer(sdata->u.vlan.sta) == sta)
1030 RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
1031
1032 return 0;
1033}
1034
1035static void __sta_info_destroy_part2(struct sta_info *sta)
1036{
1037 struct ieee80211_local *local = sta->local;
1038 struct ieee80211_sub_if_data *sdata = sta->sdata;
1039 struct station_info *sinfo;
1040 int ret;
1041
1042 /*
1043 * NOTE: This assumes at least synchronize_net() was done
1044 * after _part1 and before _part2!
1045 */
1046
1047 might_sleep();
1048 lockdep_assert_held(&local->sta_mtx);
1049
1050 if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
1051 ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
1052 WARN_ON_ONCE(ret);
1053 }
1054
1055 /* now keys can no longer be reached */
1056 ieee80211_free_sta_keys(local, sta);
1057
1058 /* disable TIM bit - last chance to tell driver */
1059 __sta_info_recalc_tim(sta, true);
1060
1061 sta->dead = true;
1062
1063 local->num_sta--;
1064 local->sta_generation++;
1065
1066 while (sta->sta_state > IEEE80211_STA_NONE) {
1067 ret = sta_info_move_state(sta, sta->sta_state - 1);
1068 if (ret) {
1069 WARN_ON_ONCE(1);
1070 break;
1071 }
1072 }
1073
1074 if (sta->uploaded) {
1075 ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
1076 IEEE80211_STA_NOTEXIST);
1077 WARN_ON_ONCE(ret != 0);
1078 }
1079
1080 sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
1081
1082 sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
1083 if (sinfo)
1084 sta_set_sinfo(sta, sinfo, true);
1085 cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
1086 kfree(sinfo);
1087
1088 ieee80211_sta_debugfs_remove(sta);
1089
1090 ieee80211_destroy_frag_cache(&sta->frags);
1091
1092 cleanup_single_sta(sta);
1093}
1094
1095int __must_check __sta_info_destroy(struct sta_info *sta)
1096{
1097 int err = __sta_info_destroy_part1(sta);
1098
1099 if (err)
1100 return err;
1101
1102 synchronize_net();
1103
1104 __sta_info_destroy_part2(sta);
1105
1106 return 0;
1107}
1108
1109int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
1110{
1111 struct sta_info *sta;
1112 int ret;
1113
1114 mutex_lock(&sdata->local->sta_mtx);
1115 sta = sta_info_get(sdata, addr);
1116 ret = __sta_info_destroy(sta);
1117 mutex_unlock(&sdata->local->sta_mtx);
1118
1119 return ret;
1120}
1121
1122int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
1123 const u8 *addr)
1124{
1125 struct sta_info *sta;
1126 int ret;
1127
1128 mutex_lock(&sdata->local->sta_mtx);
1129 sta = sta_info_get_bss(sdata, addr);
1130 ret = __sta_info_destroy(sta);
1131 mutex_unlock(&sdata->local->sta_mtx);
1132
1133 return ret;
1134}
1135
1136static void sta_info_cleanup(struct timer_list *t)
1137{
1138 struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
1139 struct sta_info *sta;
1140 bool timer_needed = false;
1141
1142 rcu_read_lock();
1143 list_for_each_entry_rcu(sta, &local->sta_list, list)
1144 if (sta_info_cleanup_expire_buffered(local, sta))
1145 timer_needed = true;
1146 rcu_read_unlock();
1147
1148 if (local->quiescing)
1149 return;
1150
1151 if (!timer_needed)
1152 return;
1153
1154 mod_timer(&local->sta_cleanup,
1155 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
1156}
1157
1158int sta_info_init(struct ieee80211_local *local)
1159{
1160 int err;
1161
1162 err = rhltable_init(&local->sta_hash, &sta_rht_params);
1163 if (err)
1164 return err;
1165
1166 spin_lock_init(&local->tim_lock);
1167 mutex_init(&local->sta_mtx);
1168 INIT_LIST_HEAD(&local->sta_list);
1169
1170 timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
1171 return 0;
1172}
1173
1174void sta_info_stop(struct ieee80211_local *local)
1175{
1176 del_timer_sync(&local->sta_cleanup);
1177 rhltable_destroy(&local->sta_hash);
1178}
1179
1180
1181int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
1182{
1183 struct ieee80211_local *local = sdata->local;
1184 struct sta_info *sta, *tmp;
1185 LIST_HEAD(free_list);
1186 int ret = 0;
1187
1188 might_sleep();
1189
1190 WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
1191 WARN_ON(vlans && !sdata->bss);
1192
1193 mutex_lock(&local->sta_mtx);
1194 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
1195 if (sdata == sta->sdata ||
1196 (vlans && sdata->bss == sta->sdata->bss)) {
1197 if (!WARN_ON(__sta_info_destroy_part1(sta)))
1198 list_add(&sta->free_list, &free_list);
1199 ret++;
1200 }
1201 }
1202
1203 if (!list_empty(&free_list)) {
1204 synchronize_net();
1205 list_for_each_entry_safe(sta, tmp, &free_list, free_list)
1206 __sta_info_destroy_part2(sta);
1207 }
1208 mutex_unlock(&local->sta_mtx);
1209
1210 return ret;
1211}
1212
1213void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
1214 unsigned long exp_time)
1215{
1216 struct ieee80211_local *local = sdata->local;
1217 struct sta_info *sta, *tmp;
1218
1219 mutex_lock(&local->sta_mtx);
1220
1221 list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
1222 unsigned long last_active = ieee80211_sta_last_active(sta);
1223
1224 if (sdata != sta->sdata)
1225 continue;
1226
1227 if (time_is_before_jiffies(last_active + exp_time)) {
1228 sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
1229 sta->sta.addr);
1230
1231 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1232 test_sta_flag(sta, WLAN_STA_PS_STA))
1233 atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
1234
1235 WARN_ON(__sta_info_destroy(sta));
1236 }
1237 }
1238
1239 mutex_unlock(&local->sta_mtx);
1240}
1241
1242struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
1243 const u8 *addr,
1244 const u8 *localaddr)
1245{
1246 struct ieee80211_local *local = hw_to_local(hw);
1247 struct rhlist_head *tmp;
1248 struct sta_info *sta;
1249
1250 /*
1251 * Just return a random station if localaddr is NULL
1252 * ... first in list.
1253 */
1254 for_each_sta_info(local, addr, sta, tmp) {
1255 if (localaddr &&
1256 !ether_addr_equal(sta->sdata->vif.addr, localaddr))
1257 continue;
1258 if (!sta->uploaded)
1259 return NULL;
1260 return &sta->sta;
1261 }
1262
1263 return NULL;
1264}
1265EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);
1266
1267struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
1268 const u8 *addr)
1269{
1270 struct sta_info *sta;
1271
1272 if (!vif)
1273 return NULL;
1274
1275 sta = sta_info_get_bss(vif_to_sdata(vif), addr);
1276 if (!sta)
1277 return NULL;
1278
1279 if (!sta->uploaded)
1280 return NULL;
1281
1282 return &sta->sta;
1283}
1284EXPORT_SYMBOL(ieee80211_find_sta);
1285
1286/* powersave support code */
1287void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1288{
1289 struct ieee80211_sub_if_data *sdata = sta->sdata;
1290 struct ieee80211_local *local = sdata->local;
1291 struct sk_buff_head pending;
1292 int filtered = 0, buffered = 0, ac, i;
1293 unsigned long flags;
1294 struct ps_data *ps;
1295
1296 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1297 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1298 u.ap);
1299
1300 if (sdata->vif.type == NL80211_IFTYPE_AP)
1301 ps = &sdata->bss->ps;
1302 else if (ieee80211_vif_is_mesh(&sdata->vif))
1303 ps = &sdata->u.mesh.ps;
1304 else
1305 return;
1306
1307 clear_sta_flag(sta, WLAN_STA_SP);
1308
1309 BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
1310 sta->driver_buffered_tids = 0;
1311 sta->txq_buffered_tids = 0;
1312
1313 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1314 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
1315
1316 for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
1317 if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
1318 continue;
1319
1320 schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
1321 }
1322
1323 skb_queue_head_init(&pending);
1324
1325 /* sync with ieee80211_tx_h_unicast_ps_buf */
1326 spin_lock(&sta->ps_lock);
1327 /* Send all buffered frames to the station */
1328 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1329 int count = skb_queue_len(&pending), tmp;
1330
1331 spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
1332 skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
1333 spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
1334 tmp = skb_queue_len(&pending);
1335 filtered += tmp - count;
1336 count = tmp;
1337
1338 spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
1339 skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
1340 spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
1341 tmp = skb_queue_len(&pending);
1342 buffered += tmp - count;
1343 }
1344
1345 ieee80211_add_pending_skbs(local, &pending);
1346
1347 /* now we're no longer in the deliver code */
1348 clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
1349
1350 /* The station might have polled and then woken up before we responded,
1351 * so clear these flags now to avoid them sticking around.
1352 */
1353 clear_sta_flag(sta, WLAN_STA_PSPOLL);
1354 clear_sta_flag(sta, WLAN_STA_UAPSD);
1355 spin_unlock(&sta->ps_lock);
1356
1357 atomic_dec(&ps->num_sta_ps);
1358
1359 /* This station just woke up and isn't aware of our SMPS state */
1360 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
1361 !ieee80211_smps_is_restrictive(sta->known_smps_mode,
1362 sdata->smps_mode) &&
1363 sta->known_smps_mode != sdata->bss->req_smps &&
1364 sta_info_tx_streams(sta) != 1) {
1365 ht_dbg(sdata,
1366 "%pM just woke up and MIMO capable - update SMPS\n",
1367 sta->sta.addr);
1368 ieee80211_send_smps_action(sdata, sdata->bss->req_smps,
1369 sta->sta.addr,
1370 sdata->vif.bss_conf.bssid);
1371 }
1372
1373 local->total_ps_buffered -= buffered;
1374
1375 sta_info_recalc_tim(sta);
1376
1377 ps_dbg(sdata,
1378 "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
1379 sta->sta.addr, sta->sta.aid, filtered, buffered);
1380
1381 ieee80211_check_fast_xmit(sta);
1382}
1383
1384static void ieee80211_send_null_response(struct sta_info *sta, int tid,
1385 enum ieee80211_frame_release_type reason,
1386 bool call_driver, bool more_data)
1387{
1388 struct ieee80211_sub_if_data *sdata = sta->sdata;
1389 struct ieee80211_local *local = sdata->local;
1390 struct ieee80211_qos_hdr *nullfunc;
1391 struct sk_buff *skb;
1392 int size = sizeof(*nullfunc);
1393 __le16 fc;
1394 bool qos = sta->sta.wme;
1395 struct ieee80211_tx_info *info;
1396 struct ieee80211_chanctx_conf *chanctx_conf;
1397
1398 if (qos) {
1399 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1400 IEEE80211_STYPE_QOS_NULLFUNC |
1401 IEEE80211_FCTL_FROMDS);
1402 } else {
1403 size -= 2;
1404 fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1405 IEEE80211_STYPE_NULLFUNC |
1406 IEEE80211_FCTL_FROMDS);
1407 }
1408
1409 skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
1410 if (!skb)
1411 return;
1412
1413 skb_reserve(skb, local->hw.extra_tx_headroom);
1414
1415 nullfunc = skb_put(skb, size);
1416 nullfunc->frame_control = fc;
1417 nullfunc->duration_id = 0;
1418 memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1419 memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1420 memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1421 nullfunc->seq_ctrl = 0;
1422
1423 skb->priority = tid;
1424 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
1425 if (qos) {
1426 nullfunc->qos_ctrl = cpu_to_le16(tid);
1427
1428 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
1429 nullfunc->qos_ctrl |=
1430 cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
1431 if (more_data)
1432 nullfunc->frame_control |=
1433 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1434 }
1435 }
1436
1437 info = IEEE80211_SKB_CB(skb);
1438
1439 /*
1440 * Tell TX path to send this frame even though the
1441 * STA may still remain in PS mode after this frame
1442 * exchange. Also set EOSP to indicate this packet
1443 * ends the poll/service period.
1444 */
1445 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
1446 IEEE80211_TX_STATUS_EOSP |
1447 IEEE80211_TX_CTL_REQ_TX_STATUS;
1448
1449 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
1450
1451 if (call_driver)
1452 drv_allow_buffered_frames(local, sta, BIT(tid), 1,
1453 reason, false);
1454
1455 skb->dev = sdata->dev;
1456
1457 rcu_read_lock();
1458 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1459 if (WARN_ON(!chanctx_conf)) {
1460 rcu_read_unlock();
1461 kfree_skb(skb);
1462 return;
1463 }
1464
1465 info->band = chanctx_conf->def.chan->band;
1466 ieee80211_xmit(sdata, sta, skb, 0);
1467 rcu_read_unlock();
1468}
1469
1470static int find_highest_prio_tid(unsigned long tids)
1471{
1472 /* lower 3 TIDs aren't ordered perfectly */
1473 if (tids & 0xF8)
1474 return fls(tids) - 1;
1475 /* TID 0 is BE just like TID 3 */
1476 if (tids & BIT(0))
1477 return 0;
1478 return fls(tids) - 1;
1479}
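
/*
 * Examples: tids == BIT(4) | BIT(1) has bits above 0x07 set, so the
 * highest TID (4) wins; tids == BIT(1) | BIT(0) does not, and TID 0
 * (BE, same AC as TID 3) is preferred over TID 1 (BK); only for
 * something like tids == BIT(1) | BIT(2) does fls() end up picking TID 2.
 */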
1480
1481/* Indicates if the MORE_DATA bit should be set in the last
1482 * frame obtained by ieee80211_sta_ps_get_frames.
1483 * Note that driver_release_tids is relevant only if
1484 * reason = IEEE80211_FRAME_RELEASE_PSPOLL
1485 */
1486static bool
1487ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
1488 enum ieee80211_frame_release_type reason,
1489 unsigned long driver_release_tids)
1490{
1491 int ac;
1492
1493 /* If the driver has data on more than one TID then
1494 * certainly there's more data if we release just a
1495 * single frame now (from a single TID). This will
1496 * only happen for PS-Poll.
1497 */
1498 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
1499 hweight16(driver_release_tids) > 1)
1500 return true;
1501
1502 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1503 if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
1504 continue;
1505
1506 if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1507 !skb_queue_empty(&sta->ps_tx_buf[ac]))
1508 return true;
1509 }
1510
1511 return false;
1512}
1513
1514static void
1515ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
1516 enum ieee80211_frame_release_type reason,
1517 struct sk_buff_head *frames,
1518 unsigned long *driver_release_tids)
1519{
1520 struct ieee80211_sub_if_data *sdata = sta->sdata;
1521 struct ieee80211_local *local = sdata->local;
1522 int ac;
1523
1524 /* Get response frame(s) and more data bit for the last one. */
1525 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1526 unsigned long tids;
1527
1528 if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
1529 continue;
1530
1531 tids = ieee80211_tids_for_ac(ac);
1532
1533 /* if we already have frames from software, then we can't also
1534 * release from hardware queues
1535 */
1536 if (skb_queue_empty(frames)) {
1537 *driver_release_tids |=
1538 sta->driver_buffered_tids & tids;
1539 *driver_release_tids |= sta->txq_buffered_tids & tids;
1540 }
1541
1542 if (!*driver_release_tids) {
1543 struct sk_buff *skb;
1544
1545 while (n_frames > 0) {
1546 skb = skb_dequeue(&sta->tx_filtered[ac]);
1547 if (!skb) {
1548 skb = skb_dequeue(
1549 &sta->ps_tx_buf[ac]);
1550 if (skb)
1551 local->total_ps_buffered--;
1552 }
1553 if (!skb)
1554 break;
1555 n_frames--;
1556 __skb_queue_tail(frames, skb);
1557 }
1558 }
1559
1560 /* If we have more frames buffered on this AC, then abort the
1561 * loop since we can't send more data from other ACs before
1562 * the buffered frames from this.
1563 */
1564 if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1565 !skb_queue_empty(&sta->ps_tx_buf[ac]))
1566 break;
1567 }
1568}
1569
1570static void
1571ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1572 int n_frames, u8 ignored_acs,
1573 enum ieee80211_frame_release_type reason)
1574{
1575 struct ieee80211_sub_if_data *sdata = sta->sdata;
1576 struct ieee80211_local *local = sdata->local;
1577 unsigned long driver_release_tids = 0;
1578 struct sk_buff_head frames;
1579 bool more_data;
1580
1581 /* Service or PS-Poll period starts */
1582 set_sta_flag(sta, WLAN_STA_SP);
1583
1584 __skb_queue_head_init(&frames);
1585
1586 ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
1587 &frames, &driver_release_tids);
1588
1589 more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
1590
1591 if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
1592 driver_release_tids =
1593 BIT(find_highest_prio_tid(driver_release_tids));
1594
1595 if (skb_queue_empty(&frames) && !driver_release_tids) {
1596 int tid, ac;
1597
1598 /*
1599 * For PS-Poll, this can only happen due to a race condition
1600 * when we set the TIM bit and the station notices it, but
1601 * before it can poll for the frame we expire it.
1602 *
1603 * For uAPSD, this is said in the standard (11.2.1.5 h):
1604 * At each unscheduled SP for a non-AP STA, the AP shall
1605 * attempt to transmit at least one MSDU or MMPDU, but no
1606 * more than the value specified in the Max SP Length field
1607 * in the QoS Capability element from delivery-enabled ACs,
1608 * that are destined for the non-AP STA.
1609 *
1610 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
1611 */
1612
1613 /* This will evaluate to 1, 3, 5 or 7. */
1614 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
1615 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
1616 break;
1617 tid = 7 - 2 * ac;
1618
1619 ieee80211_send_null_response(sta, tid, reason, true, false);
1620 } else if (!driver_release_tids) {
1621 struct sk_buff_head pending;
1622 struct sk_buff *skb;
1623 int num = 0;
1624 u16 tids = 0;
1625 bool need_null = false;
1626
1627 skb_queue_head_init(&pending);
1628
1629 while ((skb = __skb_dequeue(&frames))) {
1630 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1631 struct ieee80211_hdr *hdr = (void *) skb->data;
1632 u8 *qoshdr = NULL;
1633
1634 num++;
1635
1636 /*
1637 * Tell TX path to send this frame even though the
1638 * STA may still remain in PS mode after this frame
1639 * exchange.
1640 */
1641 info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1642 info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
1643
1644 /*
1645 * Use MoreData flag to indicate whether there are
1646 * more buffered frames for this STA
1647 */
1648 if (more_data || !skb_queue_empty(&frames))
1649 hdr->frame_control |=
1650 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1651 else
1652 hdr->frame_control &=
1653 cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1654
1655 if (ieee80211_is_data_qos(hdr->frame_control) ||
1656 ieee80211_is_qos_nullfunc(hdr->frame_control))
1657 qoshdr = ieee80211_get_qos_ctl(hdr);
1658
1659 tids |= BIT(skb->priority);
1660
1661 __skb_queue_tail(&pending, skb);
1662
1663 /* end service period after last frame or add one */
1664 if (!skb_queue_empty(&frames))
1665 continue;
1666
1667 if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
1668 /* for PS-Poll, there's only one frame */
1669 info->flags |= IEEE80211_TX_STATUS_EOSP |
1670 IEEE80211_TX_CTL_REQ_TX_STATUS;
1671 break;
1672 }
1673
1674 /* For uAPSD, things are a bit more complicated. If the
1675 * last frame has a QoS header (i.e. is a QoS-data or
1676 * QoS-nulldata frame) then just set the EOSP bit there
1677 * and be done.
1678 * If the frame doesn't have a QoS header (which means
1679 * it should be a bufferable MMPDU) then we can't set
1680 * the EOSP bit in the QoS header; add a QoS-nulldata
1681 * frame to the list to send it after the MMPDU.
1682 *
1683 * Note that this code is only in the mac80211-release
1684 * code path, we assume that the driver will not buffer
1685 * anything but QoS-data frames, or if it does, will
1686 * create the QoS-nulldata frame by itself if needed.
1687 *
1688 * Cf. 802.11-2012 10.2.1.10 (c).
1689 */
1690 if (qoshdr) {
1691 *qoshdr |= IEEE80211_QOS_CTL_EOSP;
1692
1693 info->flags |= IEEE80211_TX_STATUS_EOSP |
1694 IEEE80211_TX_CTL_REQ_TX_STATUS;
1695 } else {
1696 /* The standard isn't completely clear on this
1697 * as it says the more-data bit should be set
1698 * if there are more BUs. The QoS-Null frame
1699 * we're about to send isn't buffered yet, we
1700 * only create it below, but let's pretend it
1701 * was buffered just in case some clients only
1702 * expect more-data=0 when eosp=1.
1703 */
1704 hdr->frame_control |=
1705 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1706 need_null = true;
1707 num++;
1708 }
1709 break;
1710 }
1711
1712 drv_allow_buffered_frames(local, sta, tids, num,
1713 reason, more_data);
1714
1715 ieee80211_add_pending_skbs(local, &pending);
1716
1717 if (need_null)
1718 ieee80211_send_null_response(
1719 sta, find_highest_prio_tid(tids),
1720 reason, false, false);
1721
1722 sta_info_recalc_tim(sta);
1723 } else {
1724 int tid;
1725
1726 /*
1727 * We need to release a frame that is buffered somewhere in the
1728 * driver ... it'll have to handle that.
1729 * Note that the driver also has to check the number of frames
1730 * on the TIDs we're releasing from - if there are more than
1731 * n_frames it has to set the more-data bit (if we didn't ask
1732 * it to set it anyway due to other buffered frames); if there
1733 * are fewer than n_frames it has to make sure to adjust that
1734 * to allow the service period to end properly.
1735 */
1736 drv_release_buffered_frames(local, sta, driver_release_tids,
1737 n_frames, reason, more_data);
1738
1739 /*
1740 * Note that we don't recalculate the TIM bit here as it would
1741 * most likely have no effect at all unless the driver told us
1742 * that the TID(s) became empty before returning here from the
1743 * release function.
1744 * Either way, however, when the driver tells us that the TID(s)
1745 * became empty or we find that a txq became empty, we'll do the
1746 * TIM recalculation.
1747 */
1748
1749 if (!sta->sta.txq[0])
1750 return;
1751
1752 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1753 if (!sta->sta.txq[tid] ||
1754 !(driver_release_tids & BIT(tid)) ||
1755 txq_has_queue(sta->sta.txq[tid]))
1756 continue;
1757
1758 sta_info_recalc_tim(sta);
1759 break;
1760 }
1761 }
1762}
1763
1764void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
1765{
1766 u8 ignore_for_response = sta->sta.uapsd_queues;
1767
1768 /*
1769 * If all ACs are delivery-enabled then we should reply
1770 * from any of them, if only some are enabled we reply
1771 * only from the non-enabled ones.
1772 */
1773 if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
1774 ignore_for_response = 0;
1775
1776 ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
1777 IEEE80211_FRAME_RELEASE_PSPOLL);
1778}
1779
1780void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
1781{
1782 int n_frames = sta->sta.max_sp;
1783 u8 delivery_enabled = sta->sta.uapsd_queues;
1784
1785 /*
1786 * If we ever grow support for TSPEC this might happen if
1787 * the TSPEC update from hostapd comes in between a trigger
1788 * frame setting WLAN_STA_UAPSD in the RX path and this
1789 * actually getting called.
1790 */
1791 if (!delivery_enabled)
1792 return;
1793
1794 switch (sta->sta.max_sp) {
1795 case 1:
1796 n_frames = 2;
1797 break;
1798 case 2:
1799 n_frames = 4;
1800 break;
1801 case 3:
1802 n_frames = 6;
1803 break;
1804 case 0:
1805 /* XXX: what is a good value? */
1806 n_frames = 128;
1807 break;
1808 }
1809
1810 ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
1811 IEEE80211_FRAME_RELEASE_UAPSD);
1812}
1813
1814void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
1815 struct ieee80211_sta *pubsta, bool block)
1816{
1817 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1818
1819 trace_api_sta_block_awake(sta->local, pubsta, block);
1820
1821 if (block) {
1822 set_sta_flag(sta, WLAN_STA_PS_DRIVER);
1823 ieee80211_clear_fast_xmit(sta);
1824 return;
1825 }
1826
1827 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1828 return;
1829
1830 if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
1831 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1832 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1833 ieee80211_queue_work(hw, &sta->drv_deliver_wk);
1834 } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
1835 test_sta_flag(sta, WLAN_STA_UAPSD)) {
1836 /* must be asleep in this case */
1837 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1838 ieee80211_queue_work(hw, &sta->drv_deliver_wk);
1839 } else {
1840 clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1841 ieee80211_check_fast_xmit(sta);
1842 }
1843}
1844EXPORT_SYMBOL(ieee80211_sta_block_awake);
1845
1846void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
1847{
1848 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1849 struct ieee80211_local *local = sta->local;
1850
1851 trace_api_eosp(local, pubsta);
1852
1853 clear_sta_flag(sta, WLAN_STA_SP);
1854}
1855EXPORT_SYMBOL(ieee80211_sta_eosp);
1856
1857void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
1858{
1859 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1860 enum ieee80211_frame_release_type reason;
1861 bool more_data;
1862
1863 trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);
1864
1865 reason = IEEE80211_FRAME_RELEASE_UAPSD;
1866 more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
1867 reason, 0);
1868
1869 ieee80211_send_null_response(sta, tid, reason, false, more_data);
1870}
1871EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);
1872
1873void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1874 u8 tid, bool buffered)
1875{
1876 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1877
1878 if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
1879 return;
1880
1881 trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);
1882
1883 if (buffered)
1884 set_bit(tid, &sta->driver_buffered_tids);
1885 else
1886 clear_bit(tid, &sta->driver_buffered_tids);
1887
1888 sta_info_recalc_tim(sta);
1889}
1890EXPORT_SYMBOL(ieee80211_sta_set_buffered);

void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
				    u32 tx_airtime, u32 rx_airtime)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_local *local = sta->sdata->local;
	u8 ac = ieee80211_ac_from_tid(tid);
	u32 airtime = 0;

	if (sta->local->airtime_flags & AIRTIME_USE_TX)
		airtime += tx_airtime;
	if (sta->local->airtime_flags & AIRTIME_USE_RX)
		airtime += rx_airtime;

	spin_lock_bh(&local->active_txq_lock[ac]);
	sta->airtime[ac].tx_airtime += tx_airtime;
	sta->airtime[ac].rx_airtime += rx_airtime;
	sta->airtime[ac].deficit -= airtime;
	spin_unlock_bh(&local->active_txq_lock[ac]);
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);
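/*
 * Usage sketch for the airtime scheduler: a driver that can measure
 * per-frame airtime reports it against the TID the frames were sent or
 * received on, typically from its TX-status and RX completion paths. The
 * values are assumed here to be in microseconds, matching the scheduler's
 * deficit accounting; the variable names are made up, only
 * ieee80211_sta_register_airtime() is real API:
 *
 *	// TX completion: charge the station's AC deficit
 *	ieee80211_sta_register_airtime(pubsta, tid, tx_usecs, 0);
 *
 *	// RX path: account receive airtime the same way
 *	ieee80211_sta_register_airtime(pubsta, tid, 0, rx_usecs);
 */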

int sta_info_move_state(struct sta_info *sta,
			enum ieee80211_sta_state new_state)
{
	might_sleep();

	if (sta->sta_state == new_state)
		return 0;

	/* check allowed transitions first */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state != IEEE80211_STA_AUTH)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state != IEEE80211_STA_NONE &&
		    sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state != IEEE80211_STA_AUTH &&
		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
			return -EINVAL;
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state != IEEE80211_STA_ASSOC)
			return -EINVAL;
		break;
	default:
		WARN(1, "invalid state %d", new_state);
		return -EINVAL;
	}

	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
		sta->sta.addr, new_state);

	/*
	 * notify the driver before the actual changes so it can
	 * fail the transition
	 */
	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
		int err = drv_sta_state(sta->local, sta->sdata, sta,
					sta->sta_state, new_state);
		if (err)
			return err;
	}

	/* reflect the change in all state variables */

	switch (new_state) {
	case IEEE80211_STA_NONE:
		if (sta->sta_state == IEEE80211_STA_AUTH)
			clear_bit(WLAN_STA_AUTH, &sta->_flags);
		break;
	case IEEE80211_STA_AUTH:
		if (sta->sta_state == IEEE80211_STA_NONE) {
			set_bit(WLAN_STA_AUTH, &sta->_flags);
		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		}
		break;
	case IEEE80211_STA_ASSOC:
		if (sta->sta_state == IEEE80211_STA_AUTH) {
			set_bit(WLAN_STA_ASSOC, &sta->_flags);
			sta->assoc_at = ktime_get_boottime_ns();
			ieee80211_recalc_min_chandef(sta->sdata);
			if (!sta->sta.support_p2p_ps)
				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
			ieee80211_vif_dec_num_mcast(sta->sdata);
			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_clear_fast_xmit(sta);
			ieee80211_clear_fast_rx(sta);
		}
		break;
	case IEEE80211_STA_AUTHORIZED:
		if (sta->sta_state == IEEE80211_STA_ASSOC) {
			ieee80211_vif_inc_num_mcast(sta->sdata);
			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
			ieee80211_check_fast_xmit(sta);
			ieee80211_check_fast_rx(sta);
		}
		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
			cfg80211_send_layer2_update(sta->sdata->dev,
						    sta->sta.addr);
		break;
	default:
		break;
	}

	sta->sta_state = new_state;

	return 0;
}
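/*
 * The allowed transitions above form a simple linear state machine, with
 * each call moving exactly one step:
 *
 *	NONE <-> AUTH <-> ASSOC <-> AUTHORIZED
 *
 * Every step is first offered to the driver via drv_sta_state() (for
 * stations that are already inserted), so the driver can refuse it; only
 * then are the mac80211-side flags, fast-xmit/fast-rx state and multicast
 * counters updated to match.
 */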

u8 sta_info_tx_streams(struct sta_info *sta)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
	u8 rx_streams;

	if (!sta->sta.ht_cap.ht_supported)
		return 1;

	if (sta->sta.vht_cap.vht_supported) {
		int i;
		u16 tx_mcs_map =
			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);
		for (i = 7; i >= 0; i--)
			if (((tx_mcs_map >> (i * 2)) & 0x3) !=
			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
				return i + 1;
	}

	if (ht_cap->mcs.rx_mask[3])
		rx_streams = 4;
	else if (ht_cap->mcs.rx_mask[2])
		rx_streams = 3;
	else if (ht_cap->mcs.rx_mask[1])
		rx_streams = 2;
	else
		rx_streams = 1;

	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
		return rx_streams;

	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
}
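/*
 * The VHT TX MCS map packs one 2-bit field per spatial stream, stream 1
 * in the lowest bits, and the value 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED)
 * marks an unused stream. For example, a tx_mcs_map of 0xfffa advertises
 * MCS 0-9 on streams 1 and 2 and nothing above that, i.e. two usable TX
 * streams.
 */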

static struct ieee80211_sta_rx_stats *
sta_get_last_rx_stats(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
	struct ieee80211_local *local = sta->local;
	int cpu;

	if (!ieee80211_hw_check(&local->hw, USES_RSS))
		return stats;

	for_each_possible_cpu(cpu) {
		struct ieee80211_sta_rx_stats *cpustats;

		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);

		if (time_after(cpustats->last_rx, stats->last_rx))
			stats = cpustats;
	}

	return stats;
}

static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
				  struct rate_info *rinfo)
{
	rinfo->bw = STA_STATS_GET(BW, rate);

	switch (STA_STATS_GET(TYPE, rate)) {
	case STA_STATS_RATE_TYPE_VHT:
		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
		rinfo->mcs = STA_STATS_GET(VHT_MCS, rate);
		rinfo->nss = STA_STATS_GET(VHT_NSS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_HT:
		rinfo->flags = RATE_INFO_FLAGS_MCS;
		rinfo->mcs = STA_STATS_GET(HT_MCS, rate);
		if (STA_STATS_GET(SGI, rate))
			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case STA_STATS_RATE_TYPE_LEGACY: {
		struct ieee80211_supported_band *sband;
		u16 brate;
		unsigned int shift;
		int band = STA_STATS_GET(LEGACY_BAND, rate);
		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);

		sband = local->hw.wiphy->bands[band];

		if (WARN_ON_ONCE(!sband->bitrates))
			break;

		brate = sband->bitrates[rate_idx].bitrate;
		if (rinfo->bw == RATE_INFO_BW_5)
			shift = 2;
		else if (rinfo->bw == RATE_INFO_BW_10)
			shift = 1;
		else
			shift = 0;
		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
		break;
	}
	case STA_STATS_RATE_TYPE_HE:
		rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
		rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
		rinfo->nss = STA_STATS_GET(HE_NSS, rate);
		rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
		rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
		rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
		break;
	}
}
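/*
 * The last RX rate is kept in a compact u32 encoding (accessed via the
 * STA_STATS_GET() bitfields) and expanded back into a cfg80211
 * struct rate_info here. For the legacy case the table bitrate is in
 * units of 100 kbps, so 5/10 MHz channels divide it by 4/2: e.g. a
 * 54 Mbps entry (brate == 540) reported on a 10 MHz channel becomes
 * rinfo->legacy == 270, i.e. 27 Mbps.
 */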

static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);

	if (rate == STA_STATS_RATE_INVALID)
		return -EINVAL;

	sta_stats_decode_rate(sta->local, rate, rinfo);
	return 0;
}

static void sta_set_tidstats(struct sta_info *sta,
			     struct cfg80211_tid_stats *tidstats,
			     int tid)
{
	struct ieee80211_local *local = sta->local;

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&sta->rx_stats.syncp);
			tidstats->rx_msdu = sta->rx_stats.msdu[tid];
		} while (u64_stats_fetch_retry(&sta->rx_stats.syncp, start));

		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
	}

	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
	}

	if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
		spin_lock_bh(&local->fq.lock);
		rcu_read_lock();

		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
		ieee80211_fill_txq_stats(&tidstats->txq_stats,
					 to_txq_info(sta->sta.txq[tid]));

		rcu_read_unlock();
		spin_unlock_bh(&local->fq.lock);
	}
}

static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
{
	unsigned int start;
	u64 value;

	do {
		start = u64_stats_fetch_begin(&rxstats->syncp);
		value = rxstats->bytes;
	} while (u64_stats_fetch_retry(&rxstats->syncp, start));

	return value;
}
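/*
 * Both readers above use the u64_stats seqcount pattern: the writer side
 * brackets its updates with u64_stats_update_begin()/_end(), and a reader
 * retries until it sees the same sequence count before and after copying,
 * so a 64-bit counter is never observed half-updated on 32-bit machines.
 * The same idiom in general form, for whatever structure holds the
 * u64_stats_sync:
 *
 *	unsigned int seq;
 *	u64 snapshot;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&stats->syncp);
 *		snapshot = stats->counter;
 *	} while (u64_stats_fetch_retry(&stats->syncp, seq));
 */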

void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
		   bool tidstats)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	u32 thr = 0;
	int i, ac, cpu;
	struct ieee80211_sta_rx_stats *last_rxstats;

	last_rxstats = sta_get_last_rx_stats(sta);

	sinfo->generation = sdata->local->sta_generation;

	/* do before driver, so beacon filtering drivers have a
	 * chance to e.g. just add the number of filtered beacons
	 * (or just modify the value entirely, of course)
	 */
	if (sdata->vif.type == NL80211_IFTYPE_STATION)
		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;

	drv_sta_statistics(local, sdata, &sta->sta, sinfo);

	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
			 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
			 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) |
			 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);

	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
	}

	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
	sinfo->assoc_at = sta->assoc_at;
	sinfo->inactive_time =
		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
		sinfo->tx_bytes = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
		sinfo->tx_packets = 0;
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_packets += sta->tx_stats.packets[ac];
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
	}

	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
		sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);

		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
			}
		}

		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
		sinfo->rx_packets = sta->rx_stats.packets;
		if (sta->pcpu_rx_stats) {
			for_each_possible_cpu(cpu) {
				struct ieee80211_sta_rx_stats *cpurxs;

				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
				sinfo->rx_packets += cpurxs->packets;
			}
		}
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
		sinfo->tx_retries = sta->status_stats.retry_count;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
		sinfo->tx_failed = sta->status_stats.retry_failed;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->rx_duration += sta->airtime[ac].rx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			sinfo->tx_duration += sta->airtime[ac].tx_airtime;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
		sinfo->airtime_weight = sta->airtime_weight;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
	}
	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
	if (sta->pcpu_rx_stats) {
		for_each_possible_cpu(cpu) {
			struct ieee80211_sta_rx_stats *cpurxs;

			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
			sinfo->rx_dropped_misc += cpurxs->dropped;
		}
	}

	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
				 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
	}

	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
		if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
			sinfo->signal = (s8)last_rxstats->last_signal;
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
		}

		if (!sta->pcpu_rx_stats &&
		    !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
			sinfo->signal_avg =
				-ewma_signal_read(&sta->rx_stats_avg.signal);
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
		}
	}

	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
	 * the sta->rx_stats struct, so the check here is fine with and without
	 * pcpu statistics
	 */
	if (last_rxstats->chains &&
	    !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
			       BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
		if (!sta->pcpu_rx_stats)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);

		sinfo->chains = last_rxstats->chains;

		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
			sinfo->chain_signal[i] =
				last_rxstats->chain_signal_last[i];
			sinfo->chain_signal_avg[i] =
				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
		}
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
				     &sinfo->txrate);
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
		if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
	}

	if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
			sta_set_tidstats(sta, &sinfo->pertid[i], i);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
				 BIT_ULL(NL80211_STA_INFO_PLID) |
				 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
				 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
				 BIT_ULL(NL80211_STA_INFO_PEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE);

		sinfo->llid = sta->mesh->llid;
		sinfo->plid = sta->mesh->plid;
		sinfo->plink_state = sta->mesh->plink_state;
		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
			sinfo->t_offset = sta->mesh->t_offset;
		}
		sinfo->local_pm = sta->mesh->local_pm;
		sinfo->peer_pm = sta->mesh->peer_pm;
		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
		sinfo->connected_to_gate = sta->mesh->connected_to_gate;
#endif
	}

	sinfo->bss_param.flags = 0;
	if (sdata->vif.bss_conf.use_cts_prot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
	if (sdata->vif.bss_conf.use_short_preamble)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
	if (sdata->vif.bss_conf.use_short_slot)
		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;

	sinfo->sta_flags.set = 0;
	sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
				BIT(NL80211_STA_FLAG_WME) |
				BIT(NL80211_STA_FLAG_MFP) |
				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				BIT(NL80211_STA_FLAG_ASSOCIATED) |
				BIT(NL80211_STA_FLAG_TDLS_PEER);
	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
	if (sta->sta.wme)
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
	if (test_sta_flag(sta, WLAN_STA_MFP))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
	if (test_sta_flag(sta, WLAN_STA_AUTH))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
	if (test_sta_flag(sta, WLAN_STA_ASSOC))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);

	thr = sta_get_expected_throughput(sta);

	if (thr != 0) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
		sinfo->expected_throughput = thr;
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->ack_signal = sta->status_stats.last_ack_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
	}

	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
	    sta->status_stats.ack_signal_filled) {
		sinfo->avg_ack_signal =
			-(s8)ewma_avg_signal_read(
				&sta->status_stats.avg_ack_signal);
		sinfo->filled |=
			BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
	}

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC);
		sinfo->airtime_link_metric =
			airtime_link_metric_get(local, sta);
	}
}
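/*
 * sta_set_sinfo() deliberately checks sinfo->filled before computing each
 * field: drv_sta_statistics() runs first, so a driver with better
 * information (e.g. firmware-reported signal or bitrate) can pre-fill a
 * field and set its NL80211_STA_INFO_* bit, and mac80211 then leaves that
 * value alone and only fills in the remaining gaps.
 */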

u32 sta_get_expected_throughput(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct rate_control_ref *ref = NULL;
	u32 thr = 0;

	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
		ref = local->rate_ctrl;

	/* check if the driver has a SW RC implementation */
	if (ref && ref->ops->get_expected_throughput)
		thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
	else
		thr = drv_get_expected_throughput(local, sta);

	return thr;
}

unsigned long ieee80211_sta_last_active(struct sta_info *sta)
{
	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);

	if (!sta->status_stats.last_ack ||
	    time_after(stats->last_rx, sta->status_stats.last_ack))
		return stats->last_rx;
	return sta->status_stats.last_ack;
}

static void sta_update_codel_params(struct sta_info *sta, u32 thr)
{
	if (!sta->sdata->local->ops->wake_tx_queue)
		return;

	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
		sta->cparams.target = MS2TIME(50);
		sta->cparams.interval = MS2TIME(300);
		sta->cparams.ecn = false;
	} else {
		sta->cparams.target = MS2TIME(20);
		sta->cparams.interval = MS2TIME(100);
		sta->cparams.ecn = true;
	}
}
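/*
 * The thresholds above implement a simple two-point CoDel tuning: a
 * station whose expected throughput falls below STA_SLOW_THRESHOLD scaled
 * by the number of known stations gets a more lenient 50 ms target and
 * 300 ms interval with ECN marking disabled, while everyone else keeps
 * the tighter 20 ms / 100 ms defaults with ECN enabled.
 */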

void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
					   u32 thr)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	sta_update_codel_params(sta, thr);
}