/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef IEEE80211_I_H
#define IEEE80211_I_H

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/leds.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/fq.h>
#include "key.h"
#include "sta_info.h"
#include "debug.h"

extern const struct cfg80211_ops mac80211_config_ops;

struct ieee80211_local;

/* Maximum number of broadcast/multicast frames to buffer when some of the
 * associated stations are using power saving. */
#define AP_MAX_BC_BUFFER 128

/* Maximum number of frames buffered to all STAs, including multicast frames.
 * Note: increasing this limit increases the potential memory requirement. Each
 * frame can be up to about 2 kB long. */
#define TOTAL_MAX_TX_BUFFER 512

/* Required encryption head and tailroom */
#define IEEE80211_ENCRYPT_HEADROOM 8
#define IEEE80211_ENCRYPT_TAILROOM 18

/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent
 * reception of at least three fragmented frames. This limit can be increased
 * by changing this define, at the cost of slower frame reassembly and
 * increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4

/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL	INT_MIN

/*
 * Some APs experience problems when working with U-APSD. Decreasing the
 * probability of that happening by using legacy mode for all ACs but VO isn't
 * enough.
 *
 * Cisco 4410N originally forced us to enable VO by default only because it
 * treated non-VO ACs as legacy.
 *
 * However some APs (notably Netgear R7000) silently reclassify packets to
 * different ACs. Since u-APSD ACs require trigger frames for frame retrieval,
 * clients would never see some frames (e.g. ARP responses) or would fetch them
 * accidentally after a long time.
 *
 * It makes little sense to enable u-APSD queues by default because it needs
 * userspace applications to be aware of it to actually take advantage of the
 * possible additional powersavings. Implicitly depending on driver autotrigger
 * frame support doesn't make much sense.
 */
#define IEEE80211_DEFAULT_UAPSD_QUEUES 0

#define IEEE80211_DEFAULT_MAX_SP_LEN		\
	IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL

extern const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS];

#define IEEE80211_DEAUTH_FRAME_LEN	(24 /* hdr */ + 2 /* reason */)

#define IEEE80211_MAX_NAN_INSTANCE_ID 255

struct ieee80211_fragment_entry {
	struct sk_buff_head skb_list;
	unsigned long first_frag_time;
	u16 seq;
	u16 extra_len;
	u16 last_frag;
	u8 rx_queue;
	bool check_sequential_pn; /* needed for CCMP/GCMP */
	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
};


struct ieee80211_bss {
	u32 device_ts_beacon, device_ts_presp;

	bool wmm_used;
	bool uapsd_supported;

#define IEEE80211_MAX_SUPP_RATES 32
	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
	size_t supp_rates_len;
	struct ieee80211_rate *beacon_rate;

	/*
	 * During association, we save an ERP value from a probe response so
	 * that we can feed ERP info to the driver when the association
	 * completes. These fields probably won't be up-to-date otherwise,
	 * so you probably don't want to use them.
	 */
	bool has_erp_value;
	u8 erp_value;
	/* Keep track of the corruption of the last beacon/probe response. */
	u8 corrupt_data;

	/* Keep track of what bits of information we have valid info for. */
	u8 valid_data;
};

/**
 * enum ieee80211_corrupt_data_flags - BSS data corruption flags
 * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted
 * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted
 *
 * These are bss flags that are attached to a bss in the
 * @corrupt_data field of &struct ieee80211_bss.
 */
enum ieee80211_bss_corrupt_data_flags {
	IEEE80211_BSS_CORRUPT_BEACON		= BIT(0),
	IEEE80211_BSS_CORRUPT_PROBE_RESP	= BIT(1)
};

/**
 * enum ieee80211_valid_data_flags - BSS valid data flags
 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
 *
 * These are bss flags that are attached to a bss in the
 * @valid_data field of &struct ieee80211_bss. They show which parts
 * of the data structure were received as a result of an un-corrupted
 * beacon/probe response.
 */
enum ieee80211_bss_valid_data_flags {
	IEEE80211_BSS_VALID_WMM			= BIT(1),
	IEEE80211_BSS_VALID_RATES		= BIT(2),
	IEEE80211_BSS_VALID_ERP			= BIT(3)
};

typedef unsigned __bitwise ieee80211_tx_result;
#define TX_CONTINUE	((__force ieee80211_tx_result) 0u)
#define TX_DROP		((__force ieee80211_tx_result) 1u)
#define TX_QUEUED	((__force ieee80211_tx_result) 2u)

#define IEEE80211_TX_UNICAST		BIT(1)
#define IEEE80211_TX_PS_BUFFERED	BIT(2)

struct ieee80211_tx_data {
	struct sk_buff *skb;
	struct sk_buff_head skbs;
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_key *key;
	struct ieee80211_tx_rate rate;

	unsigned int flags;
};


typedef unsigned __bitwise ieee80211_rx_result;
#define RX_CONTINUE		((__force ieee80211_rx_result) 0u)
#define RX_DROP_UNUSABLE	((__force ieee80211_rx_result) 1u)
#define RX_DROP_MONITOR		((__force ieee80211_rx_result) 2u)
#define RX_QUEUED		((__force ieee80211_rx_result) 3u)

/**
 * enum ieee80211_packet_rx_flags - packet RX flags
 * @IEEE80211_RX_AMSDU: a-MSDU packet
 * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
 * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
 *
 * These are per-frame flags that are attached to a frame in the
 * @rx_flags field of &struct ieee80211_rx_status.
 */
enum ieee80211_packet_rx_flags {
	IEEE80211_RX_AMSDU			= BIT(3),
	IEEE80211_RX_MALFORMED_ACTION_FRM	= BIT(4),
	IEEE80211_RX_DEFERRED_RELEASE		= BIT(5),
};

/**
 * enum ieee80211_rx_flags - RX data flags
 *
 * @IEEE80211_RX_CMNTR: received on cooked monitor already
 * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
 *	to cfg80211_report_obss_beacon().
 *
 * These flags are used across handling multiple interfaces
 * for a single frame.
 */
enum ieee80211_rx_flags {
	IEEE80211_RX_CMNTR		= BIT(0),
	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
};

struct ieee80211_rx_data {
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct ieee80211_local *local;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;
	struct ieee80211_key *key;

	unsigned int flags;

	/*
	 * Index into the sequence numbers array, 0..16; the last entry
	 * (16) is shared by all non-QoS frames, so this will be 16 for
	 * non-QoS frames.
	 */
	int seqno_idx;

	/*
	 * Index into the security IV/PN arrays, 0..16; the last entry
	 * (16) is used for CCMP-encrypted management frames, so this will
	 * be 16 on management frames and 0 on other non-QoS frames.
	 */
	int security_idx;

	u32 tkip_iv32;
	u16 tkip_iv16;
};
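
/*
 * Editorial note (not part of the original header): as a rough sketch of
 * how the seqno_idx/security_idx fields above are meant to be filled, the
 * RX path does something along the lines of
 *
 *	if (ieee80211_is_data_qos(hdr->frame_control))
 *		idx = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 *	else
 *		idx = 16;	/- shared slot for non-QoS frames -/
 *
 * See the frame parsing in rx.c for the authoritative computation.
 */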

struct ieee80211_csa_settings {
	const u16 *counter_offsets_beacon;
	const u16 *counter_offsets_presp;

	int n_counter_offsets_beacon;
	int n_counter_offsets_presp;

	u8 count;
};

struct beacon_data {
	u8 *head, *tail;
	int head_len, tail_len;
	struct ieee80211_meshconf_ie *meshconf;
	u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
	u8 csa_current_counter;
	struct rcu_head rcu_head;
};

struct probe_resp {
	struct rcu_head rcu_head;
	int len;
	u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
	u8 data[0];
};

struct ps_data {
	/* yes, this looks ugly, but guarantees that we can later use
	 * bitmap_empty :)
	 * NB: don't touch this bitmap, use sta_info_{set,clear}_tim_bit */
	u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]
			__aligned(__alignof__(unsigned long));
	struct sk_buff_head bc_buf;
	atomic_t num_sta_ps; /* number of stations in PS mode */
	int dtim_count;
	bool dtim_bc_mc;
};
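
/*
 * Editorial note (not part of the original header): because the TIM array
 * above is sized in whole unsigned longs and aligned accordingly, callers
 * may treat it as a bitmap, e.g. (illustrative only):
 *
 *	bool tim_empty = bitmap_empty((unsigned long *)ps->tim,
 *				      IEEE80211_MAX_AID + 1);
 *
 * The AID bits themselves must only be changed through
 * sta_info_set_tim_bit()/sta_info_clear_tim_bit(), as noted above.
 */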

struct ieee80211_if_ap {
	struct beacon_data __rcu *beacon;
	struct probe_resp __rcu *probe_resp;

	/* to be used after channel switch. */
	struct cfg80211_beacon_data *next_beacon;
	struct list_head vlans; /* write-protected with RTNL and local->mtx */

	struct ps_data ps;
	atomic_t num_mcast_sta; /* number of stations receiving multicast */
	enum ieee80211_smps_mode req_smps, /* requested smps mode */
			 driver_smps_mode; /* smps mode request */

	struct work_struct request_smps_work;
	bool multicast_to_unicast;
};

struct ieee80211_if_wds {
	struct sta_info *sta;
	u8 remote_addr[ETH_ALEN];
};

struct ieee80211_if_vlan {
	struct list_head list; /* write-protected with RTNL and local->mtx */

	/* used for all tx if the VLAN is configured to 4-addr mode */
	struct sta_info __rcu *sta;
	atomic_t num_mcast_sta; /* number of stations receiving multicast */
};

struct mesh_stats {
	__u32 fwded_mcast;		/* Mesh forwarded multicast frames */
	__u32 fwded_unicast;		/* Mesh forwarded unicast frames */
	__u32 fwded_frames;		/* Mesh total forwarded frames */
	__u32 dropped_frames_ttl;	/* Not transmitted since mesh_ttl == 0 */
	__u32 dropped_frames_no_route;	/* Not transmitted, no route found */
	__u32 dropped_frames_congestion;/* Not forwarded due to congestion */
};

#define PREQ_Q_F_START		0x1
#define PREQ_Q_F_REFRESH	0x2
struct mesh_preq_queue {
	struct list_head list;
	u8 dst[ETH_ALEN];
	u8 flags;
};

struct ieee80211_roc_work {
	struct list_head list;

	struct ieee80211_sub_if_data *sdata;

	struct ieee80211_channel *chan;

	bool started, abort, hw_begun, notified;
	bool on_channel;

	unsigned long start_time;

	u32 duration, req_duration;
	struct sk_buff *frame;
	u64 cookie, mgmt_tx_cookie;
	enum ieee80211_roc_type type;
};

/* flags used in struct ieee80211_if_managed.flags */
enum ieee80211_sta_flags {
	IEEE80211_STA_CONNECTION_POLL	= BIT(1),
	IEEE80211_STA_CONTROL_PORT	= BIT(2),
	IEEE80211_STA_DISABLE_HT	= BIT(4),
	IEEE80211_STA_MFP_ENABLED	= BIT(6),
	IEEE80211_STA_UAPSD_ENABLED	= BIT(7),
	IEEE80211_STA_NULLFUNC_ACKED	= BIT(8),
	IEEE80211_STA_RESET_SIGNAL_AVE	= BIT(9),
	IEEE80211_STA_DISABLE_40MHZ	= BIT(10),
	IEEE80211_STA_DISABLE_VHT	= BIT(11),
	IEEE80211_STA_DISABLE_80P80MHZ	= BIT(12),
	IEEE80211_STA_DISABLE_160MHZ	= BIT(13),
	IEEE80211_STA_DISABLE_WMM	= BIT(14),
	IEEE80211_STA_ENABLE_RRM	= BIT(15),
};

struct ieee80211_mgd_auth_data {
	struct cfg80211_bss *bss;
	unsigned long timeout;
	int tries;
	u16 algorithm, expected_transaction;

	u8 key[WLAN_KEY_LEN_WEP104];
	u8 key_len, key_idx;
	bool done;
	bool timeout_started;

	u16 sae_trans, sae_status;
	size_t data_len;
	u8 data[];
};

struct ieee80211_mgd_assoc_data {
	struct cfg80211_bss *bss;
	const u8 *supp_rates;

	unsigned long timeout;
	int tries;

	u16 capability;
	u8 prev_bssid[ETH_ALEN];
	u8 ssid[IEEE80211_MAX_SSID_LEN];
	u8 ssid_len;
	u8 supp_rates_len;
	bool wmm, uapsd;
	bool need_beacon;
	bool synced;
	bool timeout_started;

	u8 ap_ht_param;

	struct ieee80211_vht_cap ap_vht_cap;

	u8 fils_nonces[2 * FILS_NONCE_LEN];
	u8 fils_kek[FILS_MAX_KEK_LEN];
	size_t fils_kek_len;

	size_t ie_len;
	u8 ie[];
};

struct ieee80211_sta_tx_tspec {
	/* timestamp of the first packet in the time slice */
	unsigned long time_slice_start;

	u32 admitted_time; /* in usecs, unlike over the air */
	u8 tsid;
	s8 up; /* signed to be able to invalidate with -1 during teardown */

	/* consumed TX time in microseconds in the time slice */
	u32 consumed_tx_time;
	enum {
		TX_TSPEC_ACTION_NONE = 0,
		TX_TSPEC_ACTION_DOWNGRADE,
		TX_TSPEC_ACTION_STOP_DOWNGRADE,
	} action;
	bool downgraded;
};

DECLARE_EWMA(beacon_signal, 4, 4)
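
/*
 * Editorial note: DECLARE_EWMA(beacon_signal, 4, 4), from <linux/average.h>,
 * generates struct ewma_beacon_signal together with the helpers
 * ewma_beacon_signal_init(), ewma_beacon_signal_add() and
 * ewma_beacon_signal_read(); the two numeric arguments select the internal
 * fixed-point precision and the per-sample weight. Typical (illustrative)
 * usage for the ave_beacon_signal member declared below:
 *
 *	ewma_beacon_signal_init(&ifmgd->ave_beacon_signal);
 *	ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, sample);
 *	avg = ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 */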

struct ieee80211_if_managed {
	struct timer_list timer;
	struct timer_list conn_mon_timer;
	struct timer_list bcn_mon_timer;
	struct timer_list chswitch_timer;
	struct work_struct monitor_work;
	struct work_struct chswitch_work;
	struct work_struct beacon_connection_loss_work;
	struct work_struct csa_connection_drop_work;

	unsigned long beacon_timeout;
	unsigned long probe_timeout;
	int probe_send_count;
	bool nullfunc_failed;
	bool connection_loss;

	struct cfg80211_bss *associated;
	struct ieee80211_mgd_auth_data *auth_data;
	struct ieee80211_mgd_assoc_data *assoc_data;

	u8 bssid[ETH_ALEN] __aligned(2);

	u16 aid;

	bool powersave; /* powersave requested for this iface */
	bool broken_ap; /* AP is broken -- turn off powersave */
	bool have_beacon;
	u8 dtim_period;
	enum ieee80211_smps_mode req_smps, /* requested smps mode */
				 driver_smps_mode; /* smps mode request */

	struct work_struct request_smps_work;

	unsigned int flags;

	bool csa_waiting_bcn;
	bool csa_ignored_same_chan;

	bool beacon_crc_valid;
	u32 beacon_crc;

	bool status_acked;
	bool status_received;
	__le16 status_fc;

	enum {
		IEEE80211_MFP_DISABLED,
		IEEE80211_MFP_OPTIONAL,
		IEEE80211_MFP_REQUIRED
	} mfp; /* management frame protection */

	/*
	 * Bitmask of enabled u-apsd queues,
	 * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
	 * to take effect.
	 */
	unsigned int uapsd_queues;

	/*
	 * Maximum number of buffered frames AP can deliver during a
	 * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
	 * Needs a new association to take effect.
	 */
	unsigned int uapsd_max_sp_len;

	int wmm_last_param_set;

	u8 use_4addr;

	s16 p2p_noa_index;

	struct ewma_beacon_signal ave_beacon_signal;

	/*
	 * Number of Beacon frames used in ave_beacon_signal. This can be used
	 * to avoid generating less reliable cqm events that would be based
	 * on only a couple of received frames.
	 */
	unsigned int count_beacon_signal;

	/* Number of times beacon loss was invoked. */
	unsigned int beacon_loss_count;

	/*
	 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
	 * that triggered a cqm event. 0 indicates that no event has been
	 * generated for the current association.
	 */
	int last_cqm_event_signal;

	/*
	 * State variables for keeping track of the RSSI of the AP currently
	 * connected to and informing the driver when the RSSI has gone
	 * below/above a certain threshold.
	 */
	int rssi_min_thold, rssi_max_thold;
	int last_ave_beacon_signal;

	struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
	struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
	struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
	struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */

	/* TDLS support */
	u8 tdls_peer[ETH_ALEN] __aligned(2);
	struct delayed_work tdls_peer_del_work;
	struct sk_buff *orig_teardown_skb; /* The original teardown skb */
	struct sk_buff *teardown_skb; /* A copy to send through the AP */
	spinlock_t teardown_lock; /* To lock changing teardown_skb */
	bool tdls_chan_switch_prohibited;
	bool tdls_wider_bw_prohibited;

	/* WMM-AC TSPEC support */
	struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS];
	/* Use a separate work struct so that we can do something here
	 * while the sdata->work is flushing the queues, for example.
	 * Otherwise, in scenarios where we hardly get any traffic out
	 * on the BE queue but there's a lot of VO traffic, we might
	 * get stuck in a downgraded situation and the flush takes forever.
	 */
	struct delayed_work tx_tspec_wk;
};

struct ieee80211_if_ibss {
	struct timer_list timer;
	struct work_struct csa_connection_drop_work;

	unsigned long last_scan_completed;

	u32 basic_rates;

	bool fixed_bssid;
	bool fixed_channel;
	bool privacy;

	bool control_port;
	bool userspace_handles_dfs;

	u8 bssid[ETH_ALEN] __aligned(2);
	u8 ssid[IEEE80211_MAX_SSID_LEN];
	u8 ssid_len, ie_len;
	u8 *ie;
	struct cfg80211_chan_def chandef;

	unsigned long ibss_join_req;
	/* probe response/beacon for IBSS */
	struct beacon_data __rcu *presp;

	struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
	struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */

	spinlock_t incomplete_lock;
	struct list_head incomplete_stations;

	enum {
		IEEE80211_IBSS_MLME_SEARCH,
		IEEE80211_IBSS_MLME_JOINED,
	} state;
};

/**
 * struct ieee80211_if_ocb - OCB mode state
 *
 * @housekeeping_timer: timer for periodic invocation of a housekeeping task
 * @wrkq_flags: OCB deferred task action
 * @incomplete_lock: delayed STA insertion lock
 * @incomplete_stations: list of STAs waiting for delayed insertion
 * @joined: indication if the interface is connected to an OCB network
 */
struct ieee80211_if_ocb {
	struct timer_list housekeeping_timer;
	unsigned long wrkq_flags;

	spinlock_t incomplete_lock;
	struct list_head incomplete_stations;

	bool joined;
};

/**
 * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
 *
 * These declarations define the interface, which enables
 * vendor-specific mesh synchronization.
 */
struct ieee802_11_elems;
struct ieee80211_mesh_sync_ops {
	void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata,
			     u16 stype,
			     struct ieee80211_mgmt *mgmt,
			     struct ieee802_11_elems *elems,
			     struct ieee80211_rx_status *rx_status);

	/* should be called with beacon_data under RCU read lock */
	void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata,
			   struct beacon_data *beacon);
	/* add other framework functions here */
};

struct mesh_csa_settings {
	struct rcu_head rcu_head;
	struct cfg80211_csa_settings settings;
};

struct ieee80211_if_mesh {
	struct timer_list housekeeping_timer;
	struct timer_list mesh_path_timer;
	struct timer_list mesh_path_root_timer;

	unsigned long wrkq_flags;
	unsigned long mbss_changed;

	bool userspace_handles_dfs;

	u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
	size_t mesh_id_len;
	/* Active Path Selection Protocol Identifier */
	u8 mesh_pp_id;
	/* Active Path Selection Metric Identifier */
	u8 mesh_pm_id;
	/* Congestion Control Mode Identifier */
	u8 mesh_cc_id;
	/* Synchronization Protocol Identifier */
	u8 mesh_sp_id;
	/* Authentication Protocol Identifier */
	u8 mesh_auth_id;
	/* Local mesh Sequence Number */
	u32 sn;
	/* Last used PREQ ID */
	u32 preq_id;
	atomic_t mpaths;
	/* Timestamp of last SN update */
	unsigned long last_sn_update;
	/* Time when it's ok to send next PERR */
	unsigned long next_perr;
	/* Timestamp of last PREQ sent */
	unsigned long last_preq;
	struct mesh_rmc *rmc;
	spinlock_t mesh_preq_queue_lock;
	struct mesh_preq_queue preq_queue;
	int preq_queue_len;
	struct mesh_stats mshstats;
	struct mesh_config mshcfg;
	atomic_t estab_plinks;
	u32 mesh_seqnum;
	bool accepting_plinks;
	int num_gates;
	struct beacon_data __rcu *beacon;
	const u8 *ie;
	u8 ie_len;
	enum {
		IEEE80211_MESH_SEC_NONE = 0x0,
		IEEE80211_MESH_SEC_AUTHED = 0x1,
		IEEE80211_MESH_SEC_SECURED = 0x2,
	} security;
	bool user_mpm;
	/* Extensible Synchronization Framework */
	const struct ieee80211_mesh_sync_ops *sync_ops;
	s64 sync_offset_clockdrift_max;
	spinlock_t sync_offset_lock;
	/* mesh power save */
	enum nl80211_mesh_power_mode nonpeer_pm;
	int ps_peers_light_sleep;
	int ps_peers_deep_sleep;
	struct ps_data ps;
	/* Channel Switching Support */
	struct mesh_csa_settings __rcu *csa;
	enum {
		IEEE80211_MESH_CSA_ROLE_NONE,
		IEEE80211_MESH_CSA_ROLE_INIT,
		IEEE80211_MESH_CSA_ROLE_REPEATER,
	} csa_role;
	u8 chsw_ttl;
	u16 pre_value;

	/* offset from skb->data while building IE */
	int meshconf_offset;

	struct mesh_table *mesh_paths;
	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
	int mesh_paths_generation;
	int mpp_paths_generation;
};

#ifdef CONFIG_MAC80211_MESH
#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name)	\
	do { (msh)->mshstats.name++; } while (0)
#else
#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
	do { } while (0)
#endif
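
/*
 * Editorial note: example (illustrative) use of the macro above from the
 * mesh forwarding path, which compiles away entirely when
 * CONFIG_MAC80211_MESH is not set:
 *
 *	IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, fwded_frames);
 */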

/**
 * enum ieee80211_sub_if_data_flags - virtual interface flags
 *
 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: do not bridge packets between
 *	associated stations and do not deliver multicast frames back to
 *	the wireless medium; deliver them only to the local net stack.
 * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
 * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
 */
enum ieee80211_sub_if_data_flags {
	IEEE80211_SDATA_ALLMULTI		= BIT(0),
	IEEE80211_SDATA_OPERATING_GMODE		= BIT(2),
	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
	IEEE80211_SDATA_IN_DRIVER		= BIT(5),
};

/**
 * enum ieee80211_sdata_state_bits - virtual interface state bits
 * @SDATA_STATE_RUNNING: virtual interface is up & running; this
 *	mirrors netif_running() but is separate for interface type
 *	change handling while the interface is up
 * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
 *	mode, so queues are stopped
 * @SDATA_STATE_OFFCHANNEL_BEACON_STOPPED: Beaconing was stopped due
 *	to offchannel, reset when offchannel returns
 */
enum ieee80211_sdata_state_bits {
	SDATA_STATE_RUNNING,
	SDATA_STATE_OFFCHANNEL,
	SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
};

/**
 * enum ieee80211_chanctx_mode - channel context configuration mode
 *
 * @IEEE80211_CHANCTX_SHARED: channel context may be used by
 *	multiple interfaces
 * @IEEE80211_CHANCTX_EXCLUSIVE: channel context can be used
 *	only by a single interface. This can be used for example for
 *	non-fixed channel IBSS.
 */
enum ieee80211_chanctx_mode {
	IEEE80211_CHANCTX_SHARED,
	IEEE80211_CHANCTX_EXCLUSIVE
};

/**
 * enum ieee80211_chanctx_replace_state - channel context replacement state
 *
 * This is used for channel context in-place reservations that require channel
 * context switch/swap.
 *
 * @IEEE80211_CHANCTX_REPLACE_NONE: no replacement is taking place
 * @IEEE80211_CHANCTX_WILL_BE_REPLACED: this channel context will be replaced
 *	by a (not yet registered) channel context pointed to by %replace_ctx.
 * @IEEE80211_CHANCTX_REPLACES_OTHER: this (not yet registered) channel context
 *	replaces an existing channel context pointed to by %replace_ctx.
 */
enum ieee80211_chanctx_replace_state {
	IEEE80211_CHANCTX_REPLACE_NONE,
	IEEE80211_CHANCTX_WILL_BE_REPLACED,
	IEEE80211_CHANCTX_REPLACES_OTHER,
};

struct ieee80211_chanctx {
	struct list_head list;
	struct rcu_head rcu_head;

	struct list_head assigned_vifs;
	struct list_head reserved_vifs;

	enum ieee80211_chanctx_replace_state replace_state;
	struct ieee80211_chanctx *replace_ctx;

	enum ieee80211_chanctx_mode mode;
	bool driver_present;

	struct ieee80211_chanctx_conf conf;
};

struct mac80211_qos_map {
	struct cfg80211_qos_map qos_map;
	struct rcu_head rcu_head;
};

enum txq_info_flags {
	IEEE80211_TXQ_STOP,
	IEEE80211_TXQ_AMPDU,
	IEEE80211_TXQ_NO_AMSDU,
};

/**
 * struct txq_info - per-TID queue
 *
 * @tin: contains packets split into multiple flows
 * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
 *	a fq_flow which is already owned by a different tin
 * @def_cvars: codel vars for @def_flow
 * @frags: used to keep fragments created after dequeue
 */
struct txq_info {
	struct fq_tin tin;
	struct fq_flow def_flow;
	struct codel_vars def_cvars;
	struct codel_stats cstats;
	struct sk_buff_head frags;
	unsigned long flags;

	/* keep last! */
	struct ieee80211_txq txq;
};
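
/*
 * Editorial note (not part of the original header): since @txq is the last
 * member, the wrapper is recovered from the driver-visible struct
 * ieee80211_txq with container_of(), roughly:
 *
 *	static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
 *	{
 *		return container_of(txq, struct txq_info, txq);
 *	}
 *
 * (mac80211 provides a helper of this shape; shown here only as a sketch.)
 */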

struct ieee80211_if_mntr {
	u32 flags;
	u8 mu_follow_addr[ETH_ALEN] __aligned(2);

	struct list_head list;
};

/**
 * struct ieee80211_if_nan - NAN state
 *
 * @conf: current NAN configuration
 * @func_lock: protects @function_inst_ids
 * @function_inst_ids: IDR of instantiated NAN functions, keyed by instance id
 */
struct ieee80211_if_nan {
	struct cfg80211_nan_conf conf;

	/* protects function_inst_ids */
	spinlock_t func_lock;
	struct idr function_inst_ids;
};

struct ieee80211_sub_if_data {
	struct list_head list;

	struct wireless_dev wdev;

	/* keys */
	struct list_head key_list;

	/* count for keys needing tailroom space allocation */
	int crypto_tx_tailroom_needed_cnt;
	int crypto_tx_tailroom_pending_dec;
	struct delayed_work dec_tailroom_needed_wk;

	struct net_device *dev;
	struct ieee80211_local *local;

	unsigned int flags;

	unsigned long state;

	char name[IFNAMSIZ];

	/* Fragment table for host-based reassembly */
	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
	unsigned int fragment_next;

	/* TID bitmap for NoAck policy */
	u16 noack_map;

	/* bit field of ACM bits (BIT(802.1D tag)) */
	u8 wmm_acm;

	struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
	struct ieee80211_key __rcu *default_unicast_key;
	struct ieee80211_key __rcu *default_multicast_key;
	struct ieee80211_key __rcu *default_mgmt_key;

	u16 sequence_number;
	__be16 control_port_protocol;
	bool control_port_no_encrypt;
	int encrypt_headroom;

	atomic_t num_tx_queued;
	struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
	struct mac80211_qos_map __rcu *qos_map;

	struct work_struct csa_finalize_work;
	bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
	struct cfg80211_chan_def csa_chandef;

	struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
	struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */

	/* context reservation -- protected with chanctx_mtx */
	struct ieee80211_chanctx *reserved_chanctx;
	struct cfg80211_chan_def reserved_chandef;
	bool reserved_radar_required;
	bool reserved_ready;

	/* used to reconfigure hardware SM PS */
	struct work_struct recalc_smps;

	struct work_struct work;
	struct sk_buff_head skb_queue;

	u8 needed_rx_chains;
	enum ieee80211_smps_mode smps_mode;

	int user_power_level; /* in dBm */
	int ap_power_level; /* in dBm */

	bool radar_required;
	struct delayed_work dfs_cac_timer_work;

	/*
	 * AP this belongs to: self in AP mode and
	 * corresponding AP in VLAN mode, NULL for
	 * all others (might be needed later in IBSS)
	 */
	struct ieee80211_if_ap *bss;

	/* bitmap of allowed (non-MCS) rate indexes for rate control */
	u32 rc_rateidx_mask[NUM_NL80211_BANDS];

	bool rc_has_mcs_mask[NUM_NL80211_BANDS];
	u8  rc_rateidx_mcs_mask[NUM_NL80211_BANDS][IEEE80211_HT_MCS_MASK_LEN];

	bool rc_has_vht_mcs_mask[NUM_NL80211_BANDS];
	u16 rc_rateidx_vht_mcs_mask[NUM_NL80211_BANDS][NL80211_VHT_NSS_MAX];

	union {
		struct ieee80211_if_ap ap;
		struct ieee80211_if_wds wds;
		struct ieee80211_if_vlan vlan;
		struct ieee80211_if_managed mgd;
		struct ieee80211_if_ibss ibss;
		struct ieee80211_if_mesh mesh;
		struct ieee80211_if_ocb ocb;
		struct ieee80211_if_mntr mntr;
		struct ieee80211_if_nan nan;
	} u;

#ifdef CONFIG_MAC80211_DEBUGFS
	struct {
		struct dentry *subdir_stations;
		struct dentry *default_unicast_key;
		struct dentry *default_multicast_key;
		struct dentry *default_mgmt_key;
	} debugfs;
#endif

	/* must be last, dynamically sized area in this! */
	struct ieee80211_vif vif;
};

static inline
struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
{
	return container_of(p, struct ieee80211_sub_if_data, vif);
}
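
/*
 * Editorial note: example (illustrative) use of vif_to_sdata() -- drivers
 * hand mac80211 a struct ieee80211_vif, and internal code recovers the
 * owning interface, which works because @vif is the trailing member of
 * struct ieee80211_sub_if_data:
 *
 *	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 */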

static inline void sdata_lock(struct ieee80211_sub_if_data *sdata)
	__acquires(&sdata->wdev.mtx)
{
	mutex_lock(&sdata->wdev.mtx);
	__acquire(&sdata->wdev.mtx);
}

static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata)
	__releases(&sdata->wdev.mtx)
{
	mutex_unlock(&sdata->wdev.mtx);
	__release(&sdata->wdev.mtx);
}

#define sdata_dereference(p, sdata) \
	rcu_dereference_protected(p, lockdep_is_held(&sdata->wdev.mtx))

static inline void
sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
{
	lockdep_assert_held(&sdata->wdev.mtx);
}
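
/*
 * Editorial note: the wrappers above serialize on the wdev mutex, and
 * sdata_dereference() encodes that locking requirement for RCU-protected
 * pointers. A typical (illustrative) pattern:
 *
 *	sdata_lock(sdata);
 *	beacon = sdata_dereference(sdata->u.ap.beacon, sdata);
 *	...
 *	sdata_unlock(sdata);
 */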

static inline int
ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
{
	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_5:
		return 2;
	case NL80211_CHAN_WIDTH_10:
		return 1;
	default:
		return 0;
	}
}

static inline int
ieee80211_vif_get_shift(struct ieee80211_vif *vif)
{
	struct ieee80211_chanctx_conf *chanctx_conf;
	int shift = 0;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(vif->chanctx_conf);
	if (chanctx_conf)
		shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
	rcu_read_unlock();

	return shift;
}
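
/*
 * Editorial note (not part of the original header): the "shift" returned
 * above scales legacy bitrates for 5/10 MHz channels, roughly:
 *
 *	shift = ieee80211_vif_get_shift(vif);
 *	bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 1 << shift);
 *
 * i.e. rates are halved on a 10 MHz channel and quartered on a 5 MHz one.
 */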

enum {
	IEEE80211_RX_MSG	= 1,
	IEEE80211_TX_STATUS_MSG	= 2,
};

enum queue_stop_reason {
	IEEE80211_QUEUE_STOP_REASON_DRIVER,
	IEEE80211_QUEUE_STOP_REASON_PS,
	IEEE80211_QUEUE_STOP_REASON_CSA,
	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
	IEEE80211_QUEUE_STOP_REASON_SUSPEND,
	IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
	IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
	IEEE80211_QUEUE_STOP_REASON_FLUSH,
	IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
	IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,

	IEEE80211_QUEUE_STOP_REASONS,
};

#ifdef CONFIG_MAC80211_LEDS
struct tpt_led_trigger {
	char name[32];
	const struct ieee80211_tpt_blink *blink_table;
	unsigned int blink_table_len;
	struct timer_list timer;
	unsigned long prev_traffic;
	unsigned long tx_bytes, rx_bytes;
	unsigned int active, want;
	bool running;
};
#endif

/**
 * mac80211 scan flags - currently active scan mode
 *
 * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as
 *	well be on the operating channel
 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
 *	determine if we are on the operating channel or not
 * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating
 *	channel. This should not interrupt normal traffic.
 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
 *	that the scan completed.
 * @SCAN_ABORTED: Set for our scan work function when the driver reported
 *	a scan complete for an aborted scan.
 * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
 *	cancelled.
 */
enum {
	SCAN_SW_SCANNING,
	SCAN_HW_SCANNING,
	SCAN_ONCHANNEL_SCANNING,
	SCAN_COMPLETED,
	SCAN_ABORTED,
	SCAN_HW_CANCELLED,
};

/**
 * enum mac80211_scan_state - scan state machine states
 *
 * @SCAN_DECISION: Main entry point to the scan state machine, this state
 *	determines if we should keep on scanning or switch back to the
 *	operating channel
 * @SCAN_SET_CHANNEL: Set the next channel to be scanned
 * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
 * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
 *	send out data
 * @SCAN_RESUME: Resume the scan and scan the next channel
 * @SCAN_ABORT: Abort the scan and go back to operating channel
 */
enum mac80211_scan_state {
	SCAN_DECISION,
	SCAN_SET_CHANNEL,
	SCAN_SEND_PROBE,
	SCAN_SUSPEND,
	SCAN_RESUME,
	SCAN_ABORT,
};

struct ieee80211_local {
	/* embed the driver visible part.
	 * don't cast (use the static inlines below), but we keep
	 * it first anyway so they become a no-op */
	struct ieee80211_hw hw;

	struct fq fq;
	struct codel_vars *cvars;
	struct codel_params cparams;

	const struct ieee80211_ops *ops;

	/*
	 * private workqueue to mac80211. mac80211 makes this accessible
	 * via ieee80211_queue_work()
	 */
	struct workqueue_struct *workqueue;

	unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
	int q_stop_reasons[IEEE80211_MAX_QUEUES][IEEE80211_QUEUE_STOP_REASONS];
/* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */
|
|
|
|
spinlock_t queue_stop_reason_lock;
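/*
 * Illustrative note (added): queue_stop_reasons[q] is a bitmap of
 * enum queue_stop_reason values currently stopping queue q, while
 * q_stop_reasons[q][reason] counts outstanding refcounted stop requests
 * for that reason; see ieee80211_stop_queue_by_reason() and
 * ieee80211_wake_queue_by_reason() declared near the end of this file.
 */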
|
|
|
|
|
|
|
|
int open_count;
|
|
|
|
int monitors, cooked_mntrs;
|
|
|
|
/* number of interfaces with corresponding FIF_ flags */
|
|
|
|
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
|
|
|
|
fif_probe_req;
|
|
|
|
int probe_req_reg;
|
|
|
|
unsigned int filter_flags; /* FIF_* */
|
|
|
|
|
|
|
|
bool wiphy_ciphers_allocated;
|
|
|
|
|
|
|
|
bool use_chanctx;
|
|
|
|
|
|
|
|
/* protects the aggregated multicast list and filter calls */
|
|
|
|
spinlock_t filter_lock;
|
|
|
|
|
|
|
|
/* used for uploading changed mc list */
|
|
|
|
struct work_struct reconfig_filter;
|
|
|
|
|
|
|
|
/* aggregated multicast list */
|
|
|
|
struct netdev_hw_addr_list mc_list;
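/*
 * Illustrative note (added): the three members above cooperate -- changes
 * to the device multicast list are aggregated into mc_list under
 * filter_lock, reconfig_filter is then scheduled, and its worker pushes
 * the resulting filter state to the driver via
 * ieee80211_configure_filter() (declared later in this file).
 */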
|
|
|
|
|
|
|
|
bool tim_in_locked_section; /* see ieee80211_beacon_get() */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* suspended is true if we finished all the suspend _and_ we have
|
|
|
|
* not yet come up from resume. This is to be used by mac80211
|
|
|
|
* to ensure driver sanity during suspend and mac80211's own
|
|
|
|
* sanity. It can eventually be used for WoW as well.
|
|
|
|
*/
|
|
|
|
bool suspended;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Resuming is true while suspended, but when we're reprogramming the
|
|
|
|
* hardware -- at that time it's allowed to use ieee80211_queue_work()
|
|
|
|
* again even though some other parts of the stack are still suspended
|
|
|
|
* and we still drop received frames to avoid waking the stack.
|
|
|
|
*/
|
|
|
|
bool resuming;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* quiescing is true during the suspend process _only_ to
|
|
|
|
* ease timer cancelling etc.
|
|
|
|
*/
|
|
|
|
bool quiescing;
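/*
 * Illustrative sketch (added, not from the original header) of how the
 * three flags above are typically combined when deciding whether queuing
 * new work is safe across a suspend/resume cycle:
 *
 *	if (local->quiescing || (local->suspended && !local->resuming))
 *		refuse to queue the work;
 *
 * i.e. work is rejected while quiescing and while fully suspended, but
 * allowed again once the resume/HW-reprogramming phase has begun.
 */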
|
|
|
|
|
|
|
|
/* device is started */
|
|
|
|
bool started;
|
|
|
|
|
|
|
|
/* device is during a HW reconfig */
|
|
|
|
bool in_reconfig;
|
|
|
|
|
|
|
|
/* wowlan is enabled -- don't reconfig on resume */
|
|
|
|
bool wowlan;
|
|
|
|
|
|
|
|
struct work_struct radar_detected_work;
|
|
|
|
|
|
|
|
/* number of RX chains the hardware has */
|
|
|
|
u8 rx_chains;
|
|
|
|
|
|
|
|
int tx_headroom; /* required headroom for hardware/radiotap */
|
|
|
|
|
|
|
|
/* Tasklet and skb queue to process calls from IRQ mode. All frames
|
|
|
|
* added to skb_queue will be processed, but frames in
|
|
|
|
* skb_queue_unreliable may be dropped if the total length of these
|
|
|
|
* queues increases over the limit. */
|
|
|
|
#define IEEE80211_IRQSAFE_QUEUE_LIMIT 128
|
|
|
|
struct tasklet_struct tasklet;
|
|
|
|
struct sk_buff_head skb_queue;
|
|
|
|
struct sk_buff_head skb_queue_unreliable;
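/*
 * Illustrative note (added): these queues are fed from IRQ context by the
 * *_irqsafe() entry points (e.g. ieee80211_rx_irqsafe()), which enqueue
 * the skb and schedule the tasklet above; the tasklet then runs the
 * normal RX/TX-status paths in softirq context.
 */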
|
|
|
|
|
|
|
|
spinlock_t rx_path_lock;
|
|
|
|
|
|
|
|
/* Station data */
|
|
|
|
/*
|
|
|
|
* The mutex only protects the list, hash table and
|
|
|
|
* counter, reads are done with RCU.
|
|
|
|
*/
|
|
|
|
struct mutex sta_mtx;
|
|
|
|
spinlock_t tim_lock;
|
|
|
|
unsigned long num_sta;
|
|
|
|
struct list_head sta_list;
|
|
|
|
struct rhltable sta_hash;
|
|
|
|
struct timer_list sta_cleanup;
|
|
|
|
int sta_generation;
|
|
|
|
|
|
|
|
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
|
|
|
|
struct tasklet_struct tx_pending_tasklet;
|
|
|
|
|
|
|
|
atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
|
|
|
|
|
|
|
|
/* number of interfaces with allmulti RX */
|
|
|
|
atomic_t iff_allmultis;
|
|
|
|
|
|
|
|
struct rate_control_ref *rate_ctrl;
|
|
|
|
|
|
|
|
struct crypto_cipher *wep_tx_tfm;
|
|
|
|
struct crypto_cipher *wep_rx_tfm;
|
|
|
|
u32 wep_iv;
|
|
|
|
|
|
|
|
/* see iface.c */
|
|
|
|
struct list_head interfaces;
|
|
|
|
struct list_head mon_list; /* only interfaces that are IFF_UP && !cooked */
|
|
|
|
struct mutex iflist_mtx;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Key mutex, protects sdata's key_list and sta_info's
|
|
|
|
* key pointers (write access, they're RCU.)
|
|
|
|
*/
|
|
|
|
struct mutex key_mtx;
|
|
|
|
|
|
|
|
/* mutex for scan and work locking */
|
|
|
|
struct mutex mtx;
|
|
|
|
|
|
|
|
/* Scanning and BSS list */
|
|
|
|
unsigned long scanning;
|
|
|
|
struct cfg80211_ssid scan_ssid;
|
|
|
|
struct cfg80211_scan_request *int_scan_req;
|
|
|
|
struct cfg80211_scan_request __rcu *scan_req;
|
|
|
|
struct ieee80211_scan_request *hw_scan_req;
|
|
|
|
struct cfg80211_chan_def scan_chandef;
|
|
|
|
enum nl80211_band hw_scan_band;
|
|
|
|
int scan_channel_idx;
|
|
|
|
int scan_ies_len;
|
|
|
|
int hw_scan_ies_bufsize;
|
|
|
|
struct cfg80211_scan_info scan_info;
|
|
|
|
|
|
|
|
struct work_struct sched_scan_stopped_work;
|
|
|
|
struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
|
|
|
|
struct cfg80211_sched_scan_request __rcu *sched_scan_req;
|
|
|
|
u8 scan_addr[ETH_ALEN];
|
|
|
|
|
|
|
|
unsigned long leave_oper_channel_time;
|
|
|
|
enum mac80211_scan_state next_scan_state;
|
|
|
|
struct delayed_work scan_work;
|
|
|
|
struct ieee80211_sub_if_data __rcu *scan_sdata;
|
|
|
|
/* For backward compatibility only -- do not use */
|
|
|
|
struct cfg80211_chan_def _oper_chandef;
|
|
|
|
|
|
|
|
/* Temporary remain-on-channel for off-channel operations */
|
|
|
|
struct ieee80211_channel *tmp_channel;
|
|
|
|
|
|
|
|
/* channel contexts */
|
|
|
|
struct list_head chanctx_list;
|
|
|
|
struct mutex chanctx_mtx;
|
|
|
|
|
|
|
|
#ifdef CONFIG_MAC80211_LEDS
|
|
|
|
struct led_trigger tx_led, rx_led, assoc_led, radio_led;
|
|
|
|
struct led_trigger tpt_led;
|
|
|
|
atomic_t tx_led_active, rx_led_active, assoc_led_active;
|
|
|
|
atomic_t radio_led_active, tpt_led_active;
|
|
|
|
struct tpt_led_trigger *tpt_led_trigger;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
|
|
|
|
/* SNMP counters */
|
|
|
|
/* dot11CountersTable */
|
|
|
|
u32 dot11TransmittedFragmentCount;
|
|
|
|
u32 dot11MulticastTransmittedFrameCount;
|
|
|
|
u32 dot11FailedCount;
|
|
|
|
u32 dot11RetryCount;
|
|
|
|
u32 dot11MultipleRetryCount;
|
|
|
|
u32 dot11FrameDuplicateCount;
|
|
|
|
u32 dot11ReceivedFragmentCount;
|
|
|
|
u32 dot11MulticastReceivedFrameCount;
|
|
|
|
u32 dot11TransmittedFrameCount;
|
|
|
|
|
|
|
|
/* TX/RX handler statistics */
|
|
|
|
unsigned int tx_handlers_drop;
|
|
|
|
unsigned int tx_handlers_queued;
|
|
|
|
unsigned int tx_handlers_drop_wep;
|
|
|
|
unsigned int tx_handlers_drop_not_assoc;
|
|
|
|
unsigned int tx_handlers_drop_unauth_port;
|
|
|
|
unsigned int rx_handlers_drop;
|
|
|
|
unsigned int rx_handlers_queued;
|
|
|
|
unsigned int rx_handlers_drop_nullfunc;
|
|
|
|
unsigned int rx_handlers_drop_defrag;
|
|
|
|
unsigned int tx_expand_skb_head;
|
|
|
|
unsigned int tx_expand_skb_head_cloned;
|
|
|
|
unsigned int rx_expand_skb_head_defrag;
|
|
|
|
unsigned int rx_handlers_fragments;
|
|
|
|
unsigned int tx_status_drop;
|
|
|
|
#define I802_DEBUG_INC(c) (c)++
|
|
|
|
#else /* CONFIG_MAC80211_DEBUG_COUNTERS */
|
|
|
|
#define I802_DEBUG_INC(c) do { } while (0)
|
|
|
|
#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */
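/*
 * Usage sketch (added for illustration): statistics sites bump the
 * counters above through the macro so the accesses compile away entirely
 * when CONFIG_MAC80211_DEBUG_COUNTERS is not set, e.g.
 *
 *	I802_DEBUG_INC(local->tx_handlers_drop);
 */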
|
|
|
|
|
|
|
|
|
|
|
|
int total_ps_buffered; /* total number of all buffered unicast and
|
|
|
|
* multicast packets for power saving stations
|
|
|
|
*/
|
|
|
|
|
|
|
|
bool pspolling;
|
|
|
|
bool offchannel_ps_enabled;
|
|
|
|
/*
|
|
|
|
* PS can only be enabled when we have exactly one managed
|
|
|
|
* interface (and monitors) in PS, this then points there.
|
|
|
|
*/
|
|
|
|
struct ieee80211_sub_if_data *ps_sdata;
|
|
|
|
struct work_struct dynamic_ps_enable_work;
|
|
|
|
struct work_struct dynamic_ps_disable_work;
|
|
|
|
struct timer_list dynamic_ps_timer;
|
|
|
|
struct notifier_block ifa_notifier;
|
|
|
|
struct notifier_block ifa6_notifier;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The dynamic ps timeout configured from user space via WEXT -
|
|
|
|
* this will override whatever chosen by mac80211 internally.
|
|
|
|
*/
|
|
|
|
int dynamic_ps_forced_timeout;
|
|
|
|
|
|
|
|
int user_power_level; /* in dBm, for all interfaces */
|
|
|
|
|
|
|
|
enum ieee80211_smps_mode smps_mode;
|
|
|
|
|
|
|
|
struct work_struct restart_work;
|
|
|
|
|
|
|
|
#ifdef CONFIG_MAC80211_DEBUGFS
|
|
|
|
struct local_debugfsdentries {
|
|
|
|
struct dentry *rcdir;
|
|
|
|
struct dentry *keys;
|
|
|
|
} debugfs;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remain-on-channel support
|
|
|
|
*/
|
|
|
|
struct delayed_work roc_work;
|
|
|
|
struct list_head roc_list;
|
|
|
|
struct work_struct hw_roc_start, hw_roc_done;
|
|
|
|
unsigned long hw_roc_start_time;
|
|
|
|
u64 roc_cookie_counter;
|
|
|
|
|
|
|
|
struct idr ack_status_frames;
|
|
|
|
spinlock_t ack_status_lock;
|
|
|
|
|
|
|
|
struct ieee80211_sub_if_data __rcu *p2p_sdata;
|
|
|
|
|
|
|
|
/* virtual monitor interface */
|
|
|
|
struct ieee80211_sub_if_data __rcu *monitor_sdata;
|
|
|
|
struct cfg80211_chan_def monitor_chandef;
|
|
|
|
|
|
|
|
/* extended capabilities provided by mac80211 */
|
|
|
|
u8 ext_capa[8];
|
|
|
|
|
|
|
|
/* TDLS channel switch */
|
|
|
|
struct work_struct tdls_chsw_work;
|
|
|
|
struct sk_buff_head skb_queue_tdls_chsw;
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline struct ieee80211_sub_if_data *
|
|
|
|
IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
|
|
|
|
{
|
|
|
|
return netdev_priv(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct ieee80211_sub_if_data *
|
|
|
|
IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
|
|
|
|
{
|
|
|
|
return container_of(wdev, struct ieee80211_sub_if_data, wdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct ieee80211_supported_band *
|
|
|
|
ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local = sdata->local;
|
|
|
|
struct ieee80211_chanctx_conf *chanctx_conf;
|
|
|
|
enum nl80211_band band;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!chanctx_conf)) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
band = chanctx_conf->def.chan->band;
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return local->hw.wiphy->bands[band];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* this struct holds the values parsed from the channel switch IE */
|
|
|
|
struct ieee80211_csa_ie {
|
|
|
|
struct cfg80211_chan_def chandef;
|
|
|
|
u8 mode;
|
|
|
|
u8 count;
|
|
|
|
u8 ttl;
|
|
|
|
u16 pre_value;
|
|
|
|
u16 reason_code;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Parsed Information Elements */
|
|
|
|
struct ieee802_11_elems {
|
|
|
|
const u8 *ie_start;
|
|
|
|
size_t total_len;
|
|
|
|
|
|
|
|
/* pointers to IEs */
|
|
|
|
const struct ieee80211_tdls_lnkie *lnk_id;
|
|
|
|
const struct ieee80211_ch_switch_timing *ch_sw_timing;
|
|
|
|
const u8 *ext_capab;
|
|
|
|
const u8 *ssid;
|
|
|
|
const u8 *supp_rates;
|
|
|
|
const u8 *ds_params;
|
|
|
|
const struct ieee80211_tim_ie *tim;
|
|
|
|
const u8 *challenge;
|
|
|
|
const u8 *rsn;
|
|
|
|
const u8 *erp_info;
|
|
|
|
const u8 *ext_supp_rates;
|
|
|
|
const u8 *wmm_info;
|
|
|
|
const u8 *wmm_param;
|
|
|
|
const struct ieee80211_ht_cap *ht_cap_elem;
|
|
|
|
const struct ieee80211_ht_operation *ht_operation;
|
|
|
|
const struct ieee80211_vht_cap *vht_cap_elem;
|
|
|
|
const struct ieee80211_vht_operation *vht_operation;
|
|
|
|
const struct ieee80211_meshconf_ie *mesh_config;
|
|
|
|
const u8 *mesh_id;
|
|
|
|
const u8 *peering;
|
|
|
|
const __le16 *awake_window;
|
|
|
|
const u8 *preq;
|
|
|
|
const u8 *prep;
|
|
|
|
const u8 *perr;
|
|
|
|
const struct ieee80211_rann_ie *rann;
|
|
|
|
const struct ieee80211_channel_sw_ie *ch_switch_ie;
|
|
|
|
const struct ieee80211_ext_chansw_ie *ext_chansw_ie;
|
|
|
|
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
|
|
|
|
const u8 *country_elem;
|
|
|
|
const u8 *pwr_constr_elem;
|
|
|
|
const u8 *cisco_dtpc_elem;
|
|
|
|
const struct ieee80211_timeout_interval_ie *timeout_int;
|
|
|
|
const u8 *opmode_notif;
|
|
|
|
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
|
|
|
|
struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
|
|
|
|
const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
|
|
|
|
|
|
|
|
/* length of them, respectively */
|
|
|
|
u8 ext_capab_len;
|
|
|
|
u8 ssid_len;
|
|
|
|
u8 supp_rates_len;
|
|
|
|
u8 tim_len;
|
|
|
|
u8 challenge_len;
|
|
|
|
u8 rsn_len;
|
|
|
|
u8 ext_supp_rates_len;
|
|
|
|
u8 wmm_info_len;
|
|
|
|
u8 wmm_param_len;
|
|
|
|
u8 mesh_id_len;
|
|
|
|
u8 peering_len;
|
|
|
|
u8 preq_len;
|
|
|
|
u8 prep_len;
|
|
|
|
u8 perr_len;
|
|
|
|
u8 country_elem_len;
|
|
|
|
|
|
|
|
/* whether a parse error occurred while retrieving these elements */
|
|
|
|
bool parse_error;
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline struct ieee80211_local *hw_to_local(
|
|
|
|
struct ieee80211_hw *hw)
|
|
|
|
{
|
|
|
|
return container_of(hw, struct ieee80211_local, hw);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct txq_info *to_txq_info(struct ieee80211_txq *txq)
|
|
|
|
{
|
|
|
|
return container_of(txq, struct txq_info, txq);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool txq_has_queue(struct ieee80211_txq *txq)
|
|
|
|
{
|
|
|
|
struct txq_info *txqi = to_txq_info(txq);
|
|
|
|
|
|
|
|
return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
|
|
|
|
{
|
|
|
|
return ether_addr_equal(raddr, addr) ||
|
|
|
|
is_broadcast_ether_addr(raddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool
|
|
|
|
ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
|
|
|
|
{
|
|
|
|
WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
|
|
|
|
status->flag & RX_FLAG_MACTIME_END);
|
|
|
|
if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
|
|
|
|
return true;
|
|
|
|
/* can't handle non-legacy preamble yet */
|
|
|
|
if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
|
|
|
|
status->encoding == RX_ENC_LEGACY)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
/* This function returns the number of multicast stations connected to this
|
|
|
|
* interface. It returns -1 if that number is not tracked, i.e. for netdevs
|
|
|
|
* not in AP or AP_VLAN mode or when using 4addr.
|
|
|
|
*/
|
|
|
|
static inline int
|
|
|
|
ieee80211_vif_get_num_mcast_if(struct ieee80211_sub_if_data *sdata)
|
|
|
|
{
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP)
|
|
|
|
return atomic_read(&sdata->u.ap.num_mcast_sta);
|
|
|
|
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
|
|
|
|
return atomic_read(&sdata->u.vlan.num_mcast_sta);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_rx_status *status,
|
|
|
|
unsigned int mpdu_len,
|
|
|
|
unsigned int mpdu_offset);
|
|
|
|
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
|
|
|
|
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
|
|
|
|
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
|
|
|
|
u32 changed);
|
|
|
|
void ieee80211_configure_filter(struct ieee80211_local *local);
|
|
|
|
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
|
|
|
|
int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
|
|
|
|
u64 *cookie, gfp_t gfp);
|
|
|
|
|
|
|
|
void ieee80211_check_fast_rx(struct sta_info *sta);
|
|
|
|
void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_clear_fast_rx(struct sta_info *sta);
|
|
|
|
|
|
|
|
/* STA code */
|
|
|
|
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
|
|
|
|
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_auth_request *req);
|
|
|
|
int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_assoc_request *req);
|
|
|
|
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_deauth_request *req);
|
|
|
|
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_disassoc_request *req);
|
|
|
|
void ieee80211_send_pspoll(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_recalc_ps(struct ieee80211_local *local);
|
|
|
|
void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
|
|
|
|
int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb);
|
|
|
|
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
|
|
|
|
__le16 fc, bool acked);
|
|
|
|
void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
/* IBSS code */
|
|
|
|
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
|
|
|
|
void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
|
|
|
|
const u8 *bssid, const u8 *addr, u32 supp_rates);
|
|
|
|
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_ibss_params *params);
|
|
|
|
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb);
|
|
|
|
int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_csa_settings *csa_settings);
|
|
|
|
int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
/* OCB code */
|
|
|
|
void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
|
|
|
|
const u8 *bssid, const u8 *addr, u32 supp_rates);
|
|
|
|
void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata);
|
|
|
|
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ocb_setup *setup);
|
|
|
|
int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
/* mesh code */
|
|
|
|
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb);
|
|
|
|
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_csa_settings *csa_settings);
|
|
|
|
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
|
|
|
|
|
|
|
|
/* scan/BSS handling */
|
|
|
|
void ieee80211_scan_work(struct work_struct *work);
|
|
|
|
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
|
|
|
|
const u8 *ssid, u8 ssid_len,
|
|
|
|
struct ieee80211_channel **channels,
|
|
|
|
unsigned int n_channels,
|
|
|
|
enum nl80211_bss_scan_width scan_width);
|
|
|
|
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_scan_request *req);
|
|
|
|
void ieee80211_scan_cancel(struct ieee80211_local *local);
|
|
|
|
void ieee80211_run_deferred_scan(struct ieee80211_local *local);
|
|
|
|
void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
|
|
|
|
|
|
|
|
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
|
|
|
|
struct ieee80211_bss *
|
|
|
|
ieee80211_bss_info_update(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_rx_status *rx_status,
|
|
|
|
struct ieee80211_mgmt *mgmt,
|
|
|
|
size_t len,
|
|
|
|
struct ieee802_11_elems *elems,
|
|
|
|
struct ieee80211_channel *channel);
|
|
|
|
void ieee80211_rx_bss_put(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_bss *bss);
|
|
|
|
|
|
|
|
/* scheduled scan handling */
|
|
|
|
int
|
|
|
|
__ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_sched_scan_request *req);
|
|
|
|
int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct cfg80211_sched_scan_request *req);
|
|
|
|
int ieee80211_request_sched_scan_stop(struct ieee80211_local *local);
|
|
|
|
void ieee80211_sched_scan_end(struct ieee80211_local *local);
|
|
|
|
void ieee80211_sched_scan_stopped_work(struct work_struct *work);
|
|
|
|
|
|
|
|
/* off-channel/mgmt-tx */
|
|
|
|
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
|
|
|
|
void ieee80211_offchannel_return(struct ieee80211_local *local);
|
|
|
|
void ieee80211_roc_setup(struct ieee80211_local *local);
|
|
|
|
void ieee80211_start_next_roc(struct ieee80211_local *local);
|
|
|
|
void ieee80211_roc_purge(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_sub_if_data *sdata);
|
|
|
|
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
|
|
|
|
struct ieee80211_channel *chan,
|
|
|
|
unsigned int duration, u64 *cookie);
|
|
|
|
int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
|
|
|
|
struct wireless_dev *wdev, u64 cookie);
|
|
|
|
int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
|
|
|
struct cfg80211_mgmt_tx_params *params, u64 *cookie);
|
|
|
|
int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
|
|
|
|
struct wireless_dev *wdev, u64 cookie);
|
|
|
|
|
|
|
|
/* channel switch handling */
|
|
|
|
void ieee80211_csa_finalize_work(struct work_struct *work);
|
|
|
|
int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
|
|
|
|
struct cfg80211_csa_settings *params);
|
|
|
|
|
|
|
|
/* interface handling */
|
|
|
|
int ieee80211_iface_init(void);
|
|
|
|
void ieee80211_iface_exit(void);
|
|
|
|
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
|
|
|
|
unsigned char name_assign_type,
|
|
|
|
struct wireless_dev **new_wdev, enum nl80211_iftype type,
|
|
|
|
struct vif_params *params);
|
|
|
|
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
|
|
|
|
enum nl80211_iftype type);
|
|
|
|
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_remove_interfaces(struct ieee80211_local *local);
|
|
|
|
u32 ieee80211_idle_off(struct ieee80211_local *local);
|
|
|
|
void ieee80211_recalc_idle(struct ieee80211_local *local);
|
|
|
|
void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
|
|
|
|
const int offset);
|
|
|
|
int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up);
|
|
|
|
void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata);
|
|
|
|
int ieee80211_add_virtual_monitor(struct ieee80211_local *local);
|
|
|
|
void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
|
|
|
|
|
|
|
|
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
|
|
|
|
bool update_bss);
|
|
|
|
|
|
|
|
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
|
|
|
|
{
|
|
|
|
return test_bit(SDATA_STATE_RUNNING, &sdata->state);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* tx handling */
|
|
|
|
void ieee80211_clear_tx_pending(struct ieee80211_local *local);
|
|
|
|
void ieee80211_tx_pending(unsigned long data);
|
|
|
|
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev);
|
|
|
|
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev);
|
|
|
|
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev,
|
|
|
|
u32 info_flags);
|
|
|
|
void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
|
|
|
|
struct sk_buff_head *skbs);
|
|
|
|
struct sk_buff *
|
|
|
|
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, u32 info_flags);
|
|
|
|
void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
|
|
|
|
struct ieee80211_supported_band *sband,
|
|
|
|
int retry_count, int shift, bool send_to_cooked);
|
|
|
|
|
|
|
|
void ieee80211_check_fast_xmit(struct sta_info *sta);
|
|
|
|
void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
|
|
|
|
void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata);
|
|
|
|
void ieee80211_clear_fast_xmit(struct sta_info *sta);
|
|
|
|
|
|
|
|
/* HT */
|
|
|
|
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_sta_ht_cap *ht_cap);
|
|
|
|
bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_supported_band *sband,
|
|
|
|
const struct ieee80211_ht_cap *ht_cap_ie,
|
|
|
|
struct sta_info *sta);
|
|
|
|
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
|
|
|
|
const u8 *da, u16 tid,
|
|
|
|
u16 initiator, u16 reason_code);
|
|
|
|
int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
|
|
|
|
enum ieee80211_smps_mode smps, const u8 *da,
|
|
|
|
const u8 *bssid);
|
|
|
|
void ieee80211_request_smps_ap_work(struct work_struct *work);
|
|
|
|
void ieee80211_request_smps_mgd_work(struct work_struct *work);
|
|
|
|
bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
|
|
|
|
enum ieee80211_smps_mode smps_mode_new);
|
|
|
|
|
|
|
|
void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
|
|
|
|
u16 initiator, u16 reason, bool stop);
|
|
|
|
void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
|
|
|
|
u16 initiator, u16 reason, bool stop);
|
|
|
|
void __ieee80211_start_rx_ba_session(struct sta_info *sta,
|
|
|
|
u8 dialog_token, u16 timeout,
|
|
|
|
u16 start_seq_num, u16 ba_policy, u16 tid,
|
|
|
|
u16 buf_size, bool tx, bool auto_seq);
|
|
|
|
void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
|
|
|
|
u8 dialog_token, u16 timeout,
|
|
|
|
u16 start_seq_num, u16 ba_policy, u16 tid,
|
|
|
|
u16 buf_size, bool tx, bool auto_seq);
|
|
|
|
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
|
|
|
|
enum ieee80211_agg_stop_reason reason);
|
|
|
|
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sta_info *sta,
|
|
|
|
struct ieee80211_mgmt *mgmt, size_t len);
|
|
|
|
void ieee80211_process_addba_resp(struct ieee80211_local *local,
|
|
|
|
struct sta_info *sta,
|
|
|
|
struct ieee80211_mgmt *mgmt,
|
|
|
|
size_t len);
|
|
|
|
void ieee80211_process_addba_request(struct ieee80211_local *local,
|
|
|
|
struct sta_info *sta,
|
|
|
|
struct ieee80211_mgmt *mgmt,
|
|
|
|
size_t len);
|
|
|
|
|
|
|
|
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
|
|
|
|
enum ieee80211_agg_stop_reason reason);
|
|
|
|
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
|
|
|
|
enum ieee80211_agg_stop_reason reason);
|
|
|
|
void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
|
|
|
|
struct tid_ampdu_tx *tid_tx);
|
|
|
|
void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
|
|
|
|
struct tid_ampdu_tx *tid_tx);
|
|
|
|
void ieee80211_ba_session_work(struct work_struct *work);
|
|
|
|
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
|
|
|
|
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
|
|
|
|
|
|
|
|
u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs);
|
|
|
|
|
|
|
|
/* VHT */
|
|
|
|
void
|
|
|
|
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_supported_band *sband,
|
|
|
|
const struct ieee80211_vht_cap *vht_cap_ie,
|
|
|
|
struct sta_info *sta);
|
|
|
|
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
|
|
|
|
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
|
|
|
|
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
|
|
|
|
enum ieee80211_sta_rx_bandwidth
|
|
|
|
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
|
|
|
|
enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta);
|
|
|
|
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
|
|
|
|
void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_mgmt *mgmt);
|
|
|
|
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sta_info *sta, u8 opmode,
|
|
|
|
enum nl80211_band band);
|
|
|
|
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sta_info *sta, u8 opmode,
|
|
|
|
enum nl80211_band band);
|
|
|
|
void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_sta_vht_cap *vht_cap);
|
|
|
|
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
|
|
|
|
u16 vht_mask[NL80211_VHT_NSS_MAX]);
|
|
|
|
|
|
|
|
/* Spectrum management */
|
|
|
|
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_mgmt *mgmt,
|
|
|
|
size_t len);
|
|
|
|
/**
|
|
|
|
* ieee80211_parse_ch_switch_ie - parses channel switch IEs
|
|
|
|
* @sdata: the sdata of the interface which has received the frame
|
|
|
|
* @elems: parsed 802.11 elements received with the frame
|
|
|
|
* @current_band: indicates the current band
|
|
|
|
* @sta_flags: contains information about own capabilities and restrictions
|
|
|
|
* to decide which channel switch announcements can be accepted. Only the
|
|
|
|
* following subset of &enum ieee80211_sta_flags are evaluated:
|
|
|
|
* %IEEE80211_STA_DISABLE_HT, %IEEE80211_STA_DISABLE_VHT,
|
|
|
|
* %IEEE80211_STA_DISABLE_40MHZ, %IEEE80211_STA_DISABLE_80P80MHZ,
|
|
|
|
* %IEEE80211_STA_DISABLE_160MHZ.
|
|
|
|
* @bssid: the currently connected bssid (for reporting)
|
|
|
|
* @csa_ie: parsed 802.11 CSA elements (count, mode, chandef and mesh TTL).
|
|
|
|
*	They are only filled in on success.
|
|
|
|
* Return: 0 on success, <0 on error and >0 if there is nothing to parse.
|
|
|
|
*/
|
|
|
|
int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee802_11_elems *elems,
|
|
|
|
enum nl80211_band current_band,
|
|
|
|
u32 sta_flags, u8 *bssid,
|
|
|
|
struct ieee80211_csa_ie *csa_ie);
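
/*
 * Illustrative sketch (added, not part of the original header): typical
 * handling of the three return-value classes documented above.  The
 * function name is hypothetical; real callers live in the MLME/mesh code.
 */
static inline bool
ieee80211_example_handle_csa_ie(struct ieee80211_sub_if_data *sdata,
				struct ieee802_11_elems *elems,
				enum nl80211_band band, u8 *bssid)
{
	struct ieee80211_csa_ie csa_ie;
	int res;

	res = ieee80211_parse_ch_switch_ie(sdata, elems, band,
					   0 /* no STA restrictions */,
					   bssid, &csa_ie);
	if (res < 0)
		return false;	/* malformed channel switch element(s) */
	if (res > 0)
		return false;	/* nothing to parse, ignore the frame */

	/* res == 0: csa_ie.count, .mode and .chandef are now valid */
	return true;
}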
|
|
|
|
|
|
|
|
/* Suspend/resume and hw reconfiguration */
|
|
|
|
int ieee80211_reconfig(struct ieee80211_local *local);
|
|
|
|
void ieee80211_stop_device(struct ieee80211_local *local);
|
|
|
|
|
|
|
|
int __ieee80211_suspend(struct ieee80211_hw *hw,
|
|
|
|
struct cfg80211_wowlan *wowlan);
|
|
|
|
|
|
|
|
static inline int __ieee80211_resume(struct ieee80211_hw *hw)
|
|
|
|
{
|
|
|
|
struct ieee80211_local *local = hw_to_local(hw);
|
|
|
|
|
|
|
|
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) &&
|
|
|
|
!test_bit(SCAN_COMPLETED, &local->scanning),
|
|
|
|
"%s: resume with hardware scan still in progress\n",
|
|
|
|
wiphy_name(hw->wiphy));
|
|
|
|
|
|
|
|
return ieee80211_reconfig(hw_to_local(hw));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* utility functions/constants */
|
|
|
|
extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
|
|
|
|
int ieee80211_frame_duration(enum nl80211_band band, size_t len,
|
|
|
|
int rate, int erp, int short_preamble,
|
|
|
|
int shift);
|
|
|
|
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
|
|
|
|
bool bss_notify, bool enable_qos);
|
|
|
|
void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sta_info *sta, struct sk_buff *skb);
|
|
|
|
|
|
|
|
void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, int tid,
|
|
|
|
enum nl80211_band band);
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, int tid,
|
|
|
|
enum nl80211_band band)
|
|
|
|
{
|
|
|
|
rcu_read_lock();
|
|
|
|
__ieee80211_tx_skb_tid_band(sdata, skb, tid, band);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb, int tid)
|
|
|
|
{
|
|
|
|
struct ieee80211_chanctx_conf *chanctx_conf;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
|
|
|
if (WARN_ON(!chanctx_conf)) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
kfree_skb(skb);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
__ieee80211_tx_skb_tid_band(sdata, skb, tid,
|
|
|
|
chanctx_conf->def.chan->band);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
|
|
|
|
ieee80211_tx_skb_tid(sdata, skb, 7);
|
|
|
|
}
|
|
|
|
|
|
|
|
u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
|
|
|
|
struct ieee802_11_elems *elems,
|
|
|
|
u64 filter, u32 crc);
|
|
|
|
static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
|
|
|
|
bool action,
|
|
|
|
struct ieee802_11_elems *elems)
|
|
|
|
{
|
|
|
|
ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
extern const int ieee802_1d_to_ac[8];
|
|
|
|
|
|
|
|
static inline int ieee80211_ac_from_tid(int tid)
|
|
|
|
{
|
|
|
|
return ieee802_1d_to_ac[tid & 7];
|
|
|
|
}
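/*
 * Illustrative note (added): the 802.1D priority-to-AC table lives in
 * wme.c; priorities 6 and 7 map to IEEE80211_AC_VO, which is why
 * ieee80211_tx_skb() above hard-codes TID 7 for internal management
 * frames:
 *
 *	ieee80211_ac_from_tid(7) == IEEE80211_AC_VO
 */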
|
|
|
|
|
|
|
|
void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
|
|
|
|
void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
|
|
|
|
void ieee80211_dynamic_ps_timer(unsigned long data);
|
|
|
|
void ieee80211_send_nullfunc(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_sub_if_data *sdata,
|
|
|
|
bool powersave);
|
|
|
|
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_hdr *hdr);
|
|
|
|
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
|
|
|
|
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
|
|
|
|
|
|
|
|
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
|
|
|
|
unsigned long queues,
|
|
|
|
enum queue_stop_reason reason,
|
|
|
|
bool refcounted);
|
|
|
|
void ieee80211_stop_vif_queues(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_sub_if_data *sdata,
|
|
|
|
enum queue_stop_reason reason);
|
|
|
|
void ieee80211_wake_vif_queues(struct ieee80211_local *local,
|
|
|
|
struct ieee80211_sub_if_data *sdata,
|
|
|
|
enum queue_stop_reason reason);
|
|
|
|
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
|
|
|
|
unsigned long queues,
|
|
|
|
enum queue_stop_reason reason,
|
|
|
|
bool refcounted);
|
|
|
|
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
                                    enum queue_stop_reason reason,
                                    bool refcounted);
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
                                    enum queue_stop_reason reason,
                                    bool refcounted);
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
                               struct sk_buff *skb);
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
                                struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
                            struct ieee80211_sub_if_data *sdata, bool drop);
void __ieee80211_flush_queues(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *sdata,
                              unsigned int queues, bool drop);
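/*
 * Workers must not run while we are quiescing or suspended; call this
 * at the top of a worker and bail out when it returns false, e.g.:
 *
 *	if (!ieee80211_can_run_worker(local))
 *		return;
 */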
static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
{
        /*
         * If quiescing is set, we are racing with __ieee80211_suspend.
         * __ieee80211_suspend flushes the workers after setting quiescing,
         * and we check quiescing / suspended before enqueueing new workers.
         * We should abort the worker to avoid the races below.
         */
        if (local->quiescing)
                return false;

        /*
         * We might already be suspended if the following scenario occurs:
         *
         *	__ieee80211_suspend		Control path
         *
         *	if (local->quiescing)
         *		return;
         *	local->quiescing = true;
         *	flush_workqueue();
         *					queue_work(...);
         *	local->suspended = true;
         *	local->quiescing = false;
         *					worker starts running...
         */
        if (local->suspended)
                return false;

        return true;
}
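/*
 * Intermediate TX queues (struct txq_info): per-station/per-TID software
 * queues backed by the fq/codel machinery from <net/fq.h>, used when the
 * driver opts in to mac80211-managed TX queueing.
 */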
int ieee80211_txq_setup_flows(struct ieee80211_local *local);
void ieee80211_txq_teardown_flows(struct ieee80211_local *local);
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
                        struct sta_info *sta,
                        struct txq_info *txq, int tid);
void ieee80211_txq_purge(struct ieee80211_local *local,
                         struct txq_info *txqi);
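/* Management frame construction and transmission helpers. */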
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
                         const u8 *extra, size_t extra_len, const u8 *bssid,
                         const u8 *da, const u8 *key, u8 key_len, u8 key_idx,
                         u32 tx_flags);
void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
                                    const u8 *bssid, u16 stype, u16 reason,
                                    bool send_frame, u8 *frame_buf);
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                             size_t buffer_len,
                             struct ieee80211_scan_ies *ie_desc,
                             const u8 *ie, size_t ie_len,
                             u8 bands_used, u32 *rate_masks,
                             struct cfg80211_chan_def *chandef);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          const u8 *src, const u8 *dst,
                                          u32 ratemask,
                                          struct ieee80211_channel *chan,
                                          const u8 *ssid, size_t ssid_len,
                                          const u8 *ie, size_t ie_len,
                                          bool directed);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata,
                              const u8 *src, const u8 *dst,
                              const u8 *ssid, size_t ssid_len,
                              const u8 *ie, size_t ie_len,
                              u32 ratemask, bool directed, u32 tx_flags,
                              struct ieee80211_channel *channel, bool scan);

u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            struct ieee802_11_elems *elems,
                            enum nl80211_band band, u32 *basic_rates);
int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata,
                                 enum ieee80211_smps_mode smps_mode);
int __ieee80211_request_smps_ap(struct ieee80211_sub_if_data *sdata,
                                enum ieee80211_smps_mode smps_mode);
void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata);
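/*
 * IE construction helpers: each ieee80211_ie_build_*() writes one element
 * at @pos and returns the position right after it, so calls can be chained,
 * e.g. pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, cap);
 */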
size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                              u16 cap);
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
                               const struct cfg80211_chan_def *chandef,
                               u16 prot_mode, bool rifs_mode);
void ieee80211_ie_build_wide_bw_cs(u8 *pos,
                                   const struct cfg80211_chan_def *chandef);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                               u32 cap);
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
                                const struct cfg80211_chan_def *chandef);
int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
                             const struct ieee80211_supported_band *sband,
                             const u8 *srates, int srates_len, u32 *rates);
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb, bool need_basic,
                            enum nl80211_band band);
int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                                struct sk_buff *skb, bool need_basic,
                                enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
                               struct cfg80211_chan_def *chandef);
bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
                                struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
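/*
 * Channel context assignment for an interface: ieee80211_vif_use_channel()
 * binds the vif to a (possibly shared) context for the given chandef, a
 * future context can be prepared with ieee80211_vif_reserve_chanctx() and
 * switched to with ieee80211_vif_use_reserved_context(), and the context
 * is dropped again with ieee80211_vif_release_channel().
 */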
int __must_check
ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
int __must_check
ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
                              const struct cfg80211_chan_def *chandef,
                              enum ieee80211_chanctx_mode mode,
                              bool radar_required);
int __must_check
ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata);
int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);

int __must_check
ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
                               u32 *changed);
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                         bool clear);
int ieee80211_chanctx_refcount(struct ieee80211_local *local,
                               struct ieee80211_chanctx *ctx);

void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                                      struct ieee80211_chanctx *ctx);
bool ieee80211_is_radar_required(struct ieee80211_local *local);
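/* DFS: channel availability check (CAC) timers and radar detection. */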
void ieee80211_dfs_cac_timer(unsigned long data);
void ieee80211_dfs_cac_timer_work(struct work_struct *work);
void ieee80211_dfs_cac_cancel(struct ieee80211_local *local);
void ieee80211_dfs_radar_detected_work(struct work_struct *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
                              struct cfg80211_csa_settings *csa_settings);
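/*
 * Cipher scheme support: validation of driver-provided cipher schemes and
 * lookup of the scheme and required headroom for a cipher/interface type.
 */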
bool ieee80211_cs_valid(const struct ieee80211_cipher_scheme *cs);
bool ieee80211_cs_list_valid(const struct ieee80211_cipher_scheme *cs, int n);
const struct ieee80211_cipher_scheme *
ieee80211_cs_get(struct ieee80211_local *local, u32 cipher,
                 enum nl80211_iftype iftype);
int ieee80211_cs_headroom(struct ieee80211_local *local,
                          struct cfg80211_crypto_settings *crypto,
                          enum nl80211_iftype iftype);
void ieee80211_recalc_dtim(struct ieee80211_local *local,
                           struct ieee80211_sub_if_data *sdata);
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                                 const struct cfg80211_chan_def *chandef,
                                 enum ieee80211_chanctx_mode chanmode,
                                 u8 radar_detect);
int ieee80211_max_num_channels(struct ieee80211_local *local);
enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
                                       struct ieee80211_chanctx *ctx);
/* TDLS */
int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, u8 action_code, u8 dialog_token,
                        u16 status_code, u32 peer_capability,
                        bool initiator, const u8 *extra_ies,
                        size_t extra_ies_len);
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper);
void ieee80211_tdls_peer_del_work(struct work_struct *wk);
int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                  const u8 *addr, u8 oper_class,
                                  struct cfg80211_chan_def *chandef);
void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
                                          struct net_device *dev,
                                          const u8 *addr);
void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
void ieee80211_tdls_chsw_work(struct work_struct *wk);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
                                      const u8 *peer, u16 reason);
const char *ieee80211_get_reason_code_string(u16 reason_code);
extern const struct ethtool_ops ieee80211_ethtool_ops;
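/*
 * With CONFIG_MAC80211_NOINLINE, functions marked debug_noinline are kept
 * out of line so they stay visible in backtraces while debugging.
 */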
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
#else
#define debug_noinline
#endif

#endif /* IEEE80211_I_H */